[kernel] r12347 - in people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian: . config-update patches-update/features/all/xen patches-update/series

Bastian Blank waldi@alioth.debian.org
Sat Oct 18 14:50:00 UTC 2008


Author: waldi
Date: Sat Oct 18 14:44:52 2008
New Revision: 12347

Log:
* debian/changelog: Update.
* debian/config-update/defines: Set abiname to xen-3.3-1.
* debian/patches-update/series/1-extra: Update patches.
* debian/patches-update/series/13etch4-extra: Remove.
* debian/patches-update/features/all/xen/console-hvc-overtake.patch,
  debian/patches-update/features/all/xen/remove-4gb-warning.patch: Update.
* debian/patches-update/features/all/xen/xen-748f324a4b2d62d89fe40c4aa52861977e1a2cae.patch:
  Add.
* debian/patches-update/features/all/xen/xen-3.1-15467.patch: Remove.
* debian/rules.real: Set correct location of vmlinuz file.
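
The last item follows from the patch refresh: the xen-3.1 patch built the
compressed image via its own arch/i386/boot-xen/Makefile, while the 3.3
patch hooks into the regular i386 boot Makefile, so the image moves from
arch/i386/boot-xen/vmlinuz to arch/i386/boot/vmlinuz and rules.real has to
pick it up there. Compare the two rules (abridged from the removed and
added hunks further down):

    # old, in the dropped arch/i386/boot-xen/Makefile:
    vmlinuz: vmlinux-stripped FORCE
            $(call if_changed,gzip)

    # new, added to arch/i386/boot/Makefile:
    $(obj)/vmlinuz: $(obj)/vmlinux-stripped FORCE
            $(call if_changed,gzip)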


Added:
   people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/patches-update/features/all/xen/xen-748f324a4b2d62d89fe40c4aa52861977e1a2cae.patch   (contents, props changed)
      - copied, changed from r12345, /people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/patches-update/features/all/xen/xen-3.1-15467.patch
Removed:
   people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/patches-update/features/all/xen/xen-3.1-15467.patch
   people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/patches-update/series/13etch4-extra
Modified:
   people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/changelog
   people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/config-update/defines
   people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/patches-update/features/all/xen/console-hvc-overtake.patch
   people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/patches-update/features/all/xen/remove-4gb-warning.patch
   people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/patches-update/series/1-extra
   people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/rules.real

Modified: people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/changelog
==============================================================================
--- people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/changelog	(original)
+++ people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/changelog	Sat Oct 18 14:44:52 2008
@@ -1,10 +1,11 @@
-linux-2.6.18-xen-3 (2.6.18.dfsg.1-23+xen.1) kernel-waldi-xen-extra; urgency=low
+linux-2.6.18-xen-3.3 (2.6.18.dfsg.1-23+xen.1) kernel-waldi-xen-extra; urgency=low
 
-  * Update Xen patch to 3.1 branch, 3.1.1 release.
-  * Allow overtaking of hvc for the console.
+  * Update Xen patch to revision 748f324a4b2d62d89fe40c4aa52861977e1a2cae of
+    the linux-2.6.18-xen repository, 3.3 release.
+  * Overtake hvc console by default.
   * Remove 4gb segments warning completely.
 
- -- Bastian Blank <waldi@debian.org>  Fri, 17 Oct 2008 15:12:44 +0200
+ -- Bastian Blank <waldi@debian.org>  Sat, 18 Oct 2008 11:14:59 +0200
 
 linux-2.6 (2.6.18.dfsg.1-23) stable; urgency=high
 

Modified: people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/config-update/defines
==============================================================================
--- people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/config-update/defines	(original)
+++ people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/config-update/defines	Sat Oct 18 14:44:52 2008
@@ -1,5 +1,5 @@
 [abi]
-abiname: xen-3.1-2
+abiname: xen-3.3-1
 
 [base]
 arches:

Modified: people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/patches-update/features/all/xen/console-hvc-overtake.patch
==============================================================================
--- people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/patches-update/features/all/xen/console-hvc-overtake.patch	(original)
+++ people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/patches-update/features/all/xen/console-hvc-overtake.patch	Sat Oct 18 14:44:52 2008
@@ -6,16 +6,18 @@
   *  'xencons=tty'  [XC_TTY]:     Console attached to '/dev/tty[0-9]+'.
   *  'xencons=ttyS' [XC_SERIAL]:  Console attached to '/dev/ttyS[0-9]+'.
   *  'xencons=xvc'  [XC_XVC]:     Console attached to '/dev/xvc0'.
+- *  default:                     XC_XVC
 + *  'xencons=hvc'  [XC_HVC]:     Console attached to '/dev/hvc0'.
-  *  default:                     DOM0 -> XC_SERIAL ; all others -> XC_TTY.
++ *  default:                     XC_HVC
   * 
   * NB. In mode XC_TTY, we create dummy consoles for tty2-63. This suppresses
   * warnings from standard distro startup scripts.
   */
  static enum {
 -	XC_OFF, XC_TTY, XC_SERIAL, XC_XVC
+-} xc_mode = XC_XVC;
 +	XC_OFF, XC_TTY, XC_SERIAL, XC_XVC, XC_HVC
- } xc_mode;
++} xc_mode = XC_HVC;
  static int xc_num = -1;
  
  /* /dev/xvc0 device number allocated by lanana.org. */
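
With the refreshed patch applied, the mode selection in the Xen console
driver ends up as (reconstructed from the hunks above):

    static enum {
            XC_OFF, XC_TTY, XC_SERIAL, XC_XVC, XC_HVC
    } xc_mode = XC_HVC;

That is, the kernel takes over /dev/hvc0 by default; the old modes remain
selectable at boot time via xencons=xvc, xencons=tty or xencons=ttyS, as
listed in the comment block above.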

Modified: people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/patches-update/features/all/xen/remove-4gb-warning.patch
==============================================================================
--- people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/patches-update/features/all/xen/remove-4gb-warning.patch	(original)
+++ people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/patches-update/features/all/xen/remove-4gb-warning.patch	Sat Oct 18 14:44:52 2008
@@ -1,7 +1,7 @@
 --- a/arch/i386/kernel/Makefile
 +++ b/arch/i386/kernel/Makefile
 @@ -94,1 +94,0 @@
--obj-y += fixup.o
+-obj-$(CONFIG_XEN)		+= fixup.o
 --- a/arch/i386/kernel/entry-xen.S
 +++ b/arch/i386/kernel/entry-xen.S
 @@ -1205,7 +1205,0 @@

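This refresh tracks the new Xen patch, which now adds fixup.o guarded by
CONFIG_XEN instead of unconditionally (see the arch/i386/kernel/Makefile
hunk in the copied patch below), so the line this patch deletes becomes:

    obj-$(CONFIG_XEN)		+= fixup.o
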
Copied: people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/patches-update/features/all/xen/xen-748f324a4b2d62d89fe40c4aa52861977e1a2cae.patch (from r12345, /people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/patches-update/features/all/xen/xen-3.1-15467.patch)
==============================================================================
--- /people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/patches-update/features/all/xen/xen-3.1-15467.patch	(original)
+++ people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/patches-update/features/all/xen/xen-748f324a4b2d62d89fe40c4aa52861977e1a2cae.patch	Sat Oct 18 14:44:52 2008
@@ -1,32 +1,12 @@
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/boot-xen/Makefile tmp-linux-2.6-xen.patch/arch/i386/boot-xen/Makefile
---- pristine-linux-2.6.18.2/arch/i386/boot-xen/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/boot-xen/Makefile	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,21 @@
-+
-+OBJCOPYFLAGS := -g --strip-unneeded
-+
-+vmlinuz: vmlinux-stripped FORCE
-+	$(call if_changed,gzip)
-+
-+vmlinux-stripped: vmlinux FORCE
-+	$(call if_changed,objcopy)
-+
-+INSTALL_ROOT := $(patsubst %/boot,%,$(INSTALL_PATH))
-+
-+XINSTALL_NAME ?= $(KERNELRELEASE)
-+install:
-+	mkdir -p $(INSTALL_ROOT)/boot
-+	ln -f -s vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX) $(INSTALL_ROOT)/boot/vmlinuz-$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(XENGUEST)$(INSTALL_SUFFIX)
-+	rm -f $(INSTALL_ROOT)/boot/vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
-+	install -m0644 vmlinuz $(INSTALL_ROOT)/boot/vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
-+	install -m0644 vmlinux $(INSTALL_ROOT)/boot/vmlinux-syms-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
-+	install -m0664 .config $(INSTALL_ROOT)/boot/config-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
-+	install -m0664 System.map $(INSTALL_ROOT)/boot/System.map-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
-+	ln -f -s vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX) $(INSTALL_ROOT)/boot/vmlinuz-$(VERSION).$(PATCHLEVEL)$(XENGUEST)$(INSTALL_SUFFIX)
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/Kconfig tmp-linux-2.6-xen.patch/arch/i386/Kconfig
---- pristine-linux-2.6.18.2/arch/i386/Kconfig	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/Kconfig	2007-07-30 16:35:11.000000000 +0200
-@@ -16,6 +16,7 @@ config X86_32
+diff -r d894e36cfc30 -r 0aa021803deb .hgtags
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/.hgtags	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1 @@
++9e4b868027c659a2a3221eba365f3be782fde4a6 xen-3.2.0
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/Kconfig
+--- a/arch/i386/Kconfig	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/Kconfig	Wed Sep 10 10:54:08 2008 +0100
+@@ -16,6 +16,7 @@
  
  config GENERIC_TIME
  	bool
@@ -34,23 +14,25 @@
  	default y
  
  config LOCKDEP_SUPPORT
-@@ -103,6 +104,15 @@ config X86_PC
+@@ -102,6 +103,17 @@
+ 	bool "PC-compatible"
  	help
  	  Choose this option if your computer is a standard PC or compatible.
- 
++
 +config X86_XEN
 +	bool "Xen-compatible"
++	select XEN
++	select X86_PAE
 +	select X86_UP_APIC if !SMP && XEN_PRIVILEGED_GUEST
 +	select X86_UP_IOAPIC if !SMP && XEN_PRIVILEGED_GUEST
 +	select SWIOTLB
 +	help
 +	  Choose this option if you plan to run this kernel on top of the
 +	  Xen Hypervisor.
-+
+ 
  config X86_ELAN
  	bool "AMD Elan"
- 	help
-@@ -213,6 +223,7 @@ source "arch/i386/Kconfig.cpu"
+@@ -213,6 +225,7 @@
  
  config HPET_TIMER
  	bool "HPET Timer Support"
@@ -58,7 +40,7 @@
  	help
  	  This enables the use of the HPET for the kernel's internal timer.
  	  HPET is the next generation timer replacing legacy 8254s.
-@@ -263,7 +274,7 @@ source "kernel/Kconfig.preempt"
+@@ -263,7 +276,7 @@
  
  config X86_UP_APIC
  	bool "Local APIC support on uniprocessors"
@@ -67,7 +49,7 @@
  	help
  	  A local APIC (Advanced Programmable Interrupt Controller) is an
  	  integrated interrupt controller in the CPU. If you have a single-CPU
-@@ -288,12 +299,12 @@ config X86_UP_IOAPIC
+@@ -288,12 +301,12 @@
  
  config X86_LOCAL_APIC
  	bool
@@ -82,7 +64,7 @@
  	default y
  
  config X86_VISWS_APIC
-@@ -303,7 +314,7 @@ config X86_VISWS_APIC
+@@ -303,7 +316,7 @@
  
  config X86_MCE
  	bool "Machine Check Exception"
@@ -91,7 +73,16 @@
  	---help---
  	  Machine Check Exception support allows the processor to notify the
  	  kernel if it detects a problem (e.g. overheating, component failure).
-@@ -402,6 +413,7 @@ config X86_REBOOTFIXUPS
+@@ -384,7 +397,7 @@
+ 
+ config X86_REBOOTFIXUPS
+ 	bool "Enable X86 board specific fixups for reboot"
+-	depends on X86
++	depends on !X86_XEN
+ 	default n
+ 	---help---
+ 	  This enables chipset and/or board specific fixups to be done
+@@ -402,6 +415,7 @@
  
  config MICROCODE
  	tristate "/dev/cpu/microcode - Intel IA32 CPU microcode support"
@@ -99,26 +90,18 @@
  	---help---
  	  If you say Y here and also to "/dev file system support" in the
  	  'File systems' section, you will be able to update the microcode on
-@@ -419,6 +431,7 @@ config MICROCODE
- 
- config X86_MSR
- 	tristate "/dev/cpu/*/msr - Model-specific register support"
-+	depends on !X86_XEN
- 	help
- 	  This device gives privileged processes access to the x86
- 	  Model-Specific Registers (MSRs).  It is a character device with
-@@ -434,6 +447,10 @@ config X86_CPUID
+@@ -433,6 +447,10 @@
+ 	  be executed on a specific processor.  It is a character device
  	  with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
  	  /dev/cpu/31/cpuid.
- 
++
 +config SWIOTLB
 +	bool
 +	default n
-+
+ 
  source "drivers/firmware/Kconfig"
  
- choice
-@@ -616,6 +633,7 @@ config HIGHPTE
+@@ -616,6 +634,7 @@
  
  config MATH_EMULATION
  	bool "Math emulation"
@@ -126,16 +109,15 @@
  	---help---
  	  Linux can emulate a math coprocessor (used for floating point
  	  operations) if you don't have one. 486DX and Pentium processors have
-@@ -641,6 +659,8 @@ config MATH_EMULATION
+@@ -641,6 +660,7 @@
  
  config MTRR
  	bool "MTRR (Memory Type Range Register) support"
 +	depends on !XEN_UNPRIVILEGED_GUEST
-+	default y if X86_XEN
  	---help---
  	  On Intel P6 family processors (Pentium Pro, Pentium II and later)
  	  the Memory Type Range Registers (MTRRs) may be used to control
-@@ -675,7 +695,7 @@ config MTRR
+@@ -675,7 +695,7 @@
  
  config EFI
  	bool "Boot from EFI support"
@@ -144,7 +126,7 @@
  	default n
  	---help---
  	This enables the the kernel to boot on EFI platforms using
-@@ -693,7 +713,7 @@ config EFI
+@@ -693,7 +713,7 @@
  
  config IRQBALANCE
   	bool "Enable kernel irq balancing"
@@ -153,7 +135,7 @@
  	default y
  	help
   	  The default yes will allow the kernel to do irq load balancing.
-@@ -741,7 +761,7 @@ source kernel/Kconfig.hz
+@@ -741,7 +761,7 @@
  
  config KEXEC
  	bool "kexec system call (EXPERIMENTAL)"
@@ -162,7 +144,15 @@
  	help
  	  kexec is a system call that implements the ability to shutdown your
  	  current kernel, and to start another kernel.  It is like a reboot
-@@ -793,6 +813,7 @@ config HOTPLUG_CPU
+@@ -760,6 +780,7 @@
+ 	bool "kernel crash dumps (EXPERIMENTAL)"
+ 	depends on EXPERIMENTAL
+ 	depends on HIGHMEM
++	depends on !XEN
+ 	help
+ 	  Generate crash dump after being started by kexec.
+ 
+@@ -793,6 +814,7 @@
  
  config COMPAT_VDSO
  	bool "Compat VDSO support"
@@ -170,16 +160,15 @@
  	default y
  	help
  	  Map the VDSO to the predictable old-style address too.
-@@ -810,18 +831,20 @@ config ARCH_ENABLE_MEMORY_HOTPLUG
+@@ -810,18 +832,18 @@
  	depends on HIGHMEM
  
  menu "Power management options (ACPI, APM)"
 -	depends on !X86_VOYAGER
 +	depends on !(X86_VOYAGER || XEN_UNPRIVILEGED_GUEST)
  
-+if !X86_XEN
- source kernel/power/Kconfig
-+endif
+-source kernel/power/Kconfig
++source "kernel/power/Kconfig"
  
  source "drivers/acpi/Kconfig"
  
@@ -194,7 +183,7 @@
  	---help---
  	  APM is a BIOS specification for saving power using several different
  	  techniques. This is mostly useful for battery powered laptops with
-@@ -1006,6 +1029,7 @@ choice
+@@ -1006,12 +1028,20 @@
  
  config PCI_GOBIOS
  	bool "BIOS"
@@ -202,21 +191,20 @@
  
  config PCI_GOMMCONFIG
  	bool "MMConfig"
-@@ -1013,6 +1037,13 @@ config PCI_GOMMCONFIG
+ 
  config PCI_GODIRECT
  	bool "Direct"
- 
++
 +config PCI_GOXEN_FE
 +	bool "Xen PCI Frontend"
 +	depends on X86_XEN
 +	help
 +	  The PCI device frontend driver allows the kernel to import arbitrary
 +	  PCI devices from a PCI backend to support PCI driver domains.
-+
+ 
  config PCI_GOANY
  	bool "Any"
- 
-@@ -1020,7 +1051,7 @@ endchoice
+@@ -1020,7 +1050,7 @@
  
  config PCI_BIOS
  	bool
@@ -225,13 +213,14 @@
  	default y
  
  config PCI_DIRECT
-@@ -1033,6 +1064,18 @@ config PCI_MMCONFIG
+@@ -1033,6 +1063,19 @@
  	depends on PCI && ACPI && (PCI_GOMMCONFIG || PCI_GOANY)
  	default y
  
 +config XEN_PCIDEV_FRONTEND
 +	bool
 +	depends on PCI && X86_XEN && (PCI_GOXEN_FE || PCI_GOANY)
++	select HOTPLUG
 +	default y
 +
 +config XEN_PCIDEV_FE_DEBUG
@@ -244,7 +233,7 @@
  source "drivers/pci/pcie/Kconfig"
  
  source "drivers/pci/Kconfig"
-@@ -1043,7 +1086,7 @@ config ISA_DMA_API
+@@ -1043,7 +1086,7 @@
  
  config ISA
  	bool "ISA support"
@@ -253,7 +242,7 @@
  	help
  	  Find out whether you have ISA slots on your motherboard.  ISA is the
  	  name of a bus system, i.e. the way the CPU talks to the other stuff
-@@ -1070,7 +1113,7 @@ config EISA
+@@ -1070,7 +1113,7 @@
  source "drivers/eisa/Kconfig"
  
  config MCA
@@ -262,7 +251,7 @@
  	default y if X86_VOYAGER
  	help
  	  MicroChannel Architecture is found in some IBM PS/2 machines and
-@@ -1146,6 +1189,8 @@ source "security/Kconfig"
+@@ -1146,6 +1189,8 @@
  
  source "crypto/Kconfig"
  
@@ -271,7 +260,7 @@
  source "lib/Kconfig"
  
  #
-@@ -1171,7 +1216,7 @@ config X86_SMP
+@@ -1171,7 +1216,7 @@
  
  config X86_HT
  	bool
@@ -280,10 +269,13 @@
  	default y
  
  config X86_BIOS_REBOOT
-@@ -1184,6 +1229,16 @@ config X86_TRAMPOLINE
+@@ -1182,6 +1227,17 @@
+ config X86_TRAMPOLINE
+ 	bool
  	depends on X86_SMP || (X86_VOYAGER && SMP)
- 	default y
- 
++	depends on !XEN
++	default y
++
 +config X86_NO_TSS
 +	bool
 +	depends on X86_XEN
@@ -292,23 +284,13 @@
 +config X86_NO_IDT
 +	bool
 +	depends on X86_XEN
-+	default y
-+
- config KTIME_SCALAR
- 	bool
  	default y
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/Kconfig.cpu tmp-linux-2.6-xen.patch/arch/i386/Kconfig.cpu
---- pristine-linux-2.6.18.2/arch/i386/Kconfig.cpu	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/Kconfig.cpu	2007-07-30 16:35:11.000000000 +0200
-@@ -7,7 +7,6 @@ choice
- 
- config M386
- 	bool "386"
--	depends on !UML
- 	---help---
- 	  This is the processor type of your CPU. This information is used for
- 	  optimizing purposes. In order to compile a kernel that can run on
-@@ -252,7 +251,7 @@ config X86_PPRO_FENCE
+ 
+ config KTIME_SCALAR
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/Kconfig.cpu
+--- a/arch/i386/Kconfig.cpu	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/Kconfig.cpu	Wed Sep 10 10:54:08 2008 +0100
+@@ -252,7 +252,7 @@
  
  config X86_F00F_BUG
  	bool
@@ -317,26 +299,17 @@
  	default y
  
  config X86_WP_WORKS_OK
-@@ -302,7 +301,7 @@ config X86_USE_PPRO_CHECKSUM
- 
- config X86_USE_3DNOW
- 	bool
--	depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML
-+	depends on MCYRIXIII || MK7 || MGEODE_LX
- 	default y
- 
- config X86_OOSTORE
-@@ -312,5 +311,5 @@ config X86_OOSTORE
+@@ -312,5 +312,5 @@
  
  config X86_TSC
  	bool
 -	depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1 || MGEODE_LX) && !X86_NUMAQ
 +	depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1 || MGEODE_LX) && !X86_NUMAQ && !X86_XEN
  	default y
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/Kconfig.debug tmp-linux-2.6-xen.patch/arch/i386/Kconfig.debug
---- pristine-linux-2.6.18.2/arch/i386/Kconfig.debug	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/Kconfig.debug	2007-07-30 16:35:11.000000000 +0200
-@@ -79,6 +79,7 @@ config X86_MPPARSE
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/Kconfig.debug
+--- a/arch/i386/Kconfig.debug	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/Kconfig.debug	Wed Sep 10 10:54:08 2008 +0100
+@@ -79,6 +79,7 @@
  config DOUBLEFAULT
  	default y
  	bool "Enable doublefault exception handler" if EMBEDDED
@@ -344,9 +317,112 @@
  	help
            This option allows trapping of rare doublefault exceptions that
            would otherwise cause a system to silently reboot. Disabling this
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/acpi/boot-xen.c tmp-linux-2.6-xen.patch/arch/i386/kernel/acpi/boot-xen.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/acpi/boot-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/acpi/boot-xen.c	2007-07-30 16:35:11.000000000 +0200
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/Makefile
+--- a/arch/i386/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -71,6 +71,10 @@
+ mflags-$(CONFIG_X86_SUMMIT) := -Iinclude/asm-i386/mach-summit
+ mcore-$(CONFIG_X86_SUMMIT)  := mach-default
+ 
++# Xen subarch support
++mflags-$(CONFIG_X86_XEN)	:= -Iinclude/asm-i386/mach-xen
++mcore-$(CONFIG_X86_XEN)		:= mach-xen
++
+ # generic subarchitecture
+ mflags-$(CONFIG_X86_GENERICARCH) := -Iinclude/asm-i386/mach-generic
+ mcore-$(CONFIG_X86_GENERICARCH) := mach-default
+@@ -102,9 +106,20 @@
+ 
+ boot := arch/i386/boot
+ 
+-PHONY += zImage bzImage compressed zlilo bzlilo \
++PHONY += zImage bzImage vmlinuz compressed zlilo bzlilo \
+          zdisk bzdisk fdimage fdimage144 fdimage288 isoimage install
+ 
++ifdef CONFIG_XEN
++CPPFLAGS := -D__XEN_INTERFACE_VERSION__=$(CONFIG_XEN_INTERFACE_VERSION) \
++	-Iinclude$(if $(KBUILD_SRC),2)/asm/mach-xen $(CPPFLAGS)
++all: vmlinuz
++
++# KBUILD_IMAGE specifies the target image being built
++KBUILD_IMAGE := $(boot)/vmlinuz
++
++vmlinuz: vmlinux
++	$(Q)$(MAKE) $(build)=$(boot) $(KBUILD_IMAGE)
++else
+ all: bzImage
+ 
+ # KBUILD_IMAGE specify target image being built
+@@ -124,6 +139,7 @@
+ 
+ fdimage fdimage144 fdimage288 isoimage: vmlinux
+ 	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@
++endif
+ 
+ install:
+ 	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/boot/Makefile
+--- a/arch/i386/boot/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/boot/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -26,7 +26,7 @@
+ #RAMDISK := -DRAMDISK=512
+ 
+ targets		:= vmlinux.bin bootsect bootsect.o \
+-		   setup setup.o zImage bzImage
++		   setup setup.o zImage bzImage vmlinuz vmlinux-stripped
+ subdir- 	:= compressed
+ 
+ hostprogs-y	:= tools/build
+@@ -133,5 +133,13 @@
+ 	cp System.map $(INSTALL_PATH)/
+ 	if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi
+ 
++$(obj)/vmlinuz: $(obj)/vmlinux-stripped FORCE
++	$(call if_changed,gzip)
++	@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
++
++$(obj)/vmlinux-stripped: OBJCOPYFLAGS := -g --strip-unneeded
++$(obj)/vmlinux-stripped: vmlinux FORCE
++	$(call if_changed,objcopy)
++
+ install:
+ 	sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)"
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/Makefile
+--- a/arch/i386/kernel/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/kernel/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -43,6 +43,7 @@
+ EXTRA_AFLAGS   := -traditional
+ 
+ obj-$(CONFIG_SCx200)		+= scx200.o
++obj-$(CONFIG_XEN)		+= fixup.o
+ 
+ # vsyscall.o contains the vsyscall DSO images as __initdata.
+ # We must build both images before we can assemble it.
+@@ -80,5 +81,8 @@
+ 			$(obj)/vsyscall-sysenter.o $(obj)/vsyscall-note.o FORCE
+ 	$(call if_changed,syscall)
+ 
++early_printk-y            += ../../x86_64/kernel/early_printk.o
+ k8-y                      += ../../x86_64/kernel/k8.o
+ 
++disabled-obj-$(CONFIG_XEN) := i8259.o reboot.o smpboot.o
++%/head.o %/head.s: $(if $(CONFIG_XEN),EXTRA_AFLAGS,dummy) :=
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/acpi/Makefile
+--- a/arch/i386/kernel/acpi/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/kernel/acpi/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -4,5 +4,9 @@
+ 
+ ifneq ($(CONFIG_ACPI_PROCESSOR),)
+ obj-y				+= cstate.o processor.o
++ifneq ($(CONFIG_PROCESSOR_EXTERNAL_CONTROL),)
++obj-$(CONFIG_XEN)		+= processor_extcntl_xen.o
++endif
+ endif
+ 
++disabled-obj-$(CONFIG_XEN)	:= cstate.o wakeup.o
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/acpi/boot-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/acpi/boot-xen.c	Wed Sep 10 10:54:08 2008 +0100
 @@ -0,0 +1,1168 @@
 +/*
 + *  boot.c - Architecture-Specific Low-Level ACPI Boot Support
@@ -1516,20 +1592,379 @@
 +
 +	return 0;
 +}
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/acpi/Makefile tmp-linux-2.6-xen.patch/arch/i386/kernel/acpi/Makefile
---- pristine-linux-2.6.18.2/arch/i386/kernel/acpi/Makefile	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/acpi/Makefile	2007-07-30 16:35:11.000000000 +0200
-@@ -6,3 +6,7 @@ ifneq ($(CONFIG_ACPI_PROCESSOR),)
- obj-y				+= cstate.o processor.o
- endif
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/acpi/processor.c
+--- a/arch/i386/kernel/acpi/processor.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/kernel/acpi/processor.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -62,7 +62,18 @@
+ /* Initialize _PDC data based on the CPU vendor */
+ void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
+ {
++#ifdef CONFIG_XEN
++	/* 
++	 * As a work-around, just use cpu0's cpuinfo for all processors.
++	 * Further work is required to expose xen hypervisor interface of
++	 * getting physical cpuinfo to dom0 kernel and then
++	 * arch_acpi_processor_init_pdc can set _PDC parameters according
++	 * to Xen's phys information.
++	 */
++	unsigned int cpu = 0;
++#else
+ 	unsigned int cpu = pr->id;
++#endif /* CONFIG_XEN */
+ 	struct cpuinfo_x86 *c = cpu_data + cpu;
  
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
-+obj-y := $(call cherrypickxen, $(obj-y), $(src))
-+endif
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/apic-xen.c tmp-linux-2.6-xen.patch/arch/i386/kernel/apic-xen.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/apic-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/apic-xen.c	2007-07-30 16:35:11.000000000 +0200
+ 	pr->pdc = NULL;
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/acpi/processor_extcntl_xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/acpi/processor_extcntl_xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,227 @@
++/*
++ * processor_extcntl_xen.c - interface to notify Xen
++ *
++ *  Copyright (C) 2008, Intel corporation
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ *  This program is free software; you can redistribute it and/or modify
++ *  it under the terms of the GNU General Public License as published by
++ *  the Free Software Foundation; either version 2 of the License, or (at
++ *  your option) any later version.
++ *
++ *  This program is distributed in the hope that it will be useful, but
++ *  WITHOUT ANY WARRANTY; without even the implied warranty of
++ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ *  General Public License for more details.
++ *
++ *  You should have received a copy of the GNU General Public License along
++ *  with this program; if not, write to the Free Software Foundation, Inc.,
++ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/types.h>
++#include <linux/acpi.h>
++#include <linux/pm.h>
++#include <linux/cpu.h>
++
++#include <linux/cpufreq.h>
++#include <acpi/processor.h>
++#include <asm/hypercall.h>
++
++static int xen_cx_notifier(struct acpi_processor *pr, int action)
++{
++	int ret, count = 0, i;
++	xen_platform_op_t op = {
++		.cmd			= XENPF_set_processor_pminfo,
++		.interface_version	= XENPF_INTERFACE_VERSION,
++		.u.set_pminfo.id	= pr->acpi_id,
++		.u.set_pminfo.type	= XEN_PM_CX,
++	};
++	struct xen_processor_cx *data, *buf;
++	struct acpi_processor_cx *cx;
++
++	if (action == PROCESSOR_PM_CHANGE)
++		return -EINVAL;
++
++	/* Convert to Xen defined structure and hypercall */
++	buf = kzalloc(pr->power.count * sizeof(struct xen_processor_cx),
++			GFP_KERNEL);
++	if (!buf)
++		return -ENOMEM;
++
++	data = buf;
++	for (i = 1; i <= pr->power.count; i++) {
++		cx = &pr->power.states[i];
++		/* Skip invalid cstate entry */
++		if (!cx->valid)
++			continue;
++
++		data->type = cx->type;
++		data->latency = cx->latency;
++		data->power = cx->power;
++		data->reg.space_id = cx->reg.space_id;
++		data->reg.bit_width = cx->reg.bit_width;
++		data->reg.bit_offset = cx->reg.bit_offset;
++		data->reg.access_size = cx->reg.reserved;
++		data->reg.address = cx->reg.address;
++
++		/* Get dependency relationships */
++		if (cx->csd_count) {
++			printk("Wow! _CSD is found. Not support for now!\n");
++			kfree(buf);
++			return -EINVAL;
++		} else {
++			data->dpcnt = 0;
++			set_xen_guest_handle(data->dp, NULL);
++		}
++
++		data++;
++		count++;
++	}
++
++	if (!count) {
++		printk("No available Cx info for cpu %d\n", pr->acpi_id);
++		kfree(buf);
++		return -EINVAL;
++	}
++
++	op.u.set_pminfo.power.count = count;
++	op.u.set_pminfo.power.flags.bm_control = pr->flags.bm_control;
++	op.u.set_pminfo.power.flags.bm_check = pr->flags.bm_check;
++	op.u.set_pminfo.power.flags.has_cst = pr->flags.has_cst;
++	op.u.set_pminfo.power.flags.power_setup_done = pr->flags.power_setup_done;
++
++	set_xen_guest_handle(op.u.set_pminfo.power.states, buf);
++	ret = HYPERVISOR_platform_op(&op);
++	kfree(buf);
++	return ret;
++}
++
++static void convert_pct_reg(struct xen_pct_register *xpct,
++	struct acpi_pct_register *apct)
++{
++	xpct->descriptor = apct->descriptor;
++	xpct->length     = apct->length;
++	xpct->space_id   = apct->space_id;
++	xpct->bit_width  = apct->bit_width;
++	xpct->bit_offset = apct->bit_offset;
++	xpct->reserved   = apct->reserved;
++	xpct->address    = apct->address;
++}
++
++static void convert_pss_states(struct xen_processor_px *xpss, 
++	struct acpi_processor_px *apss, int state_count)
++{
++	int i;
++	for(i=0; i<state_count; i++) {
++		xpss->core_frequency     = apss->core_frequency;
++		xpss->power              = apss->power;
++		xpss->transition_latency = apss->transition_latency;
++		xpss->bus_master_latency = apss->bus_master_latency;
++		xpss->control            = apss->control;
++		xpss->status             = apss->status;
++		xpss++;
++		apss++;
++	}
++}
++
++static void convert_psd_pack(struct xen_psd_package *xpsd,
++	struct acpi_psd_package *apsd)
++{
++	xpsd->num_entries    = apsd->num_entries;
++	xpsd->revision       = apsd->revision;
++	xpsd->domain         = apsd->domain;
++	xpsd->coord_type     = apsd->coord_type;
++	xpsd->num_processors = apsd->num_processors;
++}
++
++static int xen_px_notifier(struct acpi_processor *pr, int action)
++{
++	int ret;
++	xen_platform_op_t op = {
++		.cmd			= XENPF_set_processor_pminfo,
++		.interface_version	= XENPF_INTERFACE_VERSION,
++		.u.set_pminfo.id	= pr->acpi_id,
++		.u.set_pminfo.type	= XEN_PM_PX,
++	};
++	struct xen_processor_performance *perf;
++	struct xen_processor_px *states = NULL;
++	struct acpi_processor_performance *px;
++	struct acpi_psd_package *pdomain;
++
++	/* leave dynamic ppc handle in the future */
++	if (action == PROCESSOR_PM_CHANGE)
++		return 0;
++
++	perf = &op.u.set_pminfo.perf;
++	px = pr->performance;
++
++	perf->flags = XEN_PX_PPC | 
++		      XEN_PX_PCT | 
++		      XEN_PX_PSS | 
++		      XEN_PX_PSD;
++
++	/* ppc */
++	perf->ppc = pr->performance_platform_limit;
++
++	/* pct */
++	convert_pct_reg(&perf->control_register, &px->control_register);
++	convert_pct_reg(&perf->status_register, &px->status_register);
++
++	/* pss */
++	perf->state_count = px->state_count;
++	states = kzalloc(px->state_count*sizeof(xen_processor_px_t),GFP_KERNEL);
++	if (!states)
++		return -ENOMEM;
++	convert_pss_states(states, px->states, px->state_count);
++	set_xen_guest_handle(perf->states, states);
++
++	/* psd */
++	pdomain = &px->domain_info;
++	convert_psd_pack(&perf->domain_info, pdomain);
++	if (perf->domain_info.num_processors) {
++		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
++			perf->shared_type = CPUFREQ_SHARED_TYPE_ALL;
++		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
++			perf->shared_type = CPUFREQ_SHARED_TYPE_ANY;
++		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
++			perf->shared_type = CPUFREQ_SHARED_TYPE_HW;
++	} else
++		perf->shared_type = CPUFREQ_SHARED_TYPE_NONE;
++
++	ret = HYPERVISOR_platform_op(&op);
++	kfree(states);
++	return ret;
++}
++
++static int xen_tx_notifier(struct acpi_processor *pr, int action)
++{
++	return -EINVAL;
++}
++static int xen_hotplug_notifier(struct acpi_processor *pr, int event)
++{
++	return -EINVAL;
++}
++
++static struct processor_extcntl_ops xen_extcntl_ops = {
++	.hotplug		= xen_hotplug_notifier,
++};
++
++void arch_acpi_processor_init_extcntl(const struct processor_extcntl_ops **ops)
++{
++	unsigned int pmbits = (xen_start_info->flags & SIF_PM_MASK) >> 8;
++
++	if (pmbits & XEN_PROCESSOR_PM_CX)
++		xen_extcntl_ops.pm_ops[PM_TYPE_IDLE] = xen_cx_notifier;
++	if (pmbits & XEN_PROCESSOR_PM_PX)
++		xen_extcntl_ops.pm_ops[PM_TYPE_PERF] = xen_px_notifier;
++	if (pmbits & XEN_PROCESSOR_PM_TX)
++		xen_extcntl_ops.pm_ops[PM_TYPE_THR] = xen_tx_notifier;
++
++	*ops = &xen_extcntl_ops;
++}
++EXPORT_SYMBOL(arch_acpi_processor_init_extcntl);
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/acpi/sleep-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/acpi/sleep-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,113 @@
++/*
++ * sleep.c - x86-specific ACPI sleep support.
++ *
++ *  Copyright (C) 2001-2003 Patrick Mochel
++ *  Copyright (C) 2001-2003 Pavel Machek <pavel@suse.cz>
++ */
++
++#include <linux/acpi.h>
++#include <linux/bootmem.h>
++#include <linux/dmi.h>
++#include <linux/cpumask.h>
++
++#include <asm/smp.h>
++
++#ifndef CONFIG_ACPI_PV_SLEEP
++/* address in low memory of the wakeup routine. */
++unsigned long acpi_wakeup_address = 0;
++unsigned long acpi_video_flags;
++extern char wakeup_start, wakeup_end;
++
++extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));
++#endif
++
++/**
++ * acpi_save_state_mem - save kernel state
++ *
++ * Create an identity mapped page table and copy the wakeup routine to
++ * low memory.
++ */
++int acpi_save_state_mem(void)
++{
++#ifndef CONFIG_ACPI_PV_SLEEP
++	if (!acpi_wakeup_address)
++		return 1;
++	memcpy((void *)acpi_wakeup_address, &wakeup_start,
++	       &wakeup_end - &wakeup_start);
++	acpi_copy_wakeup_routine(acpi_wakeup_address);
++#endif
++	return 0;
++}
++
++/*
++ * acpi_restore_state - undo effects of acpi_save_state_mem
++ */
++void acpi_restore_state_mem(void)
++{
++}
++
++/**
++ * acpi_reserve_bootmem - do _very_ early ACPI initialisation
++ *
++ * We allocate a page from the first 1MB of memory for the wakeup
++ * routine for when we come back from a sleep state. The
++ * runtime allocator allows specification of <16MB pages, but not
++ * <1MB pages.
++ */
++void __init acpi_reserve_bootmem(void)
++{
++#ifndef CONFIG_ACPI_PV_SLEEP
++	if ((&wakeup_end - &wakeup_start) > PAGE_SIZE) {
++		printk(KERN_ERR
++		       "ACPI: Wakeup code way too big, S3 disabled.\n");
++		return;
++	}
++
++	acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE);
++	if (!acpi_wakeup_address)
++		printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
++#endif
++}
++
++#ifndef CONFIG_ACPI_PV_SLEEP
++static int __init acpi_sleep_setup(char *str)
++{
++	while ((str != NULL) && (*str != '\0')) {
++		if (strncmp(str, "s3_bios", 7) == 0)
++			acpi_video_flags = 1;
++		if (strncmp(str, "s3_mode", 7) == 0)
++			acpi_video_flags |= 2;
++		str = strchr(str, ',');
++		if (str != NULL)
++			str += strspn(str, ", \t");
++	}
++	return 1;
++}
++
++__setup("acpi_sleep=", acpi_sleep_setup);
++
++static __init int reset_videomode_after_s3(struct dmi_system_id *d)
++{
++	acpi_video_flags |= 2;
++	return 0;
++}
++
++static __initdata struct dmi_system_id acpisleep_dmi_table[] = {
++	{			/* Reset video mode after returning from ACPI S3 sleep */
++	 .callback = reset_videomode_after_s3,
++	 .ident = "Toshiba Satellite 4030cdt",
++	 .matches = {
++		     DMI_MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"),
++		     },
++	 },
++	{}
++};
++
++static int __init acpisleep_dmi_init(void)
++{
++	dmi_check_system(acpisleep_dmi_table);
++	return 0;
++}
++
++core_initcall(acpisleep_dmi_init);
++#endif /* CONFIG_ACPI_PV_SLEEP */
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/apic-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/apic-xen.c	Wed Sep 10 10:54:08 2008 +0100
 @@ -0,0 +1,155 @@
 +/*
 + *	Local APIC handling, local APIC timers
@@ -1686,10 +2121,10 @@
 +
 +	return 0;
 +}
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/asm-offsets.c tmp-linux-2.6-xen.patch/arch/i386/kernel/asm-offsets.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/asm-offsets.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/asm-offsets.c	2007-07-30 16:35:11.000000000 +0200
-@@ -66,9 +66,14 @@ void foo(void)
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/asm-offsets.c
+--- a/arch/i386/kernel/asm-offsets.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/kernel/asm-offsets.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -66,9 +66,14 @@
  	OFFSET(pbe_orig_address, pbe, orig_address);
  	OFFSET(pbe_next, pbe, next);
  
@@ -1705,9 +2140,17 @@
  
  	DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
  	DEFINE(VDSO_PRELINK, VDSO_PRELINK);
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/cpu/common-xen.c tmp-linux-2.6-xen.patch/arch/i386/kernel/cpu/common-xen.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/cpu/common-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/cpu/common-xen.c	2007-07-30 16:35:11.000000000 +0200
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/cpu/Makefile
+--- a/arch/i386/kernel/cpu/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/kernel/cpu/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -17,3 +17,4 @@
+ 
+ obj-$(CONFIG_MTRR)	+= 	mtrr/
+ obj-$(CONFIG_CPU_FREQ)	+=	cpufreq/
++
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/cpu/common-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/cpu/common-xen.c	Wed Sep 10 10:54:08 2008 +0100
 @@ -0,0 +1,743 @@
 +#include <linux/init.h>
 +#include <linux/string.h>
@@ -2301,7 +2744,7 @@
 +#endif
 +}
 +
-+void __cpuinit cpu_gdt_init(struct Xgt_desc_struct *gdt_descr)
++static void __cpuinit cpu_gdt_init(const struct Xgt_desc_struct *gdt_descr)
 +{
 +	unsigned long frames[16];
 +	unsigned long va;
@@ -2314,7 +2757,7 @@
 +		make_lowmem_page_readonly(
 +			(void *)va, XENFEAT_writable_descriptor_tables);
 +	}
-+	if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
++	if (HYPERVISOR_set_gdt(frames, (gdt_descr->size + 1) / 8))
 +		BUG();
 +}
 +
@@ -2452,22 +2895,530 @@
 +	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
 +}
 +#endif
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/cpu/Makefile tmp-linux-2.6-xen.patch/arch/i386/kernel/cpu/Makefile
---- pristine-linux-2.6.18.2/arch/i386/kernel/cpu/Makefile	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/cpu/Makefile	2007-07-30 16:35:11.000000000 +0200
-@@ -17,3 +17,8 @@ obj-$(CONFIG_X86_MCE)	+=	mcheck/
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -46,7 +46,7 @@
+ 
+ #define PFX "powernow-k8: "
+ #define BFX PFX "BIOS error: "
+-#define VERSION "version 2.00.00"
++#define VERSION "version 2.20.00"
+ #include "powernow-k8.h"
+ 
+ /* serialize freq changes  */
+@@ -66,36 +66,15 @@
+ 	return 800 + (fid * 100);
+ }
  
- obj-$(CONFIG_MTRR)	+= 	mtrr/
- obj-$(CONFIG_CPU_FREQ)	+=	cpufreq/
+-
+ /* Return a frequency in KHz, given an input fid */
+ static u32 find_khz_freq_from_fid(u32 fid)
+ {
+ 	return 1000 * find_freq_from_fid(fid);
+ }
+ 
+-/* Return a frequency in MHz, given an input fid and did */
+-static u32 find_freq_from_fiddid(u32 fid, u32 did)
++static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data, u32 pstate)
+ {
+-	return 100 * (fid + 0x10) >> did;
+-}
+-
+-static u32 find_khz_freq_from_fiddid(u32 fid, u32 did)
+-{
+-	return 1000 * find_freq_from_fiddid(fid, did);
+-}
+-
+-static u32 find_fid_from_pstate(u32 pstate)
+-{
+-	u32 hi, lo;
+-	rdmsr(MSR_PSTATE_DEF_BASE + pstate, lo, hi);
+-	return lo & HW_PSTATE_FID_MASK;
+-}
+-
+-static u32 find_did_from_pstate(u32 pstate)
+-{
+-	u32 hi, lo;
+-	rdmsr(MSR_PSTATE_DEF_BASE + pstate, lo, hi);
+-	return (lo & HW_PSTATE_DID_MASK) >> HW_PSTATE_DID_SHIFT;
++	return data[pstate].frequency;
+ }
+ 
+ /* Return the vco fid for an input fid
+@@ -139,9 +118,7 @@
+ 	if (cpu_family == CPU_HW_PSTATE) {
+ 		rdmsr(MSR_PSTATE_STATUS, lo, hi);
+ 		i = lo & HW_PSTATE_MASK;
+-		rdmsr(MSR_PSTATE_DEF_BASE + i, lo, hi);
+-		data->currfid = lo & HW_PSTATE_FID_MASK;
+-		data->currdid = (lo & HW_PSTATE_DID_MASK) >> HW_PSTATE_DID_SHIFT;
++		data->currpstate = i;
+ 		return 0;
+ 	}
+ 	do {
+@@ -292,7 +269,7 @@
+ static int transition_pstate(struct powernow_k8_data *data, u32 pstate)
+ {
+ 	wrmsr(MSR_PSTATE_CTRL, pstate, 0);
+-	data->currfid = find_fid_from_pstate(pstate);
++	data->currpstate = pstate;
+ 	return 0;
+ }
+ 
+@@ -738,6 +715,7 @@
+ 
+ 		data->numps = psb->numps;
+ 		dprintk("numpstates: 0x%x\n", data->numps);
++		data->starting_core_affinity = cpumask_of_cpu(0);
+ 		return fill_powernow_table(data, (struct pst_s *)(psb+1), maxvid);
+ 	}
+ 	/*
+@@ -758,15 +736,43 @@
+ #ifdef CONFIG_X86_POWERNOW_K8_ACPI
+ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index)
+ {
+-	if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))
++	if (!data->acpi_data->state_count || (cpu_family == CPU_HW_PSTATE))
+ 		return;
+ 
+-	data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK;
+-	data->rvo = (data->acpi_data.states[index].control >> RVO_SHIFT) & RVO_MASK;
+-	data->exttype = (data->acpi_data.states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
+-	data->plllock = (data->acpi_data.states[index].control >> PLL_L_SHIFT) & PLL_L_MASK;
+-	data->vidmvs = 1 << ((data->acpi_data.states[index].control >> MVS_SHIFT) & MVS_MASK);
+-	data->vstable = (data->acpi_data.states[index].control >> VST_SHIFT) & VST_MASK;
++	data->irt = (data->acpi_data->states[index].control >> IRT_SHIFT) & IRT_MASK;
++	data->rvo = (data->acpi_data->states[index].control >> RVO_SHIFT) & RVO_MASK;
++	data->exttype = (data->acpi_data->states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
++	data->plllock = (data->acpi_data->states[index].control >> PLL_L_SHIFT) & PLL_L_MASK;
++	data->vidmvs = 1 << ((data->acpi_data->states[index].control >> MVS_SHIFT) & MVS_MASK);
++	data->vstable = (data->acpi_data->states[index].control >> VST_SHIFT) & VST_MASK;
++}
 +
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
-+obj-y := $(call cherrypickxen, $(obj-y), $(src))
-+endif
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/cpu/mtrr/main-xen.c tmp-linux-2.6-xen.patch/arch/i386/kernel/cpu/mtrr/main-xen.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/cpu/mtrr/main-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/cpu/mtrr/main-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,197 @@
++static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
++static int preregister_valid = 0;
++
++static int powernow_k8_cpu_preinit_acpi()
++{
++	int i; 
++	struct acpi_processor_performance *data;
++	for_each_possible_cpu(i) {
++		data = kzalloc(sizeof(struct acpi_processor_performance),
++				GFP_KERNEL);
++		if (!data) {
++			int j;
++			for_each_possible_cpu(j) {
++				kfree(acpi_perf_data[j]);
++				acpi_perf_data[j] = NULL;
++			}
++			return -ENODEV;
++		}
++		acpi_perf_data[i] = data;
++	}
++
++	if (acpi_processor_preregister_performance(acpi_perf_data))
++		return -ENODEV;
++	else 
++		preregister_valid = 1;
++	return 0;
+ }
+ 
+ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
+@@ -774,28 +780,29 @@
+ 	struct cpufreq_frequency_table *powernow_table;
+ 	int ret_val;
+ 
+-	if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
++	data->acpi_data = acpi_perf_data[data->cpu];
++	if (acpi_processor_register_performance(data->acpi_data, data->cpu)) {
+ 		dprintk("register performance failed: bad ACPI data\n");
+ 		return -EIO;
+ 	}
+ 
+ 	/* verify the data contained in the ACPI structures */
+-	if (data->acpi_data.state_count <= 1) {
++	if (data->acpi_data->state_count <= 1) {
+ 		dprintk("No ACPI P-States\n");
+ 		goto err_out;
+ 	}
+ 
+-	if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
+-		(data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
++	if ((data->acpi_data->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
++		(data->acpi_data->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+ 		dprintk("Invalid control/status registers (%x - %x)\n",
+-			data->acpi_data.control_register.space_id,
+-			data->acpi_data.status_register.space_id);
++			data->acpi_data->control_register.space_id,
++			data->acpi_data->status_register.space_id);
+ 		goto err_out;
+ 	}
+ 
+ 	/* fill in data->powernow_table */
+ 	powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
+-		* (data->acpi_data.state_count + 1)), GFP_KERNEL);
++		* (data->acpi_data->state_count + 1)), GFP_KERNEL);
+ 	if (!powernow_table) {
+ 		dprintk("powernow_table memory alloc failure\n");
+ 		goto err_out;
+@@ -808,17 +815,32 @@
+ 	if (ret_val)
+ 		goto err_out_mem;
+ 
+-	powernow_table[data->acpi_data.state_count].frequency = CPUFREQ_TABLE_END;
+-	powernow_table[data->acpi_data.state_count].index = 0;
++	powernow_table[data->acpi_data->state_count].frequency = CPUFREQ_TABLE_END;
++	powernow_table[data->acpi_data->state_count].index = 0;
+ 	data->powernow_table = powernow_table;
+ 
+ 	/* fill in data */
+-	data->numps = data->acpi_data.state_count;
++	data->numps = data->acpi_data->state_count;
+ 	print_basics(data);
+ 	powernow_k8_acpi_pst_values(data, 0);
+ 
+ 	/* notify BIOS that we exist */
+ 	acpi_processor_notify_smm(THIS_MODULE);
++
++	/* determine affinity, from ACPI if available */
++	if (preregister_valid) {
++		if ((data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ALL) ||
++		    (data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ANY))
++			data->starting_core_affinity = data->acpi_data->shared_cpu_map;
++		else
++			data->starting_core_affinity = cpumask_of_cpu(data->cpu);
++	} else {
++		/* best guess from family if not */
++		if (cpu_family == CPU_HW_PSTATE)
++			data->starting_core_affinity = cpumask_of_cpu(data->cpu);
++		else
++			data->starting_core_affinity = cpu_core_map[data->cpu];
++	}
+ 
+ 	return 0;
+ 
+@@ -826,10 +848,10 @@
+ 	kfree(powernow_table);
+ 
+ err_out:
+-	acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
++	acpi_processor_unregister_performance(data->acpi_data, data->cpu);
+ 
+-	/* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */
+-	data->acpi_data.state_count = 0;
++	/* data->acpi_data->state_count informs us at ->exit() whether ACPI was used */
++	data->acpi_data->state_count = 0;
+ 
+ 	return -ENODEV;
+ }
+@@ -837,41 +859,23 @@
+ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table)
+ {
+ 	int i;
++	u32 hi = 0, lo = 0;
++	rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo);
++	data->max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
+ 
+-	for (i = 0; i < data->acpi_data.state_count; i++) {
++	for (i = 0; i < data->acpi_data->state_count; i++) {
+ 		u32 index;
+-		u32 hi = 0, lo = 0;
+-		u32 fid;
+-		u32 did;
+ 
+-		index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
+-		if (index > MAX_HW_PSTATE) {
++		index = data->acpi_data->states[i].control & HW_PSTATE_MASK;
++		if (index > data->max_hw_pstate) {
+ 			printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index);
+ 			printk(KERN_ERR PFX "Please report to BIOS manufacturer\n");
+-		}
+-		rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
+-		if (!(hi & HW_PSTATE_VALID_MASK)) {
+-			dprintk("invalid pstate %d, ignoring\n", index);
+-			powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ 			continue;
+ 		}
+ 
+-		fid = lo & HW_PSTATE_FID_MASK;
+-		did = (lo & HW_PSTATE_DID_MASK) >> HW_PSTATE_DID_SHIFT;
++		powernow_table[i].index = index;
++		powernow_table[i].frequency = data->acpi_data->states[i].core_frequency * 1000;
+ 
+-		dprintk("   %d : fid 0x%x, did 0x%x\n", index, fid, did);
+-
+-		powernow_table[i].index = index | (fid << HW_FID_INDEX_SHIFT) | (did << HW_DID_INDEX_SHIFT);
+-
+-		powernow_table[i].frequency = find_khz_freq_from_fiddid(fid, did);
+-
+-		if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) {
+-			printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n",
+-				powernow_table[i].frequency,
+-				(unsigned int) (data->acpi_data.states[i].core_frequency * 1000));
+-			powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+-			continue;
+-		}
+ 	}
+ 	return 0;
+ }
+@@ -880,16 +884,16 @@
+ {
+ 	int i;
+ 	int cntlofreq = 0;
+-	for (i = 0; i < data->acpi_data.state_count; i++) {
++	for (i = 0; i < data->acpi_data->state_count; i++) {
+ 		u32 fid;
+ 		u32 vid;
+ 
+ 		if (data->exttype) {
+-			fid = data->acpi_data.states[i].status & EXT_FID_MASK;
+-			vid = (data->acpi_data.states[i].status >> VID_SHIFT) & EXT_VID_MASK;
++			fid = data->acpi_data->states[i].status & EXT_FID_MASK;
++			vid = (data->acpi_data->states[i].status >> VID_SHIFT) & EXT_VID_MASK;
+ 		} else {
+-			fid = data->acpi_data.states[i].control & FID_MASK;
+-			vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK;
++			fid = data->acpi_data->states[i].control & FID_MASK;
++			vid = (data->acpi_data->states[i].control >> VID_SHIFT) & VID_MASK;
+ 		}
+ 
+ 		dprintk("   %d : fid 0x%x, vid 0x%x\n", i, fid, vid);
+@@ -930,10 +934,10 @@
+ 				cntlofreq = i;
+ 		}
+ 
+-		if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) {
++		if (powernow_table[i].frequency != (data->acpi_data->states[i].core_frequency * 1000)) {
+ 			printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n",
+ 				powernow_table[i].frequency,
+-				(unsigned int) (data->acpi_data.states[i].core_frequency * 1000));
++				(unsigned int) (data->acpi_data->states[i].core_frequency * 1000));
+ 			powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ 			continue;
+ 		}
+@@ -943,14 +947,15 @@
+ 
+ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
+ {
+-	if (data->acpi_data.state_count)
+-		acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
++	if (data->acpi_data->state_count)
++		acpi_processor_unregister_performance(data->acpi_data, data->cpu);
+ }
+ 
+ #else
+ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; }
+ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; }
+ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; }
++static int powernow_k8_cpu_preinit_acpi() { return -ENODEV; }
+ #endif /* CONFIG_X86_POWERNOW_K8_ACPI */
+ 
+ /* Take a frequency, and issue the fid/vid transition command */
+@@ -1012,22 +1017,18 @@
+ /* Take a frequency, and issue the hardware pstate transition command */
+ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned int index)
+ {
+-	u32 fid = 0;
+-	u32 did = 0;
+ 	u32 pstate = 0;
+ 	int res, i;
+ 	struct cpufreq_freqs freqs;
+ 
+ 	dprintk("cpu %d transition to index %u\n", smp_processor_id(), index);
+ 
+-	/* get fid did for hardware pstate transition */
++	/* get MSR index for hardware pstate transition */
+ 	pstate = index & HW_PSTATE_MASK;
+-	if (pstate > MAX_HW_PSTATE)
++	if (pstate > data->max_hw_pstate)
+ 		return 0;
+-	fid = (index & HW_FID_INDEX_MASK) >> HW_FID_INDEX_SHIFT;
+-	did = (index & HW_DID_INDEX_MASK) >> HW_DID_INDEX_SHIFT;
+-	freqs.old = find_khz_freq_from_fiddid(data->currfid, data->currdid);
+-	freqs.new = find_khz_freq_from_fiddid(fid, did);
++	freqs.old = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
++	freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
+ 
+ 	for_each_cpu_mask(i, *(data->available_cores)) {
+ 		freqs.cpu = i;
+@@ -1035,9 +1036,7 @@
+ 	}
+ 
+ 	res = transition_pstate(data, pstate);
+-	data->currfid = find_fid_from_pstate(pstate);
+-	data->currdid = find_did_from_pstate(pstate);
+-	freqs.new = find_khz_freq_from_fiddid(data->currfid, data->currdid);
++	freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
+ 
+ 	for_each_cpu_mask(i, *(data->available_cores)) {
+ 		freqs.cpu = i;
+@@ -1082,10 +1081,7 @@
+ 	if (query_current_values_with_pending_wait(data))
+ 		goto err_out;
+ 
+-	if (cpu_family == CPU_HW_PSTATE)
+-		dprintk("targ: curr fid 0x%x, did 0x%x\n",
+-			data->currfid, data->currvid);
+-	else {
++	if (cpu_family != CPU_HW_PSTATE) {
+ 		dprintk("targ: curr fid 0x%x, vid 0x%x\n",
+ 		data->currfid, data->currvid);
+ 
+@@ -1116,7 +1112,7 @@
+ 	mutex_unlock(&fidvid_mutex);
+ 
+ 	if (cpu_family == CPU_HW_PSTATE)
+-		pol->cur = find_khz_freq_from_fiddid(data->currfid, data->currdid);
++		pol->cur = find_khz_freq_from_pstate(data->powernow_table, newstate);
+ 	else
+ 		pol->cur = find_khz_freq_from_fid(data->currfid);
+ 	ret = 0;
+@@ -1164,7 +1160,7 @@
+ 		 * an UP version, and is deprecated by AMD.
+ 		 */
+ 		if (num_online_cpus() != 1) {
+-			printk(KERN_ERR PFX "MP systems not supported by PSB BIOS structure\n");
++			printk(KERN_ERR PFX "Your BIOS does not provide _PSS objects.  PowerNow! does not work on SMP systems without _PSS objects.  Complain to your BIOS vendor.\n");
+ 			kfree(data);
+ 			return -ENODEV;
+ 		}
+@@ -1204,10 +1200,7 @@
+ 	set_cpus_allowed(current, oldmask);
+ 
+ 	pol->governor = CPUFREQ_DEFAULT_GOVERNOR;
+-	if (cpu_family == CPU_HW_PSTATE)
+-		pol->cpus = cpumask_of_cpu(pol->cpu);
+-	else
+-		pol->cpus = cpu_core_map[pol->cpu];
++	pol->cpus = data->starting_core_affinity;
+ 	data->available_cores = &(pol->cpus);
+ 
+ 	/* Take a crude guess here.
+@@ -1216,7 +1209,7 @@
+ 	    + (3 * (1 << data->irt) * 10)) * 1000;
+ 
+ 	if (cpu_family == CPU_HW_PSTATE)
+-		pol->cur = find_khz_freq_from_fiddid(data->currfid, data->currdid);
++		pol->cur = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
+ 	else
+ 		pol->cur = find_khz_freq_from_fid(data->currfid);
+ 	dprintk("policy current frequency %d kHz\n", pol->cur);
+@@ -1233,8 +1226,7 @@
+ 	cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
+ 
+ 	if (cpu_family == CPU_HW_PSTATE)
+-		dprintk("cpu_init done, current fid 0x%x, did 0x%x\n",
+-			data->currfid, data->currdid);
++		dprintk("cpu_init done, current pstate 0x%x\n", data->currpstate);
+ 	else
+ 		dprintk("cpu_init done, current fid 0x%x, vid 0x%x\n",
+ 			data->currfid, data->currvid);
+@@ -1289,7 +1281,10 @@
+ 	if (query_current_values_with_pending_wait(data))
+ 		goto out;
+ 
+-	khz = find_khz_freq_from_fid(data->currfid);
++	if (cpu_family == CPU_HW_PSTATE)
++		khz = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
++	else
++		khz = find_khz_freq_from_fid(data->currfid);
+ 
+ out:
+ 	set_cpus_allowed(current, oldmask);
+@@ -1323,6 +1318,7 @@
+ 	}
+ 
+ 	if (supported_cpus == num_online_cpus()) {
++		powernow_k8_cpu_preinit_acpi();
+ 		printk(KERN_INFO PFX "Found %d %s "
+ 			"processors (" VERSION ")\n", supported_cpus,
+ 			boot_cpu_data.x86_model_id);
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/cpu/cpufreq/powernow-k8.h
+--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -1,5 +1,5 @@
+ /*
+- *  (c) 2003-2006 Advanced Micro Devices, Inc.
++#*  (c) 2003-2006 Advanced Micro Devices, Inc.
+  *  Your use of this code is subject to the terms and conditions of the
+  *  GNU general public license version 2. See "COPYING" or
+  *  http://www.gnu.org/licenses/gpl.html
+@@ -10,6 +10,7 @@
+ 
+ 	u32 numps;  /* number of p-states */
+ 	u32 batps;  /* number of p-states supported on battery */
++	u32 max_hw_pstate; /* maximum legal hardware pstate */
+ 
+ 	/* these values are constant when the PSB is used to determine
+ 	 * vid/fid pairings, but are modified during the ->target() call
+@@ -21,8 +22,8 @@
+ 	u32 plllock; /* pll lock time, units 1 us */
+         u32 exttype; /* extended interface = 1 */
+ 
+-	/* keep track of the current fid / vid or did */
+-	u32 currvid, currfid, currdid;
++	/* keep track of the current fid / vid or pstate */
++	u32 currvid, currfid, currpstate;
+ 
+ 	/* the powernow_table includes all frequency and vid/fid pairings:
+ 	 * fid are the lower 8 bits of the index, vid are the upper 8 bits.
+@@ -32,12 +33,13 @@
+ #ifdef CONFIG_X86_POWERNOW_K8_ACPI
+ 	/* the acpi table needs to be kept. it's only available if ACPI was
+ 	 * used to determine valid frequency/vid/fid states */
+-	struct acpi_processor_performance acpi_data;
++	struct acpi_processor_performance *acpi_data;
+ #endif
+ 	/* we need to keep track of associated cores, but let cpufreq
+ 	 * handle hotplug events - so just point at cpufreq pol->cpus
+ 	 * structure */
+ 	cpumask_t *available_cores;
++	cpumask_t starting_core_affinity;
+ };
+ 
+ 
+@@ -87,23 +89,14 @@
+ 
+ /* Hardware Pstate _PSS and MSR definitions */
+ #define USE_HW_PSTATE		0x00000080
+-#define HW_PSTATE_FID_MASK 	0x0000003f
+-#define HW_PSTATE_DID_MASK 	0x000001c0
+-#define HW_PSTATE_DID_SHIFT 	6
+-#define HW_PSTATE_MASK 		0x00000007
+-#define HW_PSTATE_VALID_MASK 	0x80000000
+-#define HW_FID_INDEX_SHIFT	8
+-#define HW_FID_INDEX_MASK	0x0000ff00
+-#define HW_DID_INDEX_SHIFT	16
+-#define HW_DID_INDEX_MASK	0x00ff0000
+-#define HW_WATTS_MASK		0xff
+-#define HW_PWR_DVR_MASK		0x300
+-#define HW_PWR_DVR_SHIFT	8
+-#define HW_PWR_MAX_MULT		3
+-#define MAX_HW_PSTATE		8	/* hw pstate supports up to 8 */
++#define HW_PSTATE_MASK		0x00000007
++#define HW_PSTATE_VALID_MASK	0x80000000
++#define HW_PSTATE_MAX_MASK	0x000000f0
++#define HW_PSTATE_MAX_SHIFT	4
+ #define MSR_PSTATE_DEF_BASE 	0xc0010064 /* base of Pstate MSRs */
+ #define MSR_PSTATE_STATUS 	0xc0010063 /* Pstate Status MSR */
+ #define MSR_PSTATE_CTRL 	0xc0010062 /* Pstate control MSR */
++#define MSR_PSTATE_CUR_LIMIT	0xc0010061 /* pstate current limit MSR */
+ 
+ /* define the two driver architectures */
+ #define CPU_OPTERON 0
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/cpu/mtrr/Makefile
+--- a/arch/i386/kernel/cpu/mtrr/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/kernel/cpu/mtrr/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -3,3 +3,4 @@
+ obj-y		+= cyrix.o
+ obj-y		+= centaur.o
+ 
++obj-$(CONFIG_XEN) := main.o if.o
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/cpu/mtrr/main-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/cpu/mtrr/main-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,198 @@
 +#include <linux/init.h>
 +#include <linux/proc_fs.h>
 +#include <linux/ctype.h>
@@ -2488,7 +3439,8 @@
 +
 +	op.cmd = XENPF_read_memtype;
 +	op.u.read_memtype.reg = reg;
-+	(void)HYPERVISOR_platform_op(&op);
++	if (unlikely(HYPERVISOR_platform_op(&op)))
++		memset(&op.u.read_memtype, 0, sizeof(op.u.read_memtype));
 +
 +	*size = op.u.read_memtype.nr_mfns;
 +	*base = op.u.read_memtype.mfn;
@@ -2665,24 +3617,10 @@
 +}
 +
 +subsys_initcall(mtrr_init);
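
The earlier hunk in this file also hardens the XENPF_read_memtype call: when
the platform hypercall fails, the output union is cleared, so the caller
reports an empty MTRR register rather than whatever happened to be on the
stack. Extracted from the diff noise into plain C (reg, size and base are the
enclosing function's parameters):

	op.cmd = XENPF_read_memtype;
	op.u.read_memtype.reg = reg;
	if (unlikely(HYPERVISOR_platform_op(&op)))
		memset(&op.u.read_memtype, 0, sizeof(op.u.read_memtype));

	*size = op.u.read_memtype.nr_mfns;
	*base = op.u.read_memtype.mfn;
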
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/cpu/mtrr/Makefile tmp-linux-2.6-xen.patch/arch/i386/kernel/cpu/mtrr/Makefile
---- pristine-linux-2.6.18.2/arch/i386/kernel/cpu/mtrr/Makefile	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/cpu/mtrr/Makefile	2007-07-30 16:35:11.000000000 +0200
-@@ -3,3 +3,10 @@ obj-y		+= amd.o
- obj-y		+= cyrix.o
- obj-y		+= centaur.o
- 
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
-+n-obj-xen := generic.o state.o amd.o cyrix.o centaur.o
-+
-+obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
-+obj-y := $(call cherrypickxen, $(obj-y))
-+endif
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/crash.c tmp-linux-2.6-xen.patch/arch/i386/kernel/crash.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/crash.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/crash.c	2007-07-30 16:35:11.000000000 +0200
-@@ -90,6 +90,7 @@ static void crash_save_self(struct pt_re
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/crash.c
+--- a/arch/i386/kernel/crash.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/kernel/crash.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -90,6 +90,7 @@
  	crash_save_this_cpu(regs, cpu);
  }
  
@@ -2690,7 +3628,7 @@
  #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
  static atomic_t waiting_for_crash_ipi;
  
-@@ -154,6 +155,7 @@ static void nmi_shootdown_cpus(void)
+@@ -154,6 +155,7 @@
  	/* There are no cpus to shootdown */
  }
  #endif
@@ -2698,7 +3636,7 @@
  
  void machine_crash_shutdown(struct pt_regs *regs)
  {
-@@ -170,10 +172,12 @@ void machine_crash_shutdown(struct pt_re
+@@ -170,10 +172,12 @@
  
  	/* Make a note of crashing cpu. Will be used in NMI callback.*/
  	crashing_cpu = smp_processor_id();
@@ -2711,46 +3649,16 @@
 +#endif /* CONFIG_XEN */
  	crash_save_self(regs);
  }
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/early_printk-xen.c tmp-linux-2.6-xen.patch/arch/i386/kernel/early_printk-xen.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/early_printk-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/early_printk-xen.c	2007-07-30 16:35:11.000000000 +0200
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/early_printk-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/early_printk-xen.c	Wed Sep 10 10:54:08 2008 +0100
 @@ -0,0 +1,2 @@
 +
 +#include "../../x86_64/kernel/early_printk-xen.c"
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/entry.S tmp-linux-2.6-xen.patch/arch/i386/kernel/entry.S
---- pristine-linux-2.6.18.2/arch/i386/kernel/entry.S	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/entry.S	2007-10-14 01:51:15.000000000 +0200
-@@ -269,7 +269,7 @@ ENTRY(sysenter_entry)
- 	CFI_STARTPROC simple
- 	CFI_DEF_CFA esp, 0
- 	CFI_REGISTER esp, ebp
--	movl TSS_sysenter_esp0(%esp),%esp
-+	movl SYSENTER_stack_esp0(%esp),%esp
- sysenter_past_esp:
- 	/*
- 	 * No need to follow this irqs on/off section: the syscall
-@@ -689,7 +689,7 @@ device_not_available_emulate:
-  * that sets up the real kernel stack. Check here, since we can't
-  * allow the wrong stack to be used.
-  *
-- * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
-+ * "SYSENTER_stack_esp0+12" is because the NMI/debug handler will have
-  * already pushed 3 words if it hits on the sysenter instruction:
-  * eflags, cs and eip.
-  *
-@@ -701,7 +701,7 @@ device_not_available_emulate:
- 	cmpw $__KERNEL_CS,4(%esp);		\
- 	jne ok;					\
- label:						\
--	movl TSS_sysenter_esp0+offset(%esp),%esp;	\
-+	movl SYSENTER_stack_esp0+offset(%esp),%esp;	\
- 	pushfl;					\
- 	pushl $__KERNEL_CS;			\
- 	pushl $sysenter_past_esp
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/entry-xen.S tmp-linux-2.6-xen.patch/arch/i386/kernel/entry-xen.S
---- pristine-linux-2.6.18.2/arch/i386/kernel/entry-xen.S	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/entry-xen.S	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,1216 @@
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/entry-xen.S
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/entry-xen.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1238 @@
 +/*
 + *  linux/arch/i386/entry.S
 + *
@@ -3134,6 +4042,29 @@
 +#endif /* !CONFIG_XEN */
 +	CFI_ENDPROC
 +
++	# pv sysenter call handler stub
++ENTRY(sysenter_entry_pv)
++	RING0_INT_FRAME
++	movl $__USER_DS,16(%esp)
++	movl %ebp,12(%esp)
++	movl $__USER_CS,4(%esp)
++	addl $4,%esp
++	/* +5*4 is SS:ESP,EFLAGS,CS:EIP. +8 is esp0 setting. */
++	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
++/*
++ * Load the potential sixth argument from user stack.
++ * Careful about security.
++ */
++	cmpl $__PAGE_OFFSET-3,%ebp
++	jae syscall_fault
++1:	movl (%ebp),%ebp
++.section __ex_table,"a"
++	.align 4
++	.long 1b,syscall_fault
++.previous
++	/* fall through */
++	CFI_ENDPROC
++ENDPROC(sysenter_entry_pv)
 +
 +	# system call handler stub
 +ENTRY(system_call)
@@ -3963,13 +4894,42 @@
 +	CFI_ENDPROC
 +
 +.section .rodata,"a"
-+.align 4
 +#include "syscall_table.S"
 +
 +syscall_table_size=(.-sys_call_table)
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/fixup.c tmp-linux-2.6-xen.patch/arch/i386/kernel/fixup.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/fixup.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/fixup.c	2007-07-30 16:35:11.000000000 +0200
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/entry.S
+--- a/arch/i386/kernel/entry.S	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/kernel/entry.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -269,7 +269,7 @@
+ 	CFI_STARTPROC simple
+ 	CFI_DEF_CFA esp, 0
+ 	CFI_REGISTER esp, ebp
+-	movl TSS_sysenter_esp0(%esp),%esp
++	movl SYSENTER_stack_esp0(%esp),%esp
+ sysenter_past_esp:
+ 	/*
+ 	 * No need to follow this irqs on/off section: the syscall
+@@ -687,7 +687,7 @@
+  * that sets up the real kernel stack. Check here, since we can't
+  * allow the wrong stack to be used.
+  *
+- * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
++ * "SYSENTER_stack_esp0+12" is because the NMI/debug handler will have
+  * already pushed 3 words if it hits on the sysenter instruction:
+  * eflags, cs and eip.
+  *
+@@ -699,7 +699,7 @@
+ 	cmpw $__KERNEL_CS,4(%esp);		\
+ 	jne ok;					\
+ label:						\
+-	movl TSS_sysenter_esp0+offset(%esp),%esp;	\
++	movl SYSENTER_stack_esp0+offset(%esp),%esp;	\
+ 	pushfl;					\
+ 	pushl $__KERNEL_CS;			\
+ 	pushl $sysenter_past_esp
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/fixup.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/fixup.c	Wed Sep 10 10:54:08 2008 +0100
 @@ -0,0 +1,88 @@
 +/******************************************************************************
 + * fixup.c
@@ -4019,8 +4979,8 @@
 +	if (current->tgid == 1)
 +		return;
 +            
-+	HYPERVISOR_vm_assist(
-+		VMASST_CMD_disable, VMASST_TYPE_4gb_segments_notify);
++	VOID(HYPERVISOR_vm_assist(VMASST_CMD_disable,
++				  VMASST_TYPE_4gb_segments_notify));
 +
 +	if (test_and_set_bit(0, &printed))
 +		return;
@@ -4054,14 +5014,14 @@
 +
 +static int __init fixup_init(void)
 +{
-+	HYPERVISOR_vm_assist(
-+		VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify);
++	WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
++				     VMASST_TYPE_4gb_segments_notify));
 +	return 0;
 +}
 +__initcall(fixup_init);
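
Both fixup.c changes follow the return-value convention this update applies
throughout: hypercall results are either checked loudly or discarded
explicitly, never dropped by accident. Assuming VOID() is the plain
"(void)(expression)" marker from the updated hypercall headers, the two cases
side by side:

	/* Boot-time enable: failure would be unexpected, so complain. */
	WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_4gb_segments_notify));

	/* Per-task disable: best effort, failure deliberately ignored. */
	VOID(HYPERVISOR_vm_assist(VMASST_CMD_disable,
				  VMASST_TYPE_4gb_segments_notify));
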
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/head-xen.S tmp-linux-2.6-xen.patch/arch/i386/kernel/head-xen.S
---- pristine-linux-2.6.18.2/arch/i386/kernel/head-xen.S	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/head-xen.S	2007-07-30 16:35:11.000000000 +0200
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/head-xen.S
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/head-xen.S	Wed Sep 10 10:54:08 2008 +0100
 @@ -0,0 +1,207 @@
 +
 +
@@ -4270,9 +5230,9 @@
 +#endif
 +	ELFNOTE(Xen, XEN_ELFNOTE_LOADER,         .asciz, "generic")
 +	ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long,  1)
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/init_task-xen.c tmp-linux-2.6-xen.patch/arch/i386/kernel/init_task-xen.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/init_task-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/init_task-xen.c	2007-07-30 16:35:11.000000000 +0200
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/init_task-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/init_task-xen.c	Wed Sep 10 10:54:08 2008 +0100
 @@ -0,0 +1,51 @@
 +#include <linux/mm.h>
 +#include <linux/module.h>
@@ -4325,10 +5285,10 @@
 +DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
 +#endif
 +
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/io_apic-xen.c tmp-linux-2.6-xen.patch/arch/i386/kernel/io_apic-xen.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/io_apic-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/io_apic-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,2777 @@
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/io_apic-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/io_apic-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,2771 @@
 +/*
 + *	Intel IO-APIC support for multi-Pentium hosts.
 + *
@@ -4378,6 +5338,7 @@
 +
 +#include <xen/interface/xen.h>
 +#include <xen/interface/physdev.h>
++#include <xen/evtchn.h>
 +
 +/* Fake i8259 */
 +#define make_8259A_irq(_irq)     (io_apic_irqs &= ~(1UL<<(_irq)))
@@ -4406,7 +5367,7 @@
 +	apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
 +	apic_op.reg = reg;
 +	apic_op.value = value;
-+	HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
++	WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op));
 +}
 +
 +#define io_apic_read(a,r)    xen_io_apic_read(a,r)
@@ -5591,7 +6552,7 @@
 +	set_intr_gate(vector, interrupt[idx]);
 +}
 +#else
-+#define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
++#define ioapic_register_intr(irq, vector, trigger) evtchn_register_pirq(irq)
 +#endif
 +
 +static void __init setup_IO_APIC_irqs(void)
@@ -5869,8 +6830,6 @@
 +	return;
 +}
 +
-+#if 0
-+
 +static void print_APIC_bitfield (int base)
 +{
 +	unsigned int v;
@@ -6011,11 +6970,6 @@
 +	v = inb(0x4d1) << 8 | inb(0x4d0);
 +	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
 +}
-+
-+#endif  /*  0  */
-+
-+#else
-+void __init print_IO_APIC(void) { }
 +#endif /* !CONFIG_XEN */
 +
 +static void __init enable_IO_APIC(void)
@@ -6844,7 +7798,7 @@
 +		struct xen_platform_op op = { .cmd = XENPF_platform_quirk };
 +		op.u.platform_quirk.quirk_id = sis_apic_bug ?
 +			QUIRK_IOAPIC_BAD_REGSEL : QUIRK_IOAPIC_GOOD_REGSEL;
-+		HYPERVISOR_platform_op(&op);
++		VOID(HYPERVISOR_platform_op(&op));
 +	}
 +	return 0;
 +}
@@ -7106,10 +8060,10 @@
 +}
 +
 +#endif /* CONFIG_ACPI */
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/ioport-xen.c tmp-linux-2.6-xen.patch/arch/i386/kernel/ioport-xen.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/ioport-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/ioport-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,122 @@
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/ioport-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/ioport-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,123 @@
 +/*
 + *	linux/arch/i386/kernel/ioport.c
 + *
@@ -7195,7 +8149,8 @@
 +
 +		set_xen_guest_handle(set_iobitmap.bitmap, (char *)bitmap);
 +		set_iobitmap.nr_ports = IO_BITMAP_BITS;
-+		HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &set_iobitmap);
++		WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap,
++					      &set_iobitmap));
 +	}
 +
 +	set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
@@ -7232,9 +8187,9 @@
 +	set_iopl_mask(t->iopl);
 +	return 0;
 +}
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/irq-xen.c tmp-linux-2.6-xen.patch/arch/i386/kernel/irq-xen.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/irq-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/irq-xen.c	2007-07-30 16:35:11.000000000 +0200
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/irq-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/irq-xen.c	Wed Sep 10 10:54:08 2008 +0100
 @@ -0,0 +1,324 @@
 +/*
 + *	linux/arch/i386/kernel/irq.c
@@ -7560,9 +8515,9 @@
 +}
 +#endif
 +
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/ldt-xen.c tmp-linux-2.6-xen.patch/arch/i386/kernel/ldt-xen.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/ldt-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/ldt-xen.c	2007-07-30 16:35:11.000000000 +0200
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/ldt-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/ldt-xen.c	Wed Sep 10 10:54:08 2008 +0100
 @@ -0,0 +1,270 @@
 +/*
 + * linux/kernel/ldt.c
@@ -7834,64 +8789,102 @@
 +	}
 +	return ret;
 +}
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/machine_kexec.c tmp-linux-2.6-xen.patch/arch/i386/kernel/machine_kexec.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/machine_kexec.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/machine_kexec.c	2007-07-30 16:35:11.000000000 +0200
-@@ -19,123 +19,52 @@
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/machine_kexec.c
+--- a/arch/i386/kernel/machine_kexec.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/kernel/machine_kexec.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -19,123 +19,67 @@
  #include <asm/desc.h>
  #include <asm/system.h>
  
--#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
--
++#ifdef CONFIG_XEN
++#include <xen/interface/kexec.h>
++#endif
++
+ #define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
++static u32 kexec_pgd[1024] PAGE_ALIGNED;
++#ifdef CONFIG_X86_PAE
++static u32 kexec_pmd0[1024] PAGE_ALIGNED;
++static u32 kexec_pmd1[1024] PAGE_ALIGNED;
++#endif
++static u32 kexec_pte0[1024] PAGE_ALIGNED;
++static u32 kexec_pte1[1024] PAGE_ALIGNED;
+ 
 -#define L0_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
 -#define L1_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
 -#define L2_ATTR (_PAGE_PRESENT)
--
++#ifdef CONFIG_XEN
+ 
 -#define LEVEL0_SIZE (1UL << 12UL)
--
++#define __ma(x) (pfn_to_mfn(__pa((x)) >> PAGE_SHIFT) << PAGE_SHIFT)
+ 
 -#ifndef CONFIG_X86_PAE
 -#define LEVEL1_SIZE (1UL << 22UL)
 -static u32 pgtable_level1[1024] PAGE_ALIGNED;
--
++#if PAGES_NR > KEXEC_XEN_NO_PAGES
++#error PAGES_NR is greater than KEXEC_XEN_NO_PAGES - Xen support will break
++#endif
+ 
 -static void identity_map_page(unsigned long address)
--{
++#if PA_CONTROL_PAGE != 0
++#error PA_CONTROL_PAGE is non zero - Xen support will break
++#endif
++
++void machine_kexec_setup_load_arg(xen_kexec_image_t *xki, struct kimage *image)
+ {
 -	unsigned long level1_index, level2_index;
 -	u32 *pgtable_level2;
--
++	void *control_page;
+ 
 -	/* Find the current page table */
 -	pgtable_level2 = __va(read_cr3());
--
++	memset(xki->page_list, 0, sizeof(xki->page_list));
+ 
 -	/* Find the indexes of the physical address to identity map */
 -	level1_index = (address % LEVEL1_SIZE)/LEVEL0_SIZE;
 -	level2_index = address / LEVEL1_SIZE;
--
++	control_page = page_address(image->control_code_page);
++	memcpy(control_page, relocate_kernel, PAGE_SIZE);
+ 
 -	/* Identity map the page table entry */
 -	pgtable_level1[level1_index] = address | L0_ATTR;
 -	pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR;
--
++	xki->page_list[PA_CONTROL_PAGE] = __ma(control_page);
++	xki->page_list[PA_PGD] = __ma(kexec_pgd);
++#ifdef CONFIG_X86_PAE
++	xki->page_list[PA_PMD_0] = __ma(kexec_pmd0);
++	xki->page_list[PA_PMD_1] = __ma(kexec_pmd1);
++#endif
++	xki->page_list[PA_PTE_0] = __ma(kexec_pte0);
++	xki->page_list[PA_PTE_1] = __ma(kexec_pte1);
+ 
 -	/* Flush the tlb so the new mapping takes effect.
 -	 * Global tlb entries are not flushed but that is not an issue.
 -	 */
 -	load_cr3(pgtable_level2);
--}
--
+ }
+ 
 -#else
 -#define LEVEL1_SIZE (1UL << 21UL)
 -#define LEVEL2_SIZE (1UL << 30UL)
 -static u64 pgtable_level1[512] PAGE_ALIGNED;
 -static u64 pgtable_level2[512] PAGE_ALIGNED;
--
++int __init machine_kexec_setup_resources(struct resource *hypervisor,
++					 struct resource *phys_cpus,
++					 int nr_phys_cpus)
++{
++	int k;
+ 
 -static void identity_map_page(unsigned long address)
 -{
 -	unsigned long level1_index, level2_index, level3_index;
 -	u64 *pgtable_level3;
--
++	/* The per-cpu crash note resources belong to the hypervisor resource */
++	for (k = 0; k < nr_phys_cpus; k++)
++		request_resource(hypervisor, phys_cpus + k);
+ 
 -	/* Find the current page table */
 -	pgtable_level3 = __va(read_cr3());
-+#ifdef CONFIG_XEN
-+#include <xen/interface/kexec.h>
-+#endif
- 
+-
 -	/* Find the indexes of the physical address to identity map */
 -	level1_index = (address % LEVEL1_SIZE)/LEVEL0_SIZE;
 -	level2_index = (address % LEVEL2_SIZE)/LEVEL1_SIZE;
@@ -7908,64 +8901,36 @@
 -	 */
 -	load_cr3(pgtable_level3);
 -}
-+#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
-+static u32 kexec_pgd[1024] PAGE_ALIGNED;
-+#ifdef CONFIG_X86_PAE
-+static u32 kexec_pmd0[1024] PAGE_ALIGNED;
-+static u32 kexec_pmd1[1024] PAGE_ALIGNED;
- #endif
-+static u32 kexec_pte0[1024] PAGE_ALIGNED;
-+static u32 kexec_pte1[1024] PAGE_ALIGNED;
- 
+-#endif
+-
 -static void set_idt(void *newidt, __u16 limit)
 -{
 -	struct Xgt_desc_struct curidt;
-+#ifdef CONFIG_XEN
- 
+-
 -	/* ia32 supports unaliged loads & stores */
 -	curidt.size    = limit;
 -	curidt.address = (unsigned long)newidt;
-+#define __ma(x) (pfn_to_mfn(__pa((x)) >> PAGE_SHIFT) << PAGE_SHIFT)
- 
+-
 -	load_idt(&curidt);
 -};
-+#if PAGES_NR > KEXEC_XEN_NO_PAGES
-+#error PAGES_NR is greater than KEXEC_XEN_NO_PAGES - Xen support will break
-+#endif
- 
-+#if PA_CONTROL_PAGE != 0
-+#error PA_CONTROL_PAGE is non zero - Xen support will break
-+#endif
- 
+-
+-
 -static void set_gdt(void *newgdt, __u16 limit)
-+void machine_kexec_setup_load_arg(xen_kexec_image_t *xki, struct kimage *image)
- {
+-{
 -	struct Xgt_desc_struct curgdt;
-+	void *control_page;
- 
+-
 -	/* ia32 supports unaligned loads & stores */
 -	curgdt.size    = limit;
 -	curgdt.address = (unsigned long)newgdt;
-+	memset(xki->page_list, 0, sizeof(xki->page_list));
- 
+-
 -	load_gdt(&curgdt);
 -};
-+	control_page = page_address(image->control_code_page);
-+	memcpy(control_page, relocate_kernel, PAGE_SIZE);
- 
+-
 -static void load_segments(void)
 -{
 -#define __STR(X) #X
 -#define STR(X) __STR(X)
-+	xki->page_list[PA_CONTROL_PAGE] = __ma(control_page);
-+	xki->page_list[PA_PGD] = __ma(kexec_pgd);
-+#ifdef CONFIG_X86_PAE
-+	xki->page_list[PA_PMD_0] = __ma(kexec_pmd0);
-+	xki->page_list[PA_PMD_1] = __ma(kexec_pmd1);
-+#endif
-+	xki->page_list[PA_PTE_0] = __ma(kexec_pte0);
-+	xki->page_list[PA_PTE_1] = __ma(kexec_pte1);
- 
+-
 -	__asm__ __volatile__ (
 -		"\tljmp $"STR(__KERNEL_CS)",$1f\n"
 -		"\t1:\n"
@@ -7978,6 +8943,7 @@
 -		::: "eax", "memory");
 -#undef STR
 -#undef __STR
++	return 0;
  }
  
 -typedef asmlinkage NORET_TYPE void (*relocate_new_kernel_t)(
@@ -7985,7 +8951,8 @@
 -					unsigned long reboot_code_buffer,
 -					unsigned long start_address,
 -					unsigned int has_pae) ATTRIB_NORET;
--
++void machine_kexec_register_resources(struct resource *res) { ; }
+ 
 -extern const unsigned char relocate_new_kernel[];
 -extern void relocate_new_kernel_end(void);
 -extern const unsigned int relocate_new_kernel_size;
@@ -7993,7 +8960,7 @@
  
  /*
   * A architecture hook called to validate the
-@@ -163,49 +92,38 @@ void machine_kexec_cleanup(struct kimage
+@@ -163,49 +107,38 @@
  {
  }
  
@@ -8018,10 +8985,26 @@
 -	reboot_code_buffer = page_to_pfn(image->control_code_page)
 -								<< PAGE_SHIFT;
 -	page_list = image->head;
--
++	control_page = page_address(image->control_code_page);
++	memcpy(control_page, relocate_kernel, PAGE_SIZE);
+ 
 -	/* Set up an identity mapping for the reboot_code_buffer */
 -	identity_map_page(reboot_code_buffer);
--
++	page_list[PA_CONTROL_PAGE] = __pa(control_page);
++	page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel;
++	page_list[PA_PGD] = __pa(kexec_pgd);
++	page_list[VA_PGD] = (unsigned long)kexec_pgd;
++#ifdef CONFIG_X86_PAE
++	page_list[PA_PMD_0] = __pa(kexec_pmd0);
++	page_list[VA_PMD_0] = (unsigned long)kexec_pmd0;
++	page_list[PA_PMD_1] = __pa(kexec_pmd1);
++	page_list[VA_PMD_1] = (unsigned long)kexec_pmd1;
++#endif
++	page_list[PA_PTE_0] = __pa(kexec_pte0);
++	page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
++	page_list[PA_PTE_1] = __pa(kexec_pte1);
++	page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
+ 
 -	/* copy it out */
 -	memcpy((void *)reboot_code_buffer, relocate_new_kernel,
 -						relocate_new_kernel_size);
@@ -8045,78 +9028,13 @@
 -	/* now call it */
 -	rnk = (relocate_new_kernel_t) reboot_code_buffer;
 -	(*rnk)(page_list, reboot_code_buffer, image->start, cpu_has_pae);
-+	control_page = page_address(image->control_code_page);
-+	memcpy(control_page, relocate_kernel, PAGE_SIZE);
-+
-+	page_list[PA_CONTROL_PAGE] = __pa(control_page);
-+	page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel;
-+	page_list[PA_PGD] = __pa(kexec_pgd);
-+	page_list[VA_PGD] = (unsigned long)kexec_pgd;
-+#ifdef CONFIG_X86_PAE
-+	page_list[PA_PMD_0] = __pa(kexec_pmd0);
-+	page_list[VA_PMD_0] = (unsigned long)kexec_pmd0;
-+	page_list[PA_PMD_1] = __pa(kexec_pmd1);
-+	page_list[VA_PMD_1] = (unsigned long)kexec_pmd1;
-+#endif
-+	page_list[PA_PTE_0] = __pa(kexec_pte0);
-+	page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
-+	page_list[PA_PTE_1] = __pa(kexec_pte1);
-+	page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
-+
 +	relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
 +			image->start, cpu_has_pae);
  }
 +#endif
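
After this rewrite the file splits cleanly: natively the kernel fills
page_list with physical/virtual pairs and jumps to relocate_kernel() itself,
while under CONFIG_XEN dom0 only loads the pages and the hypervisor performs
the switch, so every xki->page_list entry must be a machine address. That is
what the __ma() macro computes; the same translation spelled out as a
function (illustrative name):

static unsigned long virt_to_machine_addr(void *v)
{
	unsigned long pfn = __pa(v) >> PAGE_SHIFT;

	/* pseudo-physical frame -> machine frame -> machine address */
	return pfn_to_mfn(pfn) << PAGE_SHIFT;
}

The two #error guards exist because Xen's kexec interface reserves only
KEXEC_XEN_NO_PAGES page slots and assumes the control page lives at index
PA_CONTROL_PAGE == 0.
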
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/Makefile tmp-linux-2.6-xen.patch/arch/i386/kernel/Makefile
---- pristine-linux-2.6.18.2/arch/i386/kernel/Makefile	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/Makefile	2007-07-30 16:35:11.000000000 +0200
-@@ -44,6 +44,12 @@ EXTRA_AFLAGS   := -traditional
- 
- obj-$(CONFIG_SCx200)		+= scx200.o
- 
-+ifdef CONFIG_XEN
-+vsyscall_note := vsyscall-note-xen.o
-+else
-+vsyscall_note := vsyscall-note.o
-+endif
-+
- # vsyscall.o contains the vsyscall DSO images as __initdata.
- # We must build both images before we can assemble it.
- # Note: kbuild does not track this dependency due to usage of .incbin
-@@ -65,7 +71,7 @@ SYSCFLAGS_vsyscall-int80.so	= $(vsyscall
- 
- $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so: \
- $(obj)/vsyscall-%.so: $(src)/vsyscall.lds \
--		      $(obj)/vsyscall-%.o $(obj)/vsyscall-note.o FORCE
-+		      $(obj)/vsyscall-%.o $(obj)/$(vsyscall_note) FORCE
- 	$(call if_changed,syscall)
- 
- # We also create a special relocatable object that should mirror the symbol
-@@ -77,8 +83,20 @@ $(obj)/built-in.o: ld_flags += -R $(obj)
- 
- SYSCFLAGS_vsyscall-syms.o = -r
- $(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \
--			$(obj)/vsyscall-sysenter.o $(obj)/vsyscall-note.o FORCE
-+			$(obj)/vsyscall-sysenter.o $(obj)/$(vsyscall_note) FORCE
- 	$(call if_changed,syscall)
- 
- k8-y                      += ../../x86_64/kernel/k8.o
- 
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
-+
-+obj-y += fixup.o
-+microcode-$(subst m,y,$(CONFIG_MICROCODE)) := microcode-xen.o
-+n-obj-xen := i8259.o timers/ reboot.o smpboot.o trampoline.o
-+
-+obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
-+obj-y := $(call cherrypickxen, $(obj-y))
-+extra-y := $(call cherrypickxen, $(extra-y))
-+%/head-xen.o %/head-xen.s: EXTRA_AFLAGS :=
-+endif
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/microcode-xen.c tmp-linux-2.6-xen.patch/arch/i386/kernel/microcode-xen.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/microcode-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/microcode-xen.c	2007-07-30 16:35:11.000000000 +0200
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/microcode-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/microcode-xen.c	Wed Sep 10 10:54:08 2008 +0100
 @@ -0,0 +1,144 @@
 +/*
 + *	Intel CPU Microcode Update Driver for Linux
@@ -8262,9 +9180,9 @@
 +module_init(microcode_init)
 +module_exit(microcode_exit)
 +MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/mpparse-xen.c tmp-linux-2.6-xen.patch/arch/i386/kernel/mpparse-xen.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/mpparse-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/mpparse-xen.c	2007-07-30 16:35:11.000000000 +0200
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/mpparse-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/mpparse-xen.c	Wed Sep 10 10:54:08 2008 +0100
 @@ -0,0 +1,1185 @@
 +/*
 + *	Intel Multiprocessor Specification 1.1 and 1.4
@@ -9451,10 +10369,10 @@
 +
 +#endif /* CONFIG_X86_IO_APIC */
 +#endif /* CONFIG_ACPI */
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/pci-dma-xen.c tmp-linux-2.6-xen.patch/arch/i386/kernel/pci-dma-xen.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/pci-dma-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/pci-dma-xen.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,369 @@
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/pci-dma-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/pci-dma-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,415 @@
 +/*
 + * Dynamic DMA mapping support.
 + *
@@ -9472,9 +10390,11 @@
 +#include <linux/version.h>
 +#include <asm/io.h>
 +#include <xen/balloon.h>
++#include <xen/gnttab.h>
 +#include <asm/swiotlb.h>
 +#include <asm/tlbflush.h>
 +#include <asm-i386/mach-xen/asm/swiotlb.h>
++#include <asm-i386/mach-xen/asm/gnttab_dma.h>
 +#include <asm/bug.h>
 +
 +#ifdef __x86_64__
@@ -9532,6 +10452,39 @@
 +	}						\
 +} while (0)
 +
++static int check_pages_physically_contiguous(unsigned long pfn, 
++					     unsigned int offset,
++					     size_t length)
++{
++	unsigned long next_mfn;
++	int i;
++	int nr_pages;
++	
++	next_mfn = pfn_to_mfn(pfn);
++	nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;
++	
++	for (i = 1; i < nr_pages; i++) {
++		if (pfn_to_mfn(++pfn) != ++next_mfn) 
++			return 0;
++	}
++	return 1;
++}
++
++int range_straddles_page_boundary(paddr_t p, size_t size)
++{
++	extern unsigned long *contiguous_bitmap;
++	unsigned long pfn = p >> PAGE_SHIFT;
++	unsigned int offset = p & ~PAGE_MASK;
++
++	if (offset + size <= PAGE_SIZE)
++		return 0;
++	if (test_bit(pfn, contiguous_bitmap))
++		return 0;
++	if (check_pages_physically_contiguous(pfn, offset, size))
++		return 0;
++	return 1;
++}
++
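
Why the two helpers above are needed: a guest's pseudo-physical pages are
contiguous by construction, but the machine frames backing them need not be,
so a buffer that crosses a page boundary may be split in machine memory. A
hedged usage sketch (buffer and size invented for illustration):

	void *buf = kmalloc(2 * PAGE_SIZE, GFP_KERNEL);

	if (buf && range_straddles_page_boundary(__pa(buf), 2 * PAGE_SIZE))
		printk(KERN_DEBUG "buffer is not machine-contiguous\n");

The contiguous_bitmap test short-circuits the per-page MFN walk for regions
already made machine-contiguous, e.g. via xen_create_contiguous_region().
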
 +int
 +dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
 +	   enum dma_data_direction direction)
@@ -9546,10 +10499,10 @@
 +		rc = swiotlb_map_sg(hwdev, sg, nents, direction);
 +	} else {
 +		for (i = 0; i < nents; i++ ) {
++			BUG_ON(!sg[i].page);
 +			sg[i].dma_address =
-+				page_to_bus(sg[i].page) + sg[i].offset;
++				gnttab_dma_map_page(sg[i].page) + sg[i].offset;
 +			sg[i].dma_length  = sg[i].length;
-+			BUG_ON(!sg[i].page);
 +			IOMMU_BUG_ON(address_needs_mapping(
 +				hwdev, sg[i].dma_address));
 +			IOMMU_BUG_ON(range_straddles_page_boundary(
@@ -9568,9 +10521,15 @@
 +dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
 +	     enum dma_data_direction direction)
 +{
++	int i;
++
 +	BUG_ON(direction == DMA_NONE);
 +	if (swiotlb)
 +		swiotlb_unmap_sg(hwdev, sg, nents, direction);
++	else {
++		for (i = 0; i < nents; i++ )
++			gnttab_dma_unmap_page(sg[i].dma_address);
++	}
 +}
 +EXPORT_SYMBOL(dma_unmap_sg);
 +
@@ -9587,7 +10546,7 @@
 +		dma_addr = swiotlb_map_page(
 +			dev, page, offset, size, direction);
 +	} else {
-+		dma_addr = page_to_bus(page) + offset;
++		dma_addr = gnttab_dma_map_page(page) + offset;
 +		IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
 +	}
 +
@@ -9602,6 +10561,8 @@
 +	BUG_ON(direction == DMA_NONE);
 +	if (swiotlb)
 +		swiotlb_unmap_page(dev, dma_address, size, direction);
++	else
++		gnttab_dma_unmap_page(dma_address);
 +}
 +EXPORT_SYMBOL(dma_unmap_page);
 +#endif /* CONFIG_HIGHMEM */
@@ -9786,7 +10747,8 @@
 +	if (swiotlb) {
 +		dma = swiotlb_map_single(dev, ptr, size, direction);
 +	} else {
-+		dma = virt_to_bus(ptr);
++		dma = gnttab_dma_map_page(virt_to_page(ptr)) +
++		      offset_in_page(ptr);
 +		IOMMU_BUG_ON(range_straddles_page_boundary(__pa(ptr), size));
 +		IOMMU_BUG_ON(address_needs_mapping(dev, dma));
 +	}
@@ -9804,6 +10766,8 @@
 +		BUG();
 +	if (swiotlb)
 +		swiotlb_unmap_single(dev, dma_addr, size, direction);
++	else
++		gnttab_dma_unmap_page(dma_addr);
 +}
 +EXPORT_SYMBOL(dma_unmap_single);
 +
@@ -9824,10 +10788,10 @@
 +		swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
 +}
 +EXPORT_SYMBOL(dma_sync_single_for_device);
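
A minimal sketch of the discipline these hunks establish: outside the swiotlb
path every mapping now goes through the grant-table helpers from the newly
included asm/gnttab_dma.h, and each map is balanced by an unmap on the
returned bus address (wrapper names invented for illustration):

static dma_addr_t one_map(void *ptr)
{
	return gnttab_dma_map_page(virt_to_page(ptr)) + offset_in_page(ptr);
}

static void one_unmap(dma_addr_t dma)
{
	gnttab_dma_unmap_page(dma);
}

This replaces the old direct virt_to_bus()/page_to_bus() arithmetic, so pages
that were handed out through grant tables get translated correctly for DMA.
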
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/process-xen.c tmp-linux-2.6-xen.patch/arch/i386/kernel/process-xen.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/process-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/process-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,853 @@
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/process-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/process-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,877 @@
 +/*
 + *  linux/arch/i386/kernel/process.c
 + *
@@ -9968,7 +10932,7 @@
 +	local_irq_disable();
 +	cpu_clear(smp_processor_id(), cpu_initialized);
 +	preempt_enable_no_resched();
-+	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
++	VOID(HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL));
 +	cpu_bringup();
 +}
 +#else
@@ -10139,7 +11103,8 @@
 +		struct thread_struct *t = &tsk->thread;
 +		struct physdev_set_iobitmap set_iobitmap;
 +		memset(&set_iobitmap, 0, sizeof(set_iobitmap));
-+		HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &set_iobitmap);
++		WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap,
++					      &set_iobitmap));
 +		kfree(t->io_bitmap_ptr);
 +		t->io_bitmap_ptr = NULL;
 +		clear_thread_flag(TIF_IO_BITMAP);
@@ -10381,8 +11346,14 @@
 +#ifndef CONFIG_X86_NO_TSS
 +	struct tss_struct *tss = &per_cpu(init_tss, cpu);
 +#endif
++#if CONFIG_XEN_COMPAT > 0x030002
 +	struct physdev_set_iopl iopl_op;
 +	struct physdev_set_iobitmap iobmp_op;
++#else
++	struct physdev_op _pdo[2], *pdo = _pdo;
++#define iopl_op pdo->u.set_iopl
++#define iobmp_op pdo->u.set_iobitmap
++#endif
 +	multicall_entry_t _mcl[8], *mcl = _mcl;
 +
 +	/* XEN NOTE: FS/GS saved in switch_mm(), not here. */
@@ -10430,9 +11401,15 @@
 +
 +	if (unlikely(prev->iopl != next->iopl)) {
 +		iopl_op.iopl = (next->iopl == 0) ? 1 : (next->iopl >> 12) & 3;
++#if CONFIG_XEN_COMPAT > 0x030002
 +		mcl->op      = __HYPERVISOR_physdev_op;
 +		mcl->args[0] = PHYSDEVOP_set_iopl;
 +		mcl->args[1] = (unsigned long)&iopl_op;
++#else
++		mcl->op      = __HYPERVISOR_physdev_op_compat;
++		pdo->cmd     = PHYSDEVOP_set_iopl;
++		mcl->args[0] = (unsigned long)pdo++;
++#endif
 +		mcl++;
 +	}
 +
@@ -10440,13 +11417,24 @@
 +		set_xen_guest_handle(iobmp_op.bitmap,
 +				     (char *)next->io_bitmap_ptr);
 +		iobmp_op.nr_ports = next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
++#if CONFIG_XEN_COMPAT > 0x030002
 +		mcl->op      = __HYPERVISOR_physdev_op;
 +		mcl->args[0] = PHYSDEVOP_set_iobitmap;
 +		mcl->args[1] = (unsigned long)&iobmp_op;
++#else
++		mcl->op      = __HYPERVISOR_physdev_op_compat;
++		pdo->cmd     = PHYSDEVOP_set_iobitmap;
++		mcl->args[0] = (unsigned long)pdo++;
++#endif
 +		mcl++;
 +	}
 +
-+	(void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
++#if CONFIG_XEN_COMPAT <= 0x030002
++	BUG_ON(pdo > _pdo + ARRAY_SIZE(_pdo));
++#endif
++	BUG_ON(mcl > _mcl + ARRAY_SIZE(_mcl));
++	if (unlikely(HYPERVISOR_multicall_check(_mcl, mcl - _mcl, NULL)))
++		BUG();
 +
 +	/*
 +	 * Restore %fs and %gs if needed.
@@ -10681,9 +11669,9 @@
 +		sp -= get_random_int() % 8192;
 +	return sp & ~0xf;
 +}
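
The context-switch path above batches its privileged updates: entries are
queued into an on-stack multicall array and submitted in one guest/hypervisor
transition, with the batched status checked afterwards. A reduced sketch with
two illustrative entries (argument values are examples, not the function's
full sequence):

	multicall_entry_t mcl[2];

	mcl[0].op      = __HYPERVISOR_stack_switch;
	mcl[0].args[0] = __KERNEL_DS;
	mcl[0].args[1] = next->esp0;
	mcl[1].op      = __HYPERVISOR_fpu_taskswitch;
	mcl[1].args[0] = 1;
	if (unlikely(HYPERVISOR_multicall_check(mcl, 2, NULL)))
		BUG();

The new CONFIG_XEN_COMPAT branches keep the same batch working on hypervisors
up to 3.0.2, which only understood the physdev_op_compat form.
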
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/quirks-xen.c tmp-linux-2.6-xen.patch/arch/i386/kernel/quirks-xen.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/quirks-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/quirks-xen.c	2007-07-30 16:35:11.000000000 +0200
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/quirks-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/quirks-xen.c	Wed Sep 10 10:54:08 2008 +0100
 @@ -0,0 +1,47 @@
 +/*
 + * This file contains work-arounds for x86 and x86_64 platform bugs.
@@ -10721,7 +11709,7 @@
 +		printk(KERN_INFO "Disabling irq balancing and affinity\n");
 +		op.cmd = XENPF_platform_quirk;
 +		op.u.platform_quirk.quirk_id = QUIRK_NOIRQBALANCING;
-+		(void)HYPERVISOR_platform_op(&op);
++		WARN_ON(HYPERVISOR_platform_op(&op));
 +	}
 +
 +	/* put back the original value for config space*/
@@ -10732,16 +11720,21 @@
 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7525_MCH,	quirk_intel_irqbalance);
 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7520_MCH,	quirk_intel_irqbalance);
 +#endif
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/relocate_kernel.S tmp-linux-2.6-xen.patch/arch/i386/kernel/relocate_kernel.S
---- pristine-linux-2.6.18.2/arch/i386/kernel/relocate_kernel.S	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/relocate_kernel.S	2007-10-14 01:51:15.000000000 +0200
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/relocate_kernel.S
+--- a/arch/i386/kernel/relocate_kernel.S	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/kernel/relocate_kernel.S	Wed Sep 10 10:54:08 2008 +0100
 @@ -7,16 +7,138 @@
   */
  
  #include <linux/linkage.h>
 +#include <asm/page.h>
 +#include <asm/kexec.h>
-+
+ 
+-	/*
+-	 * Must be relocatable PIC code callable as a C function, that once
+-	 * it starts can not use the previous processes stack.
+-	 */
+-	.globl relocate_new_kernel
 +/*
 + * Must be relocatable PIC code callable as a C function
 + */
@@ -10866,12 +11859,7 @@
 +	orl	$PAGE_ATTR, %edx
 +	movl	%edx, (%eax)
 +#endif
- 
--	/*
--	 * Must be relocatable PIC code callable as a C function, that once
--	 * it starts can not use the previous processes stack.
--	 */
--	.globl relocate_new_kernel
++
  relocate_new_kernel:
  	/* read the arguments and say goodbye to the stack */
  	movl  4(%esp), %ebx /* page_list */
@@ -10880,7 +11868,7 @@
  	movl  12(%esp), %edx /* start address */
  	movl  16(%esp), %ecx /* cpu_has_pae */
  
-@@ -24,11 +146,57 @@ relocate_new_kernel:
+@@ -24,11 +146,57 @@
  	pushl $0
  	popfl
  
@@ -10942,7 +11930,7 @@
  
  	/* Set cr0 to a known state:
  	 * 31 0 == Paging disabled
-@@ -113,8 +281,20 @@ relocate_new_kernel:
+@@ -113,8 +281,20 @@
  	xorl    %edi, %edi
  	xorl    %ebp, %ebp
  	ret
@@ -10967,10 +11955,10 @@
 +idt_48:
 +	.word	0			/* limit */
 +	.long	0			/* base */
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/setup-xen.c tmp-linux-2.6-xen.patch/arch/i386/kernel/setup-xen.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/setup-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/setup-xen.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,1898 @@
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/setup-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/setup-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1924 @@
 +/*
 + *  linux/arch/i386/kernel/setup.c
 + *
@@ -11039,6 +12027,7 @@
 +#include <xen/interface/physdev.h>
 +#include <xen/interface/memory.h>
 +#include <xen/features.h>
++#include <xen/firmware.h>
 +#include <xen/xencons.h>
 +#include <setup_arch.h>
 +#include <bios_ebda.h>
@@ -11128,6 +12117,9 @@
 +};
 +struct edid_info edid_info;
 +EXPORT_SYMBOL_GPL(edid_info);
++#ifndef CONFIG_XEN
++#define copy_edid() (edid_info = EDID_INFO)
++#endif
 +struct ist_info ist_info;
 +#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
 +	defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
@@ -11252,7 +12244,12 @@
 +}, {
 +	.name	= "keyboard",
 +	.start	= 0x0060,
-+	.end	= 0x006f,
++	.end	= 0x0060,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++	.name	= "keyboard",
++	.start	= 0x0064,
++	.end	= 0x0064,
 +	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
 +}, {
 +	.name	= "dma page reg",
@@ -11705,6 +12702,21 @@
 +#endif
 +		add_memory_region(start, size, type);
 +	} while (biosmap++,--nr_map);
++
++#ifdef CONFIG_XEN
++	if (is_initial_xendomain()) {
++		struct xen_memory_map memmap;
++
++		memmap.nr_entries = E820MAX;
++		set_xen_guest_handle(memmap.buffer, machine_e820.map);
++
++		if (HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
++			BUG();
++		machine_e820.nr_map = memmap.nr_entries;
++	} else
++		machine_e820 = e820;
++#endif
++
 +	return 0;
 +}
 +
@@ -11713,6 +12725,7 @@
 +#ifdef CONFIG_EDD_MODULE
 +EXPORT_SYMBOL(edd);
 +#endif
++#ifndef CONFIG_XEN
 +/**
 + * copy_edd() - Copy the BIOS EDD information
 + *              from boot_params into a safe place.
@@ -11725,6 +12738,7 @@
 +     edd.mbr_signature_nr = EDD_MBR_SIG_NR;
 +     edd.edd_info_nr = EDD_NR;
 +}
++#endif
 +#else
 +static inline void copy_edd(void)
 +{
@@ -11995,6 +13009,35 @@
 +	return 0;
 +}
 +
++/*
++ * This function checks if any part of the range <start,end> is mapped
++ * with type.
++ */
++int
++e820_any_mapped(u64 start, u64 end, unsigned type)
++{
++	int i;
++
++#ifndef CONFIG_XEN
++	for (i = 0; i < e820.nr_map; i++) {
++		const struct e820entry *ei = &e820.map[i];
++#else
++	if (!is_initial_xendomain())
++		return 0;
++	for (i = 0; i < machine_e820.nr_map; ++i) {
++		const struct e820entry *ei = &machine_e820.map[i];
++#endif
++
++		if (type && ei->type != type)
++			continue;
++		if (ei->addr >= end || ei->addr + ei->size <= start)
++			continue;
++		return 1;
++	}
++	return 0;
++}
++EXPORT_SYMBOL_GPL(e820_any_mapped);
++
 + /*
 +  * This function checks if the entire range <start,end> is mapped with type.
 +  *
@@ -12243,14 +13286,7 @@
 +	unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
 +	unsigned int max_dma, low;
 +
-+	/*
-+	 * XEN: Our notion of "DMA memory" is fake when running over Xen.
-+	 * We simply put all RAM in the DMA zone so that those drivers which
-+	 * needlessly specify GFP_DMA do not get starved of RAM unnecessarily.
-+	 * Those drivers that *do* require lowmem are screwed anyway when
-+	 * running over Xen!
-+	 */
-+	max_dma = max_low_pfn;
++	max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 +	low = max_low_pfn;
 +
 +	if (low < max_dma)
@@ -12347,10 +13383,6 @@
 +			crashk_res.end - crashk_res.start + 1);
 +#endif
 +#endif
-+
-+	if (!xen_feature(XENFEAT_auto_translated_physmap))
-+		phys_to_machine_mapping =
-+			(unsigned long *)xen_start_info->mfn_list;
 +}
 +
 +/*
@@ -12518,18 +13550,8 @@
 +static void __init register_memory(void)
 +{
 +#ifdef CONFIG_XEN
-+	if (is_initial_xendomain()) {
-+		struct xen_memory_map memmap;
-+
-+		memmap.nr_entries = E820MAX;
-+		set_xen_guest_handle(memmap.buffer, machine_e820.map);
-+
-+		if (HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
-+			BUG();
-+
-+		machine_e820.nr_map = memmap.nr_entries;
++	if (is_initial_xendomain())
 +		e820_setup_gap(machine_e820.map, machine_e820.nr_map);
-+	}
 +	else
 +#endif
 +		e820_setup_gap(e820.map, e820.nr_map);
@@ -12566,11 +13588,13 @@
 +	/* Register a call for panic conditions. */
 +	atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
 +
-+	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
-+	HYPERVISOR_vm_assist(VMASST_CMD_enable,
-+			     VMASST_TYPE_writable_pagetables);
++	WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
++				     VMASST_TYPE_4gb_segments));
++	WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
++				     VMASST_TYPE_writable_pagetables));
 +
 +	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
++	pre_setup_arch_hook();
 +	early_cpu_init();
 +#ifdef CONFIG_SMP
 +	prefill_possible_map();
@@ -12594,7 +13618,7 @@
 +	ROOT_DEV = MKDEV(UNNAMED_MAJOR,0);
 + 	drive_info = DRIVE_INFO;
 + 	screen_info = SCREEN_INFO;
-+	edid_info = EDID_INFO;
++	copy_edid();
 +	apm_info.bios = APM_BIOS_INFO;
 +	ist_info = IST_INFO;
 +	saved_videomode = VIDEO_MODE;
@@ -12607,23 +13631,12 @@
 +	bootloader_type = LOADER_TYPE;
 +
 +	if (is_initial_xendomain()) {
-+		/* This is drawn from a dump from vgacon:startup in
-+		 * standard Linux. */
-+		screen_info.orig_video_mode = 3; 
-+		screen_info.orig_video_isVGA = 1;
-+		screen_info.orig_video_lines = 25;
-+		screen_info.orig_video_cols = 80;
-+		screen_info.orig_video_ega_bx = 3;
-+		screen_info.orig_video_points = 16;
-+		screen_info.orig_y = screen_info.orig_video_lines - 1;
-+		if (xen_start_info->console.dom0.info_size >=
-+		    sizeof(struct dom0_vga_console_info)) {
-+			const struct dom0_vga_console_info *info =
-+				(struct dom0_vga_console_info *)(
-+					(char *)xen_start_info +
-+					xen_start_info->console.dom0.info_off);
-+			dom0_init_screen_info(info);
-+		}
++		const struct dom0_vga_console_info *info =
++			(void *)((char *)xen_start_info +
++			         xen_start_info->console.dom0.info_off);
++
++		dom0_init_screen_info(info,
++		                      xen_start_info->console.dom0.info_size);
 +		xen_start_info->console.domU.mfn = 0;
 +		xen_start_info->console.domU.evtchn = 0;
 +	} else
@@ -12635,8 +13648,6 @@
 +	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
 +#endif
 +
-+	setup_xen_features();
-+
 +	ARCH_SETUP
 +	if (efi_enabled)
 +		efi_init();
@@ -12764,6 +13775,11 @@
 +		     virt_to_mfn(pfn_to_mfn_frame_list_list);
 +	}
 +
++	/* Mark all ISA DMA channels in-use - using them wouldn't work. */
++	for (i = 0; i < MAX_DMA_CHANNELS; ++i)
++		if (i != 4 && request_dma(i, "xen") != 0)
++			BUG();
++
 +	/*
 +	 * NOTE: at this point the bootmem allocator is fully available.
 +	 */
@@ -12778,7 +13794,7 @@
 +		efi_map_memmap();
 +
 +	set_iopl.iopl = 1;
-+	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
++	WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl));
 +
 +#ifdef CONFIG_ACPI
 +	if (!is_initial_xendomain()) {
@@ -12830,8 +13846,6 @@
 +#endif
 +	}
 +	tsc_init();
-+
-+	xencons_early_setup();
 +}
 +
 +static int
@@ -12869,10 +13883,56 @@
 + * c-basic-offset:8
 + * End:
 + */
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/smp-xen.c tmp-linux-2.6-xen.patch/arch/i386/kernel/smp-xen.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/smp-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/smp-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,624 @@
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/setup.c
+--- a/arch/i386/kernel/setup.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/kernel/setup.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -261,7 +261,12 @@
+ }, {
+ 	.name	= "keyboard",
+ 	.start	= 0x0060,
+-	.end	= 0x006f,
++	.end	= 0x0060,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++	.name	= "keyboard",
++	.start	= 0x0064,
++	.end	= 0x0064,
+ 	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
+ }, {
+ 	.name	= "dma page reg",
+@@ -955,6 +960,28 @@
+ 	memory_present(0, start, end);
+ 	return 0;
+ }
++
++/*
++ * This function checks if any part of the range <start,end> is mapped
++ * with type.
++ */
++int
++e820_any_mapped(u64 start, u64 end, unsigned type)
++{
++	int i;
++
++	for (i = 0; i < e820.nr_map; i++) {
++		const struct e820entry *ei = &e820.map[i];
++
++		if (type && ei->type != type)
++			continue;
++		if (ei->addr >= end || ei->addr + ei->size <= start)
++			continue;
++		return 1;
++	}
++	return 0;
++}
++EXPORT_SYMBOL_GPL(e820_any_mapped);
+ 
+  /*
+   * This function checks if the entire range <start,end> is mapped with type.
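
The new e820_any_mapped() helper, added to both the native and Xen setup
files (and restricted to dom0's machine map in the Xen variant), lets callers
ask whether any part of a physical window carries a given e820 type. A hedged
usage sketch with an invented address range:

	/* Does anything in the legacy VGA window overlap real RAM? */
	if (e820_any_mapped(0xA0000, 0x100000, E820_RAM))
		printk(KERN_INFO "legacy hole overlaps RAM in the e820 map\n");

Passing type == 0 matches entries of every type, per the "if (type && ...)"
test in the body above.
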
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/smp-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/smp-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,605 @@
 +/*
 + *	Intel SMP support routines.
 + *
@@ -13316,21 +14376,6 @@
 +	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
 +}
 +
-+#else
-+
-+irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
-+				     struct pt_regs *regs)
-+{ return 0; }
-+void flush_tlb_current_task(void)
-+{ xen_tlb_flush_mask(&current->mm->cpu_vm_mask); }
-+void flush_tlb_mm(struct mm_struct * mm)
-+{ xen_tlb_flush_mask(&mm->cpu_vm_mask); }
-+void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
-+{ xen_invlpg_mask(&vma->vm_mm->cpu_vm_mask, va); }
-+EXPORT_SYMBOL(flush_tlb_page);
-+void flush_tlb_all(void)
-+{ xen_tlb_flush_all(); }
-+
 +#endif /* XEN */
 +
 +/*
@@ -13415,11 +14460,11 @@
 +
 +	/* Wait for response */
 +	while (atomic_read(&data.started) != cpus)
-+		barrier();
++		cpu_relax();
 +
 +	if (wait)
 +		while (atomic_read(&data.finished) != cpus)
-+			barrier();
++			cpu_relax();
 +	spin_unlock(&call_lock);
 +
 +	return 0;
@@ -13433,9 +14478,7 @@
 +	 */
 +	cpu_clear(smp_processor_id(), cpu_online_map);
 +	local_irq_disable();
-+#if 0
-+	disable_local_APIC();
-+#endif
++	disable_all_local_evtchn();
 +	if (cpu_data[smp_processor_id()].hlt_works_ok)
 +		for(;;) halt();
 +	for (;;);
@@ -13450,9 +14493,7 @@
 +	smp_call_function(stop_this_cpu, NULL, 1, 0);
 +
 +	local_irq_disable();
-+#if 0
-+	disable_local_APIC();
-+#endif
++	disable_all_local_evtchn();
 +	local_irq_enable();
 +}
 +
@@ -13497,5856 +14538,5247 @@
 +	return IRQ_HANDLED;
 +}
 +
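
Note the shutdown changes in this file: the #if 0'd disable_local_APIC()
blocks are gone, and a CPU leaving the online map now masks its event
channels instead, since a paravirtualised guest has no local APIC to program.
The spin-wait loops also switch from barrier() to cpu_relax(). A condensed
sketch of the quiesce pattern (helper from xen/evtchn.h):

static void quiesce_this_cpu(void)
{
	local_irq_disable();
	disable_all_local_evtchn();	/* no local APIC under Xen */
	for (;;)
		halt();
}
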
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/swiotlb.c tmp-linux-2.6-xen.patch/arch/i386/kernel/swiotlb.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/swiotlb.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/swiotlb.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,734 @@
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/sysenter.c
+--- a/arch/i386/kernel/sysenter.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/kernel/sysenter.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -23,6 +23,10 @@
+ #include <asm/pgtable.h>
+ #include <asm/unistd.h>
+ 
++#ifdef CONFIG_XEN
++#include <xen/interface/callback.h>
++#endif
++
+ /*
+  * Should the kernel map a VDSO page into processes and pass its
+  * address down to glibc upon exec()?
+@@ -44,6 +48,7 @@
+ 
+ void enable_sep_cpu(void)
+ {
++#ifndef CONFIG_XEN
+ 	int cpu = get_cpu();
+ 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
+ 
+@@ -57,7 +62,36 @@
+ 	wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
+ 	wrmsr(MSR_IA32_SYSENTER_ESP, tss->esp1, 0);
+ 	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) sysenter_entry, 0);
+-	put_cpu();	
++#else
++	extern asmlinkage void sysenter_entry_pv(void);
++	static struct callback_register sysenter = {
++		.type = CALLBACKTYPE_sysenter,
++		.address = { __KERNEL_CS, (unsigned long)sysenter_entry_pv },
++	};
++
++	if (!boot_cpu_has(X86_FEATURE_SEP))
++		return;
++
++	get_cpu();
++
++	if (xen_feature(XENFEAT_supervisor_mode_kernel))
++		sysenter.address.eip = (unsigned long)sysenter_entry;
++
++	switch (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter)) {
++	case 0:
++		break;
++#if CONFIG_XEN_COMPAT < 0x030200
++	case -ENOSYS:
++		sysenter.type = CALLBACKTYPE_sysenter_deprecated;
++		if (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) == 0)
++			break;
++#endif
++	default:
++		clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability);
++		break;
++	}
++#endif
++	put_cpu();
+ }
+ 
+ /*
+@@ -75,11 +109,6 @@
+ #ifdef CONFIG_COMPAT_VDSO
+ 	__set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_READONLY);
+ 	printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
+-#else
+-	/*
+-	 * In the non-compat case the ELF coredumping code needs the fixmap:
+-	 */
+-	__set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_KERNEL_RO);
+ #endif
+ 
+ 	if (!boot_cpu_has(X86_FEATURE_SEP)) {
+@@ -142,6 +171,13 @@
+ 	vma->vm_end = addr + PAGE_SIZE;
+ 	/* MAYWRITE to allow gdb to COW and set breakpoints */
+ 	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
++	/*
++	 * Make sure the vDSO gets into every core dump.
++	 * Dumping its contents makes post-mortem fully interpretable later
++	 * without matching up the same kernel and hardware config to see
++	 * what PC values meant.
++	 */
++	vma->vm_flags |= VM_ALWAYSDUMP;
+ 	vma->vm_flags |= mm->def_flags;
+ 	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
+ 	vma->vm_ops = &syscall_vm_ops;
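
This is the C-side registration for the sysenter_entry_pv stub added to
entry-xen.S earlier in this diff. A condensed sketch of the fallback ladder
(types from xen/interface/callback.h; error handling trimmed for brevity):

	static struct callback_register cb = {
		.type    = CALLBACKTYPE_sysenter,
		.address = { __KERNEL_CS, (unsigned long)sysenter_entry_pv },
	};

	if (HYPERVISOR_callback_op(CALLBACKOP_register, &cb) != 0)
		/* when built for pre-3.2 hypervisors, retry first as
		 * CALLBACKTYPE_sysenter_deprecated, then give up: */
		clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability);
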
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/time-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/time-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1201 @@
 +/*
-+ * Dynamic DMA mapping support.
++ *  linux/arch/i386/kernel/time.c
 + *
-+ * This implementation is a fallback for platforms that do not support
-+ * I/O TLBs (aka DMA address translation hardware).
-+ * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick at intel.com>
-+ * Copyright (C) 2000 Goutham Rao <goutham.rao at intel.com>
-+ * Copyright (C) 2000, 2003 Hewlett-Packard Co
-+ *	David Mosberger-Tang <davidm at hpl.hp.com>
-+ * Copyright (C) 2005 Keir Fraser <keir at xensource.com>
++ *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
++ *
++ * This file contains the PC-specific time handling details:
++ * reading the RTC at bootup, etc..
++ * 1994-07-02    Alan Modra
++ *	fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
++ * 1995-03-26    Markus Kuhn
++ *      fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
++ *      precision CMOS clock update
++ * 1996-05-03    Ingo Molnar
++ *      fixed time warps in do_[slow|fast]_gettimeoffset()
++ * 1997-09-10	Updated NTP code according to technical memorandum Jan '96
++ *		"A Kernel Model for Precision Timekeeping" by Dave Mills
++ * 1998-09-05    (Various)
++ *	More robust do_fast_gettimeoffset() algorithm implemented
++ *	(works with APM, Cyrix 6x86MX and Centaur C6),
++ *	monotonic gettimeofday() with fast_get_timeoffset(),
++ *	drift-proof precision TSC calibration on boot
++ *	(C. Scott Ananian <cananian at alumni.princeton.edu>, Andrew D.
++ *	Balsa <andrebalsa at altern.org>, Philip Gladstone <philip at raptor.com>;
++ *	ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause at tu-harburg.de>).
++ * 1998-12-16    Andrea Arcangeli
++ *	Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
++ *	because was not accounting lost_ticks.
++ * 1998-12-24 Copyright (C) 1998  Andrea Arcangeli
++ *	Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
++ *	serialize accesses to xtime/lost_ticks).
 + */
 +
-+#include <linux/cache.h>
-+#include <linux/mm.h>
-+#include <linux/module.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/param.h>
 +#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/ctype.h>
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/time.h>
++#include <linux/delay.h>
 +#include <linux/init.h>
-+#include <linux/bootmem.h>
-+#include <linux/highmem.h>
++#include <linux/smp.h>
++#include <linux/module.h>
++#include <linux/sysdev.h>
++#include <linux/bcd.h>
++#include <linux/efi.h>
++#include <linux/mca.h>
++#include <linux/sysctl.h>
++#include <linux/percpu.h>
++#include <linux/kernel_stat.h>
++#include <linux/posix-timers.h>
++#include <linux/cpufreq.h>
++
 +#include <asm/io.h>
-+#include <asm/pci.h>
-+#include <asm/dma.h>
++#include <asm/smp.h>
++#include <asm/irq.h>
++#include <asm/msr.h>
++#include <asm/delay.h>
++#include <asm/mpspec.h>
 +#include <asm/uaccess.h>
-+#include <xen/interface/memory.h>
++#include <asm/processor.h>
++#include <asm/timer.h>
++#include <asm/sections.h>
 +
-+int swiotlb;
-+EXPORT_SYMBOL(swiotlb);
++#include "mach_time.h"
 +
-+#define OFFSET(val,align) ((unsigned long)((val) & ( (align) - 1)))
++#include <linux/timex.h>
 +
-+#define SG_ENT_PHYS_ADDRESS(sg)	(page_to_bus((sg)->page) + (sg)->offset)
++#include <asm/hpet.h>
 +
-+/*
-+ * Maximum allowable number of contiguous slabs to map,
-+ * must be a power of 2.  What is the appropriate value ?
-+ * The complexity of {map,unmap}_single is linearly dependent on this value.
-+ */
-+#define IO_TLB_SEGSIZE	128
++#include <asm/arch_hooks.h>
 +
-+/*
-+ * log of the size of each IO TLB slab.  The number of slabs is command line
-+ * controllable.
-+ */
-+#define IO_TLB_SHIFT 11
++#include <xen/evtchn.h>
++#include <xen/interface/vcpu.h>
 +
-+int swiotlb_force;
++#if defined (__i386__)
++#include <asm/i8259.h>
++#endif
 +
-+static char *iotlb_virt_start;
-+static unsigned long iotlb_nslabs;
++int pit_latch_buggy;              /* extern */
 +
-+/*
-+ * Used to do a quick range check in swiotlb_unmap_single and
-+ * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
-+ * API.
-+ */
-+static unsigned long iotlb_pfn_start, iotlb_pfn_end;
++#if defined(__x86_64__)
++unsigned long vxtime_hz = PIT_TICK_RATE;
++struct vxtime_data __vxtime __section_vxtime;   /* for vsyscalls */
++volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
++unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
++struct timespec __xtime __section_xtime;
++struct timezone __sys_tz __section_sys_tz;
++#endif
 +
-+/* Does the given dma address reside within the swiotlb aperture? */
-+static inline int in_swiotlb_aperture(dma_addr_t dev_addr)
-+{
-+	unsigned long pfn = mfn_to_local_pfn(dev_addr >> PAGE_SHIFT);
-+	return (pfn_valid(pfn)
-+		&& (pfn >= iotlb_pfn_start)
-+		&& (pfn < iotlb_pfn_end));
-+}
++unsigned int cpu_khz;	/* Detected as we calibrate the TSC */
++EXPORT_SYMBOL(cpu_khz);
 +
-+/*
-+ * When the IOMMU overflows we return a fallback buffer. This sets the size.
-+ */
-+static unsigned long io_tlb_overflow = 32*1024;
++extern unsigned long wall_jiffies;
 +
-+void *io_tlb_overflow_buffer;
++DEFINE_SPINLOCK(rtc_lock);
++EXPORT_SYMBOL(rtc_lock);
 +
-+/*
-+ * This is a free list describing the number of free entries available from
-+ * each index
-+ */
-+static unsigned int *io_tlb_list;
-+static unsigned int io_tlb_index;
++extern struct init_timer_opts timer_tsc_init;
++extern struct timer_opts timer_tsc;
++#define timer_none timer_tsc
 +
-+/*
-+ * We need to save away the original address corresponding to a mapped entry
-+ * for the sync operations.
-+ */
-+static struct phys_addr {
-+	struct page *page;
-+	unsigned int offset;
-+} *io_tlb_orig_addr;
++/* These are peridically updated in shared_info, and then copied here. */
++struct shadow_time_info {
++	u64 tsc_timestamp;     /* TSC at last update of time vals.  */
++	u64 system_timestamp;  /* Time, in nanosecs, since boot.    */
++	u32 tsc_to_nsec_mul;
++	u32 tsc_to_usec_mul;
++	int tsc_shift;
++	u32 version;
++};
++static DEFINE_PER_CPU(struct shadow_time_info, shadow_time);
++static struct timespec shadow_tv;
++static u32 shadow_tv_version;
 +
-+/*
-+ * Protect the above data structures in the map and unmap calls
-+ */
-+static DEFINE_SPINLOCK(io_tlb_lock);
++static struct timeval monotonic_tv;
++static spinlock_t monotonic_lock = SPIN_LOCK_UNLOCKED;
 +
-+static unsigned int dma_bits;
-+static unsigned int __initdata max_dma_bits = 32;
-+static int __init
-+setup_dma_bits(char *str)
++/* Keep track of last time we did processing/updating of jiffies and xtime. */
++static u64 processed_system_time;   /* System time (ns) at last processing. */
++static DEFINE_PER_CPU(u64, processed_system_time);
++
++/* How much CPU time was spent blocked and how much was 'stolen'? */
++static DEFINE_PER_CPU(u64, processed_stolen_time);
++static DEFINE_PER_CPU(u64, processed_blocked_time);
++
++/* Current runstate of each CPU (updated automatically by the hypervisor). */
++static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
++
++/* Must be signed, as it's compared with s64 quantities which can be -ve. */
++#define NS_PER_TICK (1000000000LL/HZ)
++
++static void __clock_was_set(void *unused)
 +{
-+	max_dma_bits = simple_strtoul(str, NULL, 0);
-+	return 0;
++	clock_was_set();
 +}
-+__setup("dma_bits=", setup_dma_bits);
++static DECLARE_WORK(clock_was_set_work, __clock_was_set, NULL);
 +
-+static int __init
-+setup_io_tlb_npages(char *str)
++static inline void __normalize_time(time_t *sec, s64 *nsec)
 +{
-+	/* Unlike ia64, the size is aperture in megabytes, not 'slabs'! */
-+	if (isdigit(*str)) {
-+		iotlb_nslabs = simple_strtoul(str, &str, 0) <<
-+			(20 - IO_TLB_SHIFT);
-+		iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
-+		/* Round up to power of two (xen_create_contiguous_region). */
-+		while (iotlb_nslabs & (iotlb_nslabs-1))
-+			iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
++	while (*nsec >= NSEC_PER_SEC) {
++		(*nsec) -= NSEC_PER_SEC;
++		(*sec)++;
 +	}
-+	if (*str == ',')
-+		++str;
-+	/*
-+         * NB. 'force' enables the swiotlb, but doesn't force its use for
-+         * every DMA like it does on native Linux. 'off' forcibly disables
-+         * use of the swiotlb.
-+         */
-+	if (!strcmp(str, "force"))
-+		swiotlb_force = 1;
-+	else if (!strcmp(str, "off"))
-+		swiotlb_force = -1;
++	while (*nsec < 0) {
++		(*nsec) += NSEC_PER_SEC;
++		(*sec)--;
++	}
++}
++
++/* Does this guest OS track Xen time, or set its wall clock independently? */
++static int independent_wallclock = 0;
++static int __init __independent_wallclock(char *str)
++{
++	independent_wallclock = 1;
 +	return 1;
 +}
-+__setup("swiotlb=", setup_io_tlb_npages);
-+/* make io_tlb_overflow tunable too? */
++__setup("independent_wallclock", __independent_wallclock);
 +
-+/*
-+ * Statically reserve bounce buffer space and initialize bounce buffer data
-+ * structures for the software IO TLB used to implement the PCI DMA API.
-+ */
-+void
-+swiotlb_init_with_default_size (size_t default_size)
++/* Permitted clock jitter, in nsecs, beyond which a warning will be printed. */
++static unsigned long permitted_clock_jitter = 10000000UL; /* 10ms */
++static int __init __permitted_clock_jitter(char *str)
 +{
-+	unsigned long i, bytes;
-+	int rc;
++	permitted_clock_jitter = simple_strtoul(str, NULL, 0);
++	return 1;
++}
++__setup("permitted_clock_jitter=", __permitted_clock_jitter);
 +
-+	if (!iotlb_nslabs) {
-+		iotlb_nslabs = (default_size >> IO_TLB_SHIFT);
-+		iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
-+		/* Round up to power of two (xen_create_contiguous_region). */
-+		while (iotlb_nslabs & (iotlb_nslabs-1))
-+			iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
-+	}
++#if 0
++static void delay_tsc(unsigned long loops)
++{
++	unsigned long bclock, now;
 +
-+	bytes = iotlb_nslabs * (1UL << IO_TLB_SHIFT);
++	rdtscl(bclock);
++	do {
++		rep_nop();
++		rdtscl(now);
++	} while ((now - bclock) < loops);
++}
 +
-+	/*
-+	 * Get IO TLB memory from the low pages
-+	 */
-+	iotlb_virt_start = alloc_bootmem_low_pages(bytes);
-+	if (!iotlb_virt_start)
-+		panic("Cannot allocate SWIOTLB buffer!\n");
++struct timer_opts timer_tsc = {
++	.name = "tsc",
++	.delay = delay_tsc,
++};
++#endif
 +
-+	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
-+	for (i = 0; i < iotlb_nslabs; i += IO_TLB_SEGSIZE) {
-+		do {
-+			rc = xen_create_contiguous_region(
-+				(unsigned long)iotlb_virt_start + (i << IO_TLB_SHIFT),
-+				get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT),
-+				dma_bits);
-+		} while (rc && dma_bits++ < max_dma_bits);
-+		if (rc) {
-+			if (i == 0)
-+				panic("No suitable physical memory available for SWIOTLB buffer!\n"
-+				      "Use dom0_mem Xen boot parameter to reserve\n"
-+				      "some DMA memory (e.g., dom0_mem=-128M).\n");
-+			iotlb_nslabs = i;
-+			i <<= IO_TLB_SHIFT;
-+			free_bootmem(__pa(iotlb_virt_start + i), bytes - i);
-+			bytes = i;
-+			for (dma_bits = 0; i > 0; i -= IO_TLB_SEGSIZE << IO_TLB_SHIFT) {
-+				unsigned int bits = fls64(virt_to_bus(iotlb_virt_start + i - 1));
++/*
++ * Scale a 64-bit delta by shifting and multiplying by a 32-bit fraction,
++ * yielding a 64-bit result.
++ */
++static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
++{
++	u64 product;
++#ifdef __i386__
++	u32 tmp1, tmp2;
++#endif
 +
-+				if (bits > dma_bits)
-+					dma_bits = bits;
-+			}
-+			break;
-+		}
-+	}
++	if (shift < 0)
++		delta >>= -shift;
++	else
++		delta <<= shift;
 +
-+	/*
-+	 * Allocate and initialize the free list array.  This array is used
-+	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE.
-+	 */
-+	io_tlb_list = alloc_bootmem(iotlb_nslabs * sizeof(int));
-+	for (i = 0; i < iotlb_nslabs; i++)
-+ 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-+	io_tlb_index = 0;
-+	io_tlb_orig_addr = alloc_bootmem(
-+		iotlb_nslabs * sizeof(*io_tlb_orig_addr));
++#ifdef __i386__
++	__asm__ (
++		"mul  %5       ; "
++		"mov  %4,%%eax ; "
++		"mov  %%edx,%4 ; "
++		"mul  %5       ; "
++		"xor  %5,%5    ; "
++		"add  %4,%%eax ; "
++		"adc  %5,%%edx ; "
++		: "=A" (product), "=r" (tmp1), "=r" (tmp2)
++		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
++#else
++	__asm__ (
++		"mul %%rdx ; shrd $32,%%rdx,%%rax"
++		: "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
++#endif
 +
-+	/*
-+	 * Get the overflow emergency buffer
-+	 */
-+	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
-+	if (!io_tlb_overflow_buffer)
-+		panic("Cannot allocate SWIOTLB overflow buffer!\n");
++	return product;
++}
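
For reference, scale_delta() above is the guest's cycles-to-nanoseconds
conversion: Xen publishes tsc_to_system_mul as a 32.32 fixed-point fraction so
that ns = (cycles << tsc_shift) * mul_frac >> 32. A minimal userspace sketch of
the same arithmetic without the inline assembly (the helper name and the
2.4 GHz figure are illustrative assumptions, not taken from the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Portable scale_delta(): shift the 64-bit delta, multiply by a 32.32
     * fixed-point fraction, and keep bits 32..95 of the 96-bit product. */
    static uint64_t scale_delta_portable(uint64_t delta, uint32_t mul_frac,
                                         int shift)
    {
        if (shift < 0)
            delta >>= -shift;
        else
            delta <<= shift;

        uint64_t lo = (uint32_t)delta;
        uint64_t hi = delta >> 32;

        /* (hi*2^32 + lo) * frac / 2^32 == hi*frac + (lo*frac >> 32) */
        return hi * mul_frac + ((lo * mul_frac) >> 32);
    }

    int main(void)
    {
        /* Assume a 2.4 GHz TSC with tsc_shift == 0: mul_frac is chosen so
         * that one second's worth of cycles scales to ~1e9 ns. */
        uint32_t mul_frac = (uint32_t)((1000000000ULL << 32) / 2400000000ULL);
        printf("%llu ns\n", (unsigned long long)
               scale_delta_portable(2400000000ULL, mul_frac, 0));
        return 0;
    }

This prints 999999999 ns for 2400000000 cycles; the off-by-one is fixed-point
truncation. The separate tsc_shift exists so mul_frac stays representable in
32 bits across slow and fast TSC frequencies.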
 +
-+	do {
-+		rc = xen_create_contiguous_region(
-+			(unsigned long)io_tlb_overflow_buffer,
-+			get_order(io_tlb_overflow),
-+			dma_bits);
-+	} while (rc && dma_bits++ < max_dma_bits);
-+	if (rc)
-+		panic("No suitable physical memory available for SWIOTLB overflow buffer!\n");
++#if 0 /* defined (__i386__) */
++int read_current_timer(unsigned long *timer_val)
++{
++	rdtscl(*timer_val);
++	return 0;
++}
++#endif
 +
-+	iotlb_pfn_start = __pa(iotlb_virt_start) >> PAGE_SHIFT;
-+	iotlb_pfn_end   = iotlb_pfn_start + (bytes >> PAGE_SHIFT);
++void init_cpu_khz(void)
++{
++	u64 __cpu_khz = 1000000ULL << 32;
++	struct vcpu_time_info *info = &vcpu_info(0)->time;
++	do_div(__cpu_khz, info->tsc_to_system_mul);
++	if (info->tsc_shift < 0)
++		cpu_khz = __cpu_khz << -info->tsc_shift;
++	else
++		cpu_khz = __cpu_khz >> info->tsc_shift;
++}
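
A quick check of the arithmetic in init_cpu_khz(): since ns = cycles * mul /
2^32 (with tsc_shift already applied), inverting gives kHz = (10^6 << 32) /
mul, which is exactly the do_div() above. A hedged standalone check, reusing
the assumed 2.4 GHz figures from the previous sketch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* tsc_to_system_mul for an assumed 2.4 GHz TSC, tsc_shift == 0 */
        uint64_t mul = (1000000000ULL << 32) / 2400000000ULL;
        uint64_t khz = (1000000ULL << 32) / mul; /* init_cpu_khz()'s division */
        printf("%llu kHz\n", (unsigned long long)khz); /* prints 2400000 */
        return 0;
    }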
 +
-+	printk(KERN_INFO "Software IO TLB enabled: \n"
-+	       " Aperture:     %lu megabytes\n"
-+	       " Kernel range: %p - %p\n"
-+	       " Address size: %u bits\n",
-+	       bytes >> 20,
-+	       iotlb_virt_start, iotlb_virt_start + bytes,
-+	       dma_bits);
++static u64 get_nsec_offset(struct shadow_time_info *shadow)
++{
++	u64 now, delta;
++	rdtscll(now);
++	delta = now - shadow->tsc_timestamp;
++	return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
 +}
 +
-+void
-+swiotlb_init(void)
++static unsigned long get_usec_offset(struct shadow_time_info *shadow)
 +{
-+	long ram_end;
-+	size_t defsz = 64 * (1 << 20); /* 64MB default size */
++	u64 now, delta;
++	rdtscll(now);
++	delta = now - shadow->tsc_timestamp;
++	return scale_delta(delta, shadow->tsc_to_usec_mul, shadow->tsc_shift);
++}
 +
-+	if (swiotlb_force == 1) {
-+		swiotlb = 1;
-+	} else if ((swiotlb_force != -1) &&
-+		   is_running_on_xen() &&
-+		   is_initial_xendomain()) {
-+		/* Domain 0 always has a swiotlb. */
-+		ram_end = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
-+		if (ram_end <= 0x7ffff)
-+			defsz = 2 * (1 << 20); /* 2MB on systems with <2GB. */
-+		swiotlb = 1;
-+	}
++static void __update_wallclock(time_t sec, long nsec)
++{
++	long wtm_nsec, xtime_nsec;
++	time_t wtm_sec, xtime_sec;
++	u64 tmp, wc_nsec;
 +
-+	if (swiotlb)
-+		swiotlb_init_with_default_size(defsz);
-+	else
-+		printk(KERN_INFO "Software IO TLB disabled\n");
++	/* Adjust wall-clock time base based on wall_jiffies ticks. */
++	wc_nsec = processed_system_time;
++	wc_nsec += sec * (u64)NSEC_PER_SEC;
++	wc_nsec += nsec;
++	wc_nsec -= (jiffies - wall_jiffies) * (u64)NS_PER_TICK;
++
++	/* Split wallclock base into seconds and nanoseconds. */
++	tmp = wc_nsec;
++	xtime_nsec = do_div(tmp, 1000000000);
++	xtime_sec  = (time_t)tmp;
++
++	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - xtime_sec);
++	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - xtime_nsec);
++
++	set_normalized_timespec(&xtime, xtime_sec, xtime_nsec);
++	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
++
++	ntp_clear();
 +}
 +
-+/*
-+ * We use __copy_to_user_inatomic to transfer to the host buffer because the
-+ * buffer may be mapped read-only (e.g., in the blkback driver) but lower-level
-+ * drivers map the buffer for DMA_BIDIRECTIONAL access. This causes an
-+ * unnecessary copy from the aperture to the host buffer, and a page fault.
-+ */
-+static void
-+__sync_single(struct phys_addr buffer, char *dma_addr, size_t size, int dir)
++static void update_wallclock(void)
 +{
-+	if (PageHighMem(buffer.page)) {
-+		size_t len, bytes;
-+		char *dev, *host, *kmp;
-+		len = size;
-+		while (len != 0) {
-+			unsigned long flags;
++	shared_info_t *s = HYPERVISOR_shared_info;
 +
-+			if (((bytes = len) + buffer.offset) > PAGE_SIZE)
-+				bytes = PAGE_SIZE - buffer.offset;
-+			local_irq_save(flags); /* protects KM_BOUNCE_READ */
-+			kmp  = kmap_atomic(buffer.page, KM_BOUNCE_READ);
-+			dev  = dma_addr + size - len;
-+			host = kmp + buffer.offset;
-+			if (dir == DMA_FROM_DEVICE) {
-+				if (__copy_to_user_inatomic(host, dev, bytes))
-+					/* inaccessible */;
-+			} else
-+				memcpy(dev, host, bytes);
-+			kunmap_atomic(kmp, KM_BOUNCE_READ);
-+			local_irq_restore(flags);
-+			len -= bytes;
-+			buffer.page++;
-+			buffer.offset = 0;
-+		}
-+	} else {
-+		char *host = (char *)phys_to_virt(
-+			page_to_pseudophys(buffer.page)) + buffer.offset;
-+		if (dir == DMA_FROM_DEVICE) {
-+			if (__copy_to_user_inatomic(host, dma_addr, size))
-+				/* inaccessible */;
-+		} else if (dir == DMA_TO_DEVICE)
-+			memcpy(dma_addr, host, size);
-+	}
++	do {
++		shadow_tv_version = s->wc_version;
++		rmb();
++		shadow_tv.tv_sec  = s->wc_sec;
++		shadow_tv.tv_nsec = s->wc_nsec;
++		rmb();
++	} while ((s->wc_version & 1) | (shadow_tv_version ^ s->wc_version));
++
++	if (!independent_wallclock)
++		__update_wallclock(shadow_tv.tv_sec, shadow_tv.tv_nsec);
 +}
 +
 +/*
-+ * Allocates bounce buffer and returns its kernel virtual address.
++ * Reads a consistent set of time-base values from Xen, into a shadow data
++ * area.
 + */
-+static void *
-+map_single(struct device *hwdev, struct phys_addr buffer, size_t size, int dir)
++static void get_time_values_from_xen(unsigned int cpu)
 +{
++	struct vcpu_time_info   *src;
++	struct shadow_time_info *dst;
 +	unsigned long flags;
-+	char *dma_addr;
-+	unsigned int nslots, stride, index, wrap;
-+	struct phys_addr slot_buf;
-+	int i;
-+
-+	/*
-+	 * For mappings greater than a page, we limit the stride (and
-+	 * hence alignment) to a page size.
-+	 */
-+	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-+	if (size > PAGE_SIZE)
-+		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
-+	else
-+		stride = 1;
++	u32 pre_version, post_version;
 +
-+	BUG_ON(!nslots);
++	src = &vcpu_info(cpu)->time;
++	dst = &per_cpu(shadow_time, cpu);
 +
-+	/*
-+	 * Find suitable number of IO TLB entries size that will fit this
-+	 * request and allocate a buffer from that IO TLB pool.
-+	 */
-+	spin_lock_irqsave(&io_tlb_lock, flags);
-+	{
-+		wrap = index = ALIGN(io_tlb_index, stride);
++	local_irq_save(flags);
 +
-+		if (index >= iotlb_nslabs)
-+			wrap = index = 0;
++	do {
++		pre_version = dst->version = src->version;
++		rmb();
++		dst->tsc_timestamp     = src->tsc_timestamp;
++		dst->system_timestamp  = src->system_time;
++		dst->tsc_to_nsec_mul   = src->tsc_to_system_mul;
++		dst->tsc_shift         = src->tsc_shift;
++		rmb();
++		post_version = src->version;
++	} while ((pre_version & 1) | (pre_version ^ post_version));
 +
-+		do {
-+			/*
-+			 * If we find a slot that indicates we have 'nslots'
-+			 * number of contiguous buffers, we allocate the
-+			 * buffers from that slot and mark the entries as '0'
-+			 * indicating unavailable.
-+			 */
-+			if (io_tlb_list[index] >= nslots) {
-+				int count = 0;
++	dst->tsc_to_usec_mul = dst->tsc_to_nsec_mul / 1000;
 +
-+				for (i = index; i < (int)(index + nslots); i++)
-+					io_tlb_list[i] = 0;
-+				for (i = index - 1;
-+				     (OFFSET(i, IO_TLB_SEGSIZE) !=
-+				      IO_TLB_SEGSIZE -1) && io_tlb_list[i];
-+				     i--)
-+					io_tlb_list[i] = ++count;
-+				dma_addr = iotlb_virt_start +
-+					(index << IO_TLB_SHIFT);
++	local_irq_restore(flags);
++}
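
The pre/post version dance above is a lockless, seqcount-style snapshot: Xen
makes version odd while it updates the time fields and bumps it to a new even
value when done, so a reader retries if it saw an odd version or if the
version changed underneath it. A minimal single-threaded model of the reader
side (struct and function names are illustrative, not from the patch; the
acquire fences stand in for the kernel's rmb()):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    struct time_record {
        atomic_uint version;         /* odd while the writer is mid-update */
        uint64_t tsc_timestamp;
        uint64_t system_time;
    };

    /* Retry until the snapshot was taken entirely between two identical,
     * even version values -- the same loop shape as the patch's reader. */
    static void read_snapshot(struct time_record *src, struct time_record *dst)
    {
        unsigned int pre, post;
        do {
            pre = atomic_load_explicit(&src->version, memory_order_acquire);
            dst->tsc_timestamp = src->tsc_timestamp;
            dst->system_time   = src->system_time;
            atomic_thread_fence(memory_order_acquire);
            post = atomic_load_explicit(&src->version, memory_order_relaxed);
        } while ((pre & 1) | (pre ^ post));
    }

    int main(void)
    {
        struct time_record src = { 2, 123456789ULL, 42ULL }, dst;
        read_snapshot(&src, &dst);
        printf("v%u: tsc=%llu st=%llu\n",
               (unsigned)atomic_load(&src.version),
               (unsigned long long)dst.tsc_timestamp,
               (unsigned long long)dst.system_time);
        return 0;
    }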
 +
-+				/*
-+				 * Update the indices to avoid searching in
-+				 * the next round.
-+				 */
-+				io_tlb_index = 
-+					((index + nslots) < iotlb_nslabs
-+					 ? (index + nslots) : 0);
++static inline int time_values_up_to_date(unsigned int cpu)
++{
++	struct vcpu_time_info   *src;
++	struct shadow_time_info *dst;
 +
-+				goto found;
-+			}
-+			index += stride;
-+			if (index >= iotlb_nslabs)
-+				index = 0;
-+		} while (index != wrap);
++	src = &vcpu_info(cpu)->time;
++	dst = &per_cpu(shadow_time, cpu);
 +
-+		spin_unlock_irqrestore(&io_tlb_lock, flags);
-+		return NULL;
-+	}
-+  found:
-+	spin_unlock_irqrestore(&io_tlb_lock, flags);
++	rmb();
++	return (dst->version == src->version);
++}
 +
-+	/*
-+	 * Save away the mapping from the original address to the DMA address.
-+	 * This is needed when we sync the memory.  Then we sync the buffer if
-+	 * needed.
-+	 */
-+	slot_buf = buffer;
-+	for (i = 0; i < nslots; i++) {
-+		slot_buf.page += slot_buf.offset >> PAGE_SHIFT;
-+		slot_buf.offset &= PAGE_SIZE - 1;
-+		io_tlb_orig_addr[index+i] = slot_buf;
-+		slot_buf.offset += 1 << IO_TLB_SHIFT;
-+	}
-+	if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
-+		__sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
++/*
++ * This is a special lock that is owned by the CPU and holds the index
++ * register we are working with.  It is required for NMI access to the
++ * CMOS/RTC registers.  See include/asm-i386/mc146818rtc.h for details.
++ */
++volatile unsigned long cmos_lock = 0;
++EXPORT_SYMBOL(cmos_lock);
 +
-+	return dma_addr;
++/* Routines for accessing the CMOS RAM/RTC. */
++unsigned char rtc_cmos_read(unsigned char addr)
++{
++	unsigned char val;
++	lock_cmos_prefix(addr);
++	outb_p(addr, RTC_PORT(0));
++	val = inb_p(RTC_PORT(1));
++	lock_cmos_suffix(addr);
++	return val;
 +}
++EXPORT_SYMBOL(rtc_cmos_read);
 +
-+static struct phys_addr dma_addr_to_phys_addr(char *dma_addr)
++void rtc_cmos_write(unsigned char val, unsigned char addr)
 +{
-+	int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
-+	struct phys_addr buffer = io_tlb_orig_addr[index];
-+	buffer.offset += (long)dma_addr & ((1 << IO_TLB_SHIFT) - 1);
-+	buffer.page += buffer.offset >> PAGE_SHIFT;
-+	buffer.offset &= PAGE_SIZE - 1;
-+	return buffer;
++	lock_cmos_prefix(addr);
++	outb_p(addr, RTC_PORT(0));
++	outb_p(val, RTC_PORT(1));
++	lock_cmos_suffix(addr);
 +}
++EXPORT_SYMBOL(rtc_cmos_write);
 +
 +/*
-+ * dma_addr is the kernel virtual address of the bounce buffer to unmap.
++ * This version of gettimeofday has microsecond resolution
++ * and better than microsecond precision on fast x86 machines with TSC.
 + */
-+static void
-+unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
++void do_gettimeofday(struct timeval *tv)
 +{
++	unsigned long seq;
++	unsigned long usec, sec;
 +	unsigned long flags;
-+	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-+	int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
-+	struct phys_addr buffer = dma_addr_to_phys_addr(dma_addr);
++	s64 nsec;
++	unsigned int cpu;
++	struct shadow_time_info *shadow;
++	u32 local_time_version;
 +
-+	/*
-+	 * First, sync the memory before unmapping the entry
-+	 */
-+	if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
-+		__sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
++	cpu = get_cpu();
++	shadow = &per_cpu(shadow_time, cpu);
 +
-+	/*
-+	 * Return the buffer to the free list by setting the corresponding
-+	 * entries to indicate the number of contiguous entries available.
-+	 * While returning the entries to the free list, we merge the entries
-+	 * with slots below and above the pool being returned.
-+	 */
-+	spin_lock_irqsave(&io_tlb_lock, flags);
-+	{
-+		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
-+			 io_tlb_list[index + nslots] : 0);
-+		/*
-+		 * Step 1: return the slots to the free list, merging the
-+		 * slots with succeeding slots
-+		 */
-+		for (i = index + nslots - 1; i >= index; i--)
-+			io_tlb_list[i] = ++count;
-+		/*
-+		 * Step 2: merge the returned slots with the preceding slots,
-+		 * if available (non zero)
-+		 */
-+		for (i = index - 1;
-+		     (OFFSET(i, IO_TLB_SEGSIZE) !=
-+		      IO_TLB_SEGSIZE -1) && io_tlb_list[i];
-+		     i--)
-+			io_tlb_list[i] = ++count;
-+	}
-+	spin_unlock_irqrestore(&io_tlb_lock, flags);
-+}
++	do {
++		unsigned long lost;
 +
-+static void
-+sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
-+{
-+	struct phys_addr buffer = dma_addr_to_phys_addr(dma_addr);
-+	BUG_ON((dir != DMA_FROM_DEVICE) && (dir != DMA_TO_DEVICE));
-+	__sync_single(buffer, dma_addr, size, dir);
-+}
++		local_time_version = shadow->version;
++		seq = read_seqbegin(&xtime_lock);
 +
-+static void
-+swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
-+{
-+	/*
-+	 * Ran out of IOMMU space for this operation. This is very bad.
-+	 * Unfortunately the drivers cannot handle this operation properly
-+	 * unless they check for pci_dma_mapping_error (most don't).
-+	 * When the mapping is small enough return a static buffer to limit
-+	 * the damage, or panic when the transfer is too big.
-+	 */
-+	printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
-+	       "device %s\n", (unsigned long)size, dev ? dev->bus_id : "?");
++		usec = get_usec_offset(shadow);
++		lost = jiffies - wall_jiffies;
 +
-+	if (size > io_tlb_overflow && do_panic) {
-+		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-+			panic("PCI-DMA: Memory would be corrupted\n");
-+		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-+			panic("PCI-DMA: Random memory would be DMAed\n");
-+	}
-+}
++		if (unlikely(lost))
++			usec += lost * (USEC_PER_SEC / HZ);
 +
-+/*
-+ * Map a single buffer of the indicated size for DMA in streaming mode.  The
-+ * PCI address to use is returned.
-+ *
-+ * Once the device is given the dma address, the device owns this memory until
-+ * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
-+ */
-+dma_addr_t
-+swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
-+{
-+	dma_addr_t dev_addr = virt_to_bus(ptr);
-+	void *map;
-+	struct phys_addr buffer;
++		sec = xtime.tv_sec;
++		usec += (xtime.tv_nsec / NSEC_PER_USEC);
 +
-+	BUG_ON(dir == DMA_NONE);
++		nsec = shadow->system_timestamp - processed_system_time;
++		__normalize_time(&sec, &nsec);
++		usec += (long)nsec / NSEC_PER_USEC;
 +
-+	/*
-+	 * If the pointer passed in happens to be in the device's DMA window,
-+	 * we can safely return the device addr and not worry about bounce
-+	 * buffering it.
-+	 */
-+	if (!range_straddles_page_boundary(__pa(ptr), size) &&
-+	    !address_needs_mapping(hwdev, dev_addr))
-+		return dev_addr;
++		if (unlikely(!time_values_up_to_date(cpu))) {
++			/*
++			 * We may have blocked for a long time,
++			 * rendering our calculations invalid
++			 * (e.g. the time delta may have
++			 * overflowed). Detect that and recalculate
++			 * with fresh values.
++			 */
++			get_time_values_from_xen(cpu);
++			continue;
++		}
++	} while (read_seqretry(&xtime_lock, seq) ||
++		 (local_time_version != shadow->version));
 +
-+	/*
-+	 * Oh well, have to allocate and map a bounce buffer.
-+	 */
-+	buffer.page   = virt_to_page(ptr);
-+	buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
-+	map = map_single(hwdev, buffer, size, dir);
-+	if (!map) {
-+		swiotlb_full(hwdev, size, dir, 1);
-+		map = io_tlb_overflow_buffer;
++	put_cpu();
++
++	while (usec >= USEC_PER_SEC) {
++		usec -= USEC_PER_SEC;
++		sec++;
 +	}
 +
-+	dev_addr = virt_to_bus(map);
-+	return dev_addr;
-+}
++	spin_lock_irqsave(&monotonic_lock, flags);
++	if ((sec > monotonic_tv.tv_sec) ||
++	    ((sec == monotonic_tv.tv_sec) && (usec > monotonic_tv.tv_usec)))
++	{
++		monotonic_tv.tv_sec = sec;
++		monotonic_tv.tv_usec = usec;
++	} else {
++		sec = monotonic_tv.tv_sec;
++		usec = monotonic_tv.tv_usec;
++	}
++	spin_unlock_irqrestore(&monotonic_lock, flags);
 +
-+/*
-+ * Unmap a single streaming mode DMA translation.  The dma_addr and size must
-+ * match what was provided for in a previous swiotlb_map_single call.  All
-+ * other usages are undefined.
-+ *
-+ * After this call, reads by the cpu to the buffer are guaranteed to see
-+ * whatever the device wrote there.
-+ */
-+void
-+swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
-+		     int dir)
-+{
-+	BUG_ON(dir == DMA_NONE);
-+	if (in_swiotlb_aperture(dev_addr))
-+		unmap_single(hwdev, bus_to_virt(dev_addr), size, dir);
++	tv->tv_sec = sec;
++	tv->tv_usec = usec;
 +}
 +
-+/*
-+ * Make physical memory consistent for a single streaming mode DMA translation
-+ * after a transfer.
-+ *
-+ * If you perform a swiotlb_map_single() but wish to interrogate the buffer
-+ * using the cpu, yet do not wish to tear down the PCI dma mapping, you must
-+ * call this function before doing so.  At the next point you give the PCI dma
-+ * address back to the card, you must first perform a
-+ * swiotlb_dma_sync_for_device, and then the device again owns the buffer
-+ */
-+void
-+swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
-+			    size_t size, int dir)
-+{
-+	BUG_ON(dir == DMA_NONE);
-+	if (in_swiotlb_aperture(dev_addr))
-+		sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
-+}
++EXPORT_SYMBOL(do_gettimeofday);
 +
-+void
-+swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
-+			       size_t size, int dir)
++int do_settimeofday(struct timespec *tv)
 +{
-+	BUG_ON(dir == DMA_NONE);
-+	if (in_swiotlb_aperture(dev_addr))
-+		sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
-+}
++	time_t sec;
++	s64 nsec;
++	unsigned int cpu;
++	struct shadow_time_info *shadow;
++	struct xen_platform_op op;
 +
-+/*
-+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
-+ * This is the scatter-gather version of the above swiotlb_map_single
-+ * interface.  Here the scatter gather list elements are each tagged with the
-+ * appropriate dma address and length.  They are obtained via
-+ * sg_dma_{address,length}(SG).
-+ *
-+ * NOTE: An implementation may be able to use a smaller number of
-+ *       DMA address/length pairs than there are SG table elements.
-+ *       (for example via virtual mapping capabilities)
-+ *       The routine returns the number of addr/length pairs actually
-+ *       used, at most nents.
-+ *
-+ * Device ownership issues as mentioned above for swiotlb_map_single are the
-+ * same here.
-+ */
-+int
-+swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
-+	       int dir)
-+{
-+	struct phys_addr buffer;
-+	dma_addr_t dev_addr;
-+	char *map;
-+	int i;
++	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
++		return -EINVAL;
 +
-+	BUG_ON(dir == DMA_NONE);
++	cpu = get_cpu();
++	shadow = &per_cpu(shadow_time, cpu);
 +
-+	for (i = 0; i < nelems; i++, sg++) {
-+		dev_addr = SG_ENT_PHYS_ADDRESS(sg);
-+		if (range_straddles_page_boundary(page_to_pseudophys(sg->page)
-+						  + sg->offset, sg->length)
-+		    || address_needs_mapping(hwdev, dev_addr)) {
-+			buffer.page   = sg->page;
-+			buffer.offset = sg->offset;
-+			map = map_single(hwdev, buffer, sg->length, dir);
-+			if (!map) {
-+				/* Don't panic here, we expect map_sg users
-+				   to do proper error handling. */
-+				swiotlb_full(hwdev, sg->length, dir, 0);
-+				swiotlb_unmap_sg(hwdev, sg - i, i, dir);
-+				sg[0].dma_length = 0;
-+				return 0;
-+			}
-+			sg->dma_address = (dma_addr_t)virt_to_bus(map);
-+		} else
-+			sg->dma_address = dev_addr;
-+		sg->dma_length = sg->length;
-+	}
-+	return nelems;
-+}
++	write_seqlock_irq(&xtime_lock);
 +
-+/*
-+ * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
-+ * concerning calls here are the same as for swiotlb_unmap_single() above.
-+ */
-+void
-+swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
-+		 int dir)
-+{
-+	int i;
++	/*
++	 * Ensure we don't get blocked for so long that our time delta
++	 * overflows. If that were to happen then our shadow time values
++	 * would be stale, so retry with fresh ones.
++	 */
++	for (;;) {
++		nsec = tv->tv_nsec - get_nsec_offset(shadow);
++		if (time_values_up_to_date(cpu))
++			break;
++		get_time_values_from_xen(cpu);
++	}
++	sec = tv->tv_sec;
++	__normalize_time(&sec, &nsec);
 +
-+	BUG_ON(dir == DMA_NONE);
++	if (is_initial_xendomain() && !independent_wallclock) {
++		op.cmd = XENPF_settime;
++		op.u.settime.secs        = sec;
++		op.u.settime.nsecs       = nsec;
++		op.u.settime.system_time = shadow->system_timestamp;
++		WARN_ON(HYPERVISOR_platform_op(&op));
++		update_wallclock();
++	} else if (independent_wallclock) {
++		nsec -= shadow->system_timestamp;
++		__normalize_time(&sec, &nsec);
++		__update_wallclock(sec, nsec);
++	}
 +
-+	for (i = 0; i < nelems; i++, sg++)
-+		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-+			unmap_single(hwdev, 
-+				     (void *)bus_to_virt(sg->dma_address),
-+				     sg->dma_length, dir);
-+}
++	/* Reset monotonic gettimeofday() timeval. */
++	spin_lock(&monotonic_lock);
++	monotonic_tv.tv_sec = 0;
++	monotonic_tv.tv_usec = 0;
++	spin_unlock(&monotonic_lock);
 +
-+/*
-+ * Make physical memory consistent for a set of streaming mode DMA translations
-+ * after a transfer.
-+ *
-+ * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
-+ * and usage.
-+ */
-+void
-+swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-+			int nelems, int dir)
-+{
-+	int i;
++	write_sequnlock_irq(&xtime_lock);
 +
-+	BUG_ON(dir == DMA_NONE);
++	put_cpu();
 +
-+	for (i = 0; i < nelems; i++, sg++)
-+		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-+			sync_single(hwdev,
-+				    (void *)bus_to_virt(sg->dma_address),
-+				    sg->dma_length, dir);
++	clock_was_set();
++	return 0;
 +}
 +
-+void
-+swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-+			   int nelems, int dir)
++EXPORT_SYMBOL(do_settimeofday);
++
++static void sync_xen_wallclock(unsigned long dummy);
++static DEFINE_TIMER(sync_xen_wallclock_timer, sync_xen_wallclock, 0, 0);
++static void sync_xen_wallclock(unsigned long dummy)
 +{
-+	int i;
++	time_t sec;
++	s64 nsec;
++	struct xen_platform_op op;
 +
-+	BUG_ON(dir == DMA_NONE);
++	if (!ntp_synced() || independent_wallclock || !is_initial_xendomain())
++		return;
 +
-+	for (i = 0; i < nelems; i++, sg++)
-+		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-+			sync_single(hwdev,
-+				    (void *)bus_to_virt(sg->dma_address),
-+				    sg->dma_length, dir);
-+}
++	write_seqlock_irq(&xtime_lock);
 +
-+#ifdef CONFIG_HIGHMEM
++	sec  = xtime.tv_sec;
++	nsec = xtime.tv_nsec + ((jiffies - wall_jiffies) * (u64)NS_PER_TICK);
++	__normalize_time(&sec, &nsec);
 +
-+dma_addr_t
-+swiotlb_map_page(struct device *hwdev, struct page *page,
-+		 unsigned long offset, size_t size,
-+		 enum dma_data_direction direction)
-+{
-+	struct phys_addr buffer;
-+	dma_addr_t dev_addr;
-+	char *map;
++	op.cmd = XENPF_settime;
++	op.u.settime.secs        = sec;
++	op.u.settime.nsecs       = nsec;
++	op.u.settime.system_time = processed_system_time;
++	WARN_ON(HYPERVISOR_platform_op(&op));
 +
-+	dev_addr = page_to_bus(page) + offset;
-+	if (address_needs_mapping(hwdev, dev_addr)) {
-+		buffer.page   = page;
-+		buffer.offset = offset;
-+		map = map_single(hwdev, buffer, size, direction);
-+		if (!map) {
-+			swiotlb_full(hwdev, size, direction, 1);
-+			map = io_tlb_overflow_buffer;
-+		}
-+		dev_addr = (dma_addr_t)virt_to_bus(map);
-+	}
++	update_wallclock();
 +
-+	return dev_addr;
++	write_sequnlock_irq(&xtime_lock);
++
++	/* Once per minute. */
++	mod_timer(&sync_xen_wallclock_timer, jiffies + 60*HZ);
 +}
 +
-+void
-+swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
-+		   size_t size, enum dma_data_direction direction)
++static int set_rtc_mmss(unsigned long nowtime)
 +{
-+	BUG_ON(direction == DMA_NONE);
-+	if (in_swiotlb_aperture(dma_address))
-+		unmap_single(hwdev, bus_to_virt(dma_address), size, direction);
-+}
++	int retval;
++	unsigned long flags;
 +
-+#endif
++	if (independent_wallclock || !is_initial_xendomain())
++		return 0;
 +
-+int
-+swiotlb_dma_mapping_error(dma_addr_t dma_addr)
-+{
-+	return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
++	/* gets recalled with irq locally disabled */
++	/* XXX - does irqsave resolve this? -johnstul */
++	spin_lock_irqsave(&rtc_lock, flags);
++	if (efi_enabled)
++		retval = efi_set_rtc_mmss(nowtime);
++	else
++		retval = mach_set_rtc_mmss(nowtime);
++	spin_unlock_irqrestore(&rtc_lock, flags);
++
++	return retval;
 +}
 +
-+/*
-+ * Return whether the given PCI device DMA address mask can be supported
-+ * properly.  For example, if your device can only drive the low 24-bits
-+ * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
-+ * this function.
++/* monotonic_clock(): returns # of nanoseconds passed since time_init()
++ *		Note: This function is required to return accurate
++ *		time even in the absence of multiple timer ticks.
 + */
-+int
-+swiotlb_dma_supported (struct device *hwdev, u64 mask)
++unsigned long long monotonic_clock(void)
 +{
-+	return (mask >= ((1UL << dma_bits) - 1));
++	unsigned int cpu = get_cpu();
++	struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
++	u64 time;
++	u32 local_time_version;
++
++	do {
++		local_time_version = shadow->version;
++		barrier();
++		time = shadow->system_timestamp + get_nsec_offset(shadow);
++		if (!time_values_up_to_date(cpu))
++			get_time_values_from_xen(cpu);
++		barrier();
++	} while (local_time_version != shadow->version);
++
++	put_cpu();
++
++	return time;
 +}
++EXPORT_SYMBOL(monotonic_clock);
 +
-+EXPORT_SYMBOL(swiotlb_init);
-+EXPORT_SYMBOL(swiotlb_map_single);
-+EXPORT_SYMBOL(swiotlb_unmap_single);
-+EXPORT_SYMBOL(swiotlb_map_sg);
-+EXPORT_SYMBOL(swiotlb_unmap_sg);
-+EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
-+EXPORT_SYMBOL(swiotlb_sync_single_for_device);
-+EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
-+EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
-+EXPORT_SYMBOL(swiotlb_dma_mapping_error);
-+EXPORT_SYMBOL(swiotlb_dma_supported);
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/sysenter.c tmp-linux-2.6-xen.patch/arch/i386/kernel/sysenter.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/sysenter.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/sysenter.c	2007-07-30 16:35:11.000000000 +0200
-@@ -23,6 +23,10 @@
- #include <asm/pgtable.h>
- #include <asm/unistd.h>
- 
-+#ifdef CONFIG_XEN
-+#include <xen/interface/callback.h>
++#ifdef __x86_64__
++unsigned long long sched_clock(void)
++{
++	return monotonic_clock();
++}
 +#endif
 +
- /*
-  * Should the kernel map a VDSO page into processes and pass its
-  * address down to glibc upon exec()?
-@@ -44,6 +48,7 @@ extern asmlinkage void sysenter_entry(vo
- 
- void enable_sep_cpu(void)
- {
-+#ifndef CONFIG_X86_NO_TSS
- 	int cpu = get_cpu();
- 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
- 
-@@ -58,6 +63,7 @@ void enable_sep_cpu(void)
- 	wrmsr(MSR_IA32_SYSENTER_ESP, tss->esp1, 0);
- 	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) sysenter_entry, 0);
- 	put_cpu();	
-+#endif
- }
- 
- /*
-@@ -72,6 +78,18 @@ int __init sysenter_setup(void)
- {
- 	syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
- 
-+#ifdef CONFIG_XEN
-+	if (boot_cpu_has(X86_FEATURE_SEP)) {
-+		static struct callback_register __initdata sysenter = {
-+			.type = CALLBACKTYPE_sysenter,
-+			.address = { __KERNEL_CS, (unsigned long)sysenter_entry },
-+		};
++#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
++unsigned long profile_pc(struct pt_regs *regs)
++{
++	unsigned long pc = instruction_pointer(regs);
 +
-+		if (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) < 0)
-+			clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability);
++#ifdef __x86_64__
++	/* Assume the lock function has either no stack frame or only a single word.
++	   This checks if the address on the stack looks like a kernel text address.
++	   There is a small window for false hits, but in that case the tick
++	   is just accounted to the spinlock function.
++	   Better would be to write these functions in assembler again
++	   and check exactly. */
++	if (!user_mode_vm(regs) && in_lock_functions(pc)) {
++		char *v = *(char **)regs->rsp;
++		if ((v >= _stext && v <= _etext) ||
++			(v >= _sinittext && v <= _einittext) ||
++			(v >= (char *)MODULES_VADDR  && v <= (char *)MODULES_END))
++			return (unsigned long)v;
++		return ((unsigned long *)regs->rsp)[1];
 +	}
++#else
++	if (!user_mode_vm(regs) && in_lock_functions(pc))
++		return *(unsigned long *)(regs->ebp + 4);
++#endif
++
++	return pc;
++}
++EXPORT_SYMBOL(profile_pc);
 +#endif
 +
- #ifdef CONFIG_COMPAT_VDSO
- 	__set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_READONLY);
- 	printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/time-xen.c tmp-linux-2.6-xen.patch/arch/i386/kernel/time-xen.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/time-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/time-xen.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,1159 @@
 +/*
-+ *  linux/arch/i386/kernel/time.c
-+ *
-+ *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
-+ *
-+ * This file contains the PC-specific time handling details:
-+ * reading the RTC at bootup, etc..
-+ * 1994-07-02    Alan Modra
-+ *	fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
-+ * 1995-03-26    Markus Kuhn
-+ *      fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
-+ *      precision CMOS clock update
-+ * 1996-05-03    Ingo Molnar
-+ *      fixed time warps in do_[slow|fast]_gettimeoffset()
-+ * 1997-09-10	Updated NTP code according to technical memorandum Jan '96
-+ *		"A Kernel Model for Precision Timekeeping" by Dave Mills
-+ * 1998-09-05    (Various)
-+ *	More robust do_fast_gettimeoffset() algorithm implemented
-+ *	(works with APM, Cyrix 6x86MX and Centaur C6),
-+ *	monotonic gettimeofday() with fast_get_timeoffset(),
-+ *	drift-proof precision TSC calibration on boot
-+ *	(C. Scott Ananian <cananian at alumni.princeton.edu>, Andrew D.
-+ *	Balsa <andrebalsa at altern.org>, Philip Gladstone <philip at raptor.com>;
-+ *	ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause at tu-harburg.de>).
-+ * 1998-12-16    Andrea Arcangeli
-+ *	Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
-+ *	because was not accounting lost_ticks.
-+ * 1998-12-24 Copyright (C) 1998  Andrea Arcangeli
-+ *	Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
-+ *	serialize accesses to xtime/lost_ticks).
++ * This is the same as the above, except we _also_ save the current
++ * Time Stamp Counter value at the time of the timer interrupt, so that
++ * we later on can estimate the time of day more exactly.
 + */
++irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
++{
++	s64 delta, delta_cpu, stolen, blocked;
++	u64 sched_time;
++	unsigned int i, cpu = smp_processor_id();
++	struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
++	struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
 +
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/param.h>
-+#include <linux/string.h>
-+#include <linux/mm.h>
-+#include <linux/interrupt.h>
-+#include <linux/time.h>
-+#include <linux/delay.h>
-+#include <linux/init.h>
-+#include <linux/smp.h>
-+#include <linux/module.h>
-+#include <linux/sysdev.h>
-+#include <linux/bcd.h>
-+#include <linux/efi.h>
-+#include <linux/mca.h>
-+#include <linux/sysctl.h>
-+#include <linux/percpu.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/posix-timers.h>
-+
-+#include <asm/io.h>
-+#include <asm/smp.h>
-+#include <asm/irq.h>
-+#include <asm/msr.h>
-+#include <asm/delay.h>
-+#include <asm/mpspec.h>
-+#include <asm/uaccess.h>
-+#include <asm/processor.h>
-+#include <asm/timer.h>
-+#include <asm/sections.h>
++	/*
++	 * Here we are in the timer irq handler. We just have irqs locally
++	 * disabled but we don't know if the timer_bh is running on the other
++	 * CPU. We need to avoid an SMP race with it. NOTE: we don't need
++	 * the irq version of write_lock because, as just said, we have irqs
++	 * locally disabled. -arca
++	 */
++	write_seqlock(&xtime_lock);
 +
-+#include "mach_time.h"
++	do {
++		get_time_values_from_xen(cpu);
 +
-+#include <linux/timex.h>
-+
-+#include <asm/hpet.h>
++		/* Obtain a consistent snapshot of elapsed wallclock cycles. */
++		delta = delta_cpu =
++			shadow->system_timestamp + get_nsec_offset(shadow);
++		delta     -= processed_system_time;
++		delta_cpu -= per_cpu(processed_system_time, cpu);
 +
-+#include <asm/arch_hooks.h>
++		/*
++		 * Obtain a consistent snapshot of stolen/blocked cycles. We
++		 * can use state_entry_time to detect if we get preempted here.
++		 */
++		do {
++			sched_time = runstate->state_entry_time;
++			barrier();
++			stolen = runstate->time[RUNSTATE_runnable] +
++				runstate->time[RUNSTATE_offline] -
++				per_cpu(processed_stolen_time, cpu);
++			blocked = runstate->time[RUNSTATE_blocked] -
++				per_cpu(processed_blocked_time, cpu);
++			barrier();
++		} while (sched_time != runstate->state_entry_time);
++	} while (!time_values_up_to_date(cpu));
 +
-+#include <xen/evtchn.h>
-+#include <xen/interface/vcpu.h>
++	if ((unlikely(delta < -(s64)permitted_clock_jitter) ||
++	     unlikely(delta_cpu < -(s64)permitted_clock_jitter))
++	    && printk_ratelimit()) {
++		printk("Timer ISR/%u: Time went backwards: "
++		       "delta=%lld delta_cpu=%lld shadow=%lld "
++		       "off=%lld processed=%lld cpu_processed=%lld\n",
++		       cpu, delta, delta_cpu, shadow->system_timestamp,
++		       (s64)get_nsec_offset(shadow),
++		       processed_system_time,
++		       per_cpu(processed_system_time, cpu));
++		for (i = 0; i < num_online_cpus(); i++)
++			printk(" %d: %lld\n", i,
++			       per_cpu(processed_system_time, i));
++	}
 +
-+#if defined (__i386__)
-+#include <asm/i8259.h>
-+#endif
++	/* System-wide jiffy work. */
++	while (delta >= NS_PER_TICK) {
++		delta -= NS_PER_TICK;
++		processed_system_time += NS_PER_TICK;
++		do_timer(regs);
++	}
 +
-+int pit_latch_buggy;              /* extern */
++	if (shadow_tv_version != HYPERVISOR_shared_info->wc_version) {
++		update_wallclock();
++		if (keventd_up())
++			schedule_work(&clock_was_set_work);
++	}
 +
-+#if defined(__x86_64__)
-+unsigned long vxtime_hz = PIT_TICK_RATE;
-+struct vxtime_data __vxtime __section_vxtime;   /* for vsyscalls */
-+volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
-+unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
-+struct timespec __xtime __section_xtime;
-+struct timezone __sys_tz __section_sys_tz;
-+#endif
++	write_sequnlock(&xtime_lock);
 +
-+unsigned int cpu_khz;	/* Detected as we calibrate the TSC */
-+EXPORT_SYMBOL(cpu_khz);
++	/*
++	 * Account stolen ticks.
++	 * HACK: Passing NULL to account_steal_time()
++	 * ensures that the ticks are accounted as stolen.
++	 */
++	if ((stolen > 0) && (delta_cpu > 0)) {
++		delta_cpu -= stolen;
++		if (unlikely(delta_cpu < 0))
++			stolen += delta_cpu; /* clamp local-time progress */
++		do_div(stolen, NS_PER_TICK);
++		per_cpu(processed_stolen_time, cpu) += stolen * NS_PER_TICK;
++		per_cpu(processed_system_time, cpu) += stolen * NS_PER_TICK;
++		account_steal_time(NULL, (cputime_t)stolen);
++	}
 +
-+extern unsigned long wall_jiffies;
++	/*
++	 * Account blocked ticks.
++	 * HACK: Passing idle_task to account_steal_time()
++	 * ensures that the ticks are accounted as idle/wait.
++	 */
++	if ((blocked > 0) && (delta_cpu > 0)) {
++		delta_cpu -= blocked;
++		if (unlikely(delta_cpu < 0))
++			blocked += delta_cpu; /* clamp local-time progress */
++		do_div(blocked, NS_PER_TICK);
++		per_cpu(processed_blocked_time, cpu) += blocked * NS_PER_TICK;
++		per_cpu(processed_system_time, cpu)  += blocked * NS_PER_TICK;
++		account_steal_time(idle_task(cpu), (cputime_t)blocked);
++	}
 +
-+DEFINE_SPINLOCK(rtc_lock);
-+EXPORT_SYMBOL(rtc_lock);
++	/* Account user/system ticks. */
++	if (delta_cpu > 0) {
++		do_div(delta_cpu, NS_PER_TICK);
++		per_cpu(processed_system_time, cpu) += delta_cpu * NS_PER_TICK;
++		if (user_mode_vm(regs))
++			account_user_time(current, (cputime_t)delta_cpu);
++		else
++			account_system_time(current, HARDIRQ_OFFSET,
++					    (cputime_t)delta_cpu);
++	}
 +
-+extern struct init_timer_opts timer_tsc_init;
-+extern struct timer_opts timer_tsc;
-+#define timer_none timer_tsc
++	/* Offlined for more than a few seconds? Avoid lockup warnings. */
++	if (stolen > 5*HZ)
++		touch_softlockup_watchdog();
 +
-+/* These are periodically updated in shared_info, and then copied here. */
-+struct shadow_time_info {
-+	u64 tsc_timestamp;     /* TSC at last update of time vals.  */
-+	u64 system_timestamp;  /* Time, in nanosecs, since boot.    */
-+	u32 tsc_to_nsec_mul;
-+	u32 tsc_to_usec_mul;
-+	int tsc_shift;
-+	u32 version;
-+};
-+static DEFINE_PER_CPU(struct shadow_time_info, shadow_time);
-+static struct timespec shadow_tv;
-+static u32 shadow_tv_version;
++	/* Local timer processing (see update_process_times()). */
++	run_local_timers();
++	if (rcu_pending(cpu))
++		rcu_check_callbacks(cpu, user_mode_vm(regs));
++	scheduler_tick();
++	run_posix_cpu_timers(current);
++	profile_tick(CPU_PROFILING, regs);
 +
-+static struct timeval monotonic_tv;
-+static spinlock_t monotonic_lock = SPIN_LOCK_UNLOCKED;
++	return IRQ_HANDLED;
++}
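
The stolen/blocked accounting above contains a subtle clamp: subtracting
stolen time from delta_cpu may drive it negative, in which case the negative
remainder is folded back into stolen so local time never appears to run
backwards. A small model of that branch (HZ=250 and the sample numbers are
assumptions for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define NS_PER_TICK 4000000LL   /* 1e9 / HZ, with HZ assumed 250 */

    int main(void)
    {
        int64_t delta_cpu = 6000000;   /* 1.5 ticks of local progress */
        int64_t stolen    = 9000000;   /* 2.25 ticks reported stolen  */

        delta_cpu -= stolen;
        if (delta_cpu < 0)
            stolen += delta_cpu;       /* clamp: stolen becomes 6000000 */

        /* do_div() in the patch: keep whole ticks, remainder stays in ns */
        int64_t ticks = stolen / NS_PER_TICK;
        printf("stolen ticks accounted: %lld\n", (long long)ticks); /* 1 */
        return 0;
    }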
 +
-+/* Keep track of last time we did processing/updating of jiffies and xtime. */
-+static u64 processed_system_time;   /* System time (ns) at last processing. */
-+static DEFINE_PER_CPU(u64, processed_system_time);
++static void init_missing_ticks_accounting(unsigned int cpu)
++{
++	struct vcpu_register_runstate_memory_area area;
++	struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
++	int rc;
 +
-+/* How much CPU time was spent blocked and how much was 'stolen'? */
-+static DEFINE_PER_CPU(u64, processed_stolen_time);
-+static DEFINE_PER_CPU(u64, processed_blocked_time);
++	memset(runstate, 0, sizeof(*runstate));
 +
-+/* Current runstate of each CPU (updated automatically by the hypervisor). */
-+static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
++	area.addr.v = runstate;
++	rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, &area);
++	WARN_ON(rc && rc != -ENOSYS);
 +
-+/* Must be signed, as it's compared with s64 quantities which can be -ve. */
-+#define NS_PER_TICK (1000000000LL/HZ)
++	per_cpu(processed_blocked_time, cpu) =
++		runstate->time[RUNSTATE_blocked];
++	per_cpu(processed_stolen_time, cpu) =
++		runstate->time[RUNSTATE_runnable] +
++		runstate->time[RUNSTATE_offline];
++}
 +
-+static void __clock_was_set(void *unused)
++/* not static: needed by APM */
++unsigned long get_cmos_time(void)
 +{
-+	clock_was_set();
++	unsigned long retval;
++	unsigned long flags;
++
++	spin_lock_irqsave(&rtc_lock, flags);
++
++	if (efi_enabled)
++		retval = efi_get_time();
++	else
++		retval = mach_get_cmos_time();
++
++	spin_unlock_irqrestore(&rtc_lock, flags);
++
++	return retval;
 +}
-+static DECLARE_WORK(clock_was_set_work, __clock_was_set, NULL);
++EXPORT_SYMBOL(get_cmos_time);
 +
-+static inline void __normalize_time(time_t *sec, s64 *nsec)
++static void sync_cmos_clock(unsigned long dummy);
++
++static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
++
++static void sync_cmos_clock(unsigned long dummy)
 +{
-+	while (*nsec >= NSEC_PER_SEC) {
-+		(*nsec) -= NSEC_PER_SEC;
-+		(*sec)++;
-+	}
-+	while (*nsec < 0) {
-+		(*nsec) += NSEC_PER_SEC;
-+		(*sec)--;
++	struct timeval now, next;
++	int fail = 1;
++
++	/*
++	 * If we have an externally synchronized Linux clock, then update
++	 * CMOS clock accordingly every ~11 minutes. set_rtc_mmss() has to be
++	 * called as close as possible to 500 ms before the new second starts.
++	 * This code is run on a timer.  If the clock is set, that timer
++	 * may not expire at the correct time.  Thus, we adjust...
++	 */
++	if (!ntp_synced())
++		/*
++		 * Not synced, exit, do not restart a timer (if one is
++		 * running, let it run out).
++		 */
++		return;
++
++	do_gettimeofday(&now);
++	if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
++	    now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
++		fail = set_rtc_mmss(now.tv_sec);
++
++	next.tv_usec = USEC_AFTER - now.tv_usec;
++	if (next.tv_usec <= 0)
++		next.tv_usec += USEC_PER_SEC;
++
++	if (!fail)
++		next.tv_sec = 659;
++	else
++		next.tv_sec = 0;
++
++	if (next.tv_usec >= USEC_PER_SEC) {
++		next.tv_sec++;
++		next.tv_usec -= USEC_PER_SEC;
 +	}
++	mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
 +}
 +
-+/* Does this guest OS track Xen time, or set its wall clock independently? */
-+static int independent_wallclock = 0;
-+static int __init __independent_wallclock(char *str)
++void notify_arch_cmos_timer(void)
 +{
-+	independent_wallclock = 1;
-+	return 1;
++	mod_timer(&sync_cmos_timer, jiffies + 1);
++	mod_timer(&sync_xen_wallclock_timer, jiffies + 1);
 +}
-+__setup("independent_wallclock", __independent_wallclock);
 +
-+/* Permitted clock jitter, in nsecs, beyond which a warning will be printed. */
-+static unsigned long permitted_clock_jitter = 10000000UL; /* 10ms */
-+static int __init __permitted_clock_jitter(char *str)
++static int timer_resume(struct sys_device *dev)
 +{
-+	permitted_clock_jitter = simple_strtoul(str, NULL, 0);
-+	return 1;
++	extern void time_resume(void);
++	time_resume();
++	return 0;
 +}
-+__setup("permitted_clock_jitter=", __permitted_clock_jitter);
 +
-+#if 0
-+static void delay_tsc(unsigned long loops)
-+{
-+	unsigned long bclock, now;
++static struct sysdev_class timer_sysclass = {
++	.resume = timer_resume,
++	set_kset_name("timer"),
++};
 +
-+	rdtscl(bclock);
-+	do {
-+		rep_nop();
-+		rdtscl(now);
-+	} while ((now - bclock) < loops);
-+}
 +
-+struct timer_opts timer_tsc = {
-+	.name = "tsc",
-+	.delay = delay_tsc,
++/* XXX this driverfs stuff should probably go elsewhere later -john */
++static struct sys_device device_timer = {
++	.id	= 0,
++	.cls	= &timer_sysclass,
 +};
-+#endif
 +
-+/*
-+ * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
-+ * yielding a 64-bit result.
-+ */
-+static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
++static int time_init_device(void)
 +{
-+	u64 product;
-+#ifdef __i386__
-+	u32 tmp1, tmp2;
-+#endif
++	int error = sysdev_class_register(&timer_sysclass);
++	if (!error)
++		error = sysdev_register(&device_timer);
++	return error;
++}
 +
-+	if (shift < 0)
-+		delta >>= -shift;
-+	else
-+		delta <<= shift;
++device_initcall(time_init_device);
 +
-+#ifdef __i386__
-+	__asm__ (
-+		"mul  %5       ; "
-+		"mov  %4,%%eax ; "
-+		"mov  %%edx,%4 ; "
-+		"mul  %5       ; "
-+		"xor  %5,%5    ; "
-+		"add  %4,%%eax ; "
-+		"adc  %5,%%edx ; "
-+		: "=A" (product), "=r" (tmp1), "=r" (tmp2)
-+		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
-+#else
-+	__asm__ (
-+		"mul %%rdx ; shrd $32,%%rdx,%%rax"
-+		: "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
-+#endif
++#ifdef CONFIG_HPET_TIMER
++extern void (*late_time_init)(void);
++/* Duplicate of time_init() below, with hpet_enable part added */
++static void __init hpet_time_init(void)
++{
++	xtime.tv_sec = get_cmos_time();
++	xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
++	set_normalized_timespec(&wall_to_monotonic,
++		-xtime.tv_sec, -xtime.tv_nsec);
 +
-+	return product;
-+}
++	if ((hpet_enable() >= 0) && hpet_use_timer) {
++		printk("Using HPET for base-timer\n");
++	}
 +
-+#if 0 /* defined (__i386__) */
-+int read_current_timer(unsigned long *timer_val)
-+{
-+	rdtscl(*timer_val);
-+	return 0;
++	time_init_hook();
 +}
 +#endif
 +
-+void init_cpu_khz(void)
-+{
-+	u64 __cpu_khz = 1000000ULL << 32;
-+	struct vcpu_time_info *info = &vcpu_info(0)->time;
-+	do_div(__cpu_khz, info->tsc_to_system_mul);
-+	if (info->tsc_shift < 0)
-+		cpu_khz = __cpu_khz << -info->tsc_shift;
-+	else
-+		cpu_khz = __cpu_khz >> info->tsc_shift;
-+}
++/* Dynamically-mapped IRQ. */
++DEFINE_PER_CPU(int, timer_irq);
 +
-+static u64 get_nsec_offset(struct shadow_time_info *shadow)
++extern void (*late_time_init)(void);
++static void setup_cpu0_timer_irq(void)
 +{
-+	u64 now, delta;
-+	rdtscll(now);
-+	delta = now - shadow->tsc_timestamp;
-+	return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
++	per_cpu(timer_irq, 0) =
++		bind_virq_to_irqhandler(
++			VIRQ_TIMER,
++			0,
++			timer_interrupt,
++			SA_INTERRUPT,
++			"timer0",
++			NULL);
++	BUG_ON(per_cpu(timer_irq, 0) < 0);
 +}
 +
-+static unsigned long get_usec_offset(struct shadow_time_info *shadow)
-+{
-+	u64 now, delta;
-+	rdtscll(now);
-+	delta = now - shadow->tsc_timestamp;
-+	return scale_delta(delta, shadow->tsc_to_usec_mul, shadow->tsc_shift);
-+}
++static struct vcpu_set_periodic_timer xen_set_periodic_tick = {
++	.period_ns = NS_PER_TICK
++};
 +
-+static void __update_wallclock(time_t sec, long nsec)
++void __init time_init(void)
 +{
-+	long wtm_nsec, xtime_nsec;
-+	time_t wtm_sec, xtime_sec;
-+	u64 tmp, wc_nsec;
++#ifdef CONFIG_HPET_TIMER
++	if (is_hpet_capable()) {
++		/*
++		 * HPET initialization needs to do memory-mapped io. So, let
++		 * us do a late initialization after mem_init().
++		 */
++		late_time_init = hpet_time_init;
++		return;
++	}
++#endif
 +
-+	/* Adjust wall-clock time base based on wall_jiffies ticks. */
-+	wc_nsec = processed_system_time;
-+	wc_nsec += sec * (u64)NSEC_PER_SEC;
-+	wc_nsec += nsec;
-+	wc_nsec -= (jiffies - wall_jiffies) * (u64)NS_PER_TICK;
++	switch (HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, 0,
++				   &xen_set_periodic_tick)) {
++	case 0:
++#if CONFIG_XEN_COMPAT <= 0x030004
++	case -ENOSYS:
++#endif
++		break;
++	default:
++		BUG();
++	}
 +
-+	/* Split wallclock base into seconds and nanoseconds. */
-+	tmp = wc_nsec;
-+	xtime_nsec = do_div(tmp, 1000000000);
-+	xtime_sec  = (time_t)tmp;
++	get_time_values_from_xen(0);
 +
-+	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - xtime_sec);
-+	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - xtime_nsec);
++	processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
++	per_cpu(processed_system_time, 0) = processed_system_time;
++	init_missing_ticks_accounting(0);
 +
-+	set_normalized_timespec(&xtime, xtime_sec, xtime_nsec);
-+	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
++	update_wallclock();
 +
-+	ntp_clear();
++	init_cpu_khz();
++	printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n",
++	       cpu_khz / 1000, cpu_khz % 1000);
++
++#if defined(__x86_64__)
++	vxtime.mode = VXTIME_TSC;
++	vxtime.quot = (1000000L << 32) / vxtime_hz;
++	vxtime.tsc_quot = (1000L << 32) / cpu_khz;
++	sync_core();
++	rdtscll(vxtime.last_tsc);
++#endif
++
++	/* Cannot request_irq() until kmem is initialised. */
++	late_time_init = setup_cpu0_timer_irq;
 +}
 +
-+static void update_wallclock(void)
++/* Convert jiffies to system time. */
++u64 jiffies_to_st(unsigned long j)
 +{
-+	shared_info_t *s = HYPERVISOR_shared_info;
++	unsigned long seq;
++	long delta;
++	u64 st;
 +
 +	do {
-+		shadow_tv_version = s->wc_version;
-+		rmb();
-+		shadow_tv.tv_sec  = s->wc_sec;
-+		shadow_tv.tv_nsec = s->wc_nsec;
-+		rmb();
-+	} while ((s->wc_version & 1) | (shadow_tv_version ^ s->wc_version));
++		seq = read_seqbegin(&xtime_lock);
++		delta = j - jiffies;
++		if (delta < 1) {
++			/* Triggers in some wrap-around cases, but that's okay:
++			 * we just end up with a shorter timeout. */
++			st = processed_system_time + NS_PER_TICK;
++		} else if (((unsigned long)delta >> (BITS_PER_LONG-3)) != 0) {
++			/* Very long timeout means there is no pending timer.
++			 * We indicate this to Xen by passing zero timeout. */
++			st = 0;
++		} else {
++			st = processed_system_time + delta * (u64)NS_PER_TICK;
++		}
++	} while (read_seqretry(&xtime_lock, seq));
 +
-+	if (!independent_wallclock)
-+		__update_wallclock(shadow_tv.tv_sec, shadow_tv.tv_nsec);
++	return st;
 +}
++EXPORT_SYMBOL(jiffies_to_st);
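
jiffies_to_st() has three cases worth spelling out: a timeout already due (or
a jiffy wrap) becomes "one tick from now", an absurdly distant timeout becomes
0 (Xen's "no timeout"), and everything else is a straight ticks-to-nanoseconds
conversion. A userspace rendering with the seqlock retry dropped (the helper
name and HZ value are assumptions for the sketch):

    #include <stdint.h>
    #include <stdio.h>

    #define HZ 250
    #define NS_PER_TICK (1000000000LL / HZ)
    #define BITS_PER_LONG 64

    static uint64_t jiffies_to_st_demo(unsigned long j, unsigned long now,
                                       uint64_t processed_ns)
    {
        long delta = j - now;
        if (delta < 1)
            return processed_ns + NS_PER_TICK;  /* due already, or wrapped  */
        if ((unsigned long)delta >> (BITS_PER_LONG - 3))
            return 0;                           /* no pending timer for Xen */
        return processed_ns + delta * (uint64_t)NS_PER_TICK;
    }

    int main(void)
    {
        /* 10 ticks ahead of a 4.0 s system time: 4e9 + 10 * 4e6 ns */
        printf("%llu\n", (unsigned long long)
               jiffies_to_st_demo(1010, 1000, 4000000000ULL)); /* 4040000000 */
        return 0;
    }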
 +
 +/*
-+ * Reads a consistent set of time-base values from Xen, into a shadow data
-+ * area.
++ * stop_hz_timer / start_hz_timer - enter/exit 'tickless mode' on an idle cpu
++ * These functions are based on implementations from arch/s390/kernel/time.c
 + */
-+static void get_time_values_from_xen(int cpu)
++static void stop_hz_timer(void)
 +{
-+	struct vcpu_time_info   *src;
-+	struct shadow_time_info *dst;
++	struct vcpu_set_singleshot_timer singleshot;
++	unsigned int cpu = smp_processor_id();
++	unsigned long j;
++	int rc;
 +
-+	src = &vcpu_info(cpu)->time;
-+	dst = &per_cpu(shadow_time, cpu);
++	cpu_set(cpu, nohz_cpu_mask);
 +
-+	do {
-+		dst->version = src->version;
-+		rmb();
-+		dst->tsc_timestamp     = src->tsc_timestamp;
-+		dst->system_timestamp  = src->system_time;
-+		dst->tsc_to_nsec_mul   = src->tsc_to_system_mul;
-+		dst->tsc_shift         = src->tsc_shift;
-+		rmb();
-+	} while ((src->version & 1) | (dst->version ^ src->version));
++	/* See matching smp_mb in rcu_start_batch in rcupdate.c.  These mbs  */
++	/* ensure that if __rcu_pending (nested in rcu_needs_cpu) fetches a  */
++	/* value of rcp->cur that matches rdp->quiescbatch and allows us to  */
++	/* stop the hz timer then the cpumasks created for subsequent values */
++	/* of cur in rcu_start_batch are guaranteed to pick up the updated   */
++	/* nohz_cpu_mask and so will not depend on this cpu.                 */
 +
-+	dst->tsc_to_usec_mul = dst->tsc_to_nsec_mul / 1000;
++	smp_mb();
++
++	/* Leave ourselves in tick mode if rcu or softirq or timer pending. */
++	if (rcu_needs_cpu(cpu) || local_softirq_pending() ||
++	    (j = next_timer_interrupt(), time_before_eq(j, jiffies))) {
++		cpu_clear(cpu, nohz_cpu_mask);
++		j = jiffies + 1;
++	}
++
++	singleshot.timeout_abs_ns = jiffies_to_st(j) + NS_PER_TICK/2;
++	singleshot.flags = 0;
++	rc = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &singleshot);
++#if CONFIG_XEN_COMPAT <= 0x030004
++	if (rc) {
++		BUG_ON(rc != -ENOSYS);
++		rc = HYPERVISOR_set_timer_op(singleshot.timeout_abs_ns);
++	}
++#endif
++	BUG_ON(rc);
 +}
 +
-+static inline int time_values_up_to_date(int cpu)
++static void start_hz_timer(void)
 +{
-+	struct vcpu_time_info   *src;
-+	struct shadow_time_info *dst;
-+
-+	src = &vcpu_info(cpu)->time;
-+	dst = &per_cpu(shadow_time, cpu);
++	cpu_clear(smp_processor_id(), nohz_cpu_mask);
++}
 +
-+	rmb();
-+	return (dst->version == src->version);
++void raw_safe_halt(void)
++{
++	stop_hz_timer();
++	/* Blocking includes an implicit local_irq_enable(). */
++	HYPERVISOR_block();
++	start_hz_timer();
 +}
++EXPORT_SYMBOL(raw_safe_halt);
 +
-+/*
-+ * This is a special lock that is owned by the CPU and holds the index
-+ * register we are working with.  It is required for NMI access to the
-+ * CMOS/RTC registers.  See include/asm-i386/mc146818rtc.h for details.
-+ */
-+volatile unsigned long cmos_lock = 0;
-+EXPORT_SYMBOL(cmos_lock);
-+
-+/* Routines for accessing the CMOS RAM/RTC. */
-+unsigned char rtc_cmos_read(unsigned char addr)
-+{
-+	unsigned char val;
-+	lock_cmos_prefix(addr);
-+	outb_p(addr, RTC_PORT(0));
-+	val = inb_p(RTC_PORT(1));
-+	lock_cmos_suffix(addr);
-+	return val;
-+}
-+EXPORT_SYMBOL(rtc_cmos_read);
-+
-+void rtc_cmos_write(unsigned char val, unsigned char addr)
++void halt(void)
 +{
-+	lock_cmos_prefix(addr);
-+	outb_p(addr, RTC_PORT(0));
-+	outb_p(val, RTC_PORT(1));
-+	lock_cmos_suffix(addr);
++	if (irqs_disabled())
++		VOID(HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL));
 +}
-+EXPORT_SYMBOL(rtc_cmos_write);
++EXPORT_SYMBOL(halt);
 +
-+/*
-+ * This version of gettimeofday has microsecond resolution
-+ * and better than microsecond precision on fast x86 machines with TSC.
-+ */
-+void do_gettimeofday(struct timeval *tv)
++/* No locking required. Interrupts are disabled on all CPUs. */
++void time_resume(void)
 +{
-+	unsigned long seq;
-+	unsigned long usec, sec;
-+	unsigned long flags;
-+	s64 nsec;
 +	unsigned int cpu;
-+	struct shadow_time_info *shadow;
-+	u32 local_time_version;
-+
-+	cpu = get_cpu();
-+	shadow = &per_cpu(shadow_time, cpu);
-+
-+	do {
-+		unsigned long lost;
 +
-+		local_time_version = shadow->version;
-+		seq = read_seqbegin(&xtime_lock);
++	init_cpu_khz();
 +
-+		usec = get_usec_offset(shadow);
-+		lost = jiffies - wall_jiffies;
++	for_each_online_cpu(cpu) {
++		switch (HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu,
++					   &xen_set_periodic_tick)) {
++		case 0:
++#if CONFIG_XEN_COMPAT <= 0x030004
++		case -ENOSYS:
++#endif
++			break;
++		default:
++			BUG();
++		}
++		get_time_values_from_xen(cpu);
++		per_cpu(processed_system_time, cpu) =
++			per_cpu(shadow_time, 0).system_timestamp;
++		init_missing_ticks_accounting(cpu);
++	}
 +
-+		if (unlikely(lost))
-+			usec += lost * (USEC_PER_SEC / HZ);
++	processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
 +
-+		sec = xtime.tv_sec;
-+		usec += (xtime.tv_nsec / NSEC_PER_USEC);
++	update_wallclock();
++}
 +
-+		nsec = shadow->system_timestamp - processed_system_time;
-+		__normalize_time(&sec, &nsec);
-+		usec += (long)nsec / NSEC_PER_USEC;
++#ifdef CONFIG_SMP
++static char timer_name[NR_CPUS][15];
 +
-+		if (unlikely(!time_values_up_to_date(cpu))) {
-+			/*
-+			 * We may have blocked for a long time,
-+			 * rendering our calculations invalid
-+			 * (e.g. the time delta may have
-+			 * overflowed). Detect that and recalculate
-+			 * with fresh values.
-+			 */
-+			get_time_values_from_xen(cpu);
-+			continue;
-+		}
-+	} while (read_seqretry(&xtime_lock, seq) ||
-+		 (local_time_version != shadow->version));
++int __cpuinit local_setup_timer(unsigned int cpu)
++{
++	int seq, irq;
 +
-+	put_cpu();
++	BUG_ON(cpu == 0);
 +
-+	while (usec >= USEC_PER_SEC) {
-+		usec -= USEC_PER_SEC;
-+		sec++;
++	switch (HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu,
++			   &xen_set_periodic_tick)) {
++	case 0:
++#if CONFIG_XEN_COMPAT <= 0x030004
++	case -ENOSYS:
++#endif
++		break;
++	default:
++		BUG();
 +	}
 +
-+	spin_lock_irqsave(&monotonic_lock, flags);
-+	if ((sec > monotonic_tv.tv_sec) ||
-+	    ((sec == monotonic_tv.tv_sec) && (usec > monotonic_tv.tv_usec)))
-+	{
-+		monotonic_tv.tv_sec = sec;
-+		monotonic_tv.tv_usec = usec;
-+	} else {
-+		sec = monotonic_tv.tv_sec;
-+		usec = monotonic_tv.tv_usec;
-+	}
-+	spin_unlock_irqrestore(&monotonic_lock, flags);
++	do {
++		seq = read_seqbegin(&xtime_lock);
++		/* Use cpu0 timestamp: cpu's shadow is not initialised yet. */
++		per_cpu(processed_system_time, cpu) =
++			per_cpu(shadow_time, 0).system_timestamp;
++		init_missing_ticks_accounting(cpu);
++	} while (read_seqretry(&xtime_lock, seq));
 +
-+	tv->tv_sec = sec;
-+	tv->tv_usec = usec;
++	sprintf(timer_name[cpu], "timer%u", cpu);
++	irq = bind_virq_to_irqhandler(VIRQ_TIMER,
++				      cpu,
++				      timer_interrupt,
++				      SA_INTERRUPT,
++				      timer_name[cpu],
++				      NULL);
++	if (irq < 0)
++		return irq;
++	per_cpu(timer_irq, cpu) = irq;
++
++	return 0;
 +}
 +
-+EXPORT_SYMBOL(do_gettimeofday);
++void __cpuexit local_teardown_timer(unsigned int cpu)
++{
++	BUG_ON(cpu == 0);
++	unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
++}
++#endif
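local_setup_timer() reads cpu0's shadow timestamp under the xtime_lock seqlock
retry loop, the standard lock-free reader idiom. The reader side as a
stand-alone sketch (snapshot_cpu0_time is a made-up name):

	static u64 snapshot_cpu0_time(void)
	{
		unsigned long seq;
		u64 st;

		/* retry until no writer raced with the read */
		do {
			seq = read_seqbegin(&xtime_lock);
			st = per_cpu(shadow_time, 0).system_timestamp;
		} while (read_seqretry(&xtime_lock, seq));
		return st;
	}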
 +
-+int do_settimeofday(struct timespec *tv)
++#ifdef CONFIG_CPU_FREQ
++static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, 
++				void *data)
 +{
-+	time_t sec;
-+	s64 nsec;
-+	unsigned int cpu;
-+	struct shadow_time_info *shadow;
++	struct cpufreq_freqs *freq = data;
 +	struct xen_platform_op op;
 +
-+	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
-+		return -EINVAL;
-+
-+	cpu = get_cpu();
-+	shadow = &per_cpu(shadow_time, cpu);
++	if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
++		return 0;
 +
-+	write_seqlock_irq(&xtime_lock);
++	if (val == CPUFREQ_PRECHANGE)
++		return 0;
 +
-+	/*
-+	 * Ensure we don't get blocked for a long time so that our time delta
-+	 * overflows. If that were to happen then our shadow time values would
-+	 * be stale, so we can retry with fresh ones.
-+	 */
-+	for (;;) {
-+		nsec = tv->tv_nsec - get_nsec_offset(shadow);
-+		if (time_values_up_to_date(cpu))
-+			break;
-+		get_time_values_from_xen(cpu);
-+	}
-+	sec = tv->tv_sec;
-+	__normalize_time(&sec, &nsec);
++	op.cmd = XENPF_change_freq;
++	op.u.change_freq.flags = 0;
++	op.u.change_freq.cpu = freq->cpu;
++	op.u.change_freq.freq = (u64)freq->new * 1000;
++	WARN_ON(HYPERVISOR_platform_op(&op));
 +
-+	if (is_initial_xendomain() && !independent_wallclock) {
-+		op.cmd = XENPF_settime;
-+		op.u.settime.secs        = sec;
-+		op.u.settime.nsecs       = nsec;
-+		op.u.settime.system_time = shadow->system_timestamp;
-+		HYPERVISOR_platform_op(&op);
-+		update_wallclock();
-+	} else if (independent_wallclock) {
-+		nsec -= shadow->system_timestamp;
-+		__normalize_time(&sec, &nsec);
-+		__update_wallclock(sec, nsec);
-+	}
++	return 0;
++}
 +
-+	/* Reset monotonic gettimeofday() timeval. */
-+	spin_lock(&monotonic_lock);
-+	monotonic_tv.tv_sec = 0;
-+	monotonic_tv.tv_usec = 0;
-+	spin_unlock(&monotonic_lock);
++static struct notifier_block time_cpufreq_notifier_block = {
++	.notifier_call = time_cpufreq_notifier
++};
 +
-+	write_sequnlock_irq(&xtime_lock);
++static int __init cpufreq_time_setup(void)
++{
++	if (cpufreq_register_notifier(&time_cpufreq_notifier_block,
++			CPUFREQ_TRANSITION_NOTIFIER)) {
++		printk(KERN_ERR "failed to set up cpufreq notifier\n");
++		return -ENODEV;
++	}
++	return 0;
++}
 +
-+	put_cpu();
++core_initcall(cpufreq_time_setup);
++#endif
 +
-+	clock_was_set();
++/*
++ * /proc/sys/xen: This really belongs in another file. It can stay here for
++ * now however.
++ */
++static ctl_table xen_subtable[] = {
++	{
++		.ctl_name	= 1,
++		.procname	= "independent_wallclock",
++		.data		= &independent_wallclock,
++		.maxlen		= sizeof(independent_wallclock),
++		.mode		= 0644,
++		.proc_handler	= proc_dointvec
++	},
++	{
++		.ctl_name	= 2,
++		.procname	= "permitted_clock_jitter",
++		.data		= &permitted_clock_jitter,
++		.maxlen		= sizeof(permitted_clock_jitter),
++		.mode		= 0644,
++		.proc_handler	= proc_doulongvec_minmax
++	},
++	{ 0 }
++};
++static ctl_table xen_table[] = {
++	{
++		.ctl_name	= 123,
++		.procname	= "xen",
++		.mode		= 0555,
++		.child		= xen_subtable},
++	{ 0 }
++};
++static int __init xen_sysctl_init(void)
++{
++	(void)register_sysctl_table(xen_table, 0);
 +	return 0;
 +}
++__initcall(xen_sysctl_init);
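The two knobs registered above surface as /proc/sys/xen/independent_wallclock
and /proc/sys/xen/permitted_clock_jitter. A hypothetical userspace check of
the first one:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/xen/independent_wallclock", "r");
		int v = -1;

		if (f) {
			if (fscanf(f, "%d", &v) != 1)
				v = -1;
			fclose(f);
		}
		printf("independent_wallclock = %d\n", v);
		return 0;
	}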
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/traps-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/traps-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1190 @@
++/*
++ *  linux/arch/i386/traps.c
++ *
++ *  Copyright (C) 1991, 1992  Linus Torvalds
++ *
++ *  Pentium III FXSR, SSE support
++ *	Gareth Hughes <gareth at valinux.com>, May 2000
++ */
 +
-+EXPORT_SYMBOL(do_settimeofday);
++/*
++ * 'Traps.c' handles hardware traps and faults after we have saved some
++ * state in 'asm.s'.
++ */
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/timer.h>
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/interrupt.h>
++#include <linux/highmem.h>
++#include <linux/kallsyms.h>
++#include <linux/ptrace.h>
++#include <linux/utsname.h>
++#include <linux/kprobes.h>
++#include <linux/kexec.h>
++#include <linux/unwind.h>
 +
-+static void sync_xen_wallclock(unsigned long dummy);
-+static DEFINE_TIMER(sync_xen_wallclock_timer, sync_xen_wallclock, 0, 0);
-+static void sync_xen_wallclock(unsigned long dummy)
-+{
-+	time_t sec;
-+	s64 nsec;
-+	struct xen_platform_op op;
++#ifdef CONFIG_EISA
++#include <linux/ioport.h>
++#include <linux/eisa.h>
++#endif
 +
-+	if (!ntp_synced() || independent_wallclock || !is_initial_xendomain())
-+		return;
++#ifdef CONFIG_MCA
++#include <linux/mca.h>
++#endif
 +
-+	write_seqlock_irq(&xtime_lock);
++#include <asm/processor.h>
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/io.h>
++#include <asm/atomic.h>
++#include <asm/debugreg.h>
++#include <asm/desc.h>
++#include <asm/i387.h>
++#include <asm/nmi.h>
++#include <asm/unwind.h>
++#include <asm/smp.h>
++#include <asm/arch_hooks.h>
++#include <asm/kdebug.h>
 +
-+	sec  = xtime.tv_sec;
-+	nsec = xtime.tv_nsec + ((jiffies - wall_jiffies) * (u64)NS_PER_TICK);
-+	__normalize_time(&sec, &nsec);
++#include <linux/module.h>
 +
-+	op.cmd = XENPF_settime;
-+	op.u.settime.secs        = sec;
-+	op.u.settime.nsecs       = nsec;
-+	op.u.settime.system_time = processed_system_time;
-+	HYPERVISOR_platform_op(&op);
++#include "mach_traps.h"
 +
-+	update_wallclock();
++asmlinkage int system_call(void);
 +
-+	write_sequnlock_irq(&xtime_lock);
++struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
++		{ 0, 0 }, { 0, 0 } };
 +
-+	/* Once per minute. */
-+	mod_timer(&sync_xen_wallclock_timer, jiffies + 60*HZ);
-+}
++/* Do we ignore FPU interrupts ? */
++char ignore_fpu_irq = 0;
 +
-+static int set_rtc_mmss(unsigned long nowtime)
-+{
-+	int retval;
-+	unsigned long flags;
++#ifndef CONFIG_X86_NO_IDT
++/*
++ * The IDT has to be page-aligned to simplify the Pentium
++ * F0 0F bug workaround.. We have a special link segment
++ * for this.
++ */
++struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
++#endif
 +
-+	if (independent_wallclock || !is_initial_xendomain())
-+		return 0;
++asmlinkage void divide_error(void);
++asmlinkage void debug(void);
++asmlinkage void nmi(void);
++asmlinkage void int3(void);
++asmlinkage void overflow(void);
++asmlinkage void bounds(void);
++asmlinkage void invalid_op(void);
++asmlinkage void device_not_available(void);
++asmlinkage void coprocessor_segment_overrun(void);
++asmlinkage void invalid_TSS(void);
++asmlinkage void segment_not_present(void);
++asmlinkage void stack_segment(void);
++asmlinkage void general_protection(void);
++asmlinkage void page_fault(void);
++asmlinkage void coprocessor_error(void);
++asmlinkage void simd_coprocessor_error(void);
++asmlinkage void alignment_check(void);
++#ifndef CONFIG_XEN
++asmlinkage void spurious_interrupt_bug(void);
++#else
++asmlinkage void fixup_4gb_segment(void);
++#endif
++asmlinkage void machine_check(void);
 +
-+	/* gets recalled with irq locally disabled */
-+	/* XXX - does irqsave resolve this? -johnstul */
-+	spin_lock_irqsave(&rtc_lock, flags);
-+	if (efi_enabled)
-+		retval = efi_set_rtc_mmss(nowtime);
-+	else
-+		retval = mach_set_rtc_mmss(nowtime);
-+	spin_unlock_irqrestore(&rtc_lock, flags);
++static int kstack_depth_to_print = 24;
++#ifdef CONFIG_STACK_UNWIND
++static int call_trace = 1;
++#else
++#define call_trace (-1)
++#endif
++ATOMIC_NOTIFIER_HEAD(i386die_chain);
 +
-+	return retval;
++int register_die_notifier(struct notifier_block *nb)
++{
++	vmalloc_sync_all();
++	return atomic_notifier_chain_register(&i386die_chain, nb);
 +}
++EXPORT_SYMBOL(register_die_notifier); /* used modular by kdb */
 +
-+/* monotonic_clock(): returns # of nanoseconds passed since time_init()
-+ *		Note: This function is required to return accurate
-+ *		time even in the absence of multiple timer ticks.
-+ */
-+unsigned long long monotonic_clock(void)
++int unregister_die_notifier(struct notifier_block *nb)
 +{
-+	int cpu = get_cpu();
-+	struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
-+	u64 time;
-+	u32 local_time_version;
-+
-+	do {
-+		local_time_version = shadow->version;
-+		barrier();
-+		time = shadow->system_timestamp + get_nsec_offset(shadow);
-+		if (!time_values_up_to_date(cpu))
-+			get_time_values_from_xen(cpu);
-+		barrier();
-+	} while (local_time_version != shadow->version);
-+
-+	put_cpu();
++	return atomic_notifier_chain_unregister(&i386die_chain, nb);
++}
++EXPORT_SYMBOL(unregister_die_notifier); /* used modular by kdb */
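Anything can hook the exported i386die_chain; kdb is the consumer the comments
mention. A hypothetical module-side registration (my_die_event and my_die_nb
are invented names):

	static int my_die_event(struct notifier_block *nb, unsigned long val,
				void *data)
	{
		if (val == DIE_OOPS)
			printk(KERN_ALERT "oops observed\n");
		return NOTIFY_DONE;
	}

	static struct notifier_block my_die_nb = {
		.notifier_call = my_die_event,
	};

	/* module init:  register_die_notifier(&my_die_nb);
	 * module exit:  unregister_die_notifier(&my_die_nb); */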
 +
-+	return time;
++static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
++{
++	return	p > (void *)tinfo &&
++		p < (void *)tinfo + THREAD_SIZE - 3;
 +}
-+EXPORT_SYMBOL(monotonic_clock);
 +
-+#ifdef __x86_64__
-+unsigned long long sched_clock(void)
++/*
++ * Print one address/symbol entry per line.
++ */
++static inline void print_addr_and_symbol(unsigned long addr, char *log_lvl)
 +{
-+	return monotonic_clock();
++	printk(" [<%08lx>] ", addr);
++
++	print_symbol("%s\n", addr);
 +}
-+#endif
 +
-+#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
-+unsigned long profile_pc(struct pt_regs *regs)
++static inline unsigned long print_context_stack(struct thread_info *tinfo,
++				unsigned long *stack, unsigned long ebp,
++				char *log_lvl)
 +{
-+	unsigned long pc = instruction_pointer(regs);
++	unsigned long addr;
 +
-+#ifdef __x86_64__
-+	/* Assume the lock function has either no stack frame or only a single word.
-+	   This checks if the address on the stack looks like a kernel text address.
-+	   There is a small window for false hits, but in that case the tick
-+	   is just accounted to the spinlock function.
-+	   Better would be to write these functions in assembler again
-+	   and check exactly. */
-+	if (!user_mode_vm(regs) && in_lock_functions(pc)) {
-+		char *v = *(char **)regs->rsp;
-+		if ((v >= _stext && v <= _etext) ||
-+			(v >= _sinittext && v <= _einittext) ||
-+			(v >= (char *)MODULES_VADDR  && v <= (char *)MODULES_END))
-+			return (unsigned long)v;
-+		return ((unsigned long *)regs->rsp)[1];
++#ifdef	CONFIG_FRAME_POINTER
++	while (valid_stack_ptr(tinfo, (void *)ebp)) {
++		addr = *(unsigned long *)(ebp + 4);
++		print_addr_and_symbol(addr, log_lvl);
++		/*
++		 * break out of recursive entries (such as
++		 * end_of_stack_stop_unwind_function):
++	 	 */
++		if (ebp == *(unsigned long *)ebp)
++			break;
++		ebp = *(unsigned long *)ebp;
 +	}
 +#else
-+	if (!user_mode_vm(regs) && in_lock_functions(pc))
-+		return *(unsigned long *)(regs->ebp + 4);
++	while (valid_stack_ptr(tinfo, stack)) {
++		addr = *stack++;
++		if (__kernel_text_address(addr))
++			print_addr_and_symbol(addr, log_lvl);
++	}
 +#endif
-+
-+	return pc;
++	return ebp;
 +}
-+EXPORT_SYMBOL(profile_pc);
-+#endif
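With CONFIG_FRAME_POINTER, print_context_stack() follows the chain of saved
frame pointers: each frame stores the caller's %ebp at [ebp] and the return
address at [ebp + 4]. Stripped of the bounds checks, the walk reduces to this
sketch (walk_frames is a made-up name):

	static void walk_frames(unsigned long ebp)
	{
		while (ebp) {
			printk(" [<%08lx>]\n", *(unsigned long *)(ebp + 4));
			if (ebp == *(unsigned long *)ebp)
				break;	/* self-referencing end frame */
			ebp = *(unsigned long *)ebp;
		}
	}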
 +
-+/*
-+ * This is the same as the above, except we _also_ save the current
-+ * Time Stamp Counter value at the time of the timer interrupt, so that
-+ * we later on can estimate the time of day more exactly.
-+ */
-+irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
++static asmlinkage int
++show_trace_unwind(struct unwind_frame_info *info, void *log_lvl)
 +{
-+	s64 delta, delta_cpu, stolen, blocked;
-+	u64 sched_time;
-+	int i, cpu = smp_processor_id();
-+	struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
-+	struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
++	int n = 0;
 +
-+	/*
-+	 * Here we are in the timer irq handler. We just have irqs locally
-+	 * disabled but we don't know if the timer_bh is running on the other
-+	 * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
-+	 * the irq version of write_lock because as just said we have irq
-+	 * locally disabled. -arca
-+	 */
-+	write_seqlock(&xtime_lock);
++	while (unwind(info) == 0 && UNW_PC(info)) {
++		n++;
++		print_addr_and_symbol(UNW_PC(info), log_lvl);
++		if (arch_unw_user_mode(info))
++			break;
++	}
++	return n;
++}
 +
-+	do {
-+		get_time_values_from_xen(cpu);
++static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
++			       unsigned long *stack, char *log_lvl)
++{
++	unsigned long ebp;
 +
-+		/* Obtain a consistent snapshot of elapsed wallclock cycles. */
-+		delta = delta_cpu =
-+			shadow->system_timestamp + get_nsec_offset(shadow);
-+		delta     -= processed_system_time;
-+		delta_cpu -= per_cpu(processed_system_time, cpu);
++	if (!task)
++		task = current;
 +
-+		/*
-+		 * Obtain a consistent snapshot of stolen/blocked cycles. We
-+		 * can use state_entry_time to detect if we get preempted here.
-+		 */
-+		do {
-+			sched_time = runstate->state_entry_time;
-+			barrier();
-+			stolen = runstate->time[RUNSTATE_runnable] +
-+				runstate->time[RUNSTATE_offline] -
-+				per_cpu(processed_stolen_time, cpu);
-+			blocked = runstate->time[RUNSTATE_blocked] -
-+				per_cpu(processed_blocked_time, cpu);
-+			barrier();
-+		} while (sched_time != runstate->state_entry_time);
-+	} while (!time_values_up_to_date(cpu));
++	if (call_trace >= 0) {
++		int unw_ret = 0;
++		struct unwind_frame_info info;
 +
-+	if ((unlikely(delta < -(s64)permitted_clock_jitter) ||
-+	     unlikely(delta_cpu < -(s64)permitted_clock_jitter))
-+	    && printk_ratelimit()) {
-+		printk("Timer ISR/%d: Time went backwards: "
-+		       "delta=%lld delta_cpu=%lld shadow=%lld "
-+		       "off=%lld processed=%lld cpu_processed=%lld\n",
-+		       cpu, delta, delta_cpu, shadow->system_timestamp,
-+		       (s64)get_nsec_offset(shadow),
-+		       processed_system_time,
-+		       per_cpu(processed_system_time, cpu));
-+		for (i = 0; i < num_online_cpus(); i++)
-+			printk(" %d: %lld\n", i,
-+			       per_cpu(processed_system_time, i));
++		if (regs) {
++			if (unwind_init_frame_info(&info, task, regs) == 0)
++				unw_ret = show_trace_unwind(&info, log_lvl);
++		} else if (task == current)
++			unw_ret = unwind_init_running(&info, show_trace_unwind, log_lvl);
++		else {
++			if (unwind_init_blocked(&info, task) == 0)
++				unw_ret = show_trace_unwind(&info, log_lvl);
++		}
++		if (unw_ret > 0) {
++			if (call_trace == 1 && !arch_unw_user_mode(&info)) {
++				print_symbol("DWARF2 unwinder stuck at %s\n",
++					     UNW_PC(&info));
++				if (UNW_SP(&info) >= PAGE_OFFSET) {
++					printk("Leftover inexact backtrace:\n");
++					stack = (void *)UNW_SP(&info);
++				} else
++					printk("Full inexact backtrace again:\n");
++			} else if (call_trace >= 1)
++				return;
++			else
++				printk("Full inexact backtrace again:\n");
++		} else
++			printk("Inexact backtrace:\n");
 +	}
 +
-+	/* System-wide jiffy work. */
-+	while (delta >= NS_PER_TICK) {
-+		delta -= NS_PER_TICK;
-+		processed_system_time += NS_PER_TICK;
-+		do_timer(regs);
++	if (task == current) {
++		/* Grab ebp right from our regs */
++		asm ("movl %%ebp, %0" : "=r" (ebp) : );
++	} else {
++		/* ebp is the last reg pushed by switch_to */
++		ebp = *(unsigned long *) task->thread.esp;
 +	}
 +
-+	if (shadow_tv_version != HYPERVISOR_shared_info->wc_version) {
-+		update_wallclock();
-+		if (keventd_up())
-+			schedule_work(&clock_was_set_work);
++	while (1) {
++		struct thread_info *context;
++		context = (struct thread_info *)
++			((unsigned long)stack & (~(THREAD_SIZE - 1)));
++		ebp = print_context_stack(context, stack, ebp, log_lvl);
++		stack = (unsigned long*)context->previous_esp;
++		if (!stack)
++			break;
++		printk("%s =======================\n", log_lvl);
 +	}
++}
 +
-+	write_sequnlock(&xtime_lock);
-+
-+	/*
-+	 * Account stolen ticks.
-+	 * HACK: Passing NULL to account_steal_time()
-+	 * ensures that the ticks are accounted as stolen.
-+	 */
-+	if ((stolen > 0) && (delta_cpu > 0)) {
-+		delta_cpu -= stolen;
-+		if (unlikely(delta_cpu < 0))
-+			stolen += delta_cpu; /* clamp local-time progress */
-+		do_div(stolen, NS_PER_TICK);
-+		per_cpu(processed_stolen_time, cpu) += stolen * NS_PER_TICK;
-+		per_cpu(processed_system_time, cpu) += stolen * NS_PER_TICK;
-+		account_steal_time(NULL, (cputime_t)stolen);
-+	}
++void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long * stack)
++{
++	show_trace_log_lvl(task, regs, stack, "");
++}
 +
-+	/*
-+	 * Account blocked ticks.
-+	 * HACK: Passing idle_task to account_steal_time()
-+	 * ensures that the ticks are accounted as idle/wait.
-+	 */
-+	if ((blocked > 0) && (delta_cpu > 0)) {
-+		delta_cpu -= blocked;
-+		if (unlikely(delta_cpu < 0))
-+			blocked += delta_cpu; /* clamp local-time progress */
-+		do_div(blocked, NS_PER_TICK);
-+		per_cpu(processed_blocked_time, cpu) += blocked * NS_PER_TICK;
-+		per_cpu(processed_system_time, cpu)  += blocked * NS_PER_TICK;
-+		account_steal_time(idle_task(cpu), (cputime_t)blocked);
-+	}
++static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
++			       unsigned long *esp, char *log_lvl)
++{
++	unsigned long *stack;
++	int i;
 +
-+	/* Account user/system ticks. */
-+	if (delta_cpu > 0) {
-+		do_div(delta_cpu, NS_PER_TICK);
-+		per_cpu(processed_system_time, cpu) += delta_cpu * NS_PER_TICK;
-+		if (user_mode_vm(regs))
-+			account_user_time(current, (cputime_t)delta_cpu);
++	if (esp == NULL) {
++		if (task)
++			esp = (unsigned long*)task->thread.esp;
 +		else
-+			account_system_time(current, HARDIRQ_OFFSET,
-+					    (cputime_t)delta_cpu);
++			esp = (unsigned long *)&esp;
 +	}
 +
-+	/* Offlined for more than a few seconds? Avoid lockup warnings. */
-+	if (stolen > 5*HZ)
-+		touch_softlockup_watchdog();
-+
-+	/* Local timer processing (see update_process_times()). */
-+	run_local_timers();
-+	if (rcu_pending(cpu))
-+		rcu_check_callbacks(cpu, user_mode_vm(regs));
-+	scheduler_tick();
-+	run_posix_cpu_timers(current);
-+	profile_tick(CPU_PROFILING, regs);
-+
-+	return IRQ_HANDLED;
++	stack = esp;
++	for(i = 0; i < kstack_depth_to_print; i++) {
++		if (kstack_end(stack))
++			break;
++		if (i && ((i % 8) == 0))
++			printk("\n%s       ", log_lvl);
++		printk("%08lx ", *stack++);
++	}
++	printk("\n%sCall Trace:\n", log_lvl);
++	show_trace_log_lvl(task, regs, esp, log_lvl);
 +}
 +
-+static void init_missing_ticks_accounting(int cpu)
++void show_stack(struct task_struct *task, unsigned long *esp)
 +{
-+	struct vcpu_register_runstate_memory_area area;
-+	struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
-+	int rc;
-+
-+	memset(runstate, 0, sizeof(*runstate));
-+
-+	area.addr.v = runstate;
-+	rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, &area);
-+	WARN_ON(rc && rc != -ENOSYS);
-+
-+	per_cpu(processed_blocked_time, cpu) =
-+		runstate->time[RUNSTATE_blocked];
-+	per_cpu(processed_stolen_time, cpu) =
-+		runstate->time[RUNSTATE_runnable] +
-+		runstate->time[RUNSTATE_offline];
++	printk("       ");
++	show_stack_log_lvl(task, NULL, esp, "");
 +}
 +
-+/* not static: needed by APM */
-+unsigned long get_cmos_time(void)
++/*
++ * The architecture-independent dump_stack generator
++ */
++void dump_stack(void)
 +{
-+	unsigned long retval;
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&rtc_lock, flags);
-+
-+	if (efi_enabled)
-+		retval = efi_get_time();
-+	else
-+		retval = mach_get_cmos_time();
-+
-+	spin_unlock_irqrestore(&rtc_lock, flags);
++	unsigned long stack;
 +
-+	return retval;
++	show_trace(current, NULL, &stack);
 +}
-+EXPORT_SYMBOL(get_cmos_time);
-+
-+static void sync_cmos_clock(unsigned long dummy);
 +
-+static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
++EXPORT_SYMBOL(dump_stack);
 +
-+static void sync_cmos_clock(unsigned long dummy)
++void show_registers(struct pt_regs *regs)
 +{
-+	struct timeval now, next;
-+	int fail = 1;
++	int i;
++	int in_kernel = 1;
++	unsigned long esp;
++	unsigned short ss;
 +
++	esp = (unsigned long) (&regs->esp);
++	savesegment(ss, ss);
++	if (user_mode_vm(regs)) {
++		in_kernel = 0;
++		esp = regs->esp;
++		ss = regs->xss & 0xffff;
++	}
++	print_modules();
++	printk(KERN_EMERG "CPU:    %d\nEIP:    %04x:[<%08lx>]    %s VLI\n"
++			"EFLAGS: %08lx   (%s %.*s) \n",
++		smp_processor_id(), 0xffff & regs->xcs, regs->eip,
++		print_tainted(), regs->eflags, system_utsname.release,
++		(int)strcspn(system_utsname.version, " "),
++		system_utsname.version);
++	print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
++	printk(KERN_EMERG "eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
++		regs->eax, regs->ebx, regs->ecx, regs->edx);
++	printk(KERN_EMERG "esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
++		regs->esi, regs->edi, regs->ebp, esp);
++	printk(KERN_EMERG "ds: %04x   es: %04x   ss: %04x\n",
++		regs->xds & 0xffff, regs->xes & 0xffff, ss);
++	printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
++		TASK_COMM_LEN, current->comm, current->pid,
++		current_thread_info(), current, current->thread_info);
 +	/*
-+	 * If we have an externally synchronized Linux clock, then update
-+	 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
-+	 * called as close as possible to 500 ms before the new second starts.
-+	 * This code is run on a timer.  If the clock is set, that timer
-+	 * may not expire at the correct time.  Thus, we adjust...
++	 * When in-kernel, we also print out the stack and code at the
++	 * time of the fault..
 +	 */
-+	if (!ntp_synced())
-+		/*
-+		 * Not synced, exit, do not restart a timer (if one is
-+		 * running, let it run out).
-+		 */
-+		return;
++	if (in_kernel) {
++		u8 __user *eip;
 +
-+	do_gettimeofday(&now);
-+	if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
-+	    now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
-+		fail = set_rtc_mmss(now.tv_sec);
++		printk("\n" KERN_EMERG "Stack: ");
++		show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);
 +
-+	next.tv_usec = USEC_AFTER - now.tv_usec;
-+	if (next.tv_usec <= 0)
-+		next.tv_usec += USEC_PER_SEC;
++		printk(KERN_EMERG "Code: ");
 +
-+	if (!fail)
-+		next.tv_sec = 659;
-+	else
-+		next.tv_sec = 0;
++		eip = (u8 __user *)regs->eip - 43;
++		for (i = 0; i < 64; i++, eip++) {
++			unsigned char c;
 +
-+	if (next.tv_usec >= USEC_PER_SEC) {
-+		next.tv_sec++;
-+		next.tv_usec -= USEC_PER_SEC;
++			if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
++				printk(" Bad EIP value.");
++				break;
++			}
++			if (eip == (u8 __user *)regs->eip)
++				printk("<%02x> ", c);
++			else
++				printk("%02x ", c);
++		}
 +	}
-+	mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
-+}
++	printk("\n");
++}	
 +
-+void notify_arch_cmos_timer(void)
++static void handle_BUG(struct pt_regs *regs)
 +{
-+	mod_timer(&sync_cmos_timer, jiffies + 1);
-+	mod_timer(&sync_xen_wallclock_timer, jiffies + 1);
-+}
++	unsigned long eip = regs->eip;
++	unsigned short ud2;
 +
-+static long clock_cmos_diff, sleep_start;
++	if (eip < PAGE_OFFSET)
++		return;
++	if (__get_user(ud2, (unsigned short __user *)eip))
++		return;
++	if (ud2 != 0x0b0f)
++		return;
 +
-+static int timer_suspend(struct sys_device *dev, pm_message_t state)
-+{
-+	/*
-+	 * Estimate time zone so that set_time can update the clock
-+	 */
-+	clock_cmos_diff = -get_cmos_time();
-+	clock_cmos_diff += get_seconds();
-+	sleep_start = get_cmos_time();
-+	return 0;
++	printk(KERN_EMERG "------------[ cut here ]------------\n");
++
++#ifdef CONFIG_DEBUG_BUGVERBOSE
++	do {
++		unsigned short line;
++		char *file;
++		char c;
++
++		if (__get_user(line, (unsigned short __user *)(eip + 2)))
++			break;
++		if (__get_user(file, (char * __user *)(eip + 4)) ||
++		    (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
++			file = "<bad filename>";
++
++		printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
++		return;
++	} while (0);
++#endif
++	printk(KERN_EMERG "Kernel BUG at [verbose debug info unavailable]\n");
 +}
 +
-+static int timer_resume(struct sys_device *dev)
++/* This path is taken when something in the kernel
++ * has done something bad and is about to be terminated.
++ */
++void die(const char * str, struct pt_regs * regs, long err)
 +{
++	static struct {
++		spinlock_t lock;
++		u32 lock_owner;
++		int lock_owner_depth;
++	} die = {
++		.lock =			SPIN_LOCK_UNLOCKED,
++		.lock_owner =		-1,
++		.lock_owner_depth =	0
++	};
++	static int die_counter;
 +	unsigned long flags;
-+	unsigned long sec;
-+	unsigned long sleep_length;
 +
-+#ifdef CONFIG_HPET_TIMER
-+	if (is_hpet_enabled())
-+		hpet_reenable();
-+#endif
-+	sec = get_cmos_time() + clock_cmos_diff;
-+	sleep_length = (get_cmos_time() - sleep_start) * HZ;
-+	write_seqlock_irqsave(&xtime_lock, flags);
-+	xtime.tv_sec = sec;
-+	xtime.tv_nsec = 0;
-+	jiffies_64 += sleep_length;
-+	wall_jiffies += sleep_length;
-+	write_sequnlock_irqrestore(&xtime_lock, flags);
-+	touch_softlockup_watchdog();
-+	return 0;
-+}
++	oops_enter();
 +
-+static struct sysdev_class timer_sysclass = {
-+	.resume = timer_resume,
-+	.suspend = timer_suspend,
-+	set_kset_name("timer"),
-+};
++	if (die.lock_owner != raw_smp_processor_id()) {
++		console_verbose();
++		spin_lock_irqsave(&die.lock, flags);
++		die.lock_owner = smp_processor_id();
++		die.lock_owner_depth = 0;
++		bust_spinlocks(1);
++	}
++	else
++		local_save_flags(flags);
 +
++	if (++die.lock_owner_depth < 3) {
++		int nl = 0;
++		unsigned long esp;
++		unsigned short ss;
 +
-+/* XXX this driverfs stuff should probably go elsewhere later -john */
-+static struct sys_device device_timer = {
-+	.id	= 0,
-+	.cls	= &timer_sysclass,
-+};
++		handle_BUG(regs);
++		printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
++#ifdef CONFIG_PREEMPT
++		printk(KERN_EMERG "PREEMPT ");
++		nl = 1;
++#endif
++#ifdef CONFIG_SMP
++		if (!nl)
++			printk(KERN_EMERG);
++		printk("SMP ");
++		nl = 1;
++#endif
++#ifdef CONFIG_DEBUG_PAGEALLOC
++		if (!nl)
++			printk(KERN_EMERG);
++		printk("DEBUG_PAGEALLOC");
++		nl = 1;
++#endif
++		if (nl)
++			printk("\n");
++		if (notify_die(DIE_OOPS, str, regs, err,
++					current->thread.trap_no, SIGSEGV) !=
++				NOTIFY_STOP) {
++			show_registers(regs);
++			/* Executive summary in case the oops scrolled away */
++			esp = (unsigned long) (&regs->esp);
++			savesegment(ss, ss);
++			if (user_mode(regs)) {
++				esp = regs->esp;
++				ss = regs->xss & 0xffff;
++			}
++			printk(KERN_EMERG "EIP: [<%08lx>] ", regs->eip);
++			print_symbol("%s", regs->eip);
++			printk(" SS:ESP %04x:%08lx\n", ss, esp);
++		}
++		else
++			regs = NULL;
++  	} else
++		printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
 +
-+static int time_init_device(void)
-+{
-+	int error = sysdev_class_register(&timer_sysclass);
-+	if (!error)
-+		error = sysdev_register(&device_timer);
-+	return error;
-+}
++	bust_spinlocks(0);
++	die.lock_owner = -1;
++	spin_unlock_irqrestore(&die.lock, flags);
 +
-+device_initcall(time_init_device);
++	if (!regs)
++		return;
 +
-+#ifdef CONFIG_HPET_TIMER
-+extern void (*late_time_init)(void);
-+/* Duplicate of time_init() below, with hpet_enable part added */
-+static void __init hpet_time_init(void)
-+{
-+	xtime.tv_sec = get_cmos_time();
-+	xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
-+	set_normalized_timespec(&wall_to_monotonic,
-+		-xtime.tv_sec, -xtime.tv_nsec);
++	if (kexec_should_crash(current))
++		crash_kexec(regs);
 +
-+	if ((hpet_enable() >= 0) && hpet_use_timer) {
-+		printk("Using HPET for base-timer\n");
-+	}
++	if (in_interrupt())
++		panic("Fatal exception in interrupt");
 +
-+	time_init_hook();
-+}
-+#endif
++	if (panic_on_oops)
++		panic("Fatal exception");
 +
-+/* Dynamically-mapped IRQ. */
-+DEFINE_PER_CPU(int, timer_irq);
++	oops_exit();
++	do_exit(SIGSEGV);
++}
 +
-+extern void (*late_time_init)(void);
-+static void setup_cpu0_timer_irq(void)
++static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
 +{
-+	per_cpu(timer_irq, 0) =
-+		bind_virq_to_irqhandler(
-+			VIRQ_TIMER,
-+			0,
-+			timer_interrupt,
-+			SA_INTERRUPT,
-+			"timer0",
-+			NULL);
-+	BUG_ON(per_cpu(timer_irq, 0) < 0);
++	if (!user_mode_vm(regs))
++		die(str, regs, err);
 +}
 +
-+static struct vcpu_set_periodic_timer xen_set_periodic_tick = {
-+	.period_ns = NS_PER_TICK
-+};
-+
-+void __init time_init(void)
++static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
++			      struct pt_regs * regs, long error_code,
++			      siginfo_t *info)
 +{
-+#ifdef CONFIG_HPET_TIMER
-+	if (is_hpet_capable()) {
-+		/*
-+		 * HPET initialization needs to do memory-mapped io. So, let
-+		 * us do a late initialization after mem_init().
-+		 */
-+		late_time_init = hpet_time_init;
-+		return;
-+	}
-+#endif
++	struct task_struct *tsk = current;
++	tsk->thread.error_code = error_code;
++	tsk->thread.trap_no = trapnr;
 +
-+	HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, 0,
-+			   &xen_set_periodic_tick);
++	if (regs->eflags & VM_MASK) {
++		if (vm86)
++			goto vm86_trap;
++		goto trap_signal;
++	}
 +
-+	get_time_values_from_xen(0);
++	if (!user_mode(regs))
++		goto kernel_trap;
 +
-+	processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
-+	per_cpu(processed_system_time, 0) = processed_system_time;
-+	init_missing_ticks_accounting(0);
++	trap_signal: {
++		if (info)
++			force_sig_info(signr, info, tsk);
++		else
++			force_sig(signr, tsk);
++		return;
++	}
 +
-+	update_wallclock();
++	kernel_trap: {
++		if (!fixup_exception(regs))
++			die(str, regs, error_code);
++		return;
++	}
 +
-+	init_cpu_khz();
-+	printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n",
-+	       cpu_khz / 1000, cpu_khz % 1000);
++	vm86_trap: {
++		int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
++		if (ret) goto trap_signal;
++		return;
++	}
++}
 +
-+#if defined(__x86_64__)
-+	vxtime.mode = VXTIME_TSC;
-+	vxtime.quot = (1000000L << 32) / vxtime_hz;
-+	vxtime.tsc_quot = (1000L << 32) / cpu_khz;
-+	sync_core();
-+	rdtscll(vxtime.last_tsc);
-+#endif
++#define DO_ERROR(trapnr, signr, str, name) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++						== NOTIFY_STOP) \
++		return; \
++	do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
++}
 +
-+	/* Cannot request_irq() until kmem is initialised. */
-+	late_time_init = setup_cpu0_timer_irq;
++#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++	siginfo_t info; \
++	info.si_signo = signr; \
++	info.si_errno = 0; \
++	info.si_code = sicode; \
++	info.si_addr = (void __user *)siaddr; \
++	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++						== NOTIFY_STOP) \
++		return; \
++	do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
 +}
 +
-+/* Convert jiffies to system time. */
-+u64 jiffies_to_st(unsigned long j)
-+{
-+	unsigned long seq;
-+	long delta;
-+	u64 st;
-+
-+	do {
-+		seq = read_seqbegin(&xtime_lock);
-+		delta = j - jiffies;
-+		if (delta < 1) {
-+			/* Triggers in some wrap-around cases, but that's okay:
-+			 * we just end up with a shorter timeout. */
-+			st = processed_system_time + NS_PER_TICK;
-+		} else if (((unsigned long)delta >> (BITS_PER_LONG-3)) != 0) {
-+			/* Very long timeout means there is no pending timer.
-+			 * We indicate this to Xen by passing zero timeout. */
-+			st = 0;
-+		} else {
-+			st = processed_system_time + delta * (u64)NS_PER_TICK;
-+		}
-+	} while (read_seqretry(&xtime_lock, seq));
++#define DO_VM86_ERROR(trapnr, signr, str, name) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++						== NOTIFY_STOP) \
++		return; \
++	do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
++}
 +
-+	return st;
++#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++	siginfo_t info; \
++	info.si_signo = signr; \
++	info.si_errno = 0; \
++	info.si_code = sicode; \
++	info.si_addr = (void __user *)siaddr; \
++	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++						== NOTIFY_STOP) \
++		return; \
++	do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
 +}
-+EXPORT_SYMBOL(jiffies_to_st);
 +
-+/*
-+ * stop_hz_timer / start_hz_timer - enter/exit 'tickless mode' on an idle cpu
-+ * These functions are based on implementations from arch/s390/kernel/time.c
-+ */
-+static void stop_hz_timer(void)
++DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
++#ifndef CONFIG_KPROBES
++DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
++#endif
++DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
++DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
++DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
++DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
++DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
++DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
++DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
++DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
++DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
++
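Each DO_* line above stamps out a complete trap handler from the matching
macro. DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS), for instance,
expands to:

	fastcall void do_invalid_TSS(struct pt_regs *regs, long error_code)
	{
		if (notify_die(DIE_TRAP, "invalid TSS", regs, error_code,
			       10, SIGSEGV) == NOTIFY_STOP)
			return;
		do_trap(10, SIGSEGV, "invalid TSS", 0, regs, error_code,
			NULL);
	}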
++fastcall void __kprobes do_general_protection(struct pt_regs * regs,
++					      long error_code)
 +{
-+	struct vcpu_set_singleshot_timer singleshot;
-+	unsigned int cpu = smp_processor_id();
-+	unsigned long j;
-+	int rc;
++	current->thread.error_code = error_code;
++	current->thread.trap_no = 13;
 +
-+	cpu_set(cpu, nohz_cpu_mask);
++	if (regs->eflags & VM_MASK)
++		goto gp_in_vm86;
 +
-+	/* See matching smp_mb in rcu_start_batch in rcupdate.c.  These mbs  */
-+	/* ensure that if __rcu_pending (nested in rcu_needs_cpu) fetches a  */
-+	/* value of rcp->cur that matches rdp->quiescbatch and allows us to  */
-+	/* stop the hz timer then the cpumasks created for subsequent values */
-+	/* of cur in rcu_start_batch are guaranteed to pick up the updated   */
-+	/* nohz_cpu_mask and so will not depend on this cpu.                 */
++	if (!user_mode(regs))
++		goto gp_in_kernel;
 +
-+	smp_mb();
++	current->thread.error_code = error_code;
++	current->thread.trap_no = 13;
++	force_sig(SIGSEGV, current);
++	return;
 +
-+	/* Leave ourselves in tick mode if rcu or softirq or timer pending. */
-+	if (rcu_needs_cpu(cpu) || local_softirq_pending() ||
-+	    (j = next_timer_interrupt(), time_before_eq(j, jiffies))) {
-+		cpu_clear(cpu, nohz_cpu_mask);
-+		j = jiffies + 1;
-+	}
++gp_in_vm86:
++	local_irq_enable();
++	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
++	return;
 +
-+	singleshot.timeout_abs_ns = jiffies_to_st(j);
-+	singleshot.flags = 0;
-+	rc = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &singleshot);
-+#if CONFIG_XEN_COMPAT <= 0x030004
-+	if (rc) {
-+		BUG_ON(rc != -ENOSYS);
-+		rc = HYPERVISOR_set_timer_op(singleshot.timeout_abs_ns);
++gp_in_kernel:
++	if (!fixup_exception(regs)) {
++		if (notify_die(DIE_GPF, "general protection fault", regs,
++				error_code, 13, SIGSEGV) == NOTIFY_STOP)
++			return;
++		die("general protection fault", regs, error_code);
 +	}
-+#endif
-+	BUG_ON(rc);
 +}
 +
-+static void start_hz_timer(void)
++static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
 +{
-+	cpu_clear(smp_processor_id(), nohz_cpu_mask);
-+}
++	printk(KERN_EMERG "Uhhuh. NMI received. Dazed and confused, but trying "
++			"to continue\n");
++	printk(KERN_EMERG "You probably have a hardware problem with your RAM "
++			"chips\n");
 +
-+void raw_safe_halt(void)
-+{
-+	stop_hz_timer();
-+	/* Blocking includes an implicit local_irq_enable(). */
-+	HYPERVISOR_block();
-+	start_hz_timer();
++	/* Clear and disable the memory parity error line. */
++	clear_mem_error(reason);
 +}
-+EXPORT_SYMBOL(raw_safe_halt);
 +
-+void halt(void)
++static void io_check_error(unsigned char reason, struct pt_regs * regs)
 +{
-+	if (irqs_disabled())
-+		HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
++	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
++	show_registers(regs);
++
++	/* Re-enable the IOCK line, wait for a few seconds */
++	clear_io_check_error(reason);
 +}
-+EXPORT_SYMBOL(halt);
 +
-+/* No locking required. Interrupts are disabled on all CPUs. */
-+void time_resume(void)
++static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
 +{
-+	unsigned int cpu;
-+
-+	init_cpu_khz();
-+
-+	for_each_online_cpu(cpu) {
-+		HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu,
-+				   &xen_set_periodic_tick);
-+		get_time_values_from_xen(cpu);
-+		per_cpu(processed_system_time, cpu) =
-+			per_cpu(shadow_time, 0).system_timestamp;
-+		init_missing_ticks_accounting(cpu);
++#ifdef CONFIG_MCA
++	/* Might actually be able to figure out what the guilty party
++	 * is. */
++	if( MCA_bus ) {
++		mca_handle_nmi();
++		return;
 +	}
-+
-+	processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
-+
-+	update_wallclock();
++#endif
++	printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
++		reason, smp_processor_id());
++	printk("Dazed and confused, but trying to continue\n");
++	printk("Do you have a strange power saving mode enabled?\n");
 +}
 +
-+#ifdef CONFIG_SMP
-+static char timer_name[NR_CPUS][15];
++static DEFINE_SPINLOCK(nmi_print_lock);
 +
-+int local_setup_timer(unsigned int cpu)
++void die_nmi (struct pt_regs *regs, const char *msg)
 +{
-+	int seq, irq;
-+
-+	BUG_ON(cpu == 0);
-+
-+	HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu,
-+			   &xen_set_periodic_tick);
++	if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==
++	    NOTIFY_STOP)
++		return;
 +
-+	do {
-+		seq = read_seqbegin(&xtime_lock);
-+		/* Use cpu0 timestamp: cpu's shadow is not initialised yet. */
-+		per_cpu(processed_system_time, cpu) =
-+			per_cpu(shadow_time, 0).system_timestamp;
-+		init_missing_ticks_accounting(cpu);
-+	} while (read_seqretry(&xtime_lock, seq));
++	spin_lock(&nmi_print_lock);
++	/*
++	 * We are in trouble anyway, let's at least try
++	 * to get a message out.
++	 */
++	bust_spinlocks(1);
++	printk(KERN_EMERG "%s", msg);
++	printk(" on CPU%d, eip %08lx, registers:\n",
++		smp_processor_id(), regs->eip);
++	show_registers(regs);
++	printk(KERN_EMERG "console shuts up ...\n");
++	console_silent();
++	spin_unlock(&nmi_print_lock);
++	bust_spinlocks(0);
 +
-+	sprintf(timer_name[cpu], "timer%d", cpu);
-+	irq = bind_virq_to_irqhandler(VIRQ_TIMER,
-+				      cpu,
-+				      timer_interrupt,
-+				      SA_INTERRUPT,
-+				      timer_name[cpu],
-+				      NULL);
-+	if (irq < 0)
-+		return irq;
-+	per_cpu(timer_irq, cpu) = irq;
++	/* If we are in kernel we are probably nested up pretty bad
++	 * and might as well get out now while we still can.
++	 */
++	if (!user_mode_vm(regs)) {
++		current->thread.trap_no = 2;
++		crash_kexec(regs);
++	}
 +
-+	return 0;
++	do_exit(SIGSEGV);
 +}
 +
-+void local_teardown_timer(unsigned int cpu)
++static void default_do_nmi(struct pt_regs * regs)
 +{
-+	BUG_ON(cpu == 0);
-+	unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
-+}
++	unsigned char reason = 0;
++
++	/* Only the BSP gets external NMIs from the system.  */
++	if (!smp_processor_id())
++		reason = get_nmi_reason();
++ 
++	if (!(reason & 0xc0)) {
++		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
++							== NOTIFY_STOP)
++			return;
++#ifdef CONFIG_X86_LOCAL_APIC
++		/*
++		 * Ok, so this is none of the documented NMI sources,
++		 * so it must be the NMI watchdog.
++		 */
++		if (nmi_watchdog) {
++			nmi_watchdog_tick(regs);
++			return;
++		}
 +#endif
++		unknown_nmi_error(reason, regs);
++		return;
++	}
++	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
++		return;
++	if (reason & 0x80)
++		mem_parity_error(reason, regs);
++	if (reason & 0x40)
++		io_check_error(reason, regs);
++	/*
++	 * Reassert NMI in case it became active meanwhile
++	 * as it's edge-triggered.
++	 */
++	reassert_nmi();
++}
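The reason byte tested above comes from system control port B: bit 7 flags a
memory parity error, bit 6 an I/O check. On native i386, get_nmi_reason() is
just a port read (under Xen the mach layer virtualizes it instead); a sketch
of the native form, with a made-up name:

	static inline unsigned char native_get_nmi_reason(void)
	{
		return inb(0x61);	/* bit 7: parity, bit 6: IOCK */
	}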
 +
-+/*
-+ * /proc/sys/xen: This really belongs in another file. It can stay here for
-+ * now however.
-+ */
-+static ctl_table xen_subtable[] = {
-+	{
-+		.ctl_name	= 1,
-+		.procname	= "independent_wallclock",
-+		.data		= &independent_wallclock,
-+		.maxlen		= sizeof(independent_wallclock),
-+		.mode		= 0644,
-+		.proc_handler	= proc_dointvec
-+	},
-+	{
-+		.ctl_name	= 2,
-+		.procname	= "permitted_clock_jitter",
-+		.data		= &permitted_clock_jitter,
-+		.maxlen		= sizeof(permitted_clock_jitter),
-+		.mode		= 0644,
-+		.proc_handler	= proc_doulongvec_minmax
-+	},
-+	{ 0 }
-+};
-+static ctl_table xen_table[] = {
-+	{
-+		.ctl_name	= 123,
-+		.procname	= "xen",
-+		.mode		= 0555,
-+		.child		= xen_subtable},
-+	{ 0 }
-+};
-+static int __init xen_sysctl_init(void)
++static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
 +{
-+	(void)register_sysctl_table(xen_table, 0);
 +	return 0;
 +}
-+__initcall(xen_sysctl_init);
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/traps.c tmp-linux-2.6-xen.patch/arch/i386/kernel/traps.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/traps.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/traps.c	2007-10-14 01:51:15.000000000 +0200
-@@ -642,18 +642,11 @@ static void mem_parity_error(unsigned ch
- 
- static void io_check_error(unsigned char reason, struct pt_regs * regs)
- {
--	unsigned long i;
--
- 	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
- 	show_registers(regs);
- 
- 	/* Re-enable the IOCK line, wait for a few seconds */
--	reason = (reason & 0xf) | 8;
--	outb(reason, 0x61);
--	i = 2000;
--	while (--i) udelay(1000);
--	reason &= ~8;
--	outb(reason, 0x61);
-+	clear_io_check_error(reason);
- }
- 
- static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/traps-xen.c tmp-linux-2.6-xen.patch/arch/i386/kernel/traps-xen.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/traps-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/traps-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,1186 @@
-+/*
-+ *  linux/arch/i386/traps.c
-+ *
-+ *  Copyright (C) 1991, 1992  Linus Torvalds
-+ *
-+ *  Pentium III FXSR, SSE support
-+ *	Gareth Hughes <gareth at valinux.com>, May 2000
-+ */
-+
-+/*
-+ * 'Traps.c' handles hardware traps and faults after we have saved some
-+ * state in 'asm.s'.
-+ */
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/timer.h>
-+#include <linux/mm.h>
-+#include <linux/init.h>
-+#include <linux/delay.h>
-+#include <linux/spinlock.h>
-+#include <linux/interrupt.h>
-+#include <linux/highmem.h>
-+#include <linux/kallsyms.h>
-+#include <linux/ptrace.h>
-+#include <linux/utsname.h>
-+#include <linux/kprobes.h>
-+#include <linux/kexec.h>
-+#include <linux/unwind.h>
-+
-+#ifdef CONFIG_EISA
-+#include <linux/ioport.h>
-+#include <linux/eisa.h>
-+#endif
-+
-+#ifdef CONFIG_MCA
-+#include <linux/mca.h>
-+#endif
-+
-+#include <asm/processor.h>
-+#include <asm/system.h>
-+#include <asm/uaccess.h>
-+#include <asm/io.h>
-+#include <asm/atomic.h>
-+#include <asm/debugreg.h>
-+#include <asm/desc.h>
-+#include <asm/i387.h>
-+#include <asm/nmi.h>
-+#include <asm/unwind.h>
-+#include <asm/smp.h>
-+#include <asm/arch_hooks.h>
-+#include <asm/kdebug.h>
-+
-+#include <linux/module.h>
-+
-+#include "mach_traps.h"
-+
-+asmlinkage int system_call(void);
++ 
++static nmi_callback_t nmi_callback = dummy_nmi_callback;
++ 
++fastcall void do_nmi(struct pt_regs * regs, long error_code)
++{
++	int cpu;
 +
-+struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
-+		{ 0, 0 }, { 0, 0 } };
++	nmi_enter();
 +
-+/* Do we ignore FPU interrupts ? */
-+char ignore_fpu_irq = 0;
++	cpu = smp_processor_id();
 +
-+#ifndef CONFIG_X86_NO_IDT
-+/*
-+ * The IDT has to be page-aligned to simplify the Pentium
-+ * F0 0F bug workaround.. We have a special link segment
-+ * for this.
-+ */
-+struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
-+#endif
++	++nmi_count(cpu);
 +
-+asmlinkage void divide_error(void);
-+asmlinkage void debug(void);
-+asmlinkage void nmi(void);
-+asmlinkage void int3(void);
-+asmlinkage void overflow(void);
-+asmlinkage void bounds(void);
-+asmlinkage void invalid_op(void);
-+asmlinkage void device_not_available(void);
-+asmlinkage void coprocessor_segment_overrun(void);
-+asmlinkage void invalid_TSS(void);
-+asmlinkage void segment_not_present(void);
-+asmlinkage void stack_segment(void);
-+asmlinkage void general_protection(void);
-+asmlinkage void page_fault(void);
-+asmlinkage void coprocessor_error(void);
-+asmlinkage void simd_coprocessor_error(void);
-+asmlinkage void alignment_check(void);
-+#ifndef CONFIG_XEN
-+asmlinkage void spurious_interrupt_bug(void);
-+#else
-+asmlinkage void fixup_4gb_segment(void);
-+#endif
-+asmlinkage void machine_check(void);
++	if (!rcu_dereference(nmi_callback)(regs, cpu))
++		default_do_nmi(regs);
 +
-+static int kstack_depth_to_print = 24;
-+#ifdef CONFIG_STACK_UNWIND
-+static int call_trace = 1;
-+#else
-+#define call_trace (-1)
-+#endif
-+ATOMIC_NOTIFIER_HEAD(i386die_chain);
++	nmi_exit();
++}
 +
-+int register_die_notifier(struct notifier_block *nb)
++void set_nmi_callback(nmi_callback_t callback)
 +{
 +	vmalloc_sync_all();
-+	return atomic_notifier_chain_register(&i386die_chain, nb);
++	rcu_assign_pointer(nmi_callback, callback);
 +}
-+EXPORT_SYMBOL(register_die_notifier); /* used modular by kdb */
++EXPORT_SYMBOL_GPL(set_nmi_callback);
 +
-+int unregister_die_notifier(struct notifier_block *nb)
++void unset_nmi_callback(void)
 +{
-+	return atomic_notifier_chain_unregister(&i386die_chain, nb);
++	nmi_callback = dummy_nmi_callback;
 +}
-+EXPORT_SYMBOL(unregister_die_notifier); /* used modular by kdb */
++EXPORT_SYMBOL_GPL(unset_nmi_callback);
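set_nmi_callback() lets one consumer (typically a profiler) claim NMIs before
default_do_nmi() runs; the rcu_assign_pointer()/rcu_dereference() pair makes
the swap safe against NMIs already in flight. A hypothetical consumer:

	static int my_nmi(struct pt_regs *regs, int cpu)
	{
		/* nonzero consumes the NMI; 0 falls through to
		 * default_do_nmi() */
		return 1;
	}

	/* start:  set_nmi_callback(my_nmi);
	 * stop:   unset_nmi_callback(); */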
 +
-+static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
++#ifdef CONFIG_KPROBES
++fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
 +{
-+	return	p > (void *)tinfo &&
-+		p < (void *)tinfo + THREAD_SIZE - 3;
++	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
++			== NOTIFY_STOP)
++		return;
++	/* This is an interrupt gate, because kprobes wants interrupts
++	disabled.  Normal trap handlers don't. */
++	restore_interrupts(regs);
++	do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
 +}
++#endif
 +
 +/*
-+ * Print one address/symbol entries per line.
++ * Our handling of the processor debug registers is non-trivial.
++ * We do not clear them on entry and exit from the kernel. Therefore
++ * it is possible to get a watchpoint trap here from inside the kernel.
++ * However, the code in ./ptrace.c has ensured that the user can
++ * only set watchpoints on userspace addresses. Therefore the in-kernel
++ * watchpoint trap can only occur in code which is reading/writing
++ * from user space. Such code must not hold kernel locks (since it
++ * can equally take a page fault), therefore it is safe to call
++ * force_sig_info even though that claims and releases locks.
++ * 
++ * Code in ./signal.c ensures that the debug control register
++ * is restored before we deliver any signal, and therefore that
++ * user code runs with the correct debug control register even though
++ * we clear it here.
++ *
++ * Being careful here means that we don't have to be as careful in a
++ * lot of more complicated places (task switching can be a bit lazy
++ * about restoring all the debug state, and ptrace doesn't have to
++ * find every occurrence of the TF bit that could be saved away even
++ * by user code)
 + */
-+static inline void print_addr_and_symbol(unsigned long addr, char *log_lvl)
++fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
 +{
-+	printk(" [<%08lx>] ", addr);
++	unsigned int condition;
++	struct task_struct *tsk = current;
 +
-+	print_symbol("%s\n", addr);
-+}
++	get_debugreg(condition, 6);
 +
-+static inline unsigned long print_context_stack(struct thread_info *tinfo,
-+				unsigned long *stack, unsigned long ebp,
-+				char *log_lvl)
-+{
-+	unsigned long addr;
++	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
++					SIGTRAP) == NOTIFY_STOP)
++		return;
++	/* It's safe to allow irq's after DR6 has been saved */
++	if (regs->eflags & X86_EFLAGS_IF)
++		local_irq_enable();
 +
-+#ifdef	CONFIG_FRAME_POINTER
-+	while (valid_stack_ptr(tinfo, (void *)ebp)) {
-+		addr = *(unsigned long *)(ebp + 4);
-+		print_addr_and_symbol(addr, log_lvl);
-+		/*
-+		 * break out of recursive entries (such as
-+		 * end_of_stack_stop_unwind_function):
-+	 	 */
-+		if (ebp == *(unsigned long *)ebp)
-+			break;
-+		ebp = *(unsigned long *)ebp;
-+	}
-+#else
-+	while (valid_stack_ptr(tinfo, stack)) {
-+		addr = *stack++;
-+		if (__kernel_text_address(addr))
-+			print_addr_and_symbol(addr, log_lvl);
++	/* Mask out spurious debug traps due to lazy DR7 setting */
++	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
++		if (!tsk->thread.debugreg[7])
++			goto clear_dr7;
 +	}
-+#endif
-+	return ebp;
-+}
 +
-+static asmlinkage int
-+show_trace_unwind(struct unwind_frame_info *info, void *log_lvl)
-+{
-+	int n = 0;
++	if (regs->eflags & VM_MASK)
++		goto debug_vm86;
 +
-+	while (unwind(info) == 0 && UNW_PC(info)) {
-+		n++;
-+		print_addr_and_symbol(UNW_PC(info), log_lvl);
-+		if (arch_unw_user_mode(info))
-+			break;
++	/* Save debug status register where ptrace can see it */
++	tsk->thread.debugreg[6] = condition;
++
++	/*
++	 * Single-stepping through TF: make sure we ignore any events in
++	 * kernel space (but re-enable TF when returning to user mode).
++	 */
++	if (condition & DR_STEP) {
++		/*
++		 * We already checked v86 mode above, so we can
++		 * check for kernel mode by just checking the CPL
++		 * of CS.
++		 */
++		if (!user_mode(regs))
++			goto clear_TF_reenable;
 +	}
-+	return n;
-+}
 +
-+static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
-+			       unsigned long *stack, char *log_lvl)
-+{
-+	unsigned long ebp;
++	/* Ok, finally something we can handle */
++	send_sigtrap(tsk, regs, error_code);
 +
-+	if (!task)
-+		task = current;
++	/* Disable additional traps. They'll be re-enabled when
++	 * the signal is delivered.
++	 */
++clear_dr7:
++	set_debugreg(0, 7);
++	return;
 +
-+	if (call_trace >= 0) {
-+		int unw_ret = 0;
-+		struct unwind_frame_info info;
++debug_vm86:
++	handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
++	return;
 +
-+		if (regs) {
-+			if (unwind_init_frame_info(&info, task, regs) == 0)
-+				unw_ret = show_trace_unwind(&info, log_lvl);
-+		} else if (task == current)
-+			unw_ret = unwind_init_running(&info, show_trace_unwind, log_lvl);
-+		else {
-+			if (unwind_init_blocked(&info, task) == 0)
-+				unw_ret = show_trace_unwind(&info, log_lvl);
-+		}
-+		if (unw_ret > 0) {
-+			if (call_trace == 1 && !arch_unw_user_mode(&info)) {
-+				print_symbol("DWARF2 unwinder stuck at %s\n",
-+					     UNW_PC(&info));
-+				if (UNW_SP(&info) >= PAGE_OFFSET) {
-+					printk("Leftover inexact backtrace:\n");
-+					stack = (void *)UNW_SP(&info);
-+				} else
-+					printk("Full inexact backtrace again:\n");
-+			} else if (call_trace >= 1)
-+				return;
-+			else
-+				printk("Full inexact backtrace again:\n");
-+		} else
-+			printk("Inexact backtrace:\n");
-+	}
++clear_TF_reenable:
++	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
++	regs->eflags &= ~TF_MASK;
++	return;
++}
 +
-+	if (task == current) {
-+		/* Grab ebp right from our regs */
-+		asm ("movl %%ebp, %0" : "=r" (ebp) : );
-+	} else {
-+		/* ebp is the last reg pushed by switch_to */
-+		ebp = *(unsigned long *) task->thread.esp;
-+	}
++/*
++ * Note that we play around with the 'TS' bit in an attempt to get
++ * the correct behaviour even in the presence of the asynchronous
++ * IRQ13 behaviour
++ */
++void math_error(void __user *eip)
++{
++	struct task_struct * task;
++	siginfo_t info;
++	unsigned short cwd, swd;
 +
-+	while (1) {
-+		struct thread_info *context;
-+		context = (struct thread_info *)
-+			((unsigned long)stack & (~(THREAD_SIZE - 1)));
-+		ebp = print_context_stack(context, stack, ebp, log_lvl);
-+		stack = (unsigned long*)context->previous_esp;
-+		if (!stack)
++	/*
++	 * Save the info for the exception handler and clear the error.
++	 */
++	task = current;
++	save_init_fpu(task);
++	task->thread.trap_no = 16;
++	task->thread.error_code = 0;
++	info.si_signo = SIGFPE;
++	info.si_errno = 0;
++	info.si_code = __SI_FAULT;
++	info.si_addr = eip;
++	/*
++	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
++	 * status.  0x3f is the exception bits in these regs, 0x200 is the
++	 * C1 reg you need in case of a stack fault, 0x040 is the stack
++	 * fault bit.  We should only be taking one exception at a time,
++	 * so if this combination doesn't produce any single exception,
++	 * then we have a bad program that isn't synchronizing its FPU usage
++	 * and it will suffer the consequences since we won't be able to
++	 * fully reproduce the context of the exception
++	 */
++	cwd = get_fpu_cwd(task);
++	swd = get_fpu_swd(task);
++	switch (swd & ~cwd & 0x3f) {
++		case 0x000: /* No unmasked exception */
++			return;
++		default:    /* Multiple exceptions */
++			break;
++		case 0x001: /* Invalid Op */
++			/*
++			 * swd & 0x240 == 0x040: Stack Underflow
++			 * swd & 0x240 == 0x240: Stack Overflow
++			 * User must clear the SF bit (0x40) if set
++			 */
++			info.si_code = FPE_FLTINV;
++			break;
++		case 0x002: /* Denormalize */
++		case 0x010: /* Underflow */
++			info.si_code = FPE_FLTUND;
++			break;
++		case 0x004: /* Zero Divide */
++			info.si_code = FPE_FLTDIV;
++			break;
++		case 0x008: /* Overflow */
++			info.si_code = FPE_FLTOVF;
++			break;
++		case 0x020: /* Precision */
++			info.si_code = FPE_FLTRES;
 +			break;
-+		printk("%s =======================\n", log_lvl);
 +	}
++	force_sig_info(SIGFPE, &info, task);
 +}
 +
-+void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long * stack)
++fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
 +{
-+	show_trace_log_lvl(task, regs, stack, "");
++	ignore_fpu_irq = 1;
++	math_error((void __user *)regs->eip);
 +}
 +
-+static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
-+			       unsigned long *esp, char *log_lvl)
++static void simd_math_error(void __user *eip)
 +{
-+	unsigned long *stack;
-+	int i;
++	struct task_struct * task;
++	siginfo_t info;
++	unsigned short mxcsr;
 +
-+	if (esp == NULL) {
-+		if (task)
-+			esp = (unsigned long*)task->thread.esp;
-+		else
-+			esp = (unsigned long *)&esp;
++	/*
++	 * Save the info for the exception handler and clear the error.
++	 */
++	task = current;
++	save_init_fpu(task);
++	task->thread.trap_no = 19;
++	task->thread.error_code = 0;
++	info.si_signo = SIGFPE;
++	info.si_errno = 0;
++	info.si_code = __SI_FAULT;
++	info.si_addr = eip;
++	/*
++	 * The SIMD FPU exceptions are handled a little differently, as there
++	 * is only a single status/control register.  Thus, to determine which
++	 * unmasked exception was caught we must mask the exception mask bits
++	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
++	 */
++	mxcsr = get_fpu_mxcsr(task);
++	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
++		case 0x000:
++		default:
++			break;
++		case 0x001: /* Invalid Op */
++			info.si_code = FPE_FLTINV;
++			break;
++		case 0x002: /* Denormalize */
++		case 0x010: /* Underflow */
++			info.si_code = FPE_FLTUND;
++			break;
++		case 0x004: /* Zero Divide */
++			info.si_code = FPE_FLTDIV;
++			break;
++		case 0x008: /* Overflow */
++			info.si_code = FPE_FLTOVF;
++			break;
++		case 0x020: /* Precision */
++			info.si_code = FPE_FLTRES;
++			break;
 +	}
++	force_sig_info(SIGFPE, &info, task);
++}
 +
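The MXCSR expression in simd_math_error() lines the mask bits (0x1f80)
up with the sticky exception flags (0x3f) before inverting. A small
standalone check, with an illustrative register value:

	#include <stdio.h>

	int main(void)
	{
		/* ZE flag set (bit 2), ZM mask (bit 9) cleared */
		unsigned short mxcsr = 0x1d84;
		unsigned short cause =
			~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f);

		printf("unmasked SIMD exception bits: %#x\n", cause);
		/* prints 0x4, the FPE_FLTDIV case above */
		return 0;
	}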
-+	stack = esp;
-+	for(i = 0; i < kstack_depth_to_print; i++) {
-+		if (kstack_end(stack))
-+			break;
-+		if (i && ((i % 8) == 0))
-+			printk("\n%s       ", log_lvl);
-+		printk("%08lx ", *stack++);
++fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
++					  long error_code)
++{
++	if (cpu_has_xmm) {
++		/* Handle SIMD FPU exceptions on PIII+ processors. */
++		ignore_fpu_irq = 1;
++		simd_math_error((void __user *)regs->eip);
++	} else {
++		/*
++		 * Handle strange cache flush from user space exception
++		 * in all other cases.  This is undocumented behaviour.
++		 */
++		if (regs->eflags & VM_MASK) {
++			handle_vm86_fault((struct kernel_vm86_regs *)regs,
++					  error_code);
++			return;
++		}
++		current->thread.trap_no = 19;
++		current->thread.error_code = error_code;
++		die_if_kernel("cache flush denied", regs, error_code);
++		force_sig(SIGSEGV, current);
 +	}
-+	printk("\n%sCall Trace:\n", log_lvl);
-+	show_trace_log_lvl(task, regs, esp, log_lvl);
 +}
 +
-+void show_stack(struct task_struct *task, unsigned long *esp)
++#ifndef CONFIG_XEN
++fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
++					  long error_code)
 +{
-+	printk("       ");
-+	show_stack_log_lvl(task, NULL, esp, "");
++#if 0
++	/* No need to warn about this any longer. */
++	printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
++#endif
++}
++
++fastcall void setup_x86_bogus_stack(unsigned char * stk)
++{
++	unsigned long *switch16_ptr, *switch32_ptr;
++	struct pt_regs *regs;
++	unsigned long stack_top, stack_bot;
++	unsigned short iret_frame16_off;
++	int cpu = smp_processor_id();
++	/* reserve the space on 32bit stack for the magic switch16 pointer */
++	memmove(stk, stk + 8, sizeof(struct pt_regs));
++	switch16_ptr = (unsigned long *)(stk + sizeof(struct pt_regs));
++	regs = (struct pt_regs *)stk;
++	/* now the switch32 on 16bit stack */
++	stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
++	stack_top = stack_bot +	CPU_16BIT_STACK_SIZE;
++	switch32_ptr = (unsigned long *)(stack_top - 8);
++	iret_frame16_off = CPU_16BIT_STACK_SIZE - 8 - 20;
++	/* copy iret frame on 16bit stack */
++	memcpy((void *)(stack_bot + iret_frame16_off), &regs->eip, 20);
++	/* fill in the switch pointers */
++	switch16_ptr[0] = (regs->esp & 0xffff0000) | iret_frame16_off;
++	switch16_ptr[1] = __ESPFIX_SS;
++	switch32_ptr[0] = (unsigned long)stk + sizeof(struct pt_regs) +
++		8 - CPU_16BIT_STACK_SIZE;
++	switch32_ptr[1] = __KERNEL_DS;
++}
++
++fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
++{
++	unsigned long *switch32_ptr;
++	unsigned char *stack16, *stack32;
++	unsigned long stack_top, stack_bot;
++	int len;
++	int cpu = smp_processor_id();
++	stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
++	stack_top = stack_bot +	CPU_16BIT_STACK_SIZE;
++	switch32_ptr = (unsigned long *)(stack_top - 8);
++	/* copy the data from 16bit stack to 32bit stack */
++	len = CPU_16BIT_STACK_SIZE - 8 - sp;
++	stack16 = (unsigned char *)(stack_bot + sp);
++	stack32 = (unsigned char *)
++		(switch32_ptr[0] + CPU_16BIT_STACK_SIZE - 8 - len);
++	memcpy(stack32, stack16, len);
++	return stack32;
 +}
++#endif
 +
 +/*
-+ * The architecture-independent dump_stack generator
++ *  'math_state_restore()' saves the current math information in the
++ * old math state array, and gets the new ones from the current task
++ *
++ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
++ * Don't touch unless you *really* know how it works.
++ *
++ * Must be called with kernel preemption disabled (in this case,
++ * local interrupts are disabled at the call-site in entry.S).
 + */
-+void dump_stack(void)
++asmlinkage void math_state_restore(struct pt_regs regs)
 +{
-+	unsigned long stack;
++	struct thread_info *thread = current_thread_info();
++	struct task_struct *tsk = thread->task;
 +
-+	show_trace(current, NULL, &stack);
++	/* NB. 'clts' is done for us by Xen during virtual trap. */
++	if (!tsk_used_math(tsk))
++		init_fpu(tsk);
++	restore_fpu(tsk);
++	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
 +}
 +
-+EXPORT_SYMBOL(dump_stack);
++#ifndef CONFIG_MATH_EMULATION
 +
-+void show_registers(struct pt_regs *regs)
++asmlinkage void math_emulate(long arg)
 +{
-+	int i;
-+	int in_kernel = 1;
-+	unsigned long esp;
-+	unsigned short ss;
++	printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
++	printk(KERN_EMERG "killing %s.\n",current->comm);
++	force_sig(SIGFPE,current);
++	schedule();
++}
 +
-+	esp = (unsigned long) (&regs->esp);
-+	savesegment(ss, ss);
-+	if (user_mode_vm(regs)) {
-+		in_kernel = 0;
-+		esp = regs->esp;
-+		ss = regs->xss & 0xffff;
-+	}
-+	print_modules();
-+	printk(KERN_EMERG "CPU:    %d\nEIP:    %04x:[<%08lx>]    %s VLI\n"
-+			"EFLAGS: %08lx   (%s %.*s) \n",
-+		smp_processor_id(), 0xffff & regs->xcs, regs->eip,
-+		print_tainted(), regs->eflags, system_utsname.release,
-+		(int)strcspn(system_utsname.version, " "),
-+		system_utsname.version);
-+	print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
-+	printk(KERN_EMERG "eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
-+		regs->eax, regs->ebx, regs->ecx, regs->edx);
-+	printk(KERN_EMERG "esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
-+		regs->esi, regs->edi, regs->ebp, esp);
-+	printk(KERN_EMERG "ds: %04x   es: %04x   ss: %04x\n",
-+		regs->xds & 0xffff, regs->xes & 0xffff, ss);
-+	printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
-+		TASK_COMM_LEN, current->comm, current->pid,
-+		current_thread_info(), current, current->thread_info);
-+	/*
-+	 * When in-kernel, we also print out the stack and code at the
-+	 * time of the fault..
-+	 */
-+	if (in_kernel) {
-+		u8 __user *eip;
++#endif /* CONFIG_MATH_EMULATION */
 +
-+		printk("\n" KERN_EMERG "Stack: ");
-+		show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);
++#ifdef CONFIG_X86_F00F_BUG
++void __init trap_init_f00f_bug(void)
++{
++	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
 +
-+		printk(KERN_EMERG "Code: ");
++	/*
++	 * Update the IDT descriptor and reload the IDT so that
++	 * it uses the read-only mapped virtual address.
++	 */
++	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
++	load_idt(&idt_descr);
++}
++#endif
 +
-+		eip = (u8 __user *)regs->eip - 43;
-+		for (i = 0; i < 64; i++, eip++) {
-+			unsigned char c;
 +
-+			if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
-+				printk(" Bad EIP value.");
-+				break;
-+			}
-+			if (eip == (u8 __user *)regs->eip)
-+				printk("<%02x> ", c);
-+			else
-+				printk("%02x ", c);
-+		}
-+	}
-+	printk("\n");
-+}	
++/*
++ * NB. All these are "trap gates" (i.e. events_mask isn't set) except
++ * for those that specify <dpl>|4 in the second field.
++ */
++static trap_info_t __cpuinitdata trap_table[] = {
++	{  0, 0, __KERNEL_CS, (unsigned long)divide_error		},
++	{  1, 0|4, __KERNEL_CS, (unsigned long)debug			},
++	{  3, 3|4, __KERNEL_CS, (unsigned long)int3			},
++	{  4, 3, __KERNEL_CS, (unsigned long)overflow			},
++	{  5, 0, __KERNEL_CS, (unsigned long)bounds			},
++	{  6, 0, __KERNEL_CS, (unsigned long)invalid_op			},
++	{  7, 0|4, __KERNEL_CS, (unsigned long)device_not_available	},
++	{  9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
++	{ 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS		},
++	{ 11, 0, __KERNEL_CS, (unsigned long)segment_not_present	},
++	{ 12, 0, __KERNEL_CS, (unsigned long)stack_segment		},
++	{ 13, 0, __KERNEL_CS, (unsigned long)general_protection		},
++	{ 14, 0|4, __KERNEL_CS, (unsigned long)page_fault		},
++	{ 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment		},
++	{ 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error		},
++	{ 17, 0, __KERNEL_CS, (unsigned long)alignment_check		},
++#ifdef CONFIG_X86_MCE
++	{ 18, 0, __KERNEL_CS, (unsigned long)machine_check		},
++#endif
++	{ 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error	},
++	{ SYSCALL_VECTOR,  3, __KERNEL_CS, (unsigned long)system_call	},
++	{  0, 0,	   0, 0						}
++};
 +
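Per the comment above, the second field packs the gate DPL in bits 0-1
and uses bit 2 (the "<dpl>|4" rows) to request that events_mask be set
while the handler runs. A decoding sketch; the two macros are
simplified stand-ins, not the accessors from the Xen public headers:

	#include <stdio.h>

	#define TI_DPL(flags)		((flags) & 3)
	#define TI_MASK_EVENTS(flags)	(((flags) & 4) != 0)

	int main(void)
	{
		unsigned flags = 3 | 4;		/* the int3 entry above */

		printf("dpl=%u, events masked on entry: %s\n",
		       TI_DPL(flags),
		       TI_MASK_EVENTS(flags) ? "yes" : "no");
		return 0;
	}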
-+static void handle_BUG(struct pt_regs *regs)
++void __init trap_init(void)
 +{
-+	unsigned long eip = regs->eip;
-+	unsigned short ud2;
-+
-+	if (eip < PAGE_OFFSET)
-+		return;
-+	if (__get_user(ud2, (unsigned short __user *)eip))
-+		return;
-+	if (ud2 != 0x0b0f)
-+		return;
++	int ret;
 +
-+	printk(KERN_EMERG "------------[ cut here ]------------\n");
++	ret = HYPERVISOR_set_trap_table(trap_table);
++	if (ret)
++		printk("HYPERVISOR_set_trap_table failed: error %d\n", ret);
 +
-+#ifdef CONFIG_DEBUG_BUGVERBOSE
-+	do {
-+		unsigned short line;
-+		char *file;
-+		char c;
++	if (cpu_has_fxsr) {
++		/*
++		 * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
++		 * Generates a compile-time "error: zero width for bit-field" if
++		 * the alignment is wrong.
++		 */
++		struct fxsrAlignAssert {
++			int _:!(offsetof(struct task_struct,
++					thread.i387.fxsave) & 15);
++		};
 +
-+		if (__get_user(line, (unsigned short __user *)(eip + 2)))
-+			break;
-+		if (__get_user(file, (char * __user *)(eip + 4)) ||
-+		    (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
-+			file = "<bad filename>";
++		printk(KERN_INFO "Enabling fast FPU save and restore... ");
++		set_in_cr4(X86_CR4_OSFXSR);
++		printk("done.\n");
++	}
++	if (cpu_has_xmm) {
++		printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
++				"support... ");
++		set_in_cr4(X86_CR4_OSXMMEXCPT);
++		printk("done.\n");
++	}
 +
-+		printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
-+		return;
-+	} while (0);
-+#endif
-+	printk(KERN_EMERG "Kernel BUG at [verbose debug info unavailable]\n");
++	/*
++	 * Should be a barrier for any external CPU state.
++	 */
++	cpu_init();
 +}
 +
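The fxsrAlignAssert trick in trap_init() generalizes: a named bit-field
of width zero is a hard compile error, so the build breaks exactly when
the asserted expression is false. A standalone sketch with hypothetical
names:

	#include <stddef.h>

	#define ALIGN_ASSERT(expr) \
		struct { int _ : !!(expr); }

	struct demo { char pad[16]; double d; };

	/* compiles only if 'd' sits at an 8-byte-aligned offset */
	typedef ALIGN_ASSERT(offsetof(struct demo, d) % 8 == 0) demo_ok_t;

	int main(void) { return 0; }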
-+/* This is gone through when something in the kernel
-+ * has done something bad and is about to be terminated.
-+*/
-+void die(const char * str, struct pt_regs * regs, long err)
++void __cpuinit smp_trap_init(trap_info_t *trap_ctxt)
 +{
-+	static struct {
-+		spinlock_t lock;
-+		u32 lock_owner;
-+		int lock_owner_depth;
-+	} die = {
-+		.lock =			SPIN_LOCK_UNLOCKED,
-+		.lock_owner =		-1,
-+		.lock_owner_depth =	0
-+	};
-+	static int die_counter;
-+	unsigned long flags;
-+
-+	oops_enter();
++	const trap_info_t *t = trap_table;
 +
-+	if (die.lock_owner != raw_smp_processor_id()) {
-+		console_verbose();
-+		spin_lock_irqsave(&die.lock, flags);
-+		die.lock_owner = smp_processor_id();
-+		die.lock_owner_depth = 0;
-+		bust_spinlocks(1);
++	for (t = trap_table; t->address; t++) {
++		trap_ctxt[t->vector].flags = t->flags;
++		trap_ctxt[t->vector].cs = t->cs;
++		trap_ctxt[t->vector].address = t->address;
 +	}
-+	else
-+		local_save_flags(flags);
++}
 +
-+	if (++die.lock_owner_depth < 3) {
-+		int nl = 0;
-+		unsigned long esp;
-+		unsigned short ss;
++static int __init kstack_setup(char *s)
++{
++	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
++	return 1;
++}
++__setup("kstack=", kstack_setup);
 +
-+		handle_BUG(regs);
-+		printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
-+#ifdef CONFIG_PREEMPT
-+		printk(KERN_EMERG "PREEMPT ");
-+		nl = 1;
++#ifdef CONFIG_STACK_UNWIND
++static int __init call_trace_setup(char *s)
++{
++	if (strcmp(s, "old") == 0)
++		call_trace = -1;
++	else if (strcmp(s, "both") == 0)
++		call_trace = 0;
++	else if (strcmp(s, "newfallback") == 0)
++		call_trace = 1;
++	else if (strcmp(s, "new") == 0)
++		call_trace = 2;
++	return 1;
++}
++__setup("call_trace=", call_trace_setup);
 +#endif
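Both handlers are plain __setup() hooks, so the values arrive on the
kernel command line (e.g. booting with "kstack=0x20 call_trace=both").
The kstack= parsing is essentially strtoul() with base 0, as this
userspace sketch shows; the argument value is illustrative:

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		const char *arg = "0x20";	/* text after "kstack=" */

		printf("kstack_depth_to_print = %lu\n",
		       strtoul(arg, NULL, 0));	/* 32 */
		return 0;
	}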
-+#ifdef CONFIG_SMP
-+		if (!nl)
-+			printk(KERN_EMERG);
-+		printk("SMP ");
-+		nl = 1;
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/traps.c
+--- a/arch/i386/kernel/traps.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/kernel/traps.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -644,18 +644,11 @@
+ 
+ static void io_check_error(unsigned char reason, struct pt_regs * regs)
+ {
+-	unsigned long i;
+-
+ 	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
+ 	show_registers(regs);
+ 
+ 	/* Re-enable the IOCK line, wait for a few seconds */
+-	reason = (reason & 0xf) | 8;
+-	outb(reason, 0x61);
+-	i = 2000;
+-	while (--i) udelay(1000);
+-	reason &= ~8;
+-	outb(reason, 0x61);
++	clear_io_check_error(reason);
+ }
+ 
+ static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/vm86.c
+--- a/arch/i386/kernel/vm86.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/kernel/vm86.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -97,7 +97,9 @@
+ struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
+ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
+ {
++#ifndef CONFIG_X86_NO_TSS
+ 	struct tss_struct *tss;
 +#endif
-+#ifdef CONFIG_DEBUG_PAGEALLOC
-+		if (!nl)
-+			printk(KERN_EMERG);
-+		printk("DEBUG_PAGEALLOC");
-+		nl = 1;
+ 	struct pt_regs *ret;
+ 	unsigned long tmp;
+ 
+@@ -122,12 +124,16 @@
+ 		do_exit(SIGSEGV);
+ 	}
+ 
++#ifndef CONFIG_X86_NO_TSS
+ 	tss = &per_cpu(init_tss, get_cpu());
 +#endif
-+		if (nl)
-+			printk("\n");
-+		if (notify_die(DIE_OOPS, str, regs, err,
-+					current->thread.trap_no, SIGSEGV) !=
-+				NOTIFY_STOP) {
-+			show_registers(regs);
-+			/* Executive summary in case the oops scrolled away */
-+			esp = (unsigned long) (&regs->esp);
-+			savesegment(ss, ss);
-+			if (user_mode(regs)) {
-+				esp = regs->esp;
-+				ss = regs->xss & 0xffff;
-+			}
-+			printk(KERN_EMERG "EIP: [<%08lx>] ", regs->eip);
-+			print_symbol("%s", regs->eip);
-+			printk(" SS:ESP %04x:%08lx\n", ss, esp);
-+		}
-+		else
-+			regs = NULL;
-+  	} else
-+		printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
+ 	current->thread.esp0 = current->thread.saved_esp0;
+ 	current->thread.sysenter_cs = __KERNEL_CS;
+ 	load_esp0(tss, &current->thread);
+ 	current->thread.saved_esp0 = 0;
++#ifndef CONFIG_X86_NO_TSS
+ 	put_cpu();
++#endif
+ 
+ 	loadsegment(fs, current->thread.saved_fs);
+ 	loadsegment(gs, current->thread.saved_gs);
+@@ -251,7 +257,9 @@
+ 
+ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
+ {
++#ifndef CONFIG_X86_NO_TSS
+ 	struct tss_struct *tss;
++#endif
+ 	long eax;
+ /*
+  * make sure the vm86() system call doesn't try to do anything silly
+@@ -296,12 +304,16 @@
+ 	savesegment(fs, tsk->thread.saved_fs);
+ 	savesegment(gs, tsk->thread.saved_gs);
+ 
++#ifndef CONFIG_X86_NO_TSS
+ 	tss = &per_cpu(init_tss, get_cpu());
++#endif
+ 	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
+ 	if (cpu_has_sep)
+ 		tsk->thread.sysenter_cs = 0;
+ 	load_esp0(tss, &tsk->thread);
++#ifndef CONFIG_X86_NO_TSS
+ 	put_cpu();
++#endif
+ 
+ 	tsk->thread.screen_bitmap = info->screen_bitmap;
+ 	if (info->flags & VM86_SCREEN_BITMAP)
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/vmlinux.lds.S
+--- a/arch/i386/kernel/vmlinux.lds.S	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/kernel/vmlinux.lds.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -13,6 +13,12 @@
+ OUTPUT_ARCH(i386)
+ ENTRY(phys_startup_32)
+ jiffies = jiffies_64;
 +
-+	bust_spinlocks(0);
-+	die.lock_owner = -1;
-+	spin_unlock_irqrestore(&die.lock, flags);
++PHDRS {
++	text PT_LOAD FLAGS(5);	/* R_E */
++	data PT_LOAD FLAGS(7);	/* RWE */
++	note PT_NOTE FLAGS(4);	/* R__ */
++}
+ SECTIONS
+ {
+   . = __KERNEL_START;
+@@ -26,7 +32,7 @@
+ 	KPROBES_TEXT
+ 	*(.fixup)
+ 	*(.gnu.warning)
+-	} = 0x9090
++	} :text = 0x9090
+ 
+   _etext = .;			/* End of text section */
+ 
+@@ -45,10 +51,11 @@
+   __tracedata_end = .;
+ 
+   /* writeable */
++  . = ALIGN(4096);
+   .data : AT(ADDR(.data) - LOAD_OFFSET) {	/* Data */
+ 	*(.data)
+ 	CONSTRUCTORS
+-	}
++	} :data
+ 
+   . = ALIGN(4096);
+   __nosave_begin = .;
+@@ -184,4 +191,6 @@
+   STABS_DEBUG
+ 
+   DWARF_DEBUG
 +
-+	if (!regs)
-+		return;
++  NOTES
+ }
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/kernel/vsyscall-note-xen.S
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/vsyscall-note-xen.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,32 @@
++/*
++ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
++ * Here we can supply some information useful to userland.
++ * First we get the vanilla i386 note that supplies the kernel version info.
++ */
 +
-+	if (kexec_should_crash(current))
-+		crash_kexec(regs);
++#include "vsyscall-note.S"
 +
-+	if (in_interrupt())
-+		panic("Fatal exception in interrupt");
++/*
++ * Now we add a special note telling glibc's dynamic linker a fake hardware
++ * flavor that it will use to choose the search path for libraries in the
++ * same way it uses real hardware capabilities like "mmx".
++ * We supply "nosegneg" as the fake capability, to indicate that we
++ * do not like negative offsets in instructions using segment overrides,
++ * since we implement those inefficiently.  This makes it possible to
++ * install libraries optimized to avoid those access patterns in someplace
++ * like /lib/i686/tls/nosegneg.  Note that an /etc/ld.so.conf.d/file
++ * corresponding to the bits here is needed to make ldconfig work right.
++ * It should contain:
++ *	hwcap 0 nosegneg
++ * to match the mapping of bit to name that we give here.
++ */
++#define NOTE_KERNELCAP_BEGIN(ncaps, mask) \
++	ASM_ELF_NOTE_BEGIN(".note.kernelcap", "a", "GNU", 2) \
++	.long ncaps, mask
++#define NOTE_KERNELCAP(bit, name) \
++	.byte bit; .asciz name
++#define NOTE_KERNELCAP_END ASM_ELF_NOTE_END
 +
-+	if (panic_on_oops)
-+		panic("Fatal exception");
++NOTE_KERNELCAP_BEGIN(1, 1)
++NOTE_KERNELCAP(0, "nosegneg")
++NOTE_KERNELCAP_END
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/lib/Makefile
+--- a/arch/i386/lib/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/lib/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -7,3 +7,4 @@
+ 	bitops.o
+ 
+ lib-$(CONFIG_X86_USE_3DNOW) += mmx.o
++lib-$(CONFIG_XEN_SCRUB_PAGES) += scrub.o
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/lib/scrub.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/lib/scrub.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,21 @@
++#include <asm/cpufeature.h>
++#include <asm/page.h>
++#include <asm/processor.h>
 +
-+	oops_exit();
-+	do_exit(SIGSEGV);
++void scrub_pages(void *v, unsigned int count)
++{
++	if (likely(cpu_has_xmm2)) {
++		unsigned long n = count * (PAGE_SIZE / sizeof(long) / 4);
++
++		for (; n--; v += sizeof(long) * 4)
++			asm("movnti %1,(%0)\n\t"
++			    "movnti %1,%c2(%0)\n\t"
++			    "movnti %1,2*%c2(%0)\n\t"
++			    "movnti %1,3*%c2(%0)\n\t"
++			    : : "r" (v), "r" (0L), "i" (sizeof(long))
++			    : "memory");
++		asm volatile("sfence" : : : "memory");
++	} else
++		for (; count--; v += PAGE_SIZE)
++			clear_page(v);
 +}
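scrub_pages() zeroes freed pages with movnti, a non-temporal store that
bypasses the cache, so scrubbing does not evict live cache lines; the
trailing sfence orders the streaming stores before the page is handed
back. A rough userspace analogue using SSE2 intrinsics, illustrative
only (build with -msse2 on 32-bit x86):

	#include <emmintrin.h>
	#include <stddef.h>

	/* _mm_stream_si32 compiles to movnti, like the inline asm in
	 * scrub_pages(); _mm_sfence gives the same store ordering. */
	static void scrub_buffer(void *v, size_t bytes)
	{
		int *p = v;
		size_t n = bytes / sizeof(int);

		while (n--)
			_mm_stream_si32(p++, 0);
		_mm_sfence();
	}

	int main(void)
	{
		static int buf[1024];

		scrub_buffer(buf, sizeof(buf));
		return 0;
	}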
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/mach-xen/Makefile
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/mach-xen/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,5 @@
++#
++# Makefile for the linux kernel.
++#
 +
-+static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
++obj-y				:= setup.o
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/mach-xen/setup.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/mach-xen/setup.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,158 @@
++/*
++ *	Machine specific setup for generic
++ */
++
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/module.h>
++#include <asm/acpi.h>
++#include <asm/arch_hooks.h>
++#include <asm/e820.h>
++#include <asm/setup.h>
++#include <asm/fixmap.h>
++
++#include <xen/interface/callback.h>
++#include <xen/interface/memory.h>
++
++#ifdef CONFIG_HOTPLUG_CPU
++#define DEFAULT_SEND_IPI	(1)
++#else
++#define DEFAULT_SEND_IPI	(0)
++#endif
++
++int no_broadcast=DEFAULT_SEND_IPI;
++
++static __init int no_ipi_broadcast(char *str)
 +{
-+	if (!user_mode_vm(regs))
-+		die(str, regs, err);
++	get_option(&str, &no_broadcast);
++	printk("Using %s mode\n",
++	       no_broadcast ? "No IPI Broadcast" : "IPI Broadcast");
++	return 1;
 +}
 +
-+static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
-+			      struct pt_regs * regs, long error_code,
-+			      siginfo_t *info)
++__setup("no_ipi_broadcast", no_ipi_broadcast);
++
++static int __init print_ipi_mode(void)
 +{
-+	struct task_struct *tsk = current;
-+	tsk->thread.error_code = error_code;
-+	tsk->thread.trap_no = trapnr;
++	printk("Using IPI %s mode\n",
++	       no_broadcast ? "No-Shortcut" : "Shortcut");
++	return 0;
++}
 +
-+	if (regs->eflags & VM_MASK) {
-+		if (vm86)
-+			goto vm86_trap;
-+		goto trap_signal;
-+	}
++late_initcall(print_ipi_mode);
 +
-+	if (!user_mode(regs))
-+		goto kernel_trap;
++/**
++ * machine_specific_memory_setup - Hook for machine specific memory setup.
++ *
++ * Description:
++ *	This is included late in kernel/setup.c so that it can make
++ *	use of all of the static functions.
++ **/
 +
-+	trap_signal: {
-+		if (info)
-+			force_sig_info(signr, info, tsk);
-+		else
-+			force_sig(signr, tsk);
-+		return;
-+	}
++char * __init machine_specific_memory_setup(void)
++{
++	int rc;
++	struct xen_memory_map memmap;
++	/*
++	 * This is rather large for a stack variable but this early in
++	 * the boot process we know we have plenty of slack space.
++	 */
++	struct e820entry map[E820MAX];
 +
-+	kernel_trap: {
-+		if (!fixup_exception(regs))
-+			die(str, regs, error_code);
-+		return;
-+	}
++	memmap.nr_entries = E820MAX;
++	set_xen_guest_handle(memmap.buffer, map);
 +
-+	vm86_trap: {
-+		int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
-+		if (ret) goto trap_signal;
-+		return;
++	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
++	if ( rc == -ENOSYS ) {
++		memmap.nr_entries = 1;
++		map[0].addr = 0ULL;
++		map[0].size = PFN_PHYS((unsigned long long)xen_start_info->nr_pages);
++		/* 8MB slack (to balance backend allocations). */
++		map[0].size += 8ULL << 20;
++		map[0].type = E820_RAM;
++		rc = 0;
 +	}
-+}
++	BUG_ON(rc);
 +
-+#define DO_ERROR(trapnr, signr, str, name) \
-+fastcall void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+						== NOTIFY_STOP) \
-+		return; \
-+	do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
-+}
++	sanitize_e820_map(map, (char *)&memmap.nr_entries);
 +
-+#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
-+fastcall void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+	siginfo_t info; \
-+	info.si_signo = signr; \
-+	info.si_errno = 0; \
-+	info.si_code = sicode; \
-+	info.si_addr = (void __user *)siaddr; \
-+	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+						== NOTIFY_STOP) \
-+		return; \
-+	do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
-+}
++	BUG_ON(copy_e820_map(map, (char)memmap.nr_entries) < 0);
 +
-+#define DO_VM86_ERROR(trapnr, signr, str, name) \
-+fastcall void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+						== NOTIFY_STOP) \
-+		return; \
-+	do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
++	return "Xen";
 +}
 +
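For hypervisors predating XENMEM_memory_map the function synthesizes a
single E820_RAM entry sized from nr_pages plus 8MB of slack. A quick
arithmetic check with an illustrative domain size:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long nr_pages = 65536;	/* a 256MB domU, say */
		unsigned long long size = (nr_pages << 12) + (8ULL << 20);

		printf("map[0] = [0x0, %#llx) E820_RAM\n", size);
		/* prints map[0] = [0x0, 0x10800000) E820_RAM */
		return 0;
	}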
-+#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
-+fastcall void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+	siginfo_t info; \
-+	info.si_signo = signr; \
-+	info.si_errno = 0; \
-+	info.si_code = sicode; \
-+	info.si_addr = (void __user *)siaddr; \
-+	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+						== NOTIFY_STOP) \
-+		return; \
-+	do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
-+}
 +
-+DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
-+#ifndef CONFIG_KPROBES
-+DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
-+#endif
-+DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
-+DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
-+DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
-+DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
-+DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
-+DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
-+DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
-+DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
-+DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
++extern void hypervisor_callback(void);
++extern void failsafe_callback(void);
++extern void nmi(void);
 +
-+fastcall void __kprobes do_general_protection(struct pt_regs * regs,
-+					      long error_code)
++unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
++EXPORT_SYMBOL(machine_to_phys_mapping);
++unsigned int machine_to_phys_order;
++EXPORT_SYMBOL(machine_to_phys_order);
++
++void __init pre_setup_arch_hook(void)
 +{
-+	current->thread.error_code = error_code;
-+	current->thread.trap_no = 13;
++	struct xen_machphys_mapping mapping;
++	unsigned long machine_to_phys_nr_ents;
++	struct xen_platform_parameters pp;
 +
-+	if (regs->eflags & VM_MASK)
-+		goto gp_in_vm86;
++	init_mm.pgd = swapper_pg_dir = (pgd_t *)xen_start_info->pt_base;
 +
-+	if (!user_mode(regs))
-+		goto gp_in_kernel;
++	setup_xen_features();
 +
-+	current->thread.error_code = error_code;
-+	current->thread.trap_no = 13;
-+	force_sig(SIGSEGV, current);
-+	return;
++	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
++		set_fixaddr_top(pp.virt_start);
 +
-+gp_in_vm86:
-+	local_irq_enable();
-+	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
-+	return;
++	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
++		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
++		machine_to_phys_nr_ents = mapping.max_mfn + 1;
++	} else
++		machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
++	machine_to_phys_order = fls(machine_to_phys_nr_ents - 1);
 +
-+gp_in_kernel:
-+	if (!fixup_exception(regs)) {
-+		if (notify_die(DIE_GPF, "general protection fault", regs,
-+				error_code, 13, SIGSEGV) == NOTIFY_STOP)
-+			return;
-+		die("general protection fault", regs, error_code);
-+	}
++	if (!xen_feature(XENFEAT_auto_translated_physmap))
++		phys_to_machine_mapping =
++			(unsigned long *)xen_start_info->mfn_list;
 +}
 +
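machine_to_phys_order comes out of fls(machine_to_phys_nr_ents - 1),
i.e. the entry count rounded up to a power of two. A sketch with a
portable fls() stand-in (assumed to match the kernel's semantics:
highest set bit, numbered from 1):

	#include <stdio.h>

	static int fls_portable(unsigned long x)
	{
		int r = 0;

		while (x) {
			x >>= 1;
			r++;
		}
		return r;
	}

	int main(void)
	{
		unsigned long nr_ents = 1048577;	/* just over 2^20 */

		printf("machine_to_phys_order = %d\n",
		       fls_portable(nr_ents - 1));	/* 21 */
		return 0;
	}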
-+static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
++void __init machine_specific_arch_setup(void)
 +{
-+	printk(KERN_EMERG "Uhhuh. NMI received. Dazed and confused, but trying "
-+			"to continue\n");
-+	printk(KERN_EMERG "You probably have a hardware problem with your RAM "
-+			"chips\n");
-+
-+	/* Clear and disable the memory parity error line. */
-+	clear_mem_error(reason);
-+}
++	int ret;
++	static struct callback_register __initdata event = {
++		.type = CALLBACKTYPE_event,
++		.address = { __KERNEL_CS, (unsigned long)hypervisor_callback },
++	};
++	static struct callback_register __initdata failsafe = {
++		.type = CALLBACKTYPE_failsafe,
++		.address = { __KERNEL_CS, (unsigned long)failsafe_callback },
++	};
++	static struct callback_register __initdata nmi_cb = {
++		.type = CALLBACKTYPE_nmi,
++		.address = { __KERNEL_CS, (unsigned long)nmi },
++	};
 +
-+static void io_check_error(unsigned char reason, struct pt_regs * regs)
-+{
-+	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
-+	show_registers(regs);
++	ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
++	if (ret == 0)
++		ret = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
++#if CONFIG_XEN_COMPAT <= 0x030002
++	if (ret == -ENOSYS)
++		ret = HYPERVISOR_set_callbacks(
++			event.address.cs, event.address.eip,
++			failsafe.address.cs, failsafe.address.eip);
++#endif
++	BUG_ON(ret);
 +
-+	/* Re-enable the IOCK line, wait for a few seconds */
-+	clear_io_check_error(reason);
-+}
++	ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb);
++#if CONFIG_XEN_COMPAT <= 0x030002
++	if (ret == -ENOSYS) {
++		static struct xennmi_callback __initdata cb = {
++			.handler_address = (unsigned long)nmi
++		};
 +
-+static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
-+{
-+#ifdef CONFIG_MCA
-+	/* Might actually be able to figure out what the guilty party
-+	* is. */
-+	if( MCA_bus ) {
-+		mca_handle_nmi();
-+		return;
++		HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
 +	}
 +#endif
-+	printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
-+		reason, smp_processor_id());
-+	printk("Dazed and confused, but trying to continue\n");
-+	printk("Do you have a strange power saving mode enabled?\n");
 +}
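The callback registration uses the usual compat idiom: try the current
interface and fall back only when the hypervisor answers -ENOSYS. The
skeleton, with new_op()/legacy_op() as hypothetical stand-ins rather
than Xen hypercalls:

	#include <errno.h>
	#include <stdio.h>

	static int new_op(void)    { return -ENOSYS; }	/* old hypervisor */
	static int legacy_op(void) { return 0; }

	int main(void)
	{
		int ret = new_op();

		if (ret == -ENOSYS)	/* CONFIG_XEN_COMPAT <= 0x030002 */
			ret = legacy_op();
		printf("ret = %d\n", ret);
		return ret ? 1 : 0;
	}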
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/mm/Makefile
+--- a/arch/i386/mm/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/mm/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -8,3 +8,4 @@
+ obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+ obj-$(CONFIG_HIGHMEM) += highmem.o
+ obj-$(CONFIG_BOOT_IOREMAP) += boot_ioremap.o
++obj-$(CONFIG_XEN) += hypervisor.o
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/mm/fault-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/mm/fault-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,779 @@
++/*
++ *  linux/arch/i386/mm/fault.c
++ *
++ *  Copyright (C) 1995  Linus Torvalds
++ */
 +
-+static DEFINE_SPINLOCK(nmi_print_lock);
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/tty.h>
++#include <linux/vt_kern.h>		/* For unblank_screen() */
++#include <linux/highmem.h>
++#include <linux/module.h>
++#include <linux/kprobes.h>
 +
-+void die_nmi (struct pt_regs *regs, const char *msg)
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/desc.h>
++#include <asm/kdebug.h>
++
++extern void die(const char *,struct pt_regs *,long);
++
++#ifdef CONFIG_KPROBES
++ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
++int register_page_fault_notifier(struct notifier_block *nb)
 +{
-+	if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==
-+	    NOTIFY_STOP)
-+		return;
++	vmalloc_sync_all();
++	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
++}
 +
-+	spin_lock(&nmi_print_lock);
-+	/*
-+	* We are in trouble anyway, lets at least try
-+	* to get a message out.
-+	*/
-+	bust_spinlocks(1);
-+	printk(KERN_EMERG "%s", msg);
-+	printk(" on CPU%d, eip %08lx, registers:\n",
-+		smp_processor_id(), regs->eip);
-+	show_registers(regs);
-+	printk(KERN_EMERG "console shuts up ...\n");
-+	console_silent();
-+	spin_unlock(&nmi_print_lock);
-+	bust_spinlocks(0);
-+
-+	/* If we are in kernel we are probably nested up pretty bad
-+	 * and might aswell get out now while we still can.
-+	*/
-+	if (!user_mode_vm(regs)) {
-+		current->thread.trap_no = 2;
-+		crash_kexec(regs);
-+	}
-+
-+	do_exit(SIGSEGV);
-+}
-+
-+static void default_do_nmi(struct pt_regs * regs)
++int unregister_page_fault_notifier(struct notifier_block *nb)
 +{
-+	unsigned char reason = 0;
-+
-+	/* Only the BSP gets external NMIs from the system.  */
-+	if (!smp_processor_id())
-+		reason = get_nmi_reason();
-+ 
-+	if (!(reason & 0xc0)) {
-+		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
-+							== NOTIFY_STOP)
-+			return;
-+#ifdef CONFIG_X86_LOCAL_APIC
-+		/*
-+		 * Ok, so this is none of the documented NMI sources,
-+		 * so it must be the NMI watchdog.
-+		 */
-+		if (nmi_watchdog) {
-+			nmi_watchdog_tick(regs);
-+			return;
-+		}
-+#endif
-+		unknown_nmi_error(reason, regs);
-+		return;
-+	}
-+	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
-+		return;
-+	if (reason & 0x80)
-+		mem_parity_error(reason, regs);
-+	if (reason & 0x40)
-+		io_check_error(reason, regs);
-+	/*
-+	 * Reassert NMI in case it became active meanwhile
-+	 * as it's edge-triggered.
-+	 */
-+	reassert_nmi();
++	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
 +}
 +
-+static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
++static inline int notify_page_fault(enum die_val val, const char *str,
++			struct pt_regs *regs, long err, int trap, int sig)
 +{
-+	return 0;
++	struct die_args args = {
++		.regs = regs,
++		.str = str,
++		.err = err,
++		.trapnr = trap,
++		.signr = sig
++	};
++	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
 +}
-+ 
-+static nmi_callback_t nmi_callback = dummy_nmi_callback;
-+ 
-+fastcall void do_nmi(struct pt_regs * regs, long error_code)
++#else
++static inline int notify_page_fault(enum die_val val, const char *str,
++			struct pt_regs *regs, long err, int trap, int sig)
 +{
-+	int cpu;
-+
-+	nmi_enter();
-+
-+	cpu = smp_processor_id();
-+
-+	++nmi_count(cpu);
-+
-+	if (!rcu_dereference(nmi_callback)(regs, cpu))
-+		default_do_nmi(regs);
-+
-+	nmi_exit();
++	return NOTIFY_DONE;
 +}
++#endif
 +
-+void set_nmi_callback(nmi_callback_t callback)
-+{
-+	vmalloc_sync_all();
-+	rcu_assign_pointer(nmi_callback, callback);
-+}
-+EXPORT_SYMBOL_GPL(set_nmi_callback);
 +
-+void unset_nmi_callback(void)
++/*
++ * Unlock any spinlocks which will prevent us from getting the
++ * message out 
++ */
++void bust_spinlocks(int yes)
 +{
-+	nmi_callback = dummy_nmi_callback;
-+}
-+EXPORT_SYMBOL_GPL(unset_nmi_callback);
++	int loglevel_save = console_loglevel;
 +
-+#ifdef CONFIG_KPROBES
-+fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
-+{
-+	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
-+			== NOTIFY_STOP)
++	if (yes) {
++		oops_in_progress = 1;
 +		return;
-+	/* This is an interrupt gate, because kprobes wants interrupts
-+	disabled.  Normal trap handlers don't. */
-+	restore_interrupts(regs);
-+	do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
-+}
++	}
++#ifdef CONFIG_VT
++	unblank_screen();
 +#endif
++	oops_in_progress = 0;
++	/*
++	 * OK, the message is on the console.  Now we call printk()
++	 * without oops_in_progress set so that printk will give klogd
++	 * a poke.  Hold onto your hats...
++	 */
++	console_loglevel = 15;		/* NMI oopser may have shut the console up */
++	printk(" ");
++	console_loglevel = loglevel_save;
++}
 +
 +/*
-+ * Our handling of the processor debug registers is non-trivial.
-+ * We do not clear them on entry and exit from the kernel. Therefore
-+ * it is possible to get a watchpoint trap here from inside the kernel.
-+ * However, the code in ./ptrace.c has ensured that the user can
-+ * only set watchpoints on userspace addresses. Therefore the in-kernel
-+ * watchpoint trap can only occur in code which is reading/writing
-+ * from user space. Such code must not hold kernel locks (since it
-+ * can equally take a page fault), therefore it is safe to call
-+ * force_sig_info even though that claims and releases locks.
-+ * 
-+ * Code in ./signal.c ensures that the debug control register
-+ * is restored before we deliver any signal, and therefore that
-+ * user code runs with the correct debug control register even though
-+ * we clear it here.
++ * Return EIP plus the CS segment base.  The segment limit is also
++ * adjusted, clamped to the kernel/user address space (whichever is
++ * appropriate), and returned in *eip_limit.
 + *
-+ * Being careful here means that we don't have to be as careful in a
-+ * lot of more complicated places (task switching can be a bit lazy
-+ * about restoring all the debug state, and ptrace doesn't have to
-+ * find every occurrence of the TF bit that could be saved away even
-+ * by user code)
++ * The segment is checked, because it might have been changed by another
++ * task between the original faulting instruction and here.
++ *
++ * If CS is no longer a valid code segment, or if EIP is beyond the
++ * limit, or if it is a kernel address when CS is not a kernel segment,
++ * then the returned value will be greater than *eip_limit.
++ * 
++ * This is slow, but is very rarely executed.
 + */
-+fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
++static inline unsigned long get_segment_eip(struct pt_regs *regs,
++					    unsigned long *eip_limit)
 +{
-+	unsigned int condition;
-+	struct task_struct *tsk = current;
-+
-+	get_debugreg(condition, 6);
-+
-+	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
-+					SIGTRAP) == NOTIFY_STOP)
-+		return;
-+	/* It's safe to allow irq's after DR6 has been saved */
-+	if (regs->eflags & X86_EFLAGS_IF)
-+		local_irq_enable();
++	unsigned long eip = regs->eip;
++	unsigned seg = regs->xcs & 0xffff;
++	u32 seg_ar, seg_limit, base, *desc;
 +
-+	/* Mask out spurious debug traps due to lazy DR7 setting */
-+	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
-+		if (!tsk->thread.debugreg[7])
-+			goto clear_dr7;
++	/* Unlikely, but must come before segment checks. */
++	if (unlikely(regs->eflags & VM_MASK)) {
++		base = seg << 4;
++		*eip_limit = base + 0xffff;
++		return base + (eip & 0xffff);
 +	}
 +
-+	if (regs->eflags & VM_MASK)
-+		goto debug_vm86;
-+
-+	/* Save debug status register where ptrace can see it */
-+	tsk->thread.debugreg[6] = condition;
++	/* The standard kernel/user address space limit. */
++	*eip_limit = (seg & 2) ? USER_DS.seg : KERNEL_DS.seg;
++	
++	/* By far the most common cases. */
++	if (likely(seg == __USER_CS || seg == GET_KERNEL_CS()))
++		return eip;
 +
-+	/*
-+	 * Single-stepping through TF: make sure we ignore any events in
-+	 * kernel space (but re-enable TF when returning to user mode).
-+	 */
-+	if (condition & DR_STEP) {
-+		/*
-+		 * We already checked v86 mode above, so we can
-+		 * check for kernel mode by just checking the CPL
-+		 * of CS.
-+		 */
-+		if (!user_mode(regs))
-+			goto clear_TF_reenable;
++	/* Check the segment exists, is within the current LDT/GDT size,
++	   that kernel/user (ring 0..3) has the appropriate privilege,
++	   that it's a code segment, and get the limit. */
++	__asm__ ("larl %3,%0; lsll %3,%1"
++		 : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
++	if ((~seg_ar & 0x9800) || eip > seg_limit) {
++		*eip_limit = 0;
++		return 1;	 /* So that returned eip > *eip_limit. */
 +	}
 +
-+	/* Ok, finally something we can handle */
-+	send_sigtrap(tsk, regs, error_code);
++	/* Get the GDT/LDT descriptor base. 
++	   When you look for races in this code remember that
++	   LDT and other horrors are only used in user space. */
++	if (seg & (1<<2)) {
++		/* Must lock the LDT while reading it. */
++		down(&current->mm->context.sem);
++		desc = current->mm->context.ldt;
++		desc = (void *)desc + (seg & ~7);
++	} else {
++		/* Must disable preemption while reading the GDT. */
++ 		desc = (u32 *)get_cpu_gdt_table(get_cpu());
++		desc = (void *)desc + (seg & ~7);
++	}
 +
-+	/* Disable additional traps. They'll be re-enabled when
-+	 * the signal is delivered.
-+	 */
-+clear_dr7:
-+	set_debugreg(0, 7);
-+	return;
++	/* Decode the code segment base from the descriptor */
++	base = get_desc_base((unsigned long *)desc);
 +
-+debug_vm86:
-+	handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
-+	return;
++	if (seg & (1<<2)) { 
++		up(&current->mm->context.sem);
++	} else
++		put_cpu();
 +
-+clear_TF_reenable:
-+	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
-+	regs->eflags &= ~TF_MASK;
-+	return;
++	/* Adjust EIP and segment limit, and clamp at the kernel limit.
++	   It's legitimate for segments to wrap at 0xffffffff. */
++	seg_limit += base;
++	if (seg_limit < *eip_limit && seg_limit >= base)
++		*eip_limit = seg_limit;
++	return eip + base;
 +}
 +
-+/*
-+ * Note that we play around with the 'TS' bit in an attempt to get
-+ * the correct behaviour even in the presence of the asynchronous
-+ * IRQ13 behaviour
++/* 
++ * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
++ * Check that here and ignore it.
 + */
-+void math_error(void __user *eip)
-+{
-+	struct task_struct * task;
-+	siginfo_t info;
-+	unsigned short cwd, swd;
++static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
++{ 
++	unsigned long limit;
++	unsigned long instr = get_segment_eip (regs, &limit);
++	int scan_more = 1;
++	int prefetch = 0; 
++	int i;
 +
-+	/*
-+	 * Save the info for the exception handler and clear the error.
-+	 */
-+	task = current;
-+	save_init_fpu(task);
-+	task->thread.trap_no = 16;
-+	task->thread.error_code = 0;
-+	info.si_signo = SIGFPE;
-+	info.si_errno = 0;
-+	info.si_code = __SI_FAULT;
-+	info.si_addr = eip;
-+	/*
-+	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
-+	 * status.  0x3f is the exception bits in these regs, 0x200 is the
-+	 * C1 reg you need in case of a stack fault, 0x040 is the stack
-+	 * fault bit.  We should only be taking one exception at a time,
-+	 * so if this combination doesn't produce any single exception,
-+	 * then we have a bad program that isn't syncronizing its FPU usage
-+	 * and it will suffer the consequences since we won't be able to
-+	 * fully reproduce the context of the exception
-+	 */
-+	cwd = get_fpu_cwd(task);
-+	swd = get_fpu_swd(task);
-+	switch (swd & ~cwd & 0x3f) {
-+		case 0x000: /* No unmasked exception */
-+			return;
-+		default:    /* Multiple exceptions */
-+			break;
-+		case 0x001: /* Invalid Op */
-+			/*
-+			 * swd & 0x240 == 0x040: Stack Underflow
-+			 * swd & 0x240 == 0x240: Stack Overflow
-+			 * User must clear the SF bit (0x40) if set
-+			 */
-+			info.si_code = FPE_FLTINV;
-+			break;
-+		case 0x002: /* Denormalize */
-+		case 0x010: /* Underflow */
-+			info.si_code = FPE_FLTUND;
-+			break;
-+		case 0x004: /* Zero Divide */
-+			info.si_code = FPE_FLTDIV;
++	for (i = 0; scan_more && i < 15; i++) { 
++		unsigned char opcode;
++		unsigned char instr_hi;
++		unsigned char instr_lo;
++
++		if (instr > limit)
 +			break;
-+		case 0x008: /* Overflow */
-+			info.si_code = FPE_FLTOVF;
++		if (__get_user(opcode, (unsigned char __user *) instr))
++			break; 
++
++		instr_hi = opcode & 0xf0; 
++		instr_lo = opcode & 0x0f; 
++		instr++;
++
++		switch (instr_hi) { 
++		case 0x20:
++		case 0x30:
++			/* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
++			scan_more = ((instr_lo & 7) == 0x6);
 +			break;
-+		case 0x020: /* Precision */
-+			info.si_code = FPE_FLTRES;
++			
++		case 0x60:
++			/* 0x64 thru 0x67 are valid prefixes in all modes. */
++			scan_more = (instr_lo & 0xC) == 0x4;
++			break;		
++		case 0xF0:
++			/* 0xF0, 0xF2, and 0xF3 are valid prefixes */
++			scan_more = !instr_lo || (instr_lo>>1) == 1;
++			break;			
++		case 0x00:
++			/* Prefetch instruction is 0x0F0D or 0x0F18 */
++			scan_more = 0;
++			if (instr > limit)
++				break;
++			if (__get_user(opcode, (unsigned char __user *) instr))
++				break;
++			prefetch = (instr_lo == 0xF) &&
++				(opcode == 0x0D || opcode == 0x18);
++			break;			
++		default:
++			scan_more = 0;
 +			break;
++		} 
 +	}
-+	force_sig_info(SIGFPE, &info, task);
++	return prefetch;
 +}
 +
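After skipping legal prefixes, the loop keys on the two-byte opcodes
0x0F 0x0D and 0x0F 0x18 (the AMD 3DNow! and SSE prefetch families).
The core test, runnable standalone:

	#include <stdio.h>

	static int is_prefetch_opcode(const unsigned char *insn)
	{
		return insn[0] == 0x0f &&
		       (insn[1] == 0x0d || insn[1] == 0x18);
	}

	int main(void)
	{
		/* prefetchnta (%eax) encodes as 0f 18 00 */
		unsigned char insn[] = { 0x0f, 0x18, 0x00 };

		printf("%d\n", is_prefetch_opcode(insn));	/* 1 */
		return 0;
	}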
-+fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
++static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
++			      unsigned long error_code)
 +{
-+	ignore_fpu_irq = 1;
-+	math_error((void __user *)regs->eip);
-+}
++	if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++		     boot_cpu_data.x86 >= 6)) {
++		/* Catch an obscure case of prefetch inside an NX page. */
++		if (nx_enabled && (error_code & 16))
++			return 0;
++		return __is_prefetch(regs, addr);
++	}
++	return 0;
++} 
 +
-+static void simd_math_error(void __user *eip)
++static noinline void force_sig_info_fault(int si_signo, int si_code,
++	unsigned long address, struct task_struct *tsk)
 +{
-+	struct task_struct * task;
 +	siginfo_t info;
-+	unsigned short mxcsr;
 +
-+	/*
-+	 * Save the info for the exception handler and clear the error.
-+	 */
-+	task = current;
-+	save_init_fpu(task);
-+	task->thread.trap_no = 19;
-+	task->thread.error_code = 0;
-+	info.si_signo = SIGFPE;
++	info.si_signo = si_signo;
 +	info.si_errno = 0;
-+	info.si_code = __SI_FAULT;
-+	info.si_addr = eip;
-+	/*
-+	 * The SIMD FPU exceptions are handled a little differently, as there
-+	 * is only a single status/control register.  Thus, to determine which
-+	 * unmasked exception was caught we must mask the exception mask bits
-+	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
-+	 */
-+	mxcsr = get_fpu_mxcsr(task);
-+	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
-+		case 0x000:
-+		default:
-+			break;
-+		case 0x001: /* Invalid Op */
-+			info.si_code = FPE_FLTINV;
-+			break;
-+		case 0x002: /* Denormalize */
-+		case 0x010: /* Underflow */
-+			info.si_code = FPE_FLTUND;
-+			break;
-+		case 0x004: /* Zero Divide */
-+			info.si_code = FPE_FLTDIV;
-+			break;
-+		case 0x008: /* Overflow */
-+			info.si_code = FPE_FLTOVF;
-+			break;
-+		case 0x020: /* Precision */
-+			info.si_code = FPE_FLTRES;
-+			break;
-+	}
-+	force_sig_info(SIGFPE, &info, task);
++	info.si_code = si_code;
++	info.si_addr = (void __user *)address;
++	force_sig_info(si_signo, &info, tsk);
 +}
 +
-+fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
-+					  long error_code)
++fastcall void do_invalid_op(struct pt_regs *, unsigned long);
++
++#ifdef CONFIG_X86_PAE
++static void dump_fault_path(unsigned long address)
 +{
-+	if (cpu_has_xmm) {
-+		/* Handle SIMD FPU exceptions on PIII+ processors. */
-+		ignore_fpu_irq = 1;
-+		simd_math_error((void __user *)regs->eip);
-+	} else {
-+		/*
-+		 * Handle strange cache flush from user space exception
-+		 * in all other cases.  This is undocumented behaviour.
-+		 */
-+		if (regs->eflags & VM_MASK) {
-+			handle_vm86_fault((struct kernel_vm86_regs *)regs,
-+					  error_code);
++	unsigned long *p, page;
++	unsigned long mfn; 
++
++	page = read_cr3();
++	p  = (unsigned long *)__va(page);
++	p += (address >> 30) * 2;
++	printk(KERN_ALERT "%08lx -> *pde = %08lx:%08lx\n", page, p[1], p[0]);
++	if (p[0] & _PAGE_PRESENT) {
++		mfn  = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
++		page = mfn_to_pfn(mfn) << PAGE_SHIFT; 
++		p  = (unsigned long *)__va(page);
++		address &= 0x3fffffff;
++		p += (address >> 21) * 2;
++		printk(KERN_ALERT "%08lx -> *pme = %08lx:%08lx\n", 
++		       page, p[1], p[0]);
++		mfn  = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
++#ifdef CONFIG_HIGHPTE
++		if (mfn_to_pfn(mfn) >= highstart_pfn)
 +			return;
++#endif
++		if (p[0] & _PAGE_PRESENT) {
++			page = mfn_to_pfn(mfn) << PAGE_SHIFT; 
++			p  = (unsigned long *) __va(page);
++			address &= 0x001fffff;
++			p += (address >> 12) * 2;
++			printk(KERN_ALERT "%08lx -> *pte = %08lx:%08lx\n",
++			       page, p[1], p[0]);
 +		}
-+		current->thread.trap_no = 19;
-+		current->thread.error_code = error_code;
-+		die_if_kernel("cache flush denied", regs, error_code);
-+		force_sig(SIGSEGV, current);
 +	}
 +}
-+
-+#ifndef CONFIG_XEN
-+fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
-+					  long error_code)
++#else
++static void dump_fault_path(unsigned long address)
 +{
-+#if 0
-+	/* No need to warn about this any longer. */
-+	printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
++	unsigned long page;
++
++	page = read_cr3();
++	page = ((unsigned long *) __va(page))[address >> 22];
++	if (oops_may_print())
++		printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
++		       machine_to_phys(page));
++	/*
++	 * We must not directly access the pte in the highpte
++	 * case if the page table is located in highmem.
++	 * And let's rather not kmap-atomic the pte, just in case
++	 * it's allocated already.
++	 */
++#ifdef CONFIG_HIGHPTE
++	if ((page >> PAGE_SHIFT) >= highstart_pfn)
++		return;
 +#endif
++	if ((page & 1) && oops_may_print()) {
++		page &= PAGE_MASK;
++		address &= 0x003ff000;
++		page = machine_to_phys(page);
++		page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
++		printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
++		       machine_to_phys(page));
++	}
 +}
++#endif
 +
-+fastcall void setup_x86_bogus_stack(unsigned char * stk)
-+{
-+	unsigned long *switch16_ptr, *switch32_ptr;
-+	struct pt_regs *regs;
-+	unsigned long stack_top, stack_bot;
-+	unsigned short iret_frame16_off;
-+	int cpu = smp_processor_id();
-+	/* reserve the space on 32bit stack for the magic switch16 pointer */
-+	memmove(stk, stk + 8, sizeof(struct pt_regs));
-+	switch16_ptr = (unsigned long *)(stk + sizeof(struct pt_regs));
-+	regs = (struct pt_regs *)stk;
-+	/* now the switch32 on 16bit stack */
-+	stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
-+	stack_top = stack_bot +	CPU_16BIT_STACK_SIZE;
-+	switch32_ptr = (unsigned long *)(stack_top - 8);
-+	iret_frame16_off = CPU_16BIT_STACK_SIZE - 8 - 20;
-+	/* copy iret frame on 16bit stack */
-+	memcpy((void *)(stack_bot + iret_frame16_off), &regs->eip, 20);
-+	/* fill in the switch pointers */
-+	switch16_ptr[0] = (regs->esp & 0xffff0000) | iret_frame16_off;
-+	switch16_ptr[1] = __ESPFIX_SS;
-+	switch32_ptr[0] = (unsigned long)stk + sizeof(struct pt_regs) +
-+		8 - CPU_16BIT_STACK_SIZE;
-+	switch32_ptr[1] = __KERNEL_DS;
-+}
-+
-+fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
-+{
-+	unsigned long *switch32_ptr;
-+	unsigned char *stack16, *stack32;
-+	unsigned long stack_top, stack_bot;
-+	int len;
-+	int cpu = smp_processor_id();
-+	stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
-+	stack_top = stack_bot +	CPU_16BIT_STACK_SIZE;
-+	switch32_ptr = (unsigned long *)(stack_top - 8);
-+	/* copy the data from 16bit stack to 32bit stack */
-+	len = CPU_16BIT_STACK_SIZE - 8 - sp;
-+	stack16 = (unsigned char *)(stack_bot + sp);
-+	stack32 = (unsigned char *)
-+		(switch32_ptr[0] + CPU_16BIT_STACK_SIZE - 8 - len);
-+	memcpy(stack32, stack16, len);
-+	return stack32;
-+}
-+#endif
-+
-+/*
-+ *  'math_state_restore()' saves the current math information in the
-+ * old math state array, and gets the new ones from the current task
-+ *
-+ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
-+ * Don't touch unless you *really* know how it works.
-+ *
-+ * Must be called with kernel preemption disabled (in this case,
-+ * local interrupts are disabled at the call-site in entry.S).
-+ */
-+asmlinkage void math_state_restore(struct pt_regs regs)
++static int spurious_fault(struct pt_regs *regs,
++			  unsigned long address,
++			  unsigned long error_code)
 +{
-+	struct thread_info *thread = current_thread_info();
-+	struct task_struct *tsk = thread->task;
-+
-+	/* NB. 'clts' is done for us by Xen during virtual trap. */
-+	if (!tsk_used_math(tsk))
-+		init_fpu(tsk);
-+	restore_fpu(tsk);
-+	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
-+}
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
 +
-+#ifndef CONFIG_MATH_EMULATION
++	/* Reserved-bit violation or user access to kernel space? */
++	if (error_code & 0x0c)
++		return 0;
 +
-+asmlinkage void math_emulate(long arg)
-+{
-+	printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
-+	printk(KERN_EMERG "killing %s.\n",current->comm);
-+	force_sig(SIGFPE,current);
-+	schedule();
-+}
++	pgd = init_mm.pgd + pgd_index(address);
++	if (!pgd_present(*pgd))
++		return 0;
 +
-+#endif /* CONFIG_MATH_EMULATION */
++	pud = pud_offset(pgd, address);
++	if (!pud_present(*pud))
++		return 0;
 +
-+#ifdef CONFIG_X86_F00F_BUG
-+void __init trap_init_f00f_bug(void)
-+{
-+	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
++	pmd = pmd_offset(pud, address);
++	if (!pmd_present(*pmd))
++		return 0;
 +
-+	/*
-+	 * Update the IDT descriptor and reload the IDT so that
-+	 * it uses the read-only mapped virtual address.
-+	 */
-+	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
-+	load_idt(&idt_descr);
-+}
++	pte = pte_offset_kernel(pmd, address);
++	if (!pte_present(*pte))
++		return 0;
++	if ((error_code & 0x02) && !pte_write(*pte))
++		return 0;
++#ifdef CONFIG_X86_PAE
++	if ((error_code & 0x10) && (__pte_val(*pte) & _PAGE_NX))
++		return 0;
 +#endif
 +
++	return 1;
++}
 +
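The early "error_code & 0x0c" test relies on the i386 page-fault error
code layout: bit 0 = protection violation (vs. not present), bit 1 =
write, bit 2 = user mode, bit 3 = reserved bit set, bit 4 = instruction
fetch. So 0x0c rejects user-mode and reserved-bit faults before the
page-table walk. A tiny decode sketch:

	#include <stdio.h>

	int main(void)
	{
		unsigned long error_code = 0x03; /* kernel write, pte present */

		printf("write: %lu, user-or-reserved: %s\n",
		       (error_code >> 1) & 1,
		       (error_code & 0x0c) ? "yes" : "no");	/* 1, no */
		return 0;
	}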
-+/*
-+ * NB. All these are "trap gates" (i.e. events_mask isn't set) except
-+ * for those that specify <dpl>|4 in the second field.
-+ */
-+static trap_info_t trap_table[] = {
-+	{  0, 0, __KERNEL_CS, (unsigned long)divide_error		},
-+	{  1, 0|4, __KERNEL_CS, (unsigned long)debug			},
-+	{  3, 3|4, __KERNEL_CS, (unsigned long)int3			},
-+	{  4, 3, __KERNEL_CS, (unsigned long)overflow			},
-+	{  5, 0, __KERNEL_CS, (unsigned long)bounds			},
-+	{  6, 0, __KERNEL_CS, (unsigned long)invalid_op			},
-+	{  7, 0|4, __KERNEL_CS, (unsigned long)device_not_available	},
-+	{  9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
-+	{ 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS		},
-+	{ 11, 0, __KERNEL_CS, (unsigned long)segment_not_present	},
-+	{ 12, 0, __KERNEL_CS, (unsigned long)stack_segment		},
-+	{ 13, 0, __KERNEL_CS, (unsigned long)general_protection		},
-+	{ 14, 0|4, __KERNEL_CS, (unsigned long)page_fault		},
-+	{ 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment		},
-+	{ 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error		},
-+	{ 17, 0, __KERNEL_CS, (unsigned long)alignment_check		},
-+#ifdef CONFIG_X86_MCE
-+	{ 18, 0, __KERNEL_CS, (unsigned long)machine_check		},
-+#endif
-+	{ 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error	},
-+	{ SYSCALL_VECTOR,  3, __KERNEL_CS, (unsigned long)system_call	},
-+	{  0, 0,	   0, 0						}
-+};
-+
-+void __init trap_init(void)
++static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 +{
-+	HYPERVISOR_set_trap_table(trap_table);
++	unsigned index = pgd_index(address);
++	pgd_t *pgd_k;
++	pud_t *pud, *pud_k;
++	pmd_t *pmd, *pmd_k;
 +
-+	if (cpu_has_fxsr) {
-+		/*
-+		 * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
-+		 * Generates a compile-time "error: zero width for bit-field" if
-+		 * the alignment is wrong.
-+		 */
-+		struct fxsrAlignAssert {
-+			int _:!(offsetof(struct task_struct,
-+					thread.i387.fxsave) & 15);
-+		};
++	pgd += index;
++	pgd_k = init_mm.pgd + index;
 +
-+		printk(KERN_INFO "Enabling fast FPU save and restore... ");
-+		set_in_cr4(X86_CR4_OSFXSR);
-+		printk("done.\n");
-+	}
-+	if (cpu_has_xmm) {
-+		printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
-+				"support... ");
-+		set_in_cr4(X86_CR4_OSXMMEXCPT);
-+		printk("done.\n");
-+	}
++	if (!pgd_present(*pgd_k))
++		return NULL;
 +
 +	/*
-+	 * Should be a barrier for any external CPU state.
++	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
++	 * and redundant with the set_pmd() on non-PAE. As would
++	 * set_pud.
 +	 */
-+	cpu_init();
-+}
-+
-+void smp_trap_init(trap_info_t *trap_ctxt)
-+{
-+	trap_info_t *t = trap_table;
-+
-+	for (t = trap_table; t->address; t++) {
-+		trap_ctxt[t->vector].flags = t->flags;
-+		trap_ctxt[t->vector].cs = t->cs;
-+		trap_ctxt[t->vector].address = t->address;
-+	}
-+}
 +
-+static int __init kstack_setup(char *s)
-+{
-+	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
-+	return 1;
-+}
-+__setup("kstack=", kstack_setup);
++	pud = pud_offset(pgd, address);
++	pud_k = pud_offset(pgd_k, address);
++	if (!pud_present(*pud_k))
++		return NULL;
 +
-+#ifdef CONFIG_STACK_UNWIND
-+static int __init call_trace_setup(char *s)
-+{
-+	if (strcmp(s, "old") == 0)
-+		call_trace = -1;
-+	else if (strcmp(s, "both") == 0)
-+		call_trace = 0;
-+	else if (strcmp(s, "newfallback") == 0)
-+		call_trace = 1;
-+	else if (strcmp(s, "new") == 2)
-+		call_trace = 2;
-+	return 1;
-+}
-+__setup("call_trace=", call_trace_setup);
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/vm86.c tmp-linux-2.6-xen.patch/arch/i386/kernel/vm86.c
---- pristine-linux-2.6.18.2/arch/i386/kernel/vm86.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/vm86.c	2007-07-30 16:35:11.000000000 +0200
-@@ -97,7 +97,9 @@
- struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
- struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
- {
-+#ifndef CONFIG_X86_NO_TSS
- 	struct tss_struct *tss;
-+#endif
- 	struct pt_regs *ret;
- 	unsigned long tmp;
- 
-@@ -122,12 +124,16 @@ struct pt_regs * fastcall save_v86_state
- 		do_exit(SIGSEGV);
- 	}
- 
-+#ifndef CONFIG_X86_NO_TSS
- 	tss = &per_cpu(init_tss, get_cpu());
-+#endif
- 	current->thread.esp0 = current->thread.saved_esp0;
- 	current->thread.sysenter_cs = __KERNEL_CS;
- 	load_esp0(tss, &current->thread);
- 	current->thread.saved_esp0 = 0;
-+#ifndef CONFIG_X86_NO_TSS
- 	put_cpu();
-+#endif
- 
- 	loadsegment(fs, current->thread.saved_fs);
- 	loadsegment(gs, current->thread.saved_gs);
-@@ -251,7 +257,9 @@ out:
- 
- static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
- {
-+#ifndef CONFIG_X86_NO_TSS
- 	struct tss_struct *tss;
-+#endif
- 	long eax;
- /*
-  * make sure the vm86() system call doesn't try to do anything silly
-@@ -296,12 +304,16 @@ static void do_sys_vm86(struct kernel_vm
- 	savesegment(fs, tsk->thread.saved_fs);
- 	savesegment(gs, tsk->thread.saved_gs);
- 
-+#ifndef CONFIG_X86_NO_TSS
- 	tss = &per_cpu(init_tss, get_cpu());
-+#endif
- 	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
- 	if (cpu_has_sep)
- 		tsk->thread.sysenter_cs = 0;
- 	load_esp0(tss, &tsk->thread);
-+#ifndef CONFIG_X86_NO_TSS
- 	put_cpu();
++	pmd = pmd_offset(pud, address);
++	pmd_k = pmd_offset(pud_k, address);
++	if (!pmd_present(*pmd_k))
++		return NULL;
++	if (!pmd_present(*pmd))
++#if CONFIG_XEN_COMPAT > 0x030002
++		set_pmd(pmd, *pmd_k);
++#else
++		/*
++		 * When running on older Xen we must launder *pmd_k through
++		 * pmd_val() to ensure that _PAGE_PRESENT is correctly set.
++		 */
++		set_pmd(pmd, __pmd(pmd_val(*pmd_k)));
 +#endif
- 
- 	tsk->thread.screen_bitmap = info->screen_bitmap;
- 	if (info->flags & VM86_SCREEN_BITMAP)
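
The vm86.c hunks above compile the TSS pointer out under CONFIG_X86_NO_TSS: a paravirtualized guest has no hardware TSS it may write, so updating the ring-0 stack pointer has to go through the hypervisor instead. Roughly, and assuming the HYPERVISOR_stack_switch hypercall (a sketch; the patch's real load_esp0() definition is not shown in this diff):

	/* Sketch: what load_esp0() amounts to when there is no writable TSS. */
	static inline void sketch_load_esp0(struct thread_struct *thread)
	{
		/* Tell Xen which kernel stack to use on the next ring switch. */
		HYPERVISOR_stack_switch(__KERNEL_DS, thread->esp0);
	}
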
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/vmlinux.lds.S tmp-linux-2.6-xen.patch/arch/i386/kernel/vmlinux.lds.S
---- pristine-linux-2.6.18.2/arch/i386/kernel/vmlinux.lds.S	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/vmlinux.lds.S	2007-07-30 16:35:11.000000000 +0200
-@@ -13,6 +13,12 @@ OUTPUT_FORMAT("elf32-i386", "elf32-i386"
- OUTPUT_ARCH(i386)
- ENTRY(phys_startup_32)
- jiffies = jiffies_64;
-+
-+PHDRS {
-+	text PT_LOAD FLAGS(5);	/* R_E */
-+	data PT_LOAD FLAGS(7);	/* RWE */
-+	note PT_NOTE FLAGS(4);	/* R__ */
++	else
++		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
++	return pmd_k;
 +}
- SECTIONS
- {
-   . = __KERNEL_START;
-@@ -26,7 +32,7 @@ SECTIONS
- 	KPROBES_TEXT
- 	*(.fixup)
- 	*(.gnu.warning)
--	} = 0x9090
-+	} :text = 0x9090
- 
-   _etext = .;			/* End of text section */
- 
-@@ -45,10 +51,11 @@ SECTIONS
-   __tracedata_end = .;
- 
-   /* writeable */
-+  . = ALIGN(4096);
-   .data : AT(ADDR(.data) - LOAD_OFFSET) {	/* Data */
- 	*(.data)
- 	CONSTRUCTORS
--	}
-+	} :data
- 
-   . = ALIGN(4096);
-   __nosave_begin = .;
-@@ -184,4 +191,6 @@ SECTIONS
-   STABS_DEBUG
- 
-   DWARF_DEBUG
-+
-+  NOTES
- }
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/kernel/vsyscall-note-xen.S tmp-linux-2.6-xen.patch/arch/i386/kernel/vsyscall-note-xen.S
---- pristine-linux-2.6.18.2/arch/i386/kernel/vsyscall-note-xen.S	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/kernel/vsyscall-note-xen.S	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,32 @@
-+/*
-+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
-+ * Here we can supply some information useful to userland.
-+ * First we get the vanilla i386 note that supplies the kernel version info.
-+ */
-+
-+#include "vsyscall-note.S"
 +
 +/*
-+ * Now we add a special note telling glibc's dynamic linker a fake hardware
-+ * flavor that it will use to choose the search path for libraries in the
-+ * same way it uses real hardware capabilities like "mmx".
-+ * We supply "nosegneg" as the fake capability, to indicate that we
-+ * do not like negative offsets in instructions using segment overrides,
-+ * since we implement those inefficiently.  This makes it possible to
-+ * install libraries optimized to avoid those access patterns in someplace
-+ * like /lib/i686/tls/nosegneg.  Note that an /etc/ld.so.conf.d/file
-+ * corresponding to the bits here is needed to make ldconfig work right.
-+ * It should contain:
-+ *	hwcap 0 nosegneg
-+ * to match the mapping of bit to name that we give here.
++ * Handle a fault on the vmalloc or module mapping area
++ *
++ * This assumes no large pages in there.
 + */
-+#define NOTE_KERNELCAP_BEGIN(ncaps, mask) \
-+	ASM_ELF_NOTE_BEGIN(".note.kernelcap", "a", "GNU", 2) \
-+	.long ncaps, mask
-+#define NOTE_KERNELCAP(bit, name) \
-+	.byte bit; .asciz name
-+#define NOTE_KERNELCAP_END ASM_ELF_NOTE_END
-+
-+NOTE_KERNELCAP_BEGIN(1, 1)
-+NOTE_KERNELCAP(0, "nosegneg")
-+NOTE_KERNELCAP_END
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/mach-xen/Makefile tmp-linux-2.6-xen.patch/arch/i386/mach-xen/Makefile
---- pristine-linux-2.6.18.2/arch/i386/mach-xen/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/mach-xen/Makefile	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,5 @@
-+#
-+# Makefile for the linux kernel.
-+#
++static inline int vmalloc_fault(unsigned long address)
++{
++	unsigned long pgd_paddr;
++	pmd_t *pmd_k;
++	pte_t *pte_k;
++	/*
++	 * Synchronize this task's top level page-table
++	 * with the 'reference' page table.
++	 *
++	 * Do _not_ use "current" here. We might be inside
++	 * an interrupt in the middle of a task switch..
++	 */
++	pgd_paddr = read_cr3();
++	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
++	if (!pmd_k)
++		return -1;
++	pte_k = pte_offset_kernel(pmd_k, address);
++	if (!pte_present(*pte_k))
++		return -1;
++	return 0;
++}
 +
-+obj-y				:= setup.o
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/mach-xen/setup.c tmp-linux-2.6-xen.patch/arch/i386/mach-xen/setup.c
---- pristine-linux-2.6.18.2/arch/i386/mach-xen/setup.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/mach-xen/setup.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,147 @@
 +/*
-+ *	Machine specific setup for generic
++ * This routine handles page faults.  It determines the address,
++ * and the problem, and then passes it off to one of the appropriate
++ * routines.
++ *
++ * error_code:
++ *	bit 0 == 0 means no page found, 1 means protection fault
++ *	bit 1 == 0 means read, 1 means write
++ *	bit 2 == 0 means kernel, 1 means user-mode
++ *	bit 3 == 1 means use of reserved bit detected
++ *	bit 4 == 1 means fault was an instruction fetch
 + */
-+
-+#include <linux/smp.h>
-+#include <linux/init.h>
-+#include <linux/interrupt.h>
-+#include <linux/module.h>
-+#include <asm/acpi.h>
-+#include <asm/arch_hooks.h>
-+#include <asm/e820.h>
-+#include <asm/setup.h>
-+#include <asm/fixmap.h>
-+
-+#include <xen/interface/callback.h>
-+#include <xen/interface/memory.h>
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+#define DEFAULT_SEND_IPI	(1)
-+#else
-+#define DEFAULT_SEND_IPI	(0)
-+#endif
-+
-+int no_broadcast=DEFAULT_SEND_IPI;
-+
-+static __init int no_ipi_broadcast(char *str)
++fastcall void __kprobes do_page_fault(struct pt_regs *regs,
++				      unsigned long error_code)
 +{
-+	get_option(&str, &no_broadcast);
-+	printk ("Using %s mode\n", no_broadcast ? "No IPI Broadcast" :
-+											"IPI Broadcast");
-+	return 1;
-+}
++	struct task_struct *tsk;
++	struct mm_struct *mm;
++	struct vm_area_struct * vma;
++	unsigned long address;
++	int write, si_code;
 +
-+__setup("no_ipi_broadcast", no_ipi_broadcast);
++	/* get the address */
++	address = read_cr2();
 +
-+static int __init print_ipi_mode(void)
-+{
-+	printk ("Using IPI %s mode\n", no_broadcast ? "No-Shortcut" :
-+											"Shortcut");
-+	return 0;
-+}
++	/* Set the "privileged fault" bit to something sane. */
++	error_code &= ~4;
++	error_code |= (regs->xcs & 2) << 1;
++	if (regs->eflags & X86_EFLAGS_VM)
++		error_code |= 4;
 +
-+late_initcall(print_ipi_mode);
++	tsk = current;
 +
-+/**
-+ * machine_specific_memory_setup - Hook for machine specific memory setup.
-+ *
-+ * Description:
-+ *	This is included late in kernel/setup.c so that it can make
-+ *	use of all of the static functions.
-+ **/
++	si_code = SEGV_MAPERR;
 +
-+char * __init machine_specific_memory_setup(void)
-+{
-+	int rc;
-+	struct xen_memory_map memmap;
 +	/*
-+	 * This is rather large for a stack variable but this early in
-+	 * the boot process we know we have plenty slack space.
++	 * We fault-in kernel-space virtual memory on-demand. The
++	 * 'reference' page table is init_mm.pgd.
++	 *
++	 * NOTE! We MUST NOT take any locks for this case. We may
++	 * be in an interrupt or a critical region, and should
++	 * only copy the information from the master page table,
++	 * nothing more.
++	 *
++	 * This verifies that the fault happens in kernel space
++	 * (error_code & 4) == 0, and that the fault was not a
++	 * protection error (error_code & 9) == 0.
 +	 */
-+	struct e820entry map[E820MAX];
-+
-+	memmap.nr_entries = E820MAX;
-+	set_xen_guest_handle(memmap.buffer, map);
-+
-+	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
-+	if ( rc == -ENOSYS ) {
-+		memmap.nr_entries = 1;
-+		map[0].addr = 0ULL;
-+		map[0].size = PFN_PHYS((unsigned long long)xen_start_info->nr_pages);
-+		/* 8MB slack (to balance backend allocations). */
-+		map[0].size += 8ULL << 20;
-+		map[0].type = E820_RAM;
-+		rc = 0;
++	if (unlikely(address >= TASK_SIZE)) {
++#ifdef CONFIG_XEN
++		/* Faults in hypervisor area can never be patched up. */
++		if (address >= hypervisor_virt_start)
++			goto bad_area_nosemaphore;
++#endif
++		if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
++			return;
++		/* Can take a spurious fault if mapping changes R/O -> R/W. */
++		if (spurious_fault(regs, address, error_code))
++			return;
++		if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
++						SIGSEGV) == NOTIFY_STOP)
++			return;
++		/* 
++		 * Don't take the mm semaphore here. If we fixup a prefetch
++		 * fault we could otherwise deadlock.
++		 */
++		goto bad_area_nosemaphore;
 +	}
-+	BUG_ON(rc);
 +
-+	sanitize_e820_map(map, (char *)&memmap.nr_entries);
++	if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
++					SIGSEGV) == NOTIFY_STOP)
++		return;
 +
-+	BUG_ON(copy_e820_map(map, (char)memmap.nr_entries) < 0);
++	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
++	   fault has been handled. */
++	if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
++		local_irq_enable();
 +
-+	return "Xen";
-+}
++	mm = tsk->mm;
 +
++	/*
++	 * If we're in an interrupt, have no user context or are running in an
++	 * atomic region then we must not take the fault..
++	 */
++	if (in_atomic() || !mm)
++		goto bad_area_nosemaphore;
 +
-+extern void hypervisor_callback(void);
-+extern void failsafe_callback(void);
-+extern void nmi(void);
++	/* When running in the kernel we expect faults to occur only to
++	 * addresses in user space.  All other faults represent errors in the
++	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
++	 * erroneous fault occurring in a code path which already holds mmap_sem
++	 * we will deadlock attempting to validate the fault against the
++	 * address space.  Luckily the kernel only validly references user
++	 * space from well defined areas of code, which are listed in the
++	 * exceptions table.
++	 *
++	 * As the vast majority of faults will be valid we will only perform
++	 * the source reference check when there is a possibility of a deadlock.
++	 * Attempt to lock the address space, if we cannot we then validate the
++	 * source.  If this is invalid we can skip the address space check,
++	 * thus avoiding the deadlock.
++	 */
++	if (!down_read_trylock(&mm->mmap_sem)) {
++		if ((error_code & 4) == 0 &&
++		    !search_exception_tables(regs->eip))
++			goto bad_area_nosemaphore;
++		down_read(&mm->mmap_sem);
++	}
 +
-+unsigned long *machine_to_phys_mapping;
-+EXPORT_SYMBOL(machine_to_phys_mapping);
-+unsigned int machine_to_phys_order;
-+EXPORT_SYMBOL(machine_to_phys_order);
++	vma = find_vma(mm, address);
++	if (!vma)
++		goto bad_area;
++	if (vma->vm_start <= address)
++		goto good_area;
++	if (!(vma->vm_flags & VM_GROWSDOWN))
++		goto bad_area;
++	if (error_code & 4) {
++		/*
++		 * Accessing the stack below %esp is always a bug.
++		 * The large cushion allows instructions like enter
++		 * and pusha to work.  ("enter $65535,$31" pushes
++		 * 32 pointers and then decrements %esp by 65535.)
++		 */
++		if (address + 65536 + 32 * sizeof(unsigned long) < regs->esp)
++			goto bad_area;
++	}
++	if (expand_stack(vma, address))
++		goto bad_area;
++/*
++ * Ok, we have a good vm_area for this memory access, so
++ * we can handle it..
++ */
++good_area:
++	si_code = SEGV_ACCERR;
++	write = 0;
++	switch (error_code & 3) {
++		default:	/* 3: write, present */
++#ifdef TEST_VERIFY_AREA
++			if (regs->cs == GET_KERNEL_CS())
++				printk("WP fault at %08lx\n", regs->eip);
++#endif
++			/* fall through */
++		case 2:		/* write, not present */
++			if (!(vma->vm_flags & VM_WRITE))
++				goto bad_area;
++			write++;
++			break;
++		case 1:		/* read, present */
++			goto bad_area;
++		case 0:		/* read, not present */
++			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
++				goto bad_area;
++	}
 +
-+void __init machine_specific_arch_setup(void)
-+{
-+	int ret;
-+	struct xen_machphys_mapping mapping;
-+	unsigned long machine_to_phys_nr_ents;
-+	struct xen_platform_parameters pp;
-+	static struct callback_register __initdata event = {
-+		.type = CALLBACKTYPE_event,
-+		.address = { __KERNEL_CS, (unsigned long)hypervisor_callback },
-+	};
-+	static struct callback_register __initdata failsafe = {
-+		.type = CALLBACKTYPE_failsafe,
-+		.address = { __KERNEL_CS, (unsigned long)failsafe_callback },
-+	};
-+	static struct callback_register __initdata nmi_cb = {
-+		.type = CALLBACKTYPE_nmi,
-+		.address = { __KERNEL_CS, (unsigned long)nmi },
-+	};
++ survive:
++	/*
++	 * If for any reason at all we couldn't handle the fault,
++	 * make sure we exit gracefully rather than endlessly redo
++	 * the fault.
++	 */
++	switch (handle_mm_fault(mm, vma, address, write)) {
++		case VM_FAULT_MINOR:
++			tsk->min_flt++;
++			break;
++		case VM_FAULT_MAJOR:
++			tsk->maj_flt++;
++			break;
++		case VM_FAULT_SIGBUS:
++			goto do_sigbus;
++		case VM_FAULT_OOM:
++			goto out_of_memory;
++		default:
++			BUG();
++	}
 +
-+	ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
-+	if (ret == 0)
-+		ret = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+	if (ret == -ENOSYS)
-+		ret = HYPERVISOR_set_callbacks(
-+			event.address.cs, event.address.eip,
-+			failsafe.address.cs, failsafe.address.eip);
-+#endif
-+	BUG_ON(ret);
++	/*
++	 * Did it hit the DOS screen memory VA from vm86 mode?
++	 */
++	if (regs->eflags & VM_MASK) {
++		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
++		if (bit < 32)
++			tsk->thread.screen_bitmap |= 1 << bit;
++	}
++	up_read(&mm->mmap_sem);
++	return;
 +
-+	ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb);
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+	if (ret == -ENOSYS) {
-+		static struct xennmi_callback __initdata cb = {
-+			.handler_address = (unsigned long)nmi
-+		};
++/*
++ * Something tried to access memory that isn't in our memory map..
++ * Fix it, but check if it's kernel or user first..
++ */
++bad_area:
++	up_read(&mm->mmap_sem);
 +
-+		HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
++bad_area_nosemaphore:
++	/* User mode accesses just cause a SIGSEGV */
++	if (error_code & 4) {
++		/* 
++		 * Valid to do another page fault here because this one came 
++		 * from user space.
++		 */
++		if (is_prefetch(regs, address, error_code))
++			return;
++
++		tsk->thread.cr2 = address;
++		/* Kernel addresses are always protection faults */
++		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
++		tsk->thread.trap_no = 14;
++		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
++		return;
 +	}
-+#endif
 +
-+	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
-+		set_fixaddr_top(pp.virt_start);
++#ifdef CONFIG_X86_F00F_BUG
++	/*
++	 * Pentium F0 0F C7 C8 bug workaround.
++	 */
++	if (boot_cpu_data.f00f_bug) {
++		unsigned long nr;
++		
++		nr = (address - idt_descr.address) >> 3;
 +
-+	machine_to_phys_mapping = (unsigned long *)MACH2PHYS_VIRT_START;
-+	machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
-+	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
-+		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
-+		machine_to_phys_nr_ents = mapping.max_mfn + 1;
++		if (nr == 6) {
++			do_invalid_op(regs, 0);
++			return;
++		}
 +	}
-+	while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents )
-+		machine_to_phys_order++;
-+}
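
machine_specific_arch_setup() above finishes by sizing the machine-to-physical table: machine_to_phys_order is raised until 2^order covers the entry count Xen reported. A lookup helper would then bounds-check a machine frame number against that order before indexing the table, roughly like this (a sketch, not the patch's actual mfn_to_pfn()):

	/* Sketch: bounds-checked m2p lookup using the globals set up above. */
	static inline unsigned long sketch_mfn_to_pfn(unsigned long mfn)
	{
		if (mfn >= (1UL << machine_to_phys_order))
			return ~0UL;	/* MFN beyond the mapped m2p table */
		return machine_to_phys_mapping[mfn];
	}
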
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/Makefile tmp-linux-2.6-xen.patch/arch/i386/Makefile
---- pristine-linux-2.6.18.2/arch/i386/Makefile	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/Makefile	2007-07-30 16:35:11.000000000 +0200
-@@ -48,6 +48,11 @@ CFLAGS				+= $(shell if [ $(call cc-vers
- 
- CFLAGS += $(cflags-y)
- 
-+cppflags-$(CONFIG_XEN) += \
-+	-D__XEN_INTERFACE_VERSION__=$(CONFIG_XEN_INTERFACE_VERSION)
++#endif
 +
-+CPPFLAGS += $(cppflags-y)
++no_context:
++	/* Are we prepared to handle this kernel fault?  */
++	if (fixup_exception(regs))
++		return;
 +
- # Default subarch .c files
- mcore-y  := mach-default
- 
-@@ -71,6 +76,10 @@ mcore-$(CONFIG_X86_BIGSMP)	:= mach-defau
- mflags-$(CONFIG_X86_SUMMIT) := -Iinclude/asm-i386/mach-summit
- mcore-$(CONFIG_X86_SUMMIT)  := mach-default
- 
-+# Xen subarch support
-+mflags-$(CONFIG_X86_XEN)	:= -Iinclude/asm-i386/mach-xen
-+mcore-$(CONFIG_X86_XEN)		:= mach-xen
++	/* 
++	 * Valid to do another page fault here, because if this fault
++	 * had been triggered by is_prefetch fixup_exception would have 
++	 * handled it.
++	 */
++ 	if (is_prefetch(regs, address, error_code))
++ 		return;
 +
- # generic subarchitecture
- mflags-$(CONFIG_X86_GENERICARCH) := -Iinclude/asm-i386/mach-generic
- mcore-$(CONFIG_X86_GENERICARCH) := mach-default
-@@ -105,6 +114,19 @@ boot := arch/i386/boot
- PHONY += zImage bzImage compressed zlilo bzlilo \
-          zdisk bzdisk fdimage fdimage144 fdimage288 isoimage install
- 
-+ifdef CONFIG_XEN
-+CPPFLAGS := -Iinclude$(if $(KBUILD_SRC),2)/asm/mach-xen $(CPPFLAGS)
-+head-y := arch/i386/kernel/head-xen.o arch/i386/kernel/init_task-xen.o
-+boot := arch/i386/boot-xen
-+.PHONY: vmlinuz
-+all: vmlinuz
++/*
++ * Oops. The kernel tried to access some bad page. We'll have to
++ * terminate things with extreme prejudice.
++ */
 +
-+vmlinuz: vmlinux
-+	$(Q)$(MAKE) $(build)=$(boot) $@
++	bust_spinlocks(1);
++
++	if (oops_may_print()) {
++	#ifdef CONFIG_X86_PAE
++		if (error_code & 16) {
++			pte_t *pte = lookup_address(address);
++
++			if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
++				printk(KERN_CRIT "kernel tried to execute "
++					"NX-protected page - exploit attempt? "
++					"(uid: %d)\n", current->uid);
++		}
++	#endif
++		if (address < PAGE_SIZE)
++			printk(KERN_ALERT "BUG: unable to handle kernel NULL "
++					"pointer dereference");
++		else
++			printk(KERN_ALERT "BUG: unable to handle kernel paging"
++					" request");
++		printk(" at virtual address %08lx\n",address);
++		printk(KERN_ALERT " printing eip:\n");
++		printk("%08lx\n", regs->eip);
++	}
++	dump_fault_path(address);
++	tsk->thread.cr2 = address;
++	tsk->thread.trap_no = 14;
++	tsk->thread.error_code = error_code;
++	die("Oops", regs, error_code);
++	bust_spinlocks(0);
++	do_exit(SIGKILL);
 +
-+install:
-+	$(Q)$(MAKE) $(build)=$(boot) XENGUEST=$(XENGUEST) $@
-+else
- all: bzImage
- 
- # KBUILD_IMAGE specify target image being built
-@@ -127,6 +149,7 @@ fdimage fdimage144 fdimage288 isoimage: 
- 
- install:
- 	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install
-+endif
- 
- archclean:
- 	$(Q)$(MAKE) $(clean)=arch/i386/boot
-@@ -145,3 +168,4 @@ endef
- CLEAN_FILES += arch/$(ARCH)/boot/fdimage \
- 	       arch/$(ARCH)/boot/image.iso \
- 	       arch/$(ARCH)/boot/mtools.conf
-+CLEAN_FILES += vmlinuz vmlinux-stripped
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/mm/fault-xen.c tmp-linux-2.6-xen.patch/arch/i386/mm/fault-xen.c
---- pristine-linux-2.6.18.2/arch/i386/mm/fault-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/mm/fault-xen.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,782 @@
 +/*
-+ *  linux/arch/i386/mm/fault.c
-+ *
-+ *  Copyright (C) 1995  Linus Torvalds
++ * We ran out of memory, or some other thing happened to us that made
++ * us unable to handle the page fault gracefully.
 + */
++out_of_memory:
++	up_read(&mm->mmap_sem);
++	if (tsk->pid == 1) {
++		yield();
++		down_read(&mm->mmap_sem);
++		goto survive;
++	}
++	printk("VM: killing process %s\n", tsk->comm);
++	if (error_code & 4)
++		do_exit(SIGKILL);
++	goto no_context;
 +
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/ptrace.h>
-+#include <linux/mman.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/interrupt.h>
-+#include <linux/init.h>
-+#include <linux/tty.h>
-+#include <linux/vt_kern.h>		/* For unblank_screen() */
-+#include <linux/highmem.h>
-+#include <linux/module.h>
-+#include <linux/kprobes.h>
++do_sigbus:
++	up_read(&mm->mmap_sem);
 +
-+#include <asm/system.h>
-+#include <asm/uaccess.h>
-+#include <asm/desc.h>
-+#include <asm/kdebug.h>
++	/* Kernel mode? Handle exceptions or die */
++	if (!(error_code & 4))
++		goto no_context;
 +
-+extern void die(const char *,struct pt_regs *,long);
++	/* User space => ok to do another page fault */
++	if (is_prefetch(regs, address, error_code))
++		return;
 +
-+#ifdef CONFIG_KPROBES
-+ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
-+int register_page_fault_notifier(struct notifier_block *nb)
-+{
-+	vmalloc_sync_all();
-+	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
++	tsk->thread.cr2 = address;
++	tsk->thread.error_code = error_code;
++	tsk->thread.trap_no = 14;
++	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
 +}
 +
-+int unregister_page_fault_notifier(struct notifier_block *nb)
++#if !HAVE_SHARED_KERNEL_PMD
++void vmalloc_sync_all(void)
 +{
-+	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
++	/*
++	 * Note that races in the updates of insync and start aren't
++	 * problematic: insync can only get set bits added, and updates to
++	 * start are only improving performance (without affecting correctness
++	 * if undone).
++	 * XEN: To work on PAE, we need to iterate over PMDs rather than PGDs.
++	 *      This change works just fine with 2-level paging too.
++	 */
++#define sync_index(a) ((a) >> PMD_SHIFT)
++	static DECLARE_BITMAP(insync, PTRS_PER_PGD*PTRS_PER_PMD);
++	static unsigned long start = TASK_SIZE;
++	unsigned long address;
++
++	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
++	for (address = start;
++	     address >= TASK_SIZE && address < hypervisor_virt_start;
++	     address += 1UL << PMD_SHIFT) {
++		if (!test_bit(sync_index(address), insync)) {
++			unsigned long flags;
++			struct page *page;
++
++			spin_lock_irqsave(&pgd_lock, flags);
++			/* XEN: failure path assumes non-empty pgd_list. */
++			if (unlikely(!pgd_list)) {
++				spin_unlock_irqrestore(&pgd_lock, flags);
++				return;
++			}
++			for (page = pgd_list; page; page =
++					(struct page *)page->index)
++				if (!vmalloc_sync_one(page_address(page),
++								address)) {
++					BUG_ON(page != pgd_list);
++					break;
++				}
++			spin_unlock_irqrestore(&pgd_lock, flags);
++			if (!page)
++				set_bit(sync_index(address), insync);
++		}
++		if (address == start && test_bit(sync_index(address), insync))
++			start = address + (1UL << PMD_SHIFT);
++	}
 +}
++#endif
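
do_page_fault() above documents the i386 error_code layout in its header comment and then tests the bits as raw masks (error_code & 4, the 0x0d check before vmalloc_fault(), the & 16 NX test, and so on). Spelled out with illustrative names (a sketch, not code from the patch):

	/* Sketch: the error_code bits that do_page_fault() tests above. */
	static void sketch_decode_error_code(unsigned long error_code)
	{
		int prot  = !!(error_code & 1);	 /* 0 not-present, 1 protection */
		int write = !!(error_code & 2);	 /* 0 read, 1 write */
		int user  = !!(error_code & 4);	 /* 0 kernel, 1 user mode */
		int rsvd  = !!(error_code & 8);	 /* reserved bit set in an entry */
		int fetch = !!(error_code & 16); /* instruction fetch (NX) */

		/* 0x0d is prot|user|rsvd: only a kernel-mode, not-present,
		 * non-reserved fault can be a lazily synced vmalloc fault. */
		printk("prot=%d write=%d user=%d rsvd=%d fetch=%d\n",
		       prot, write, user, rsvd, fetch);
	}
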
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/mm/highmem-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/mm/highmem-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,136 @@
++#include <linux/highmem.h>
++#include <linux/module.h>
 +
-+static inline int notify_page_fault(enum die_val val, const char *str,
-+			struct pt_regs *regs, long err, int trap, int sig)
++void *kmap(struct page *page)
 +{
-+	struct die_args args = {
-+		.regs = regs,
-+		.str = str,
-+		.err = err,
-+		.trapnr = trap,
-+		.signr = sig
-+	};
-+	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
++	might_sleep();
++	if (!PageHighMem(page))
++		return page_address(page);
++	return kmap_high(page);
 +}
-+#else
-+static inline int notify_page_fault(enum die_val val, const char *str,
-+			struct pt_regs *regs, long err, int trap, int sig)
++
++void kunmap(struct page *page)
 +{
-+	return NOTIFY_DONE;
++	if (in_interrupt())
++		BUG();
++	if (!PageHighMem(page))
++		return;
++	kunmap_high(page);
 +}
-+#endif
-+
 +
 +/*
-+ * Unlock any spinlocks which will prevent us from getting the
-+ * message out 
++ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
++ * no global lock is needed and because the kmap code must perform a global TLB
++ * invalidation when the kmap pool wraps.
++ *
++ * However when holding an atomic kmap it is not legal to sleep, so atomic
++ * kmaps are appropriate for short, tight code paths only.
 + */
-+void bust_spinlocks(int yes)
++static void *__kmap_atomic(struct page *page, enum km_type type, pgprot_t prot)
 +{
-+	int loglevel_save = console_loglevel;
++	enum fixed_addresses idx;
++	unsigned long vaddr;
 +
-+	if (yes) {
-+		oops_in_progress = 1;
++	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
++	inc_preempt_count();
++	if (!PageHighMem(page))
++		return page_address(page);
++
++	idx = type + KM_TYPE_NR*smp_processor_id();
++	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++#ifdef CONFIG_DEBUG_HIGHMEM
++	if (!pte_none(*(kmap_pte-idx)))
++		BUG();
++#endif
++	set_pte_at_sync(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
++
++	return (void*) vaddr;
++}
++
++void *kmap_atomic(struct page *page, enum km_type type)
++{
++	return __kmap_atomic(page, type, kmap_prot);
++}
++
++/* Same as kmap_atomic but with PAGE_KERNEL_RO page protection. */
++void *kmap_atomic_pte(struct page *page, enum km_type type)
++{
++	return __kmap_atomic(page, type,
++	                     test_bit(PG_pinned, &page->flags)
++	                     ? PAGE_KERNEL_RO : kmap_prot);
++}
++
++void kunmap_atomic(void *kvaddr, enum km_type type)
++{
++#if defined(CONFIG_DEBUG_HIGHMEM) || defined(CONFIG_XEN)
++	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
++	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
++
++	if (vaddr < FIXADDR_START) { // FIXME
++		dec_preempt_count();
++		preempt_check_resched();
 +		return;
 +	}
-+#ifdef CONFIG_VT
-+	unblank_screen();
 +#endif
-+	oops_in_progress = 0;
++
++#if defined(CONFIG_DEBUG_HIGHMEM)
++	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
++		BUG();
++
 +	/*
-+	 * OK, the message is on the console.  Now we call printk()
-+	 * without oops_in_progress set so that printk will give klogd
-+	 * a poke.  Hold onto your hats...
++	 * force other mappings to Oops if they try to access
++	 * this pte without remapping it first
 +	 */
-+	console_loglevel = 15;		/* NMI oopser may have shut the console up */
-+	printk(" ");
-+	console_loglevel = loglevel_save;
++	pte_clear(&init_mm, vaddr, kmap_pte-idx);
++	__flush_tlb_one(vaddr);
++#elif defined(CONFIG_XEN)
++	/*
++	 * We must ensure there are no dangling pagetable references when
++	 * returning memory to Xen (decrease_reservation).
++	 * XXX TODO: We could make this faster by only zapping when
++	 * kmap_flush_unused is called but that is trickier and more invasive.
++	 */
++	pte_clear(&init_mm, vaddr, kmap_pte-idx);
++#endif
++
++	dec_preempt_count();
++	preempt_check_resched();
 +}
 +
-+/*
-+ * Return EIP plus the CS segment base.  The segment limit is also
-+ * adjusted, clamped to the kernel/user address space (whichever is
-+ * appropriate), and returned in *eip_limit.
-+ *
-+ * The segment is checked, because it might have been changed by another
-+ * task between the original faulting instruction and here.
-+ *
-+ * If CS is no longer a valid code segment, or if EIP is beyond the
-+ * limit, or if it is a kernel address when CS is not a kernel segment,
-+ * then the returned value will be greater than *eip_limit.
-+ * 
-+ * This is slow, but is very rarely executed.
++/* This is the same as kmap_atomic() but can map memory that doesn't
++ * have a struct page associated with it.
 + */
-+static inline unsigned long get_segment_eip(struct pt_regs *regs,
-+					    unsigned long *eip_limit)
++void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 +{
-+	unsigned long eip = regs->eip;
-+	unsigned seg = regs->xcs & 0xffff;
-+	u32 seg_ar, seg_limit, base, *desc;
++	enum fixed_addresses idx;
++	unsigned long vaddr;
 +
-+	/* Unlikely, but must come before segment checks. */
-+	if (unlikely(regs->eflags & VM_MASK)) {
-+		base = seg << 4;
-+		*eip_limit = base + 0xffff;
-+		return base + (eip & 0xffff);
-+	}
++	inc_preempt_count();
 +
-+	/* The standard kernel/user address space limit. */
-+	*eip_limit = (seg & 2) ? USER_DS.seg : KERNEL_DS.seg;
-+	
-+	/* By far the most common cases. */
-+	if (likely(seg == __USER_CS || seg == GET_KERNEL_CS()))
-+		return eip;
++	idx = type + KM_TYPE_NR*smp_processor_id();
++	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
++	__flush_tlb_one(vaddr);
 +
-+	/* Check the segment exists, is within the current LDT/GDT size,
-+	   that kernel/user (ring 0..3) has the appropriate privilege,
-+	   that it's a code segment, and get the limit. */
-+	__asm__ ("larl %3,%0; lsll %3,%1"
-+		 : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
-+	if ((~seg_ar & 0x9800) || eip > seg_limit) {
-+		*eip_limit = 0;
-+		return 1;	 /* So that returned eip > *eip_limit. */
-+	}
-+
-+	/* Get the GDT/LDT descriptor base. 
-+	   When you look for races in this code remember that
-+	   LDT and other horrors are only used in user space. */
-+	if (seg & (1<<2)) {
-+		/* Must lock the LDT while reading it. */
-+		down(&current->mm->context.sem);
-+		desc = current->mm->context.ldt;
-+		desc = (void *)desc + (seg & ~7);
-+	} else {
-+		/* Must disable preemption while reading the GDT. */
-+ 		desc = (u32 *)get_cpu_gdt_table(get_cpu());
-+		desc = (void *)desc + (seg & ~7);
-+	}
++	return (void*) vaddr;
++}
 +
-+	/* Decode the code segment base from the descriptor */
-+	base = get_desc_base((unsigned long *)desc);
++struct page *kmap_atomic_to_page(void *ptr)
++{
++	unsigned long idx, vaddr = (unsigned long)ptr;
++	pte_t *pte;
 +
-+	if (seg & (1<<2)) { 
-+		up(&current->mm->context.sem);
-+	} else
-+		put_cpu();
++	if (vaddr < FIXADDR_START)
++		return virt_to_page(ptr);
 +
-+	/* Adjust EIP and segment limit, and clamp at the kernel limit.
-+	   It's legitimate for segments to wrap at 0xffffffff. */
-+	seg_limit += base;
-+	if (seg_limit < *eip_limit && seg_limit >= base)
-+		*eip_limit = seg_limit;
-+	return eip + base;
++	idx = virt_to_fix(vaddr);
++	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
++	return pte_page(*pte);
 +}
 +
-+/* 
-+ * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
-+ * Check that here and ignore it.
++EXPORT_SYMBOL(kmap);
++EXPORT_SYMBOL(kunmap);
++EXPORT_SYMBOL(kmap_atomic);
++EXPORT_SYMBOL(kmap_atomic_pte);
++EXPORT_SYMBOL(kunmap_atomic);
++EXPORT_SYMBOL(kmap_atomic_to_page);
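
As the comments in highmem-xen.c say, atomic kmaps are only for short, non-sleeping sections, and each mapping must be released through the same km_type slot it was taken from. The usual calling pattern looks like this (a usage sketch, not code from the patch):

	/* Sketch: a short critical section built on the functions above. */
	static void sketch_copy_from_page(struct page *page, void *dst, size_t len)
	{
		char *src = kmap_atomic(page, KM_USER0);

		memcpy(dst, src, len);		/* must not sleep while mapped */
		kunmap_atomic(src, KM_USER0);	/* same slot as kmap_atomic() */
	}
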
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/mm/hypervisor.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/mm/hypervisor.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,602 @@
++/******************************************************************************
++ * mm/hypervisor.c
++ * 
++ * Update page tables via the hypervisor.
++ * 
++ * Copyright (c) 2002-2004, K A Fraser
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
 + */
-+static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
-+{ 
-+	unsigned long limit;
-+	unsigned long instr = get_segment_eip (regs, &limit);
-+	int scan_more = 1;
-+	int prefetch = 0; 
-+	int i;
-+
-+	for (i = 0; scan_more && i < 15; i++) { 
-+		unsigned char opcode;
-+		unsigned char instr_hi;
-+		unsigned char instr_lo;
-+
-+		if (instr > limit)
-+			break;
-+		if (__get_user(opcode, (unsigned char __user *) instr))
-+			break; 
 +
-+		instr_hi = opcode & 0xf0; 
-+		instr_lo = opcode & 0x0f; 
-+		instr++;
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/vmalloc.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++#include <asm/hypervisor.h>
++#include <xen/balloon.h>
++#include <xen/features.h>
++#include <xen/interface/memory.h>
++#include <linux/module.h>
++#include <linux/percpu.h>
++#include <asm/tlbflush.h>
++#include <linux/highmem.h>
 +
-+		switch (instr_hi) { 
-+		case 0x20:
-+		case 0x30:
-+			/* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
-+			scan_more = ((instr_lo & 7) == 0x6);
-+			break;
-+			
-+		case 0x60:
-+			/* 0x64 thru 0x67 are valid prefixes in all modes. */
-+			scan_more = (instr_lo & 0xC) == 0x4;
-+			break;		
-+		case 0xF0:
-+			/* 0xF0, 0xF2, and 0xF3 are valid prefixes */
-+			scan_more = !instr_lo || (instr_lo>>1) == 1;
-+			break;			
-+		case 0x00:
-+			/* Prefetch instruction is 0x0F0D or 0x0F18 */
-+			scan_more = 0;
-+			if (instr > limit)
-+				break;
-+			if (__get_user(opcode, (unsigned char __user *) instr))
-+				break;
-+			prefetch = (instr_lo == 0xF) &&
-+				(opcode == 0x0D || opcode == 0x18);
-+			break;			
-+		default:
-+			scan_more = 0;
-+			break;
-+		} 
-+	}
-+	return prefetch;
++void xen_l1_entry_update(pte_t *ptr, pte_t val)
++{
++	mmu_update_t u;
++#ifdef CONFIG_HIGHPTE
++	u.ptr = ((unsigned long)ptr >= (unsigned long)high_memory) ?
++		arbitrary_virt_to_machine(ptr) : virt_to_machine(ptr);
++#else
++	u.ptr = virt_to_machine(ptr);
++#endif
++	u.val = __pte_val(val);
++	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 +}
++EXPORT_SYMBOL_GPL(xen_l1_entry_update);
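
/*
 * Illustrative sketch, not part of the patch: the one-entry pattern in
 * xen_l1_entry_update() above generalizes, since HYPERVISOR_mmu_update()
 * takes an array; batching several PTE updates saves hypercalls.
 */
static void sketch_batch_l1_updates(pte_t *ptrs[], const pte_t vals[],
				    unsigned int n)
{
	mmu_update_t u[16];
	unsigned int i;

	BUG_ON(n > ARRAY_SIZE(u));
	for (i = 0; i < n; i++) {
		u[i].ptr = virt_to_machine(ptrs[i]);
		u[i].val = __pte_val(vals[i]);
	}
	BUG_ON(HYPERVISOR_mmu_update(u, n, NULL, DOMID_SELF) < 0);
}
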
 +
-+static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
-+			      unsigned long error_code)
++void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
 +{
-+	if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-+		     boot_cpu_data.x86 >= 6)) {
-+		/* Catch an obscure case of prefetch inside an NX page. */
-+		if (nx_enabled && (error_code & 16))
-+			return 0;
-+		return __is_prefetch(regs, addr);
-+	}
-+	return 0;
-+} 
++	mmu_update_t u;
++	u.ptr = virt_to_machine(ptr);
++	u.val = __pmd_val(val);
++	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
 +
-+static noinline void force_sig_info_fault(int si_signo, int si_code,
-+	unsigned long address, struct task_struct *tsk)
++#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
++void xen_l3_entry_update(pud_t *ptr, pud_t val)
 +{
-+	siginfo_t info;
-+
-+	info.si_signo = si_signo;
-+	info.si_errno = 0;
-+	info.si_code = si_code;
-+	info.si_addr = (void __user *)address;
-+	force_sig_info(si_signo, &info, tsk);
++	mmu_update_t u;
++	u.ptr = virt_to_machine(ptr);
++	u.val = __pud_val(val);
++	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 +}
++#endif
 +
-+fastcall void do_invalid_op(struct pt_regs *, unsigned long);
-+
-+#ifdef CONFIG_X86_PAE
-+static void dump_fault_path(unsigned long address)
++#ifdef CONFIG_X86_64
++void xen_l4_entry_update(pgd_t *ptr, pgd_t val)
 +{
-+	unsigned long *p, page;
-+	unsigned long mfn; 
-+
-+	page = read_cr3();
-+	p  = (unsigned long *)__va(page);
-+	p += (address >> 30) * 2;
-+	printk(KERN_ALERT "%08lx -> *pde = %08lx:%08lx\n", page, p[1], p[0]);
-+	if (p[0] & 1) {
-+		mfn  = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
-+		page = mfn_to_pfn(mfn) << PAGE_SHIFT; 
-+		p  = (unsigned long *)__va(page);
-+		address &= 0x3fffffff;
-+		p += (address >> 21) * 2;
-+		printk(KERN_ALERT "%08lx -> *pme = %08lx:%08lx\n", 
-+		       page, p[1], p[0]);
-+		mfn  = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
-+#ifdef CONFIG_HIGHPTE
-+		if (mfn_to_pfn(mfn) >= highstart_pfn)
-+			return;
-+#endif
-+		if (p[0] & 1) {
-+			page = mfn_to_pfn(mfn) << PAGE_SHIFT; 
-+			p  = (unsigned long *) __va(page);
-+			address &= 0x001fffff;
-+			p += (address >> 12) * 2;
-+			printk(KERN_ALERT "%08lx -> *pte = %08lx:%08lx\n",
-+			       page, p[1], p[0]);
-+		}
-+	}
++	mmu_update_t u;
++	u.ptr = virt_to_machine(ptr);
++	u.val = __pgd_val(val);
++	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 +}
-+#else
-+static void dump_fault_path(unsigned long address)
-+{
-+	unsigned long page;
++#endif /* CONFIG_X86_64 */
 +
-+	page = read_cr3();
-+	page = ((unsigned long *) __va(page))[address >> 22];
-+	if (oops_may_print())
-+		printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
-+		       machine_to_phys(page));
-+	/*
-+	 * We must not directly access the pte in the highpte
-+	 * case if the page table is located in highmem.
-+	 * And lets rather not kmap-atomic the pte, just in case
-+	 * it's allocated already.
-+	 */
-+#ifdef CONFIG_HIGHPTE
-+	if ((page >> PAGE_SHIFT) >= highstart_pfn)
-+		return;
-+#endif
-+	if ((page & 1) && oops_may_print()) {
-+		page &= PAGE_MASK;
-+		address &= 0x003ff000;
-+		page = machine_to_phys(page);
-+		page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
-+		printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
-+		       machine_to_phys(page));
-+	}
++void xen_pt_switch(unsigned long ptr)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_NEW_BASEPTR;
++	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 +}
-+#endif
 +
-+static int spurious_fault(struct pt_regs *regs,
-+			  unsigned long address,
-+			  unsigned long error_code)
++void xen_new_user_pt(unsigned long ptr)
 +{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
-+
-+	/* Reserved-bit violation or user access to kernel space? */
-+	if (error_code & 0x0c)
-+		return 0;
-+
-+	pgd = init_mm.pgd + pgd_index(address);
-+	if (!pgd_present(*pgd))
-+		return 0;
++	struct mmuext_op op;
++	op.cmd = MMUEXT_NEW_USER_BASEPTR;
++	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
 +
-+	pud = pud_offset(pgd, address);
-+	if (!pud_present(*pud))
-+		return 0;
++void xen_tlb_flush(void)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++EXPORT_SYMBOL(xen_tlb_flush);
 +
-+	pmd = pmd_offset(pud, address);
-+	if (!pmd_present(*pmd))
-+		return 0;
++void xen_invlpg(unsigned long ptr)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_INVLPG_LOCAL;
++	op.arg1.linear_addr = ptr & PAGE_MASK;
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++EXPORT_SYMBOL(xen_invlpg);
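
/*
 * Illustrative contrast, not part of the patch: natively a single-page TLB
 * flush is one privileged instruction. The PV kernel does not run in ring 0,
 * so xen_invlpg() above requests the flush via MMUEXT_INVLPG_LOCAL instead.
 */
static inline void sketch_native_invlpg(unsigned long addr)
{
	__asm__ __volatile__("invlpg (%0)" : : "r" (addr) : "memory");
}
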
 +
-+	pte = pte_offset_kernel(pmd, address);
-+	if (!pte_present(*pte))
-+		return 0;
-+	if ((error_code & 0x02) && !pte_write(*pte))
-+		return 0;
-+#ifdef CONFIG_X86_PAE
-+	if ((error_code & 0x10) && (pte_val(*pte) & _PAGE_NX))
-+		return 0;
-+#endif
++#ifdef CONFIG_SMP
 +
-+	return 1;
++void xen_tlb_flush_all(void)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_TLB_FLUSH_ALL;
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 +}
 +
-+static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
++void xen_tlb_flush_mask(cpumask_t *mask)
 +{
-+	unsigned index = pgd_index(address);
-+	pgd_t *pgd_k;
-+	pud_t *pud, *pud_k;
-+	pmd_t *pmd, *pmd_k;
-+
-+	pgd += index;
-+	pgd_k = init_mm.pgd + index;
++	struct mmuext_op op;
++	if ( cpus_empty(*mask) )
++		return;
++	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
++	set_xen_guest_handle(op.arg2.vcpumask, mask->bits);
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
 +
-+	if (!pgd_present(*pgd_k))
-+		return NULL;
++void xen_invlpg_all(unsigned long ptr)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_INVLPG_ALL;
++	op.arg1.linear_addr = ptr & PAGE_MASK;
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
 +
-+	/*
-+	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
-+	 * and redundant with the set_pmd() on non-PAE. As would
-+	 * set_pud.
-+	 */
++void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
++{
++	struct mmuext_op op;
++	if ( cpus_empty(*mask) )
++		return;
++	op.cmd = MMUEXT_INVLPG_MULTI;
++	op.arg1.linear_addr = ptr & PAGE_MASK;
++	set_xen_guest_handle(op.arg2.vcpumask, mask->bits);
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
 +
-+	pud = pud_offset(pgd, address);
-+	pud_k = pud_offset(pgd_k, address);
-+	if (!pud_present(*pud_k))
-+		return NULL;
++#endif /* CONFIG_SMP */
 +
-+	pmd = pmd_offset(pud, address);
-+	pmd_k = pmd_offset(pud_k, address);
-+	if (!pmd_present(*pmd_k))
-+		return NULL;
-+	if (!pmd_present(*pmd))
-+#ifndef CONFIG_XEN
-+		set_pmd(pmd, *pmd_k);
++void xen_pgd_pin(unsigned long ptr)
++{
++	struct mmuext_op op;
++#ifdef CONFIG_X86_64
++	op.cmd = MMUEXT_PIN_L4_TABLE;
++#elif defined(CONFIG_X86_PAE)
++	op.cmd = MMUEXT_PIN_L3_TABLE;
 +#else
-+		/*
-+		 * When running on Xen we must launder *pmd_k through
-+		 * pmd_val() to ensure that _PAGE_PRESENT is correctly set.
-+		 */
-+		set_pmd(pmd, __pmd(pmd_val(*pmd_k)));
++	op.cmd = MMUEXT_PIN_L2_TABLE;
 +#endif
-+	else
-+		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
-+	return pmd_k;
++	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 +}
 +
-+/*
-+ * Handle a fault on the vmalloc or module mapping area
-+ *
-+ * This assumes no large pages in there.
-+ */
-+static inline int vmalloc_fault(unsigned long address)
++void xen_pgd_unpin(unsigned long ptr)
 +{
-+	unsigned long pgd_paddr;
-+	pmd_t *pmd_k;
-+	pte_t *pte_k;
-+	/*
-+	 * Synchronize this task's top level page-table
-+	 * with the 'reference' page table.
-+	 *
-+	 * Do _not_ use "current" here. We might be inside
-+	 * an interrupt in the middle of a task switch..
-+	 */
-+	pgd_paddr = read_cr3();
-+	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
-+	if (!pmd_k)
-+		return -1;
-+	pte_k = pte_offset_kernel(pmd_k, address);
-+	if (!pte_present(*pte_k))
-+		return -1;
-+	return 0;
++	struct mmuext_op op;
++	op.cmd = MMUEXT_UNPIN_TABLE;
++	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 +}
 +
-+/*
-+ * This routine handles page faults.  It determines the address,
-+ * and the problem, and then passes it off to one of the appropriate
-+ * routines.
-+ *
-+ * error_code:
-+ *	bit 0 == 0 means no page found, 1 means protection fault
-+ *	bit 1 == 0 means read, 1 means write
-+ *	bit 2 == 0 means kernel, 1 means user-mode
-+ *	bit 3 == 1 means use of reserved bit detected
-+ *	bit 4 == 1 means fault was an instruction fetch
-+ */
-+fastcall void __kprobes do_page_fault(struct pt_regs *regs,
-+				      unsigned long error_code)
++void xen_set_ldt(const void *ptr, unsigned int ents)
 +{
-+	struct task_struct *tsk;
-+	struct mm_struct *mm;
-+	struct vm_area_struct * vma;
-+	unsigned long address;
-+	int write, si_code;
-+
-+	/* get the address */
-+        address = read_cr2();
++	struct mmuext_op op;
++	op.cmd = MMUEXT_SET_LDT;
++	op.arg1.linear_addr = (unsigned long)ptr;
++	op.arg2.nr_ents     = ents;
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
 +
-+	/* Set the "privileged fault" bit to something sane. */
-+	error_code &= ~4;
-+	error_code |= (regs->xcs & 2) << 1;
-+	if (regs->eflags & X86_EFLAGS_VM)
-+		error_code |= 4;
++/*
++ * Bitmap is indexed by page number. If bit is set, the page is part of a
++ * xen_create_contiguous_region() area of memory.
++ */
++unsigned long *contiguous_bitmap;
 +
-+	tsk = current;
++static void contiguous_bitmap_set(
++	unsigned long first_page, unsigned long nr_pages)
++{
++	unsigned long start_off, end_off, curr_idx, end_idx;
 +
-+	si_code = SEGV_MAPERR;
++	curr_idx  = first_page / BITS_PER_LONG;
++	start_off = first_page & (BITS_PER_LONG-1);
++	end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
++	end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);
 +
-+	/*
-+	 * We fault-in kernel-space virtual memory on-demand. The
-+	 * 'reference' page table is init_mm.pgd.
-+	 *
-+	 * NOTE! We MUST NOT take any locks for this case. We may
-+	 * be in an interrupt or a critical region, and should
-+	 * only copy the information from the master page table,
-+	 * nothing more.
-+	 *
-+	 * This verifies that the fault happens in kernel space
-+	 * (error_code & 4) == 0, and that the fault was not a
-+	 * protection error (error_code & 9) == 0.
-+	 */
-+	if (unlikely(address >= TASK_SIZE)) {
-+#ifdef CONFIG_XEN
-+		/* Faults in hypervisor area can never be patched up. */
-+		if (address >= hypervisor_virt_start)
-+			goto bad_area_nosemaphore;
-+#endif
-+		if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
-+			return;
-+		/* Can take a spurious fault if mapping changes R/O -> R/W. */
-+		if (spurious_fault(regs, address, error_code))
-+			return;
-+		if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
-+						SIGSEGV) == NOTIFY_STOP)
-+			return;
-+		/* 
-+		 * Don't take the mm semaphore here. If we fixup a prefetch
-+		 * fault we could otherwise deadlock.
-+		 */
-+		goto bad_area_nosemaphore;
++	if (curr_idx == end_idx) {
++		contiguous_bitmap[curr_idx] |=
++			((1UL<<end_off)-1) & -(1UL<<start_off);
++	} else {
++		contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
++		while ( ++curr_idx < end_idx )
++			contiguous_bitmap[curr_idx] = ~0UL;
++		contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
 +	}
++}
 +
-+	if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
-+					SIGSEGV) == NOTIFY_STOP)
-+		return;
++static void contiguous_bitmap_clear(
++	unsigned long first_page, unsigned long nr_pages)
++{
++	unsigned long start_off, end_off, curr_idx, end_idx;
 +
-+	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
-+	   fault has been handled. */
-+	if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
-+		local_irq_enable();
++	curr_idx  = first_page / BITS_PER_LONG;
++	start_off = first_page & (BITS_PER_LONG-1);
++	end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
++	end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);
 +
-+	mm = tsk->mm;
++	if (curr_idx == end_idx) {
++		contiguous_bitmap[curr_idx] &=
++			-(1UL<<end_off) | ((1UL<<start_off)-1);
++	} else {
++		contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
++		while ( ++curr_idx != end_idx )
++			contiguous_bitmap[curr_idx] = 0;
++		contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
++	}
++}
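
/*
 * Worked example for the mask arithmetic above (sketch, 64-bit longs):
 * contiguous_bitmap_set(first_page = 3, nr_pages = 4) computes
 *   curr_idx = 0, start_off = 3, end_idx = 0, end_off = 7,
 * so both offsets land in one word and the single-word branch runs:
 *   -(1UL << 3)      = ...11111000  (bits 3 and up)
 *   (1UL << 7) - 1   = ...01111111  (bits 0..6)
 *   ANDed together   = ...01111000  (bits 3..6, i.e. pages 3,4,5,6)
 * contiguous_bitmap_clear() applies the complementary masks to zero the
 * same range.
 */
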
++
++/* Protected by balloon_lock. */
++#define MAX_CONTIG_ORDER 9 /* 2MB */
++static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
++static unsigned long limited_frames[1<<MAX_CONTIG_ORDER];
++static multicall_entry_t cr_mcl[1<<MAX_CONTIG_ORDER];
++
++/* Ensure multi-page extents are contiguous in machine memory. */
++int xen_create_contiguous_region(
++	unsigned long vstart, unsigned int order, unsigned int address_bits)
++{
++	unsigned long *in_frames = discontig_frames, out_frame;
++	unsigned long  frame, flags;
++	unsigned int   i;
++	int            rc, success;
++	struct xen_memory_exchange exchange = {
++		.in = {
++			.nr_extents   = 1UL << order,
++			.extent_order = 0,
++			.domid        = DOMID_SELF
++		},
++		.out = {
++			.nr_extents   = 1,
++			.extent_order = order,
++			.address_bits = address_bits,
++			.domid        = DOMID_SELF
++		}
++	};
 +
 +	/*
-+	 * If we're in an interrupt, have no user context or are running in an
-+	 * atomic region then we must not take the fault..
++	 * Currently an auto-translated guest will not perform I/O, nor will
++	 * it require PAE page directories below 4GB. Therefore any calls to
++	 * this function are redundant and can be ignored.
 +	 */
-+	if (in_atomic() || !mm)
-+		goto bad_area_nosemaphore;
++	if (xen_feature(XENFEAT_auto_translated_physmap))
++		return 0;
 +
-+	/* When running in the kernel we expect faults to occur only to
-+	 * addresses in user space.  All other faults represent errors in the
-+	 * kernel and should generate an OOPS.  Unfortunatly, in the case of an
-+	 * erroneous fault occurring in a code path which already holds mmap_sem
-+	 * we will deadlock attempting to validate the fault against the
-+	 * address space.  Luckily the kernel only validly references user
-+	 * space from well defined areas of code, which are listed in the
-+	 * exceptions table.
-+	 *
-+	 * As the vast majority of faults will be valid we will only perform
-+	 * the source reference check when there is a possibilty of a deadlock.
-+	 * Attempt to lock the address space, if we cannot we then validate the
-+	 * source.  If this is invalid we can skip the address space check,
-+	 * thus avoiding the deadlock.
-+	 */
-+	if (!down_read_trylock(&mm->mmap_sem)) {
-+		if ((error_code & 4) == 0 &&
-+		    !search_exception_tables(regs->eip))
-+			goto bad_area_nosemaphore;
-+		down_read(&mm->mmap_sem);
-+	}
++	if (unlikely(order > MAX_CONTIG_ORDER))
++		return -ENOMEM;
 +
-+	vma = find_vma(mm, address);
-+	if (!vma)
-+		goto bad_area;
-+	if (vma->vm_start <= address)
-+		goto good_area;
-+	if (!(vma->vm_flags & VM_GROWSDOWN))
-+		goto bad_area;
-+	if (error_code & 4) {
-+		/*
-+		 * Accessing the stack below %esp is always a bug.
-+		 * The large cushion allows instructions like enter
-+		 * and pusha to work.  ("enter $65535,$31" pushes
-+		 * 32 pointers and then decrements %esp by 65535.)
-+		 */
-+		if (address + 65536 + 32 * sizeof(unsigned long) < regs->esp)
-+			goto bad_area;
-+	}
-+	if (expand_stack(vma, address))
-+		goto bad_area;
-+/*
-+ * Ok, we have a good vm_area for this memory access, so
-+ * we can handle it..
-+ */
-+good_area:
-+	si_code = SEGV_ACCERR;
-+	write = 0;
-+	switch (error_code & 3) {
-+		default:	/* 3: write, present */
-+#ifdef TEST_VERIFY_AREA
-+			if (regs->cs == GET_KERNEL_CS())
-+				printk("WP fault at %08lx\n", regs->eip);
-+#endif
-+			/* fall through */
-+		case 2:		/* write, not present */
-+			if (!(vma->vm_flags & VM_WRITE))
-+				goto bad_area;
-+			write++;
-+			break;
-+		case 1:		/* read, present */
-+			goto bad_area;
-+		case 0:		/* read, not present */
-+			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
-+				goto bad_area;
++	set_xen_guest_handle(exchange.in.extent_start, in_frames);
++	set_xen_guest_handle(exchange.out.extent_start, &out_frame);
++
++	scrub_pages((void *)vstart, 1 << order);
++
++	balloon_lock(flags);
++
++	/* 1. Zap current PTEs, remembering MFNs. */
++	for (i = 0; i < (1U<<order); i++) {
++		in_frames[i] = pfn_to_mfn((__pa(vstart) >> PAGE_SHIFT) + i);
++		MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++					__pte_ma(0), 0);
++		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
++			INVALID_P2M_ENTRY);
 +	}
++	if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
++		BUG();
 +
-+ survive:
-+	/*
-+	 * If for any reason at all we couldn't handle the fault,
-+	 * make sure we exit gracefully rather than endlessly redo
-+	 * the fault.
-+	 */
-+	switch (handle_mm_fault(mm, vma, address, write)) {
-+		case VM_FAULT_MINOR:
-+			tsk->min_flt++;
-+			break;
-+		case VM_FAULT_MAJOR:
-+			tsk->maj_flt++;
-+			break;
-+		case VM_FAULT_SIGBUS:
-+			goto do_sigbus;
-+		case VM_FAULT_OOM:
-+			goto out_of_memory;
-+		default:
++	/* 2. Get a new contiguous memory extent. */
++	out_frame = __pa(vstart) >> PAGE_SHIFT;
++	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
++	success = (exchange.nr_exchanged == (1UL << order));
++	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
++	BUG_ON(success && (rc != 0));
++#if CONFIG_XEN_COMPAT <= 0x030002
++	if (unlikely(rc == -ENOSYS)) {
++		/* Compatibility when XENMEM_exchange is unsupported. */
++		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++					 &exchange.in) != (1UL << order))
 +			BUG();
++		success = (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++						&exchange.out) == 1);
++		if (!success) {
++			/* Couldn't get special memory: fall back to normal. */
++			for (i = 0; i < (1U<<order); i++)
++				in_frames[i] = (__pa(vstart)>>PAGE_SHIFT) + i;
++			if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++						 &exchange.in) != (1UL<<order))
++				BUG();
++		}
 +	}
++#endif
 +
-+	/*
-+	 * Did it hit the DOS screen memory VA from vm86 mode?
-+	 */
-+	if (regs->eflags & VM_MASK) {
-+		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
-+		if (bit < 32)
-+			tsk->thread.screen_bitmap |= 1 << bit;
++	/* 3. Map the new extent in place of old pages. */
++	for (i = 0; i < (1U<<order); i++) {
++		frame = success ? (out_frame + i) : in_frames[i];
++		MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++					pfn_pte_ma(frame, PAGE_KERNEL), 0);
++		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
 +	}
-+	up_read(&mm->mmap_sem);
-+	return;
 +
-+/*
-+ * Something tried to access memory that isn't in our memory map..
-+ * Fix it, but check if it's kernel or user first..
-+ */
-+bad_area:
-+	up_read(&mm->mmap_sem);
++	cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
++						   ? UVMF_TLB_FLUSH|UVMF_ALL
++						   : UVMF_INVLPG|UVMF_ALL;
++	if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
++		BUG();
 +
-+bad_area_nosemaphore:
-+	/* User mode accesses just cause a SIGSEGV */
-+	if (error_code & 4) {
-+		/* 
-+		 * Valid to do another page fault here because this one came 
-+		 * from user space.
-+		 */
-+		if (is_prefetch(regs, address, error_code))
-+			return;
++	if (success)
++		contiguous_bitmap_set(__pa(vstart) >> PAGE_SHIFT,
++				      1UL << order);
 +
-+		tsk->thread.cr2 = address;
-+		/* Kernel addresses are always protection faults */
-+		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
-+		tsk->thread.trap_no = 14;
-+		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
-+		return;
-+	}
++	balloon_unlock(flags);
 +
-+#ifdef CONFIG_X86_F00F_BUG
-+	/*
-+	 * Pentium F0 0F C7 C8 bug workaround.
-+	 */
-+	if (boot_cpu_data.f00f_bug) {
-+		unsigned long nr;
-+		
-+		nr = (address - idt_descr.address) >> 3;
++	return success ? 0 : -ENOMEM;
++}
++EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
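
/*
 * Usage sketch, not from the patch: a caller that needs machine-contiguous
 * memory below 4GB (e.g. for DMA) could wrap the routine above like this.
 * The order and address_bits values are illustrative.
 */
static void *sketch_alloc_dma_region(unsigned int order)
{
	unsigned long vstart = __get_free_pages(GFP_KERNEL, order);

	if (!vstart)
		return NULL;
	if (xen_create_contiguous_region(vstart, order, 32)) {
		free_pages(vstart, order);
		return NULL;
	}
	/* Release later with xen_destroy_contiguous_region() + free_pages(). */
	return (void *)vstart;
}
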
 +
-+		if (nr == 6) {
-+			do_invalid_op(regs, 0);
-+			return;
++void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
++{
++	unsigned long *out_frames = discontig_frames, in_frame;
++	unsigned long  frame, flags;
++	unsigned int   i;
++	int            rc, success;
++	struct xen_memory_exchange exchange = {
++		.in = {
++			.nr_extents   = 1,
++			.extent_order = order,
++			.domid        = DOMID_SELF
++		},
++		.out = {
++			.nr_extents   = 1UL << order,
++			.extent_order = 0,
++			.domid        = DOMID_SELF
 +		}
-+	}
-+#endif
++	};
 +
-+no_context:
-+	/* Are we prepared to handle this kernel fault?  */
-+	if (fixup_exception(regs))
++	if (xen_feature(XENFEAT_auto_translated_physmap) ||
++	    !test_bit(__pa(vstart) >> PAGE_SHIFT, contiguous_bitmap))
 +		return;
 +
-+	/* 
-+	 * Valid to do another page fault here, because if this fault
-+	 * had been triggered by is_prefetch fixup_exception would have 
-+	 * handled it.
-+	 */
-+ 	if (is_prefetch(regs, address, error_code))
-+ 		return;
++	if (unlikely(order > MAX_CONTIG_ORDER))
++		return;
 +
-+/*
-+ * Oops. The kernel tried to access some bad page. We'll have to
-+ * terminate things with extreme prejudice.
-+ */
++	set_xen_guest_handle(exchange.in.extent_start, &in_frame);
++	set_xen_guest_handle(exchange.out.extent_start, out_frames);
 +
-+	bust_spinlocks(1);
++	scrub_pages((void *)vstart, 1 << order);
 +
-+	if (oops_may_print()) {
-+	#ifdef CONFIG_X86_PAE
-+		if (error_code & 16) {
-+			pte_t *pte = lookup_address(address);
++	balloon_lock(flags);
 +
-+			if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
-+				printk(KERN_CRIT "kernel tried to execute "
-+					"NX-protected page - exploit attempt? "
-+					"(uid: %d)\n", current->uid);
-+		}
-+	#endif
-+		if (address < PAGE_SIZE)
-+			printk(KERN_ALERT "BUG: unable to handle kernel NULL "
-+					"pointer dereference");
-+		else
-+			printk(KERN_ALERT "BUG: unable to handle kernel paging"
-+					" request");
-+		printk(" at virtual address %08lx\n",address);
-+		printk(KERN_ALERT " printing eip:\n");
-+		printk("%08lx\n", regs->eip);
-+	}
-+	dump_fault_path(address);
-+	tsk->thread.cr2 = address;
-+	tsk->thread.trap_no = 14;
-+	tsk->thread.error_code = error_code;
-+	die("Oops", regs, error_code);
-+	bust_spinlocks(0);
-+	do_exit(SIGKILL);
++	contiguous_bitmap_clear(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
 +
-+/*
-+ * We ran out of memory, or some other thing happened to us that made
-+ * us unable to handle the page fault gracefully.
-+ */
-+out_of_memory:
-+	up_read(&mm->mmap_sem);
-+	if (tsk->pid == 1) {
-+		yield();
-+		down_read(&mm->mmap_sem);
-+		goto survive;
++	/* 1. Find start MFN of contiguous extent. */
++	in_frame = pfn_to_mfn(__pa(vstart) >> PAGE_SHIFT);
++
++	/* 2. Zap current PTEs. */
++	for (i = 0; i < (1U<<order); i++) {
++		MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++					__pte_ma(0), 0);
++		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
++			INVALID_P2M_ENTRY);
++		out_frames[i] = (__pa(vstart) >> PAGE_SHIFT) + i;
 +	}
-+	printk("VM: killing process %s\n", tsk->comm);
-+	if (error_code & 4)
-+		do_exit(SIGKILL);
-+	goto no_context;
++	if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
++		BUG();
 +
-+do_sigbus:
-+	up_read(&mm->mmap_sem);
++	/* 3. Do the exchange for non-contiguous MFNs. */
++	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
++	success = (exchange.nr_exchanged == 1);
++	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
++	BUG_ON(success && (rc != 0));
++#if CONFIG_XEN_COMPAT <= 0x030002
++	if (unlikely(rc == -ENOSYS)) {
++		/* Compatibility when XENMEM_exchange is unsupported. */
++		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++					 &exchange.in) != 1)
++			BUG();
++		if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++					 &exchange.out) != (1UL << order))
++			BUG();
++		success = 1;
++	}
++#endif
 +
-+	/* Kernel mode? Handle exceptions or die */
-+	if (!(error_code & 4))
-+		goto no_context;
++	/* 4. Map new pages in place of old pages. */
++	for (i = 0; i < (1U<<order); i++) {
++		frame = success ? out_frames[i] : (in_frame + i);
++		MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++					pfn_pte_ma(frame, PAGE_KERNEL), 0);
++		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
++	}
 +
-+	/* User space => ok to do another page fault */
-+	if (is_prefetch(regs, address, error_code))
-+		return;
++	cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
++						   ? UVMF_TLB_FLUSH|UVMF_ALL
++						   : UVMF_INVLPG|UVMF_ALL;
++	if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
++		BUG();
 +
-+	tsk->thread.cr2 = address;
-+	tsk->thread.error_code = error_code;
-+	tsk->thread.trap_no = 14;
-+	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
++	balloon_unlock(flags);
 +}
++EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
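
For illustration only (this sketch is not part of the patch): a driver that needs a machine-contiguous, 32-bit-addressable buffer could pair the two helpers above roughly as follows. The names alloc_dma_buffer/free_dma_buffer are invented for the example, and the prototypes are assumed to come from <asm/hypervisor.h> as in this tree.

    /* Hypothetical caller of the helpers above. */
    #include <linux/gfp.h>
    #include <asm/hypervisor.h>

    static void *alloc_dma_buffer(unsigned int order)
    {
            unsigned long vstart = __get_free_pages(GFP_KERNEL, order);

            if (!vstart)
                    return NULL;
            /* Exchange the backing MFNs for one contiguous extent below 4GB. */
            if (xen_create_contiguous_region(vstart, order, 32)) {
                    free_pages(vstart, order);
                    return NULL;
            }
            return (void *)vstart;
    }

    static void free_dma_buffer(void *buf, unsigned int order)
    {
            /* Give the contiguous extent back before freeing the pages. */
            xen_destroy_contiguous_region((unsigned long)buf, order);
            free_pages((unsigned long)buf, order);
    }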
 +
-+#if !HAVE_SHARED_KERNEL_PMD
-+void vmalloc_sync_all(void)
++int xen_limit_pages_to_max_mfn(
++	struct page *pages, unsigned int order, unsigned int address_bits)
 +{
-+	/*
-+	 * Note that races in the updates of insync and start aren't
-+	 * problematic: insync can only get set bits added, and updates to
-+	 * start are only improving performance (without affecting correctness
-+	 * if undone).
-+	 * XEN: To work on PAE, we need to iterate over PMDs rather than PGDs.
-+	 *      This change works just fine with 2-level paging too.
-+	 */
-+#define sync_index(a) ((a) >> PMD_SHIFT)
-+	static DECLARE_BITMAP(insync, PTRS_PER_PGD*PTRS_PER_PMD);
-+	static unsigned long start = TASK_SIZE;
-+	unsigned long address;
-+
-+	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
-+	for (address = start;
-+	     address >= TASK_SIZE && address < hypervisor_virt_start;
-+	     address += 1UL << PMD_SHIFT) {
-+		if (!test_bit(sync_index(address), insync)) {
-+			unsigned long flags;
-+			struct page *page;
++	unsigned long flags, frame;
++	unsigned long *in_frames = discontig_frames, *out_frames = limited_frames;
++	struct page *page;
++	unsigned int i, n, nr_mcl;
++	int rc, success;
++	DECLARE_BITMAP(limit_map, 1 << MAX_CONTIG_ORDER);
 +
-+			spin_lock_irqsave(&pgd_lock, flags);
-+			/*
-+			 * XEN: vmalloc_sync_one() failure path logic assumes
-+			 * pgd_list is non-empty.
-+			 */
-+			if (unlikely(!pgd_list)) {
-+				spin_unlock_irqrestore(&pgd_lock, flags);
-+				return;
-+			}
-+			for (page = pgd_list; page; page =
-+					(struct page *)page->index)
-+				if (!vmalloc_sync_one(page_address(page),
-+								address)) {
-+					BUG_ON(page != pgd_list);
-+					break;
-+				}
-+			spin_unlock_irqrestore(&pgd_lock, flags);
-+			if (!page)
-+				set_bit(sync_index(address), insync);
++	struct xen_memory_exchange exchange = {
++		.in = {
++			.extent_order = 0,
++			.domid        = DOMID_SELF
++		},
++		.out = {
++			.extent_order = 0,
++			.address_bits = address_bits,
++			.domid        = DOMID_SELF
 +		}
-+		if (address == start && test_bit(sync_index(address), insync))
-+			start = address + (1UL << PMD_SHIFT);
-+	}
-+}
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/mm/highmem-xen.c tmp-linux-2.6-xen.patch/arch/i386/mm/highmem-xen.c
---- pristine-linux-2.6.18.2/arch/i386/mm/highmem-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/mm/highmem-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,136 @@
-+#include <linux/highmem.h>
-+#include <linux/module.h>
++	};
 +
-+void *kmap(struct page *page)
-+{
-+	might_sleep();
-+	if (!PageHighMem(page))
-+		return page_address(page);
-+	return kmap_high(page);
-+}
++	if (xen_feature(XENFEAT_auto_translated_physmap))
++		return 0;
 +
-+void kunmap(struct page *page)
-+{
-+	if (in_interrupt())
-+		BUG();
-+	if (!PageHighMem(page))
-+		return;
-+	kunmap_high(page);
-+}
++	if (unlikely(order > MAX_CONTIG_ORDER))
++		return -ENOMEM;
 +
-+/*
-+ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
-+ * no global lock is needed and because the kmap code must perform a global TLB
-+ * invalidation when the kmap pool wraps.
-+ *
-+ * However when holding an atomic kmap is is not legal to sleep, so atomic
-+ * kmaps are appropriate for short, tight code paths only.
-+ */
-+static void *__kmap_atomic(struct page *page, enum km_type type, pgprot_t prot)
-+{
-+	enum fixed_addresses idx;
-+	unsigned long vaddr;
++	bitmap_zero(limit_map, 1U << order);
++	set_xen_guest_handle(exchange.in.extent_start, in_frames);
++	set_xen_guest_handle(exchange.out.extent_start, out_frames);
 +
-+	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-+	inc_preempt_count();
-+	if (!PageHighMem(page))
-+		return page_address(page);
++	/* 0. Scrub the pages. */
++	for (i = 0, n = 0; i < 1U<<order; i++) {
++		page = &pages[i];
++		if (!(pfn_to_mfn(page_to_pfn(page)) >> (address_bits - PAGE_SHIFT)))
++			continue;
++		__set_bit(i, limit_map);
 +
-+	idx = type + KM_TYPE_NR*smp_processor_id();
-+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-+#ifdef CONFIG_DEBUG_HIGHMEM
-+	if (!pte_none(*(kmap_pte-idx)))
-+		BUG();
++		if (!PageHighMem(page))
++			scrub_pages(page_address(page), 1);
++#ifdef CONFIG_XEN_SCRUB_PAGES
++		else {
++			scrub_pages(kmap(page), 1);
++			kunmap(page);
++			++n;
++		}
 +#endif
-+	set_pte_at_sync(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
++	}
++	if (bitmap_empty(limit_map, 1U << order))
++		return 0;
 +
-+	return (void*) vaddr;
-+}
++	if (n)
++		kmap_flush_unused();
 +
-+void *kmap_atomic(struct page *page, enum km_type type)
-+{
-+	return __kmap_atomic(page, type, kmap_prot);
-+}
++	balloon_lock(flags);
 +
-+/* Same as kmap_atomic but with PAGE_KERNEL_RO page protection. */
-+void *kmap_atomic_pte(struct page *page, enum km_type type)
-+{
-+	return __kmap_atomic(page, type,
-+	                     test_bit(PG_pinned, &page->flags)
-+	                     ? PAGE_KERNEL_RO : kmap_prot);
-+}
++	/* 1. Zap current PTEs (if any), remembering MFNs. */
++	for (i = 0, n = 0, nr_mcl = 0; i < (1U<<order); i++) {
++		if (!test_bit(i, limit_map))
++			continue;
++		page = &pages[i];
 +
-+void kunmap_atomic(void *kvaddr, enum km_type type)
-+{
-+#if defined(CONFIG_DEBUG_HIGHMEM) || defined(CONFIG_XEN)
-+	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-+	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
++		out_frames[n] = page_to_pfn(page);
++		in_frames[n] = pfn_to_mfn(out_frames[n]);
 +
-+	if (vaddr < FIXADDR_START) { // FIXME
-+		dec_preempt_count();
-+		preempt_check_resched();
-+		return;
++		if (!PageHighMem(page))
++			MULTI_update_va_mapping(cr_mcl + nr_mcl++,
++						(unsigned long)page_address(page),
++						__pte_ma(0), 0);
++
++		set_phys_to_machine(out_frames[n], INVALID_P2M_ENTRY);
++		++n;
 +	}
-+#endif
++	if (nr_mcl && HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
++		BUG();
 +
-+#if defined(CONFIG_DEBUG_HIGHMEM)
-+	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
-+		BUG();
-+
-+	/*
-+	 * force other mappings to Oops if they'll try to access
-+	 * this pte without first remap it
-+	 */
-+	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-+	__flush_tlb_one(vaddr);
-+#elif defined(CONFIG_XEN)
-+	/*
-+	 * We must ensure there are no dangling pagetable references when
-+	 * returning memory to Xen (decrease_reservation).
-+	 * XXX TODO: We could make this faster by only zapping when
-+	 * kmap_flush_unused is called but that is trickier and more invasive.
-+	 */
-+	pte_clear(&init_mm, vaddr, kmap_pte-idx);
++	/* 2. Get new memory below the required limit. */
++	exchange.in.nr_extents = n;
++	exchange.out.nr_extents = n;
++	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
++	success = (exchange.nr_exchanged == n);
++	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
++	BUG_ON(success && (rc != 0));
++#if CONFIG_XEN_COMPAT <= 0x030002
++	if (unlikely(rc == -ENOSYS)) {
++		/* Compatibility when XENMEM_exchange is unsupported. */
++		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++					 &exchange.in) != n)
++			BUG();
++		if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++					 &exchange.out) != n)
++			BUG();
++		success = 1;
++	}
 +#endif
 +
-+	dec_preempt_count();
-+	preempt_check_resched();
-+}
++	/* 3. Map the new pages in place of old pages. */
++	for (i = 0, n = 0, nr_mcl = 0; i < (1U<<order); i++) {
++		if (!test_bit(i, limit_map))
++			continue;
++		page = &pages[i];
 +
-+/* This is the same as kmap_atomic() but can map memory that doesn't
-+ * have a struct page associated with it.
-+ */
-+void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
-+{
-+	enum fixed_addresses idx;
-+	unsigned long vaddr;
++		frame = success ? out_frames[n] : in_frames[n];
 +
-+	inc_preempt_count();
++		if (!PageHighMem(page))
++			MULTI_update_va_mapping(cr_mcl + nr_mcl++,
++						(unsigned long)page_address(page),
++						pfn_pte_ma(frame, PAGE_KERNEL), 0);
 +
-+	idx = type + KM_TYPE_NR*smp_processor_id();
-+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-+	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
-+	__flush_tlb_one(vaddr);
++		set_phys_to_machine(page_to_pfn(page), frame);
++		++n;
++	}
++	if (nr_mcl) {
++		cr_mcl[nr_mcl - 1].args[MULTI_UVMFLAGS_INDEX] = order
++							        ? UVMF_TLB_FLUSH|UVMF_ALL
++							        : UVMF_INVLPG|UVMF_ALL;
++		if (HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
++			BUG();
++	}
 +
-+	return (void*) vaddr;
++	balloon_unlock(flags);
++
++	return success ? 0 : -ENOMEM;
 +}
++EXPORT_SYMBOL_GPL(xen_limit_pages_to_max_mfn);
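
Also for illustration (not in the patch): unlike the region helpers, xen_limit_pages_to_max_mfn() does not make the pages contiguous; it only re-exchanges those whose current MFN lies at or above 1 << (address_bits - PAGE_SHIFT). A hypothetical caller:

    /* Hypothetical: keep a 2^order allocation below the 4GB machine
     * boundary before handing it to a 32-bit-capable device. */
    struct page *pg = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, order);

    if (pg && xen_limit_pages_to_max_mfn(pg, order, 32)) {
            __free_pages(pg, order);        /* could not satisfy the limit */
            pg = NULL;
    }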
 +
-+struct page *kmap_atomic_to_page(void *ptr)
++#ifdef __i386__
++int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
 +{
-+	unsigned long idx, vaddr = (unsigned long)ptr;
-+	pte_t *pte;
++	__u32 *lp = (__u32 *)((char *)ldt + entry * 8);
++	maddr_t mach_lp = arbitrary_virt_to_machine(lp);
++	return HYPERVISOR_update_descriptor(
++		mach_lp, (u64)entry_a | ((u64)entry_b<<32));
++}
++#endif
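
A worked note on the arithmetic above: LDT descriptors are 8 bytes, so entry 3 sits at byte offset 3 * 8 = 24 from the LDT base, and the two 32-bit halves are combined into the single 64-bit value entry_a | (entry_b << 32). The update has to go through HYPERVISOR_update_descriptor() on the machine address of that slot, because a paravirtualized guest's descriptor tables are kept read-only by Xen.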
 +
-+	if (vaddr < FIXADDR_START)
-+		return virt_to_page(ptr);
++#define MAX_BATCHED_FULL_PTES 32
 +
-+	idx = virt_to_fix(vaddr);
-+	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
-+	return pte_page(*pte);
-+}
++int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd,
++			 unsigned long addr, unsigned long end, pgprot_t newprot)
++{
++	int rc = 0, i = 0;
++	mmu_update_t u[MAX_BATCHED_FULL_PTES];
++	pte_t *pte;
++	spinlock_t *ptl;
 +
-+EXPORT_SYMBOL(kmap);
-+EXPORT_SYMBOL(kunmap);
-+EXPORT_SYMBOL(kmap_atomic);
-+EXPORT_SYMBOL(kmap_atomic_pte);
-+EXPORT_SYMBOL(kunmap_atomic);
-+EXPORT_SYMBOL(kmap_atomic_to_page);
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/mm/hypervisor.c tmp-linux-2.6-xen.patch/arch/i386/mm/hypervisor.c
---- pristine-linux-2.6.18.2/arch/i386/mm/hypervisor.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/mm/hypervisor.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,543 @@
-+/******************************************************************************
-+ * mm/hypervisor.c
-+ * 
-+ * Update page tables via the hypervisor.
-+ * 
-+ * Copyright (c) 2002-2004, K A Fraser
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
++	if (!xen_feature(XENFEAT_mmu_pt_update_preserve_ad))
++		return 0;
++
++	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
++	do {
++		if (pte_present(*pte)) {
++			u[i].ptr = (__pmd_val(*pmd) & PHYSICAL_PAGE_MASK)
++				   | ((unsigned long)pte & ~PAGE_MASK)
++				   | MMU_PT_UPDATE_PRESERVE_AD;
++			u[i].val = __pte_val(pte_modify(*pte, newprot));
++			if (++i == MAX_BATCHED_FULL_PTES) {
++				if ((rc = HYPERVISOR_mmu_update(
++					&u[0], i, NULL, DOMID_SELF)) != 0)
++					break;
++				i = 0;
++			}
++		}
++	} while (pte++, addr += PAGE_SIZE, addr != end);
++	if (i)
++		rc = HYPERVISOR_mmu_update( &u[0], i, NULL, DOMID_SELF);
++	pte_unmap_unlock(pte - 1, ptl);
++	BUG_ON(rc && rc != -ENOSYS);
++	return !rc;
++}
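
A sketch of the intended call site (assumed for illustration; the corresponding mm/mprotect.c hunk is not shown in this excerpt). Because the function returns 0 when XENFEAT_mmu_pt_update_preserve_ad is absent, generic code can try the batched hypervisor path first and fall back to the ordinary per-PTE loop:

    /* Hypothetical caller inside the pmd walk of change_protection(): */
    if (!xen_change_pte_range(mm, pmd, addr, next, newprot))
            change_pte_range(mm, pmd, addr, next, newprot); /* generic path */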
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/mm/init-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/mm/init-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,847 @@
++/*
++ *  linux/arch/i386/mm/init.c
++ *
++ *  Copyright (C) 1995  Linus Torvalds
++ *
++ *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 + */
 +
++#include <linux/module.h>
++#include <linux/signal.h>
 +#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
 +#include <linux/mm.h>
-+#include <linux/vmalloc.h>
-+#include <asm/page.h>
++#include <linux/hugetlb.h>
++#include <linux/swap.h>
++#include <linux/smp.h>
++#include <linux/init.h>
++#include <linux/highmem.h>
++#include <linux/pagemap.h>
++#include <linux/poison.h>
++#include <linux/bootmem.h>
++#include <linux/slab.h>
++#include <linux/proc_fs.h>
++#include <linux/efi.h>
++#include <linux/memory_hotplug.h>
++#include <linux/initrd.h>
++#include <linux/cpumask.h>
++#include <linux/dma-mapping.h>
++#include <linux/scatterlist.h>
++
++#include <asm/processor.h>
++#include <asm/system.h>
++#include <asm/uaccess.h>
 +#include <asm/pgtable.h>
-+#include <asm/hypervisor.h>
-+#include <xen/balloon.h>
-+#include <xen/features.h>
-+#include <xen/interface/memory.h>
-+#include <linux/module.h>
-+#include <linux/percpu.h>
++#include <asm/dma.h>
++#include <asm/fixmap.h>
++#include <asm/e820.h>
++#include <asm/apic.h>
++#include <asm/tlb.h>
 +#include <asm/tlbflush.h>
-+#include <linux/highmem.h>
++#include <asm/sections.h>
++#include <asm/hypervisor.h>
++#include <asm/swiotlb.h>
 +
-+void xen_l1_entry_update(pte_t *ptr, pte_t val)
-+{
-+	mmu_update_t u;
-+	u.ptr = virt_to_machine(ptr);
-+	u.val = __pte_val(val);
-+	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
-+}
++extern unsigned long *contiguous_bitmap;
 +
-+void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
-+{
-+	mmu_update_t u;
-+	u.ptr = virt_to_machine(ptr);
-+	u.val = __pmd_val(val);
-+	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
-+}
++unsigned int __VMALLOC_RESERVE = 128 << 20;
 +
-+#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
-+void xen_l3_entry_update(pud_t *ptr, pud_t val)
++DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
++unsigned long highstart_pfn, highend_pfn;
++
++static int noinline do_test_wp_bit(void);
++
++/*
++ * Creates a middle page table and puts a pointer to it in the
++ * given global directory entry. This only returns the gd entry
++ * in non-PAE compilation mode, since the middle layer is folded.
++ */
++static pmd_t * __init one_md_table_init(pgd_t *pgd)
 +{
-+	mmu_update_t u;
-+	u.ptr = virt_to_machine(ptr);
-+	u.val = __pud_val(val);
-+	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
-+}
++	pud_t *pud;
++	pmd_t *pmd_table;
++		
++#ifdef CONFIG_X86_PAE
++	pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
++	make_lowmem_page_readonly(pmd_table, XENFEAT_writable_page_tables);
++	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
++	pud = pud_offset(pgd, 0);
++	if (pmd_table != pmd_offset(pud, 0)) 
++		BUG();
++#else
++	pud = pud_offset(pgd, 0);
++	pmd_table = pmd_offset(pud, 0);
 +#endif
 +
-+#ifdef CONFIG_X86_64
-+void xen_l4_entry_update(pgd_t *ptr, pgd_t val)
-+{
-+	mmu_update_t u;
-+	u.ptr = virt_to_machine(ptr);
-+	u.val = __pgd_val(val);
-+	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++	return pmd_table;
 +}
-+#endif /* CONFIG_X86_64 */
 +
-+void xen_pt_switch(unsigned long ptr)
++/*
++ * Create a page table and place a pointer to it in a middle page
++ * directory entry.
++ */
++static pte_t * __init one_page_table_init(pmd_t *pmd)
 +{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_NEW_BASEPTR;
-+	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
++	if (pmd_none(*pmd)) {
++		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
++		make_lowmem_page_readonly(page_table,
++					  XENFEAT_writable_page_tables);
++		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
++		if (page_table != pte_offset_kernel(pmd, 0))
++			BUG();	
 +
-+void xen_new_user_pt(unsigned long ptr)
-+{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_NEW_USER_BASEPTR;
-+	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++		return page_table;
++	}
++	
++	return pte_offset_kernel(pmd, 0);
 +}
 +
-+void xen_tlb_flush(void)
-+{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+EXPORT_SYMBOL(xen_tlb_flush);
++/*
++ * This function initializes a certain range of kernel virtual memory 
++ * with new bootmem page tables, wherever page tables are missing in
++ * the given range.
++ */
 +
-+void xen_invlpg(unsigned long ptr)
++/*
++ * NOTE: The pagetables are allocated contiguously in physical space,
++ * so we can cache the location of the first one and move around without
++ * checking the pgd every time.
++ */
++static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
 +{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_INVLPG_LOCAL;
-+	op.arg1.linear_addr = ptr & PAGE_MASK;
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+EXPORT_SYMBOL(xen_invlpg);
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	int pgd_idx, pmd_idx;
++	unsigned long vaddr;
 +
-+#ifdef CONFIG_SMP
++	vaddr = start;
++	pgd_idx = pgd_index(vaddr);
++	pmd_idx = pmd_index(vaddr);
++	pgd = pgd_base + pgd_idx;
 +
-+void xen_tlb_flush_all(void)
-+{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_TLB_FLUSH_ALL;
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
++	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
++		if (pgd_none(*pgd)) 
++			one_md_table_init(pgd);
++		pud = pud_offset(pgd, vaddr);
++		pmd = pmd_offset(pud, vaddr);
++		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
++			if (vaddr < hypervisor_virt_start && pmd_none(*pmd))
++				one_page_table_init(pmd);
 +
-+void xen_tlb_flush_mask(cpumask_t *mask)
-+{
-+	struct mmuext_op op;
-+	if ( cpus_empty(*mask) )
-+		return;
-+	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-+	set_xen_guest_handle(op.arg2.vcpumask, mask->bits);
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++			vaddr += PMD_SIZE;
++		}
++		pmd_idx = 0;
++	}
 +}
 +
-+void xen_invlpg_all(unsigned long ptr)
++static inline int is_kernel_text(unsigned long addr)
 +{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_INVLPG_ALL;
-+	op.arg1.linear_addr = ptr & PAGE_MASK;
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
++		return 1;
++	return 0;
 +}
 +
-+void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
++/*
++ * This maps the physical memory to kernel virtual address space, a total 
++ * of max_low_pfn pages, by creating page tables starting from address 
++ * PAGE_OFFSET.
++ */
++static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 +{
-+	struct mmuext_op op;
-+	if ( cpus_empty(*mask) )
-+		return;
-+	op.cmd = MMUEXT_INVLPG_MULTI;
-+	op.arg1.linear_addr = ptr & PAGE_MASK;
-+	set_xen_guest_handle(op.arg2.vcpumask, mask->bits);
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
++	unsigned long pfn;
++	pgd_t *pgd;
++	pmd_t *pmd;
++	pte_t *pte;
++	int pgd_idx, pmd_idx, pte_ofs;
 +
-+#endif /* CONFIG_SMP */
++	unsigned long max_ram_pfn = xen_start_info->nr_pages;
++	if (max_ram_pfn > max_low_pfn)
++		max_ram_pfn = max_low_pfn;
 +
-+void xen_pgd_pin(unsigned long ptr)
-+{
-+	struct mmuext_op op;
-+#ifdef CONFIG_X86_64
-+	op.cmd = MMUEXT_PIN_L4_TABLE;
-+#elif defined(CONFIG_X86_PAE)
-+	op.cmd = MMUEXT_PIN_L3_TABLE;
++	pgd_idx = pgd_index(PAGE_OFFSET);
++	pgd = pgd_base + pgd_idx;
++	pfn = 0;
++	pmd_idx = pmd_index(PAGE_OFFSET);
++	pte_ofs = pte_index(PAGE_OFFSET);
++
++	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
++#ifdef CONFIG_XEN
++		/*
++		 * Native Linux doesn't have PAE paging enabled yet at
++		 * this point.  When running as a Xen domain we are in
++		 * PAE mode already, thus we can't simply hook an empty
++		 * pmd.  That would kill the mappings we are currently
++		 * using ...
++		 */
++		pmd = pmd_offset(pud_offset(pgd, PAGE_OFFSET), PAGE_OFFSET);
 +#else
-+	op.cmd = MMUEXT_PIN_L2_TABLE;
++		pmd = one_md_table_init(pgd);
 +#endif
-+	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
++		if (pfn >= max_low_pfn)
++			continue;
++		pmd += pmd_idx;
++		for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
++			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
++			if (address >= hypervisor_virt_start)
++				continue;
 +
-+void xen_pgd_unpin(unsigned long ptr)
-+{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_UNPIN_TABLE;
-+	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++			/* Map with big pages if possible, otherwise create normal page tables. */
++			if (cpu_has_pse) {
++				unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;
++
++				if (is_kernel_text(address) || is_kernel_text(address2))
++					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
++				else
++					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
++				pfn += PTRS_PER_PTE;
++			} else {
++				pte = one_page_table_init(pmd);
++
++				pte += pte_ofs;
++				for (; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
++						/* XEN: Only map initial RAM allocation. */
++						if ((pfn >= max_ram_pfn) || pte_present(*pte))
++							continue;
++						if (is_kernel_text(address))
++							set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
++						else
++							set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
++				}
++				pte_ofs = 0;
++			}
++		}
++		pmd_idx = 0;
++	}
 +}
 +
-+void xen_set_ldt(unsigned long ptr, unsigned long len)
++#ifndef CONFIG_XEN
++
++static inline int page_kills_ppro(unsigned long pagenr)
 +{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_SET_LDT;
-+	op.arg1.linear_addr = ptr;
-+	op.arg2.nr_ents     = len;
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
++		return 1;
++	return 0;
 +}
 +
-+/*
-+ * Bitmap is indexed by page number. If bit is set, the page is part of a
-+ * xen_create_contiguous_region() area of memory.
-+ */
-+unsigned long *contiguous_bitmap;
++#else
 +
-+static void contiguous_bitmap_set(
-+	unsigned long first_page, unsigned long nr_pages)
-+{
-+	unsigned long start_off, end_off, curr_idx, end_idx;
++#define page_kills_ppro(p)	0
 +
-+	curr_idx  = first_page / BITS_PER_LONG;
-+	start_off = first_page & (BITS_PER_LONG-1);
-+	end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
-+	end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);
++#endif
 +
-+	if (curr_idx == end_idx) {
-+		contiguous_bitmap[curr_idx] |=
-+			((1UL<<end_off)-1) & -(1UL<<start_off);
-+	} else {
-+		contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
-+		while ( ++curr_idx < end_idx )
-+			contiguous_bitmap[curr_idx] = ~0UL;
-+		contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
-+	}
-+}
++extern int is_available_memory(efi_memory_desc_t *);
 +
-+static void contiguous_bitmap_clear(
-+	unsigned long first_page, unsigned long nr_pages)
++int page_is_ram(unsigned long pagenr)
 +{
-+	unsigned long start_off, end_off, curr_idx, end_idx;
++	int i;
++	unsigned long addr, end;
 +
-+	curr_idx  = first_page / BITS_PER_LONG;
-+	start_off = first_page & (BITS_PER_LONG-1);
-+	end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
-+	end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);
++	if (efi_enabled) {
++		efi_memory_desc_t *md;
++		void *p;
 +
-+	if (curr_idx == end_idx) {
-+		contiguous_bitmap[curr_idx] &=
-+			-(1UL<<end_off) | ((1UL<<start_off)-1);
-+	} else {
-+		contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
-+		while ( ++curr_idx != end_idx )
-+			contiguous_bitmap[curr_idx] = 0;
-+		contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
++		for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
++			md = p;
++			if (!is_available_memory(md))
++				continue;
++			addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
++			end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;
++
++			if ((pagenr >= addr) && (pagenr < end))
++				return 1;
++		}
++		return 0;
 +	}
-+}
 +
-+/* Protected by balloon_lock. */
-+#define MAX_CONTIG_ORDER 9 /* 2MB */
-+static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
-+static unsigned long limited_frames[1<<MAX_CONTIG_ORDER];
-+static multicall_entry_t cr_mcl[1<<MAX_CONTIG_ORDER];
++	for (i = 0; i < e820.nr_map; i++) {
 +
-+/* Ensure multi-page extents are contiguous in machine memory. */
-+int xen_create_contiguous_region(
-+	unsigned long vstart, unsigned int order, unsigned int address_bits)
-+{
-+	unsigned long *in_frames = discontig_frames, out_frame;
-+	unsigned long  frame, i, flags;
-+	long           rc;
-+	int            success;
-+	struct xen_memory_exchange exchange = {
-+		.in = {
-+			.nr_extents   = 1UL << order,
-+			.extent_order = 0,
-+			.domid        = DOMID_SELF
-+		},
-+		.out = {
-+			.nr_extents   = 1,
-+			.extent_order = order,
-+			.address_bits = address_bits,
-+			.domid        = DOMID_SELF
-+		}
-+	};
-+
-+	/*
-+	 * Currently an auto-translated guest will not perform I/O, nor will
-+	 * it require PAE page directories below 4GB. Therefore any calls to
-+	 * this function are redundant and can be ignored.
-+	 */
-+	if (xen_feature(XENFEAT_auto_translated_physmap))
-+		return 0;
++		if (e820.map[i].type != E820_RAM)	/* not usable memory */
++			continue;
++		/*
++		 *	!!!FIXME!!! Some BIOSen report areas as RAM that
++		 *	are not. Notably the 640KB->1MB area. We need a sanity
++		 *	check here.
++		 */
++		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
++		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
++		if  ((pagenr >= addr) && (pagenr < end))
++			return 1;
++	}
++	return 0;
++}
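
A worked example of the rounding above (numbers invented for illustration): an E820_RAM entry running from 0x1234 to 0x9f000 gives addr = (0x1234 + 0xfff) >> 12 = 2 and end = 0x9f000 >> 12 = 0x9f, so page_is_ram() reports pfns 2 through 0x9e. The partially covered pfn 1 is deliberately excluded: start addresses are rounded up and end addresses rounded down to whole pages.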
 +
-+	if (unlikely(order > MAX_CONTIG_ORDER))
-+		return -ENOMEM;
++#ifdef CONFIG_HIGHMEM
++pte_t *kmap_pte;
++pgprot_t kmap_prot;
 +
-+	set_xen_guest_handle(exchange.in.extent_start, in_frames);
-+	set_xen_guest_handle(exchange.out.extent_start, &out_frame);
++#define kmap_get_fixmap_pte(vaddr)					\
++	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
 +
-+	scrub_pages(vstart, 1 << order);
++static void __init kmap_init(void)
++{
++	unsigned long kmap_vstart;
 +
-+	balloon_lock(flags);
++	/* cache the first kmap pte */
++	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
++	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
 +
-+	/* 1. Zap current PTEs, remembering MFNs. */
-+	for (i = 0; i < (1UL<<order); i++) {
-+		in_frames[i] = pfn_to_mfn((__pa(vstart) >> PAGE_SHIFT) + i);
-+		MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
-+					__pte_ma(0), 0);
-+		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
-+			INVALID_P2M_ENTRY);
-+	}
-+	if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
-+		BUG();
++	kmap_prot = PAGE_KERNEL;
++}
 +
-+	/* 2. Get a new contiguous memory extent. */
-+	out_frame = __pa(vstart) >> PAGE_SHIFT;
-+	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
-+	success = (exchange.nr_exchanged == (1UL << order));
-+	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
-+	BUG_ON(success && (rc != 0));
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+	if (unlikely(rc == -ENOSYS)) {
-+		/* Compatibility when XENMEM_exchange is unsupported. */
-+		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-+					 &exchange.in) != (1UL << order))
-+			BUG();
-+		success = (HYPERVISOR_memory_op(XENMEM_populate_physmap,
-+						&exchange.out) == 1);
-+		if (!success) {
-+			/* Couldn't get special memory: fall back to normal. */
-+			for (i = 0; i < (1UL<<order); i++)
-+				in_frames[i] = (__pa(vstart)>>PAGE_SHIFT) + i;
-+			if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
-+						 &exchange.in) != (1UL<<order))
-+				BUG();
-+		}
-+	}
-+#endif
++static void __init permanent_kmaps_init(pgd_t *pgd_base)
++{
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
++	unsigned long vaddr;
 +
-+	/* 3. Map the new extent in place of old pages. */
-+	for (i = 0; i < (1UL<<order); i++) {
-+		frame = success ? (out_frame + i) : in_frames[i];
-+		MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
-+					pfn_pte_ma(frame, PAGE_KERNEL), 0);
-+		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
-+	}
++	vaddr = PKMAP_BASE;
++	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
 +
-+	cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
-+						   ? UVMF_TLB_FLUSH|UVMF_ALL
-+						   : UVMF_INVLPG|UVMF_ALL;
-+	if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
-+		BUG();
++	pgd = swapper_pg_dir + pgd_index(vaddr);
++	pud = pud_offset(pgd, vaddr);
++	pmd = pmd_offset(pud, vaddr);
++	pte = pte_offset_kernel(pmd, vaddr);
++	pkmap_page_table = pte;	
++}
 +
-+	if (success)
-+		contiguous_bitmap_set(__pa(vstart) >> PAGE_SHIFT,
-+				      1UL << order);
++static void __meminit free_new_highpage(struct page *page, int pfn)
++{
++	init_page_count(page);
++	if (pfn < xen_start_info->nr_pages)
++		__free_page(page);
++	totalhigh_pages++;
++}
 +
-+	balloon_unlock(flags);
++void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
++{
++	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
++		ClearPageReserved(page);
++		free_new_highpage(page, pfn);
++	} else
++		SetPageReserved(page);
++}
 +
-+	return success ? 0 : -ENOMEM;
++static int add_one_highpage_hotplug(struct page *page, unsigned long pfn)
++{
++	free_new_highpage(page, pfn);
++	totalram_pages++;
++#ifdef CONFIG_FLATMEM
++	max_mapnr = max(pfn, max_mapnr);
++#endif
++	num_physpages++;
++	return 0;
 +}
-+EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
 +
-+void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
++/*
++ * Not currently handling the NUMA case.
++ * Assuming a single node; all memory that has
++ * been added dynamically and would be onlined
++ * here is in HIGHMEM.
++ */
++void online_page(struct page *page)
 +{
-+	unsigned long *out_frames = discontig_frames, in_frame;
-+	unsigned long  frame, i, flags;
-+	long           rc;
-+	int            success;
-+	struct xen_memory_exchange exchange = {
-+		.in = {
-+			.nr_extents   = 1,
-+			.extent_order = order,
-+			.domid        = DOMID_SELF
-+		},
-+		.out = {
-+			.nr_extents   = 1UL << order,
-+			.extent_order = 0,
-+			.domid        = DOMID_SELF
-+		}
-+	};
++	ClearPageReserved(page);
++	add_one_highpage_hotplug(page, page_to_pfn(page));
++}
 +
-+	if (xen_feature(XENFEAT_auto_translated_physmap) ||
-+	    !test_bit(__pa(vstart) >> PAGE_SHIFT, contiguous_bitmap))
-+		return;
 +
-+	if (unlikely(order > MAX_CONTIG_ORDER))
-+		return;
++#ifdef CONFIG_NUMA
++extern void set_highmem_pages_init(int);
++#else
++static void __init set_highmem_pages_init(int bad_ppro)
++{
++	int pfn;
++	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
++		add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
++	totalram_pages += totalhigh_pages;
++}
++#endif /* CONFIG_NUMA */
 +
-+	set_xen_guest_handle(exchange.in.extent_start, &in_frame);
-+	set_xen_guest_handle(exchange.out.extent_start, out_frames);
++#else
++#define kmap_init() do { } while (0)
++#define permanent_kmaps_init(pgd_base) do { } while (0)
++#define set_highmem_pages_init(bad_ppro) do { } while (0)
++#endif /* CONFIG_HIGHMEM */
 +
-+	scrub_pages(vstart, 1 << order);
++unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
++EXPORT_SYMBOL(__PAGE_KERNEL);
++unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
 +
-+	balloon_lock(flags);
++#ifdef CONFIG_NUMA
++extern void __init remap_numa_kva(void);
++#else
++#define remap_numa_kva() do {} while (0)
++#endif
 +
-+	contiguous_bitmap_clear(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
++pgd_t *swapper_pg_dir;
 +
-+	/* 1. Find start MFN of contiguous extent. */
-+	in_frame = pfn_to_mfn(__pa(vstart) >> PAGE_SHIFT);
++static void __init pagetable_init (void)
++{
++	unsigned long vaddr;
++	pgd_t *pgd_base = (pgd_t *)xen_start_info->pt_base;
 +
-+	/* 2. Zap current PTEs. */
-+	for (i = 0; i < (1UL<<order); i++) {
-+		MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
-+					__pte_ma(0), 0);
-+		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
-+			INVALID_P2M_ENTRY);
-+		out_frames[i] = (__pa(vstart) >> PAGE_SHIFT) + i;
++	/* Enable PSE if available */
++	if (cpu_has_pse) {
++		set_in_cr4(X86_CR4_PSE);
 +	}
-+	if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
-+		BUG();
 +
-+	/* 3. Do the exchange for non-contiguous MFNs. */
-+	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
-+	success = (exchange.nr_exchanged == 1);
-+	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
-+	BUG_ON(success && (rc != 0));
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+	if (unlikely(rc == -ENOSYS)) {
-+		/* Compatibility when XENMEM_exchange is unsupported. */
-+		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-+					 &exchange.in) != 1)
-+			BUG();
-+		if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
-+					 &exchange.out) != (1UL << order))
-+			BUG();
-+		success = 1;
++	/* Enable PGE if available */
++	if (cpu_has_pge) {
++		set_in_cr4(X86_CR4_PGE);
++		__PAGE_KERNEL |= _PAGE_GLOBAL;
++		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
 +	}
-+#endif
 +
-+	/* 4. Map new pages in place of old pages. */
-+	for (i = 0; i < (1UL<<order); i++) {
-+		frame = success ? out_frames[i] : (in_frame + i);
-+		MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
-+					pfn_pte_ma(frame, PAGE_KERNEL), 0);
-+		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
-+	}
++	kernel_physical_mapping_init(pgd_base);
++	remap_numa_kva();
 +
-+	cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
-+						   ? UVMF_TLB_FLUSH|UVMF_ALL
-+						   : UVMF_INVLPG|UVMF_ALL;
-+	if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
-+		BUG();
++	/*
++	 * Fixed mappings, only the page table structure has to be
++	 * created - mappings will be set by set_fixmap():
++	 */
++	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
++	page_table_range_init(vaddr, hypervisor_virt_start, pgd_base);
 +
-+	balloon_unlock(flags);
++	permanent_kmaps_init(pgd_base);
 +}
-+EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
 +
-+int xen_limit_pages_to_max_mfn(
-+	struct page *pages, unsigned int order, unsigned int address_bits)
-+{
-+	unsigned long flags, frame;
-+	unsigned long *in_frames = discontig_frames, *out_frames = limited_frames;
-+	void *v;
-+	struct page *page;
-+	int i, nr_mcl, rc, success;
++#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI_SLEEP)
++/*
++ * Swap suspend & friends need this for resume because things like the intel-agp
++ * driver might have split up a kernel 4MB mapping.
++ */
++char __nosavedata swsusp_pg_dir[PAGE_SIZE]
++	__attribute__ ((aligned (PAGE_SIZE)));
 +
-+	struct xen_memory_exchange exchange = {
-+		.in = {
-+			.nr_extents   = 1UL << order,
-+			.extent_order = 0,
-+			.domid        = DOMID_SELF
-+		},
-+		.out = {
-+			.nr_extents   = 1UL << order,
-+			.extent_order = 0,
-+			.address_bits = address_bits,
-+			.domid        = DOMID_SELF
-+		}
-+	};
++static inline void save_pg_dir(void)
++{
++	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
++}
++#else
++static inline void save_pg_dir(void)
++{
++}
++#endif
 +
-+	if (xen_feature(XENFEAT_auto_translated_physmap))
-+		return 0;
++void zap_low_mappings (void)
++{
++	int i;
 +
-+	if (unlikely(order > MAX_CONTIG_ORDER))
-+		return -ENOMEM;
++	save_pg_dir();
 +
-+	set_xen_guest_handle(exchange.in.extent_start, in_frames);
-+	set_xen_guest_handle(exchange.out.extent_start, out_frames);
++	/*
++	 * Zap initial low-memory mappings.
++	 *
++	 * Note that "pgd_clear()" doesn't do it for
++	 * us, because pgd_clear() is a no-op on i386.
++	 */
++	for (i = 0; i < USER_PTRS_PER_PGD; i++)
++#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
++		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
++#else
++		set_pgd(swapper_pg_dir+i, __pgd(0));
++#endif
++	flush_tlb_all();
++}
 +
-+	/* 0. Scrub the pages. */
-+	for ( i = 0 ; i < 1UL<<order ; i++ ) {
-+		page = &pages[i];
++static int disable_nx __initdata = 0;
++u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
++EXPORT_SYMBOL(__supported_pte_mask);
 +
-+		if (!PageHighMem(page)) {
-+			v = page_address(page);
-+			scrub_pages(v, 1);
-+		} else {
-+			v = kmap(page);
-+			scrub_pages(v, 1);
-+			kunmap(page);
-+		}
++/*
++ * noexec = on|off
++ *
++ * Control non executable mappings.
++ *
++ * on      Enable
++ * off     Disable
++ */
++void __init noexec_setup(const char *str)
++{
++	if (!strncmp(str, "on",2) && cpu_has_nx) {
++		__supported_pte_mask |= _PAGE_NX;
++		disable_nx = 0;
++	} else if (!strncmp(str,"off",3)) {
++		disable_nx = 1;
++		__supported_pte_mask &= ~_PAGE_NX;
 +	}
++}
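
For illustration, the switch parsed above is given on the kernel command line; a hypothetical GRUB stanza for this kernel might read:

    # hypothetical example: force NX off even on NX-capable hardware
    kernel /boot/vmlinuz-2.6.18-xen root=/dev/hda1 ro noexec=off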
 +
-+	kmap_flush_unused();
-+
-+	balloon_lock(flags);
-+
-+	/* 1. Zap current PTEs (if any), remembering MFNs. */
-+	for (i = 0, nr_mcl = 0; i < (1UL<<order); i++) {
-+		page = &pages[i];
-+
-+		out_frames[i] = page_to_pfn(page);
-+		in_frames[i] = pfn_to_mfn(out_frames[i]);
-+
-+		if (!PageHighMem(page))
-+			MULTI_update_va_mapping(cr_mcl + nr_mcl++,
-+						(unsigned long)page_address(page),
-+						__pte_ma(0), 0);
++int nx_enabled = 0;
++#ifdef CONFIG_X86_PAE
 +
-+		set_phys_to_machine(out_frames[i], INVALID_P2M_ENTRY);
-+	}
-+	if (HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
-+		BUG();
++static void __init set_nx(void)
++{
++	unsigned int v[4], l, h;
 +
-+	/* 2. Get new memory below the required limit. */
-+	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
-+	success = (exchange.nr_exchanged == (1UL << order));
-+	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
-+	BUG_ON(success && (rc != 0));
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+	if (unlikely(rc == -ENOSYS)) {
-+		/* Compatibility when XENMEM_exchange is unsupported. */
-+		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-+					 &exchange.in) != (1UL << order))
-+			BUG();
-+		success = (HYPERVISOR_memory_op(XENMEM_populate_physmap,
-+						&exchange.out) != (1UL <<order));
++	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
++		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
++		if ((v[3] & (1 << 20)) && !disable_nx) {
++			rdmsr(MSR_EFER, l, h);
++			l |= EFER_NX;
++			wrmsr(MSR_EFER, l, h);
++			nx_enabled = 1;
++			__supported_pte_mask |= _PAGE_NX;
++		}
 +	}
-+#endif
-+
-+	/* 3. Map the new pages in place of old pages. */
-+	for (i = 0, nr_mcl = 0; i < (1UL<<order); i++) {
-+		page = &pages[i];
-+		unsigned long pfn = page_to_pfn(page);
++}
 +
-+		frame = success ? out_frames[i] : in_frames[i];
++/*
++ * Enables/disables executability of a given kernel page and
++ * returns the previous setting.
++ */
++int __init set_kernel_exec(unsigned long vaddr, int enable)
++{
++	pte_t *pte;
++	int ret = 1;
 +
-+		if (!PageHighMem(page))
-+			MULTI_update_va_mapping(cr_mcl + nr_mcl++,
-+						(unsigned long)page_address(page),
-+						pfn_pte_ma(frame, PAGE_KERNEL), 0);
++	if (!nx_enabled)
++		goto out;
 +
-+		set_phys_to_machine(pfn, frame);
-+	}
-+	cr_mcl[nr_mcl - 1].args[MULTI_UVMFLAGS_INDEX] = order
-+						        ? UVMF_TLB_FLUSH|UVMF_ALL
-+						        : UVMF_INVLPG|UVMF_ALL;
-+	if (HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
-+		BUG();
++	pte = lookup_address(vaddr);
++	BUG_ON(!pte);
 +
-+	balloon_unlock(flags);
++	if (!pte_exec_kernel(*pte))
++		ret = 0;
 +
-+	return success ? 0 : -ENOMEM;
++	if (enable)
++		pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
++	else
++		pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
++	__flush_tlb_all();
++out:
++	return ret;
 +}
-+EXPORT_SYMBOL_GPL(xen_limit_pages_to_max_mfn);
 +
-+#ifdef __i386__
-+int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
-+{
-+	__u32 *lp = (__u32 *)((char *)ldt + entry * 8);
-+	maddr_t mach_lp = arbitrary_virt_to_machine(lp);
-+	return HYPERVISOR_update_descriptor(
-+		mach_lp, (u64)entry_a | ((u64)entry_b<<32));
-+}
 +#endif
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/mm/init-xen.c tmp-linux-2.6-xen.patch/arch/i386/mm/init-xen.c
---- pristine-linux-2.6.18.2/arch/i386/mm/init-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/mm/init-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,850 @@
++
 +/*
-+ *  linux/arch/i386/mm/init.c
-+ *
-+ *  Copyright (C) 1995  Linus Torvalds
++ * paging_init() sets up the page tables - note that the first 8MB are
++ * already mapped by head.S.
 + *
-+ *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
++ * This routine also unmaps the page at virtual kernel address 0, so
++ * that we can trap those pesky NULL-reference errors in the kernel.
 + */
++void __init paging_init(void)
++{
++	int i;
 +
-+#include <linux/module.h>
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/ptrace.h>
-+#include <linux/mman.h>
-+#include <linux/mm.h>
-+#include <linux/hugetlb.h>
-+#include <linux/swap.h>
-+#include <linux/smp.h>
-+#include <linux/init.h>
-+#include <linux/highmem.h>
-+#include <linux/pagemap.h>
-+#include <linux/poison.h>
-+#include <linux/bootmem.h>
-+#include <linux/slab.h>
-+#include <linux/proc_fs.h>
-+#include <linux/efi.h>
-+#include <linux/memory_hotplug.h>
-+#include <linux/initrd.h>
-+#include <linux/cpumask.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/scatterlist.h>
++#ifdef CONFIG_X86_PAE
++	set_nx();
++	if (nx_enabled)
++		printk("NX (Execute Disable) protection: active\n");
++#endif
 +
-+#include <asm/processor.h>
-+#include <asm/system.h>
-+#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-+#include <asm/dma.h>
-+#include <asm/fixmap.h>
-+#include <asm/e820.h>
-+#include <asm/apic.h>
-+#include <asm/tlb.h>
-+#include <asm/tlbflush.h>
-+#include <asm/sections.h>
-+#include <asm/hypervisor.h>
-+#include <asm/swiotlb.h>
++	pagetable_init();
 +
-+extern unsigned long *contiguous_bitmap;
++#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
++	/*
++	 * We will bail out later - printk doesn't work right now so
++	 * the user would just see a hanging kernel.
++	 * When running as a Xen domain we are already in PAE mode at
++	 * this point.
++	 */
++	if (cpu_has_pae)
++		set_in_cr4(X86_CR4_PAE);
++#endif
++	__flush_tlb_all();
 +
-+unsigned int __VMALLOC_RESERVE = 128 << 20;
++	kmap_init();
 +
-+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-+unsigned long highstart_pfn, highend_pfn;
++	/* Switch to the real shared_info page, and clear the
++	 * dummy page. */
++	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
++	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
++	memset(empty_zero_page, 0, sizeof(empty_zero_page));
 +
-+static int noinline do_test_wp_bit(void);
++	/* Setup mapping of lower 1st MB */
++	for (i = 0; i < NR_FIX_ISAMAPS; i++)
++		if (is_initial_xendomain())
++			set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
++		else
++			__set_fixmap(FIX_ISAMAP_BEGIN - i,
++				     virt_to_machine(empty_zero_page),
++				     PAGE_KERNEL_RO);
++}
 +
 +/*
-+ * Creates a middle page table and puts a pointer to it in the
-+ * given global directory entry. This only returns the gd entry
-+ * in non-PAE compilation mode, since the middle layer is folded.
-+ */
-+static pmd_t * __init one_md_table_init(pgd_t *pgd)
-+{
-+	pud_t *pud;
-+	pmd_t *pmd_table;
-+		
-+#ifdef CONFIG_X86_PAE
-+	pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-+	make_lowmem_page_readonly(pmd_table, XENFEAT_writable_page_tables);
-+	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
-+	pud = pud_offset(pgd, 0);
-+	if (pmd_table != pmd_offset(pud, 0)) 
-+		BUG();
-+#else
-+	pud = pud_offset(pgd, 0);
-+	pmd_table = pmd_offset(pud, 0);
-+#endif
-+
-+	return pmd_table;
-+}
-+
-+/*
-+ * Create a page table and place a pointer to it in a middle page
-+ * directory entry.
-+ */
-+static pte_t * __init one_page_table_init(pmd_t *pmd)
-+{
-+	if (pmd_none(*pmd)) {
-+		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-+		make_lowmem_page_readonly(page_table,
-+					  XENFEAT_writable_page_tables);
-+		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
-+		if (page_table != pte_offset_kernel(pmd, 0))
-+			BUG();	
-+
-+		return page_table;
-+	}
-+	
-+	return pte_offset_kernel(pmd, 0);
-+}
-+
-+/*
-+ * This function initializes a certain range of kernel virtual memory 
-+ * with new bootmem page tables, everywhere page tables are missing in
-+ * the given range.
++ * Test if the WP bit works in supervisor mode. It isn't supported on 386s
++ * and also on some strange 486s (NexGen etc.). All 586+ CPUs are OK. This
++ * used to involve black magic jumps to work around some nasty CPU bugs,
++ * but fortunately the switch to using exceptions got rid of all that.
 + */
 +
-+/*
-+ * NOTE: The pagetables are allocated contiguous on the physical space 
-+ * so we can cache the place of the first one and move around without 
-+ * checking the pgd every time.
-+ */
-+static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
++static void __init test_wp_bit(void)
 +{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	int pgd_idx, pmd_idx;
-+	unsigned long vaddr;
-+
-+	vaddr = start;
-+	pgd_idx = pgd_index(vaddr);
-+	pmd_idx = pmd_index(vaddr);
-+	pgd = pgd_base + pgd_idx;
++	printk("Checking if this processor honours the WP bit even in supervisor mode... ");
 +
-+	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
-+		if (pgd_none(*pgd)) 
-+			one_md_table_init(pgd);
-+		pud = pud_offset(pgd, vaddr);
-+		pmd = pmd_offset(pud, vaddr);
-+		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
-+			if (vaddr < hypervisor_virt_start && pmd_none(*pmd))
-+				one_page_table_init(pmd);
++	/* Any page-aligned address will do, the test is non-destructive */
++	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
++	boot_cpu_data.wp_works_ok = do_test_wp_bit();
++	clear_fixmap(FIX_WP_TEST);
 +
-+			vaddr += PMD_SIZE;
-+		}
-+		pmd_idx = 0;
++	if (!boot_cpu_data.wp_works_ok) {
++		printk("No.\n");
++#ifdef CONFIG_X86_WP_WORKS_OK
++		panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
++#endif
++	} else {
++		printk("Ok.\n");
 +	}
 +}
 +
-+static inline int is_kernel_text(unsigned long addr)
++static void __init set_max_mapnr_init(void)
 +{
-+	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
-+		return 1;
-+	return 0;
++#ifdef CONFIG_HIGHMEM
++	num_physpages = highend_pfn;
++#else
++	num_physpages = max_low_pfn;
++#endif
++#ifdef CONFIG_FLATMEM
++	max_mapnr = num_physpages;
++#endif
 +}
 +
-+/*
-+ * This maps the physical memory to kernel virtual address space, a total 
-+ * of max_low_pfn pages, by creating page tables starting from address 
-+ * PAGE_OFFSET.
-+ */
-+static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
++static struct kcore_list kcore_mem, kcore_vmalloc; 
++
++void __init mem_init(void)
 +{
++	extern int ppro_with_ram_bug(void);
++	int codesize, reservedpages, datasize, initsize;
++	int tmp;
++	int bad_ppro;
 +	unsigned long pfn;
-+	pgd_t *pgd;
-+	pmd_t *pmd;
-+	pte_t *pte;
-+	int pgd_idx, pmd_idx, pte_ofs;
-+
-+	unsigned long max_ram_pfn = xen_start_info->nr_pages;
-+	if (max_ram_pfn > max_low_pfn)
-+		max_ram_pfn = max_low_pfn;
 +
-+	pgd_idx = pgd_index(PAGE_OFFSET);
-+	pgd = pgd_base + pgd_idx;
-+	pfn = 0;
-+	pmd_idx = pmd_index(PAGE_OFFSET);
-+	pte_ofs = pte_index(PAGE_OFFSET);
++	contiguous_bitmap = alloc_bootmem_low_pages(
++		(max_low_pfn + 2*BITS_PER_LONG) >> 3);
++	BUG_ON(!contiguous_bitmap);
++	memset(contiguous_bitmap, 0, (max_low_pfn + 2*BITS_PER_LONG) >> 3);
 +
-+	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
-+#ifdef CONFIG_XEN
-+		/*
-+		 * Native linux hasn't PAE-paging enabled yet at this
-+		 * point.  When running as xen domain we are in PAE
-+		 * mode already, thus we can't simply hook a empty
-+		 * pmd.  That would kill the mappings we are currently
-+		 * using ...
-+		 */
-+		pmd = pmd_offset(pud_offset(pgd, PAGE_OFFSET), PAGE_OFFSET);
-+#else
-+		pmd = one_md_table_init(pgd);
++#if defined(CONFIG_SWIOTLB)
++	swiotlb_init();	
 +#endif
-+		if (pfn >= max_low_pfn)
-+			continue;
-+		pmd += pmd_idx;
-+		for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
-+			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
-+			if (address >= hypervisor_virt_start)
-+				continue;
-+
-+			/* Map with big pages if possible, otherwise create normal page tables. */
-+			if (cpu_has_pse) {
-+				unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;
 +
-+				if (is_kernel_text(address) || is_kernel_text(address2))
-+					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
-+				else
-+					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
-+				pfn += PTRS_PER_PTE;
-+			} else {
-+				pte = one_page_table_init(pmd);
++#ifdef CONFIG_FLATMEM
++	if (!mem_map)
++		BUG();
++#endif
++	
++	bad_ppro = ppro_with_ram_bug();
 +
-+				pte += pte_ofs;
-+				for (; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
-+						/* XEN: Only map initial RAM allocation. */
-+						if ((pfn >= max_ram_pfn) || pte_present(*pte))
-+							continue;
-+						if (is_kernel_text(address))
-+							set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
-+						else
-+							set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
-+				}
-+				pte_ofs = 0;
-+			}
-+		}
-+		pmd_idx = 0;
++#ifdef CONFIG_HIGHMEM
++	/* check that fixmap and pkmap do not overlap */
++	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
++		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
++		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
++				PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
++		BUG();
 +	}
-+}
-+
-+#ifndef CONFIG_XEN
-+
-+static inline int page_kills_ppro(unsigned long pagenr)
-+{
-+	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
-+		return 1;
-+	return 0;
-+}
++#endif
++ 
++	set_max_mapnr_init();
 +
++#ifdef CONFIG_HIGHMEM
++	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
 +#else
-+
-+#define page_kills_ppro(p)	0
-+
++	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
 +#endif
-+
-+extern int is_available_memory(efi_memory_desc_t *);
-+
-+int page_is_ram(unsigned long pagenr)
-+{
-+	int i;
-+	unsigned long addr, end;
-+
-+	if (efi_enabled) {
-+		efi_memory_desc_t *md;
-+		void *p;
-+
-+		for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-+			md = p;
-+			if (!is_available_memory(md))
-+				continue;
-+			addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
-+			end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;
-+
-+			if ((pagenr >= addr) && (pagenr < end))
-+				return 1;
-+		}
-+		return 0;
++	printk("vmalloc area: %lx-%lx, maxmem %lx\n",
++	       VMALLOC_START,VMALLOC_END,MAXMEM);
++	BUG_ON(VMALLOC_START > VMALLOC_END);
++	
++	/* this will put all low memory onto the freelists */
++	totalram_pages += free_all_bootmem();
++	/* XEN: init and count low-mem pages outside initial allocation. */
++	for (pfn = xen_start_info->nr_pages; pfn < max_low_pfn; pfn++) {
++		ClearPageReserved(pfn_to_page(pfn));
++		init_page_count(pfn_to_page(pfn));
++		totalram_pages++;
 +	}
 +
-+	for (i = 0; i < e820.nr_map; i++) {
-+
-+		if (e820.map[i].type != E820_RAM)	/* not usable memory */
-+			continue;
++	reservedpages = 0;
++	for (tmp = 0; tmp < max_low_pfn; tmp++)
 +		/*
-+		 *	!!!FIXME!!! Some BIOSen report areas as RAM that
-+		 *	are not. Notably the 640->1Mb area. We need a sanity
-+		 *	check here.
++		 * Only count reserved RAM pages
 +		 */
-+		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
-+		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
-+		if  ((pagenr >= addr) && (pagenr < end))
-+			return 1;
-+	}
-+	return 0;
-+}
-+
-+#ifdef CONFIG_HIGHMEM
-+pte_t *kmap_pte;
-+pgprot_t kmap_prot;
++		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
++			reservedpages++;
 +
-+#define kmap_get_fixmap_pte(vaddr)					\
-+	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
++	set_highmem_pages_init(bad_ppro);
 +
-+static void __init kmap_init(void)
-+{
-+	unsigned long kmap_vstart;
++	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
++	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
++	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
 +
-+	/* cache the first kmap pte */
-+	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
-+	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
++	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); 
++	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, 
++		   VMALLOC_END-VMALLOC_START);
 +
-+	kmap_prot = PAGE_KERNEL;
-+}
++	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
++		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
++		num_physpages << (PAGE_SHIFT-10),
++		codesize >> 10,
++		reservedpages << (PAGE_SHIFT-10),
++		datasize >> 10,
++		initsize >> 10,
++		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
++	       );
 +
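
Every size in that printk is a shift-based unit conversion: page counts become KiB via a left shift by PAGE_SHIFT-10 (times 4 for 4 KiB pages), byte counts via a right shift by 10. A quick check with invented numbers:

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages */

int main(void)
{
	unsigned long free_pages = 30000, total_pages = 32768;
	unsigned long codesize = 2 * 1024 * 1024;	/* bytes */

	printf("Memory: %luk/%luk available (%luk kernel code)\n",
	       free_pages << (PAGE_SHIFT - 10),
	       total_pages << (PAGE_SHIFT - 10),
	       codesize >> 10);
	return 0;
}
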
-+static void __init permanent_kmaps_init(pgd_t *pgd_base)
-+{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
-+	unsigned long vaddr;
++#ifdef CONFIG_X86_PAE
++	if (!cpu_has_pae)
++		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
++#endif
++	if (boot_cpu_data.wp_works_ok < 0)
++		test_wp_bit();
 +
-+	vaddr = PKMAP_BASE;
-+	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
++	/*
++	 * Subtle. SMP is doing its boot stuff late (because it has to
++	 * fork idle threads) - but it also needs low mappings for the
++	 * protected-mode entry to work. We zap these entries only after
++	 * the WP-bit has been tested.
++	 */
++#ifndef CONFIG_SMP
++	zap_low_mappings();
++#endif
 +
-+	pgd = swapper_pg_dir + pgd_index(vaddr);
-+	pud = pud_offset(pgd, vaddr);
-+	pmd = pmd_offset(pud, vaddr);
-+	pte = pte_offset_kernel(pmd, vaddr);
-+	pkmap_page_table = pte;	
++	set_bit(PG_pinned, &virt_to_page(init_mm.pgd)->flags);
 +}
 +
-+static void __meminit free_new_highpage(struct page *page, int pfn)
++/*
++ * This is for the non-NUMA, single-node SMP system case.
++ * Specifically, in the case of x86, we will always add
++ * memory to highmem for now.
++ */
++#ifdef CONFIG_MEMORY_HOTPLUG
++#ifndef CONFIG_NEED_MULTIPLE_NODES
++int arch_add_memory(int nid, u64 start, u64 size)
 +{
-+	init_page_count(page);
-+	if (pfn < xen_start_info->nr_pages)
-+		__free_page(page);
-+	totalhigh_pages++;
++	struct pglist_data *pgdata = &contig_page_data;
++	struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
++	unsigned long start_pfn = start >> PAGE_SHIFT;
++	unsigned long nr_pages = size >> PAGE_SHIFT;
++
++	return __add_pages(zone, start_pfn, nr_pages);
 +}
 +
-+void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
++int remove_memory(u64 start, u64 size)
 +{
-+	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
-+		ClearPageReserved(page);
-+		free_new_highpage(page, pfn);
-+	} else
-+		SetPageReserved(page);
++	return -EINVAL;
 +}
++#endif
++#endif
 +
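
__add_pages() thinks in page frames, so arch_add_memory() only converts the byte range to a pfn range and points it at the top (highmem) zone of the single node. The conversion, with a hypothetical 128 MiB hot-add at physical 0x10000000:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long long start = 0x10000000ULL;
	unsigned long long size  = 128ULL << 20;

	printf("start_pfn = %#llx, nr_pages = %llu\n",
	       start >> PAGE_SHIFT, size >> PAGE_SHIFT);
	return 0;
}

which yields start_pfn 0x10000 and nr_pages 32768.
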
-+static int add_one_highpage_hotplug(struct page *page, unsigned long pfn)
++kmem_cache_t *pgd_cache;
++kmem_cache_t *pmd_cache;
++
++void __init pgtable_cache_init(void)
 +{
-+	free_new_highpage(page, pfn);
-+	totalram_pages++;
-+#ifdef CONFIG_FLATMEM
-+	max_mapnr = max(pfn, max_mapnr);
++	if (PTRS_PER_PMD > 1) {
++		pmd_cache = kmem_cache_create("pmd",
++					PTRS_PER_PMD*sizeof(pmd_t),
++					PTRS_PER_PMD*sizeof(pmd_t),
++					0,
++					pmd_ctor,
++					NULL);
++		if (!pmd_cache)
++			panic("pgtable_cache_init(): cannot create pmd cache");
++	}
++	pgd_cache = kmem_cache_create("pgd",
++#ifndef CONFIG_XEN
++				PTRS_PER_PGD*sizeof(pgd_t),
++				PTRS_PER_PGD*sizeof(pgd_t),
++#else
++				PAGE_SIZE,
++				PAGE_SIZE,
 +#endif
-+	num_physpages++;
-+	return 0;
++				0,
++				pgd_ctor,
++				PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
++	if (!pgd_cache)
++		panic("pgtable_cache_init(): Cannot create pgd cache");
 +}
 +
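
Note the Xen branch of the pgd cache: instead of PTRS_PER_PGD*sizeof(pgd_t) objects (a mere 32 bytes under PAE) it hands out whole page-aligned pages, presumably because the hypervisor validates and write-protects page-table pages as full pages, so a pgd must not share its page with unrelated objects. The size/alignment distinction in a user-space sketch (sizes illustrative):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096
#define PTRS_PER_PGD 4	/* PAE: four 8-byte pgd entries */

int main(void)
{
	void *pgd;

	/* A bare PAE pgd needs 32 bytes; a pinnable one must own a page. */
	if (posix_memalign(&pgd, PAGE_SIZE, PAGE_SIZE))
		return 1;
	printf("object size %d vs. page-sized allocation %d at %p\n",
	       PTRS_PER_PGD * 8, PAGE_SIZE, pgd);
	free(pgd);
	return 0;
}
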
 +/*
-+ * Not currently handling the NUMA case.
-+ * Assuming single node and all memory that
-+ * has been added dynamically that would be
-+ * onlined here is in HIGHMEM
++ * This function cannot be __init, since exceptions don't work in that
++ * section.  Put this after the callers, so that it cannot be inlined.
 + */
-+void online_page(struct page *page)
++static int noinline do_test_wp_bit(void)
 +{
-+	ClearPageReserved(page);
-+	add_one_highpage_hotplug(page, page_to_pfn(page));
++	char tmp_reg;
++	int flag;
++
++	__asm__ __volatile__(
++		"	movb %0,%1	\n"
++		"1:	movb %1,%0	\n"
++		"	xorl %2,%2	\n"
++		"2:			\n"
++		".section __ex_table,\"a\"\n"
++		"	.align 4	\n"
++		"	.long 1b,2b	\n"
++		".previous		\n"
++		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
++		 "=q" (tmp_reg),
++		 "=r" (flag)
++		:"2" (1)
++		:"memory");
++	
++	return flag;
 +}
 +
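
The routine writes through the read-only FIX_WP_TEST mapping and lets an __ex_table entry convert the fault into a branch to label 2, leaving flag set when the write never completed. User space can sketch the same probe-and-recover structure with mprotect() and a SIGSEGV handler standing in for the fixup table; the analogy is structural only, since user-mode writes to a read-only page always fault regardless of the WP bit:

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>

static sigjmp_buf fixup;

static void on_segv(int sig)
{
	(void)sig;
	siglongjmp(fixup, 1);	/* our stand-in for the __ex_table fixup */
}

int main(void)
{
	char *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	int faulted = 0;

	if (page == MAP_FAILED)
		return 1;
	signal(SIGSEGV, on_segv);
	mprotect(page, 4096, PROT_READ);	/* like the RO fixmap page */
	if (sigsetjmp(fixup, 1) == 0)
		page[0] = 1;			/* the probing write */
	else
		faulted = 1;			/* recovered, result recorded */
	printf("write faulted: %s\n", faulted ? "yes" : "no");
	return 0;
}
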
++#ifdef CONFIG_DEBUG_RODATA
 +
-+#ifdef CONFIG_NUMA
-+extern void set_highmem_pages_init(int);
-+#else
-+static void __init set_highmem_pages_init(int bad_ppro)
++void mark_rodata_ro(void)
 +{
-+	int pfn;
-+	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
-+		add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
-+	totalram_pages += totalhigh_pages;
-+}
-+#endif /* CONFIG_FLATMEM */
++	unsigned long addr = (unsigned long)__start_rodata;
 +
-+#else
-+#define kmap_init() do { } while (0)
-+#define permanent_kmaps_init(pgd_base) do { } while (0)
-+#define set_highmem_pages_init(bad_ppro) do { } while (0)
-+#endif /* CONFIG_HIGHMEM */
++	for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
++		change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);
 +
-+unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
-+EXPORT_SYMBOL(__PAGE_KERNEL);
-+unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
++	printk("Write protecting the kernel read-only data: %uk\n",
++			(__end_rodata - __start_rodata) >> 10);
 +
-+#ifdef CONFIG_NUMA
-+extern void __init remap_numa_kva(void);
-+#else
-+#define remap_numa_kva() do {} while (0)
++	/*
++	 * change_page_attr() requires a global_flush_tlb() call after it.
++	 * We do this after the printk so that if something went wrong in the
++	 * change, the printk gets out at least to give a better debug hint
++	 * of who is the culprit.
++	 */
++	global_flush_tlb();
++}
 +#endif
 +
-+pgd_t *swapper_pg_dir;
-+
-+static void __init pagetable_init (void)
++void free_init_pages(char *what, unsigned long begin, unsigned long end)
 +{
-+	unsigned long vaddr;
-+	pgd_t *pgd_base = (pgd_t *)xen_start_info->pt_base;
-+
-+	swapper_pg_dir = pgd_base;
-+	init_mm.pgd    = pgd_base;
-+
-+	/* Enable PSE if available */
-+	if (cpu_has_pse) {
-+		set_in_cr4(X86_CR4_PSE);
-+	}
++	unsigned long addr;
 +
-+	/* Enable PGE if available */
-+	if (cpu_has_pge) {
-+		set_in_cr4(X86_CR4_PGE);
-+		__PAGE_KERNEL |= _PAGE_GLOBAL;
-+		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
++	for (addr = begin; addr < end; addr += PAGE_SIZE) {
++		ClearPageReserved(virt_to_page(addr));
++		init_page_count(virt_to_page(addr));
++		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
++		free_page(addr);
++		totalram_pages++;
 +	}
-+
-+	kernel_physical_mapping_init(pgd_base);
-+	remap_numa_kva();
-+
-+	/*
-+	 * Fixed mappings, only the page table structure has to be
-+	 * created - mappings will be set by set_fixmap():
-+	 */
-+	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
-+	page_table_range_init(vaddr, hypervisor_virt_start, pgd_base);
-+
-+	permanent_kmaps_init(pgd_base);
++	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
 +}
 +
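
The pages are filled with POISON_FREE_INITMEM before being handed back, so any late reference into discarded init memory reads a recognizable pattern instead of silently working by luck. The idea in miniature (0xcc as an illustrative poison byte):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define POISON 0xcc

int main(void)
{
	unsigned char *p = malloc(4096);

	if (!p)
		return 1;
	memset(p, POISON, 4096);	/* poison before "freeing" */
	/* A stale access is now easy to recognize in a crash dump: */
	assert(p[123] == POISON);
	printf("stale read sees %#x\n", p[123]);
	free(p);
	return 0;
}
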
-+#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI_SLEEP)
-+/*
-+ * Swap suspend & friends need this for resume because things like the intel-agp
-+ * driver might have split up a kernel 4MB mapping.
-+ */
-+char __nosavedata swsusp_pg_dir[PAGE_SIZE]
-+	__attribute__ ((aligned (PAGE_SIZE)));
-+
-+static inline void save_pg_dir(void)
++void free_initmem(void)
 +{
-+	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
++	free_init_pages("unused kernel memory",
++			(unsigned long)(&__init_begin),
++			(unsigned long)(&__init_end));
 +}
-+#else
-+static inline void save_pg_dir(void)
++
++#ifdef CONFIG_BLK_DEV_INITRD
++void free_initrd_mem(unsigned long start, unsigned long end)
 +{
++	free_init_pages("initrd memory", start, end);
 +}
 +#endif
 +
-+void zap_low_mappings (void)
-+{
-+	int i;
-+
-+	save_pg_dir();
-+
-+	/*
-+	 * Zap initial low-memory mappings.
-+	 *
-+	 * Note that "pgd_clear()" doesn't do it for
-+	 * us, because pgd_clear() is a no-op on i386.
-+	 */
-+	for (i = 0; i < USER_PTRS_PER_PGD; i++)
-+#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
-+		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
-+#else
-+		set_pgd(swapper_pg_dir+i, __pgd(0));
-+#endif
-+	flush_tlb_all();
-+}
-+
-+static int disable_nx __initdata = 0;
-+u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
-+EXPORT_SYMBOL(__supported_pte_mask);
-+
-+/*
-+ * noexec = on|off
-+ *
-+ * Control non executable mappings.
-+ *
-+ * on      Enable
-+ * off     Disable
-+ */
-+void __init noexec_setup(const char *str)
-+{
-+	if (!strncmp(str, "on",2) && cpu_has_nx) {
-+		__supported_pte_mask |= _PAGE_NX;
-+		disable_nx = 0;
-+	} else if (!strncmp(str,"off",3)) {
-+		disable_nx = 1;
-+		__supported_pte_mask &= ~_PAGE_NX;
-+	}
-+}
-+
-+int nx_enabled = 0;
-+#ifdef CONFIG_X86_PAE
-+
-+static void __init set_nx(void)
-+{
-+	unsigned int v[4], l, h;
-+
-+	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
-+		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
-+		if ((v[3] & (1 << 20)) && !disable_nx) {
-+			rdmsr(MSR_EFER, l, h);
-+			l |= EFER_NX;
-+			wrmsr(MSR_EFER, l, h);
-+			nx_enabled = 1;
-+			__supported_pte_mask |= _PAGE_NX;
-+		}
-+	}
-+}
-+
-+/*
-+ * Enables/disables executability of a given kernel page and
-+ * returns the previous setting.
-+ */
-+int __init set_kernel_exec(unsigned long vaddr, int enable)
-+{
-+	pte_t *pte;
-+	int ret = 1;
-+
-+	if (!nx_enabled)
-+		goto out;
-+
-+	pte = lookup_address(vaddr);
-+	BUG_ON(!pte);
-+
-+	if (!pte_exec_kernel(*pte))
-+		ret = 0;
-+
-+	if (enable)
-+		pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
-+	else
-+		pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
-+	__flush_tlb_all();
-+out:
-+	return ret;
-+}
-+
-+#endif
-+
-+/*
-+ * paging_init() sets up the page tables - note that the first 8MB are
-+ * already mapped by head.S.
-+ *
-+ * This routines also unmaps the page at virtual kernel address 0, so
-+ * that we can trap those pesky NULL-reference errors in the kernel.
-+ */
-+void __init paging_init(void)
-+{
-+	int i;
-+
-+#ifdef CONFIG_X86_PAE
-+	set_nx();
-+	if (nx_enabled)
-+		printk("NX (Execute Disable) protection: active\n");
-+#endif
-+
-+	pagetable_init();
-+
-+#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
-+	/*
-+	 * We will bail out later - printk doesn't work right now so
-+	 * the user would just see a hanging kernel.
-+	 * when running as xen domain we are already in PAE mode at
-+	 * this point.
-+	 */
-+	if (cpu_has_pae)
-+		set_in_cr4(X86_CR4_PAE);
-+#endif
-+	__flush_tlb_all();
-+
-+	kmap_init();
-+
-+	/* Switch to the real shared_info page, and clear the
-+	 * dummy page. */
-+	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
-+	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
-+	memset(empty_zero_page, 0, sizeof(empty_zero_page));
-+
-+	/* Setup mapping of lower 1st MB */
-+	for (i = 0; i < NR_FIX_ISAMAPS; i++)
-+		if (is_initial_xendomain())
-+			set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
-+		else
-+			__set_fixmap(FIX_ISAMAP_BEGIN - i,
-+				     virt_to_machine(empty_zero_page),
-+				     PAGE_KERNEL_RO);
-+}
-+
-+/*
-+ * Test if the WP bit works in supervisor mode. It isn't supported on 386's
-+ * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
-+ * used to involve black magic jumps to work around some nasty CPU bugs,
-+ * but fortunately the switch to using exceptions got rid of all that.
-+ */
-+
-+static void __init test_wp_bit(void)
-+{
-+	printk("Checking if this processor honours the WP bit even in supervisor mode... ");
-+
-+	/* Any page-aligned address will do, the test is non-destructive */
-+	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
-+	boot_cpu_data.wp_works_ok = do_test_wp_bit();
-+	clear_fixmap(FIX_WP_TEST);
-+
-+	if (!boot_cpu_data.wp_works_ok) {
-+		printk("No.\n");
-+#ifdef CONFIG_X86_WP_WORKS_OK
-+		panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
-+#endif
-+	} else {
-+		printk("Ok.\n");
-+	}
-+}
-+
-+static void __init set_max_mapnr_init(void)
-+{
-+#ifdef CONFIG_HIGHMEM
-+	num_physpages = highend_pfn;
-+#else
-+	num_physpages = max_low_pfn;
-+#endif
-+#ifdef CONFIG_FLATMEM
-+	max_mapnr = num_physpages;
-+#endif
-+}
-+
-+static struct kcore_list kcore_mem, kcore_vmalloc; 
-+
-+void __init mem_init(void)
-+{
-+	extern int ppro_with_ram_bug(void);
-+	int codesize, reservedpages, datasize, initsize;
-+	int tmp;
-+	int bad_ppro;
-+	unsigned long pfn;
-+
-+	contiguous_bitmap = alloc_bootmem_low_pages(
-+		(max_low_pfn + 2*BITS_PER_LONG) >> 3);
-+	BUG_ON(!contiguous_bitmap);
-+	memset(contiguous_bitmap, 0, (max_low_pfn + 2*BITS_PER_LONG) >> 3);
-+
-+#if defined(CONFIG_SWIOTLB)
-+	swiotlb_init();	
-+#endif
-+
-+#ifdef CONFIG_FLATMEM
-+	if (!mem_map)
-+		BUG();
-+#endif
-+	
-+	bad_ppro = ppro_with_ram_bug();
-+
-+#ifdef CONFIG_HIGHMEM
-+	/* check that fixmap and pkmap do not overlap */
-+	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
-+		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
-+		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
-+				PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
-+		BUG();
-+	}
-+#endif
-+ 
-+	set_max_mapnr_init();
-+
-+#ifdef CONFIG_HIGHMEM
-+	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
-+#else
-+	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
-+#endif
-+	printk("vmalloc area: %lx-%lx, maxmem %lx\n",
-+	       VMALLOC_START,VMALLOC_END,MAXMEM);
-+	BUG_ON(VMALLOC_START > VMALLOC_END);
-+	
-+	/* this will put all low memory onto the freelists */
-+	totalram_pages += free_all_bootmem();
-+	/* XEN: init and count low-mem pages outside initial allocation. */
-+	for (pfn = xen_start_info->nr_pages; pfn < max_low_pfn; pfn++) {
-+		ClearPageReserved(pfn_to_page(pfn));
-+		init_page_count(pfn_to_page(pfn));
-+		totalram_pages++;
-+	}
-+
-+	reservedpages = 0;
-+	for (tmp = 0; tmp < max_low_pfn; tmp++)
-+		/*
-+		 * Only count reserved RAM pages
-+		 */
-+		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
-+			reservedpages++;
-+
-+	set_highmem_pages_init(bad_ppro);
-+
-+	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
-+	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
-+	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
-+
-+	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); 
-+	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, 
-+		   VMALLOC_END-VMALLOC_START);
-+
-+	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
-+		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-+		num_physpages << (PAGE_SHIFT-10),
-+		codesize >> 10,
-+		reservedpages << (PAGE_SHIFT-10),
-+		datasize >> 10,
-+		initsize >> 10,
-+		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
-+	       );
-+
-+#ifdef CONFIG_X86_PAE
-+	if (!cpu_has_pae)
-+		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
-+#endif
-+	if (boot_cpu_data.wp_works_ok < 0)
-+		test_wp_bit();
-+
-+	/*
-+	 * Subtle. SMP is doing it's boot stuff late (because it has to
-+	 * fork idle threads) - but it also needs low mappings for the
-+	 * protected-mode entry to work. We zap these entries only after
-+	 * the WP-bit has been tested.
-+	 */
-+#ifndef CONFIG_SMP
-+	zap_low_mappings();
-+#endif
-+
-+	set_bit(PG_pinned, &virt_to_page(init_mm.pgd)->flags);
-+}
-+
-+/*
-+ * this is for the non-NUMA, single node SMP system case.
-+ * Specifically, in the case of x86, we will always add
-+ * memory to the highmem for now.
-+ */
-+#ifdef CONFIG_MEMORY_HOTPLUG
-+#ifndef CONFIG_NEED_MULTIPLE_NODES
-+int arch_add_memory(int nid, u64 start, u64 size)
-+{
-+	struct pglist_data *pgdata = &contig_page_data;
-+	struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
-+	unsigned long start_pfn = start >> PAGE_SHIFT;
-+	unsigned long nr_pages = size >> PAGE_SHIFT;
-+
-+	return __add_pages(zone, start_pfn, nr_pages);
-+}
-+
-+int remove_memory(u64 start, u64 size)
-+{
-+	return -EINVAL;
-+}
-+#endif
-+#endif
-+
-+kmem_cache_t *pgd_cache;
-+kmem_cache_t *pmd_cache;
-+
-+void __init pgtable_cache_init(void)
-+{
-+	if (PTRS_PER_PMD > 1) {
-+		pmd_cache = kmem_cache_create("pmd",
-+					PTRS_PER_PMD*sizeof(pmd_t),
-+					PTRS_PER_PMD*sizeof(pmd_t),
-+					0,
-+					pmd_ctor,
-+					NULL);
-+		if (!pmd_cache)
-+			panic("pgtable_cache_init(): cannot create pmd cache");
-+	}
-+	pgd_cache = kmem_cache_create("pgd",
-+#ifndef CONFIG_XEN
-+				PTRS_PER_PGD*sizeof(pgd_t),
-+				PTRS_PER_PGD*sizeof(pgd_t),
-+#else
-+				PAGE_SIZE,
-+				PAGE_SIZE,
-+#endif
-+				0,
-+				pgd_ctor,
-+				PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
-+	if (!pgd_cache)
-+		panic("pgtable_cache_init(): Cannot create pgd cache");
-+}
-+
-+/*
-+ * This function cannot be __init, since exceptions don't work in that
-+ * section.  Put this after the callers, so that it cannot be inlined.
-+ */
-+static int noinline do_test_wp_bit(void)
-+{
-+	char tmp_reg;
-+	int flag;
-+
-+	__asm__ __volatile__(
-+		"	movb %0,%1	\n"
-+		"1:	movb %1,%0	\n"
-+		"	xorl %2,%2	\n"
-+		"2:			\n"
-+		".section __ex_table,\"a\"\n"
-+		"	.align 4	\n"
-+		"	.long 1b,2b	\n"
-+		".previous		\n"
-+		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
-+		 "=q" (tmp_reg),
-+		 "=r" (flag)
-+		:"2" (1)
-+		:"memory");
-+	
-+	return flag;
-+}
-+
-+#ifdef CONFIG_DEBUG_RODATA
-+
-+void mark_rodata_ro(void)
-+{
-+	unsigned long addr = (unsigned long)__start_rodata;
-+
-+	for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
-+		change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);
-+
-+	printk("Write protecting the kernel read-only data: %uk\n",
-+			(__end_rodata - __start_rodata) >> 10);
-+
-+	/*
-+	 * change_page_attr() requires a global_flush_tlb() call after it.
-+	 * We do this after the printk so that if something went wrong in the
-+	 * change, the printk gets out at least to give a better debug hint
-+	 * of who is the culprit.
-+	 */
-+	global_flush_tlb();
-+}
-+#endif
-+
-+void free_init_pages(char *what, unsigned long begin, unsigned long end)
-+{
-+	unsigned long addr;
-+
-+	for (addr = begin; addr < end; addr += PAGE_SIZE) {
-+		ClearPageReserved(virt_to_page(addr));
-+		init_page_count(virt_to_page(addr));
-+		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
-+		free_page(addr);
-+		totalram_pages++;
-+	}
-+	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
-+}
-+
-+void free_initmem(void)
-+{
-+	free_init_pages("unused kernel memory",
-+			(unsigned long)(&__init_begin),
-+			(unsigned long)(&__init_end));
-+}
-+
-+#ifdef CONFIG_BLK_DEV_INITRD
-+void free_initrd_mem(unsigned long start, unsigned long end)
-+{
-+	free_init_pages("initrd memory", start, end);
-+}
-+#endif
-+
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/mm/ioremap-xen.c tmp-linux-2.6-xen.patch/arch/i386/mm/ioremap-xen.c
---- pristine-linux-2.6.18.2/arch/i386/mm/ioremap-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/mm/ioremap-xen.c	2007-09-30 18:06:18.000000000 +0200
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/mm/ioremap-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/mm/ioremap-xen.c	Wed Sep 10 10:54:08 2008 +0100
 @@ -0,0 +1,443 @@
 +/*
 + * arch/i386/mm/ioremap.c
@@ -19426,7 +19858,7 @@
 +		 * Fill in the machine address: PTE ptr is done later by
 +		 * apply_to_page_range(). 
 +		 */
-+		v->val = __pte_val(pfn_pte_ma(mfn, prot));
++		v->val = __pte_val(pfn_pte_ma(mfn, prot)) | _PAGE_IO;
 +
 +		mfn++;
 +		address += PAGE_SIZE; 
@@ -19791,25 +20223,10 @@
 +		--nrpages;
 +	}
 +}
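
The one functional change in this file is a few hunks up: PTEs built for ioremapped ranges are now tagged with _PAGE_IO, a software PTE flag the Xen patch defines elsewhere to mark mappings that reference machine (I/O) frames rather than the domain's own RAM, so the pseudo-physical translation logic can leave them alone. Software flags like this live in PTE bits the MMU ignores; roughly (bit position illustrative):

#include <stdint.h>
#include <stdio.h>

#define PAGE_PRESENT (1ULL << 0)
#define PAGE_RW      (1ULL << 1)
#define PAGE_IO      (1ULL << 9)	/* x86 leaves bits 9-11 to software */

int main(void)
{
	uint64_t pte = (0xfebf1000ULL & ~0xfffULL)	/* frame address */
		     | PAGE_PRESENT | PAGE_RW | PAGE_IO;

	printf("pte %#llx is %san I/O mapping\n",
	       (unsigned long long)pte, (pte & PAGE_IO) ? "" : "not ");
	return 0;
}
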
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/mm/Makefile tmp-linux-2.6-xen.patch/arch/i386/mm/Makefile
---- pristine-linux-2.6.18.2/arch/i386/mm/Makefile	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/mm/Makefile	2007-07-30 16:35:11.000000000 +0200
-@@ -8,3 +8,11 @@ obj-$(CONFIG_NUMA) += discontig.o
- obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
- obj-$(CONFIG_HIGHMEM) += highmem.o
- obj-$(CONFIG_BOOT_IOREMAP) += boot_ioremap.o
-+
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
-+
-+obj-y		+= hypervisor.o
-+
-+obj-y := $(call cherrypickxen, $(obj-y))
-+endif
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/mm/pageattr.c tmp-linux-2.6-xen.patch/arch/i386/mm/pageattr.c
---- pristine-linux-2.6.18.2/arch/i386/mm/pageattr.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/mm/pageattr.c	2007-10-14 01:51:15.000000000 +0200
-@@ -84,7 +84,7 @@ static void set_pmd_pte(pte_t *kpte, uns
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/mm/pageattr.c
+--- a/arch/i386/mm/pageattr.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/mm/pageattr.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -84,7 +84,7 @@
  	unsigned long flags;
  
  	set_pte_atomic(kpte, pte); 	/* change init_mm */
@@ -19818,158 +20235,50 @@
  		return;
  
  	spin_lock_irqsave(&pgd_lock, flags);
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/mm/pgtable.c tmp-linux-2.6-xen.patch/arch/i386/mm/pgtable.c
---- pristine-linux-2.6.18.2/arch/i386/mm/pgtable.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/mm/pgtable.c	2007-10-14 01:51:15.000000000 +0200
-@@ -12,6 +12,7 @@
- #include <linux/slab.h>
- #include <linux/pagemap.h>
- #include <linux/spinlock.h>
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/mm/pgtable-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/mm/pgtable-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,725 @@
++/*
++ *  linux/arch/i386/mm/pgtable.c
++ */
++
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/mm.h>
++#include <linux/swap.h>
++#include <linux/smp.h>
++#include <linux/highmem.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/spinlock.h>
 +#include <linux/module.h>
- 
- #include <asm/system.h>
- #include <asm/pgtable.h>
-@@ -137,6 +138,10 @@ void set_pmd_pfn(unsigned long vaddr, un
- 	__flush_tlb_one(vaddr);
- }
- 
-+static int nr_fixmaps = 0;
-+unsigned long __FIXADDR_TOP = 0xfffff000;
-+EXPORT_SYMBOL(__FIXADDR_TOP);
 +
- void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
- {
- 	unsigned long address = __fix_to_virt(idx);
-@@ -146,6 +151,13 @@ void __set_fixmap (enum fixed_addresses 
- 		return;
- 	}
- 	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
-+	nr_fixmaps++;
-+}
++#include <asm/system.h>
++#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
++#include <asm/fixmap.h>
++#include <asm/e820.h>
++#include <asm/tlb.h>
++#include <asm/tlbflush.h>
++#include <asm/io.h>
++#include <asm/mmu_context.h>
 +
-+void set_fixaddr_top(unsigned long top)
-+{
-+	BUG_ON(nr_fixmaps > 0);
-+	__FIXADDR_TOP = top - PAGE_SIZE;
- }
- 
- pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-@@ -214,9 +226,10 @@ void pgd_ctor(void *pgd, kmem_cache_t *c
- 		spin_lock_irqsave(&pgd_lock, flags);
- 	}
- 
--	clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
--			swapper_pg_dir + USER_PTRS_PER_PGD,
--			KERNEL_PGD_PTRS);
-+	if (PTRS_PER_PMD == 1 || HAVE_SHARED_KERNEL_PMD)
-+		clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
-+				swapper_pg_dir + USER_PTRS_PER_PGD,
-+				KERNEL_PGD_PTRS);
- 	if (PTRS_PER_PMD > 1)
- 		return;
- 
-@@ -248,6 +261,30 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
- 			goto out_oom;
- 		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
- 	}
++#include <xen/features.h>
++#include <asm/hypervisor.h>
 +
-+	if (!HAVE_SHARED_KERNEL_PMD) {
-+		unsigned long flags;
++static void pgd_test_and_unpin(pgd_t *pgd);
 +
-+		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
-+			pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
-+			if (!pmd)
-+				goto out_oom;
-+			set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
-+		}
-+
-+		spin_lock_irqsave(&pgd_lock, flags);
-+		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
-+			unsigned long v = (unsigned long)i << PGDIR_SHIFT;
-+			pgd_t *kpgd = pgd_offset_k(v);
-+			pud_t *kpud = pud_offset(kpgd, v);
-+			pmd_t *kpmd = pmd_offset(kpud, v);
-+			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
-+			memcpy(pmd, kpmd, PAGE_SIZE);
-+		}
-+		pgd_list_add(pgd);
-+		spin_unlock_irqrestore(&pgd_lock, flags);
-+	}
-+
- 	return pgd;
- 
- out_oom:
-@@ -262,9 +299,23 @@ void pgd_free(pgd_t *pgd)
- 	int i;
- 
- 	/* in the PAE case user pgd entries are overwritten before usage */
--	if (PTRS_PER_PMD > 1)
--		for (i = 0; i < USER_PTRS_PER_PGD; ++i)
--			kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
-+	if (PTRS_PER_PMD > 1) {
-+		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
-+			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
-+			kmem_cache_free(pmd_cache, pmd);
-+		}
-+		if (!HAVE_SHARED_KERNEL_PMD) {
-+			unsigned long flags;
-+			spin_lock_irqsave(&pgd_lock, flags);
-+			pgd_list_del(pgd);
-+			spin_unlock_irqrestore(&pgd_lock, flags);
-+			for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
-+				pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
-+				memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
-+				kmem_cache_free(pmd_cache, pmd);
-+			}
-+		}
-+	}
- 	/* in the non-PAE case, free_pgtables() clears user pgd entries */
- 	kmem_cache_free(pgd_cache, pgd);
- }
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/mm/pgtable-xen.c tmp-linux-2.6-xen.patch/arch/i386/mm/pgtable-xen.c
---- pristine-linux-2.6.18.2/arch/i386/mm/pgtable-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/mm/pgtable-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,727 @@
-+/*
-+ *  linux/arch/i386/mm/pgtable.c
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/mm.h>
-+#include <linux/swap.h>
-+#include <linux/smp.h>
-+#include <linux/highmem.h>
-+#include <linux/slab.h>
-+#include <linux/pagemap.h>
-+#include <linux/spinlock.h>
-+#include <linux/module.h>
-+
-+#include <asm/system.h>
-+#include <asm/pgtable.h>
-+#include <asm/pgalloc.h>
-+#include <asm/fixmap.h>
-+#include <asm/e820.h>
-+#include <asm/tlb.h>
-+#include <asm/tlbflush.h>
-+#include <asm/io.h>
-+#include <asm/mmu_context.h>
-+
-+#include <xen/features.h>
-+#include <asm/hypervisor.h>
-+
-+static void pgd_test_and_unpin(pgd_t *pgd);
-+
-+void show_mem(void)
-+{
-+	int total = 0, reserved = 0;
-+	int shared = 0, cached = 0;
-+	int highmem = 0;
-+	struct page *page;
-+	pg_data_t *pgdat;
-+	unsigned long i;
-+	unsigned long flags;
++void show_mem(void)
++{
++	int total = 0, reserved = 0;
++	int shared = 0, cached = 0;
++	int highmem = 0;
++	struct page *page;
++	pg_data_t *pgdat;
++	unsigned long i;
++	unsigned long flags;
 +
 +	printk(KERN_INFO "Mem-info:\n");
 +	show_free_areas();
@@ -20006,87 +20315,6 @@
 +}
 +
 +/*
-+ * Associate a virtual page frame with a given physical page frame 
-+ * and protection flags for that frame.
-+ */ 
-+static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
-+{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
-+
-+	pgd = swapper_pg_dir + pgd_index(vaddr);
-+	if (pgd_none(*pgd)) {
-+		BUG();
-+		return;
-+	}
-+	pud = pud_offset(pgd, vaddr);
-+	if (pud_none(*pud)) {
-+		BUG();
-+		return;
-+	}
-+	pmd = pmd_offset(pud, vaddr);
-+	if (pmd_none(*pmd)) {
-+		BUG();
-+		return;
-+	}
-+	pte = pte_offset_kernel(pmd, vaddr);
-+	if (pgprot_val(flags))
-+		/* <pfn,flags> stored as-is, to permit clearing entries */
-+		set_pte(pte, pfn_pte(pfn, flags));
-+	else
-+		pte_clear(&init_mm, vaddr, pte);
-+
-+	/*
-+	 * It's enough to flush this one mapping.
-+	 * (PGE mappings get flushed as well)
-+	 */
-+	__flush_tlb_one(vaddr);
-+}
-+
-+/*
-+ * Associate a virtual page frame with a given physical page frame 
-+ * and protection flags for that frame.
-+ */ 
-+static void set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
-+			   pgprot_t flags)
-+{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
-+
-+	pgd = swapper_pg_dir + pgd_index(vaddr);
-+	if (pgd_none(*pgd)) {
-+		BUG();
-+		return;
-+	}
-+	pud = pud_offset(pgd, vaddr);
-+	if (pud_none(*pud)) {
-+		BUG();
-+		return;
-+	}
-+	pmd = pmd_offset(pud, vaddr);
-+	if (pmd_none(*pmd)) {
-+		BUG();
-+		return;
-+	}
-+	pte = pte_offset_kernel(pmd, vaddr);
-+	if (pgprot_val(flags))
-+		/* <pfn,flags> stored as-is, to permit clearing entries */
-+		set_pte(pte, pfn_pte_ma(pfn, flags));
-+	else
-+		pte_clear(&init_mm, vaddr, pte);
-+
-+	/*
-+	 * It's enough to flush this one mapping.
-+	 * (PGE mappings get flushed as well)
-+	 */
-+	__flush_tlb_one(vaddr);
-+}
-+
-+/*
 + * Associate a large virtual page frame with a given physical page frame 
 + * and protection flags for that frame. pfn is for the base of the page,
 + * vaddr is what the page gets mapped to - both must be properly aligned. 
@@ -20136,6 +20364,7 @@
 +void __set_fixmap (enum fixed_addresses idx, maddr_t phys, pgprot_t flags)
 +{
 +	unsigned long address = __fix_to_virt(idx);
++	pte_t pte;
 +
 +	if (idx >= __end_of_fixed_addresses) {
 +		BUG();
@@ -20143,16 +20372,16 @@
 +	}
 +	switch (idx) {
 +	case FIX_WP_TEST:
-+#ifdef CONFIG_X86_F00F_BUG
-+	case FIX_F00F_IDT:
-+#endif
 +	case FIX_VDSO:
-+		set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
++		pte = pfn_pte(phys >> PAGE_SHIFT, flags);
 +		break;
 +	default:
-+		set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
++		pte = pfn_pte_ma(phys >> PAGE_SHIFT, flags);
 +		break;
 +	}
++	if (HYPERVISOR_update_va_mapping(address, pte,
++					 UVMF_INVLPG|UVMF_ALL))
++		BUG();
 +	nr_fixmaps++;
 +}
 +
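
Relative to the old version, __set_fixmap() now builds the PTE first (pfn_pte() for the two fixmaps that map domain RAM, pfn_pte_ma() for everything that is already a machine address) and installs it with one HYPERVISOR_update_va_mapping() hypercall; UVMF_INVLPG|UVMF_ALL asks the hypervisor to flush that single address on all CPUs. A sketch of just the pfn-versus-mfn decision, with an invented p2m lookup:

#include <stdint.h>
#include <stdio.h>

enum fixmap { FIX_WP_TEST, FIX_VDSO, FIX_SHARED_INFO };

static uint64_t p2m_demo(uint64_t pfn)
{
	return pfn + 0x1000;	/* stand-in for the real p2m table */
}

static uint64_t make_pte(enum fixmap idx, uint64_t phys, uint64_t flags)
{
	uint64_t frame = phys >> 12;

	if (idx == FIX_WP_TEST || idx == FIX_VDSO)
		frame = p2m_demo(frame);	/* pfn_pte(): translate pfn */
	/* otherwise phys is already a machine address: pfn_pte_ma() */
	return (frame << 12) | flags;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)make_pte(FIX_VDSO, 0x3000, 3));
	return 0;
}
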
@@ -20506,10 +20735,71 @@
 +	}
 +}
 +
-+static inline void pgd_walk_set_prot(struct page *page, pgprot_t flags)
++static void _pin_lock(struct mm_struct *mm, int lock)
++{
++	if (lock)
++		spin_lock(&mm->page_table_lock);
++#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
++	/* While mm->page_table_lock protects us against insertions and
++	 * removals of higher level page table pages, it doesn't protect
++	 * against updates of pte-s. Such updates, however, require the
++	 * pte pages to be in consistent state (unpinned+writable or
++	 * pinned+readonly). The pinning and attribute changes, however,
++	 * cannot be done atomically, which is why such updates must be
++	 * prevented from happening concurrently.
++	 * Note that no pte lock can ever elsewhere be acquired nesting
++	 * with an already acquired one in the same mm, or with the mm's
++	 * page_table_lock already acquired, as that would break in the
++	 * non-split case (where all these are actually resolving to the
++	 * one page_table_lock). Thus acquiring all of them here is not
++	 * going to result in deadlocks, and the order of acquires
++	 * doesn't matter.
++	 */
++	{
++		pgd_t *pgd = mm->pgd;
++		unsigned g;
++
++		for (g = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
++			pud_t *pud;
++			unsigned u;
++
++			if (pgd_none(*pgd))
++				continue;
++			pud = pud_offset(pgd, 0);
++			for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
++				pmd_t *pmd;
++				unsigned m;
++
++				if (pud_none(*pud))
++					continue;
++				pmd = pmd_offset(pud, 0);
++				for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
++					spinlock_t *ptl;
++
++					if (pmd_none(*pmd))
++						continue;
++					ptl = pte_lockptr(0, pmd);
++					if (lock)
++						spin_lock(ptl);
++					else
++						spin_unlock(ptl);
++				}
++			}
++		}
++	}
++#endif
++	if (!lock)
++		spin_unlock(&mm->page_table_lock);
++}
++#define pin_lock(mm) _pin_lock(mm, 1)
++#define pin_unlock(mm) _pin_lock(mm, 0)
++
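
With split pte locks in effect there is no single lock that freezes every pte page of an mm, so _pin_lock() takes them all: safe, as the comment argues, because no path nests two pte locks of one mm, so grabbing the whole set in any order cannot deadlock. The shape of the walk, shrunk to a toy one-level table guarded by pthread mutexes:

#include <pthread.h>
#include <stdio.h>

#define NPMD 4	/* toy scale: one lock per pte page */

static pthread_mutex_t ptl[NPMD];

static void pin_lock_all(int lock)
{
	for (int m = 0; m < NPMD; m++) {
		if (lock)
			pthread_mutex_lock(&ptl[m]);
		else
			pthread_mutex_unlock(&ptl[m]);
	}
}

int main(void)
{
	for (int m = 0; m < NPMD; m++)
		pthread_mutex_init(&ptl[m], NULL);
	pin_lock_all(1);	/* nothing can touch any pte meanwhile */
	puts("all pte locks held; flip pinned/unpinned state here");
	pin_lock_all(0);
	return 0;
}
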
++#define PIN_BATCH 4
++static DEFINE_PER_CPU(multicall_entry_t[PIN_BATCH], pb_mcl);
++
++static inline unsigned int pgd_walk_set_prot(struct page *page, pgprot_t flags,
++                                             unsigned int cpu, unsigned seq)
 +{
 +	unsigned long pfn = page_to_pfn(page);
-+	int rc;
 +
 +	if (PageHighMem(page)) {
 +		if (pgprot_val(flags) & _PAGE_RW)
@@ -20517,12 +20807,18 @@
 +		else
 +			set_bit(PG_pinned, &page->flags);
 +	} else {
-+		rc = HYPERVISOR_update_va_mapping(
-+			(unsigned long)__va(pfn << PAGE_SHIFT),
-+			pfn_pte(pfn, flags), 0);
-+		if (rc)
-+			BUG();
++		MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq,
++				(unsigned long)__va(pfn << PAGE_SHIFT),
++				pfn_pte(pfn, flags), 0);
++		if (unlikely(++seq == PIN_BATCH)) {
++			if (unlikely(HYPERVISOR_multicall_check(per_cpu(pb_mcl, cpu),
++		                                                PIN_BATCH, NULL)))
++				BUG();
++			seq = 0;
++		}
 +	}
++
++	return seq;
 +}
 +
 +static void pgd_walk(pgd_t *pgd_base, pgprot_t flags)
@@ -20530,37 +20826,48 @@
 +	pgd_t *pgd = pgd_base;
 +	pud_t *pud;
 +	pmd_t *pmd;
-+	int    g, u, m, rc;
++	int    g, u, m;
++	unsigned int cpu, seq;
 +
 +	if (xen_feature(XENFEAT_auto_translated_physmap))
 +		return;
 +
-+	for (g = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
++	cpu = get_cpu();
++
++	for (g = 0, seq = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
 +		if (pgd_none(*pgd))
 +			continue;
 +		pud = pud_offset(pgd, 0);
 +		if (PTRS_PER_PUD > 1) /* not folded */
-+			pgd_walk_set_prot(virt_to_page(pud),flags);
++			seq = pgd_walk_set_prot(virt_to_page(pud),flags,cpu,seq);
 +		for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
 +			if (pud_none(*pud))
 +				continue;
 +			pmd = pmd_offset(pud, 0);
 +			if (PTRS_PER_PMD > 1) /* not folded */
-+				pgd_walk_set_prot(virt_to_page(pmd),flags);
++				seq = pgd_walk_set_prot(virt_to_page(pmd),flags,cpu,seq);
 +			for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
 +				if (pmd_none(*pmd))
 +					continue;
-+				pgd_walk_set_prot(pmd_page(*pmd),flags);
++				seq = pgd_walk_set_prot(pmd_page(*pmd),flags,cpu,seq);
 +			}
 +		}
 +	}
 +
-+	rc = HYPERVISOR_update_va_mapping(
-+		(unsigned long)pgd_base,
-+		pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
-+		UVMF_TLB_FLUSH);
-+	if (rc)
++	if (likely(seq != 0)) {
++		MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq,
++			(unsigned long)pgd_base,
++			pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
++			UVMF_TLB_FLUSH);
++		if (unlikely(HYPERVISOR_multicall_check(per_cpu(pb_mcl, cpu),
++		                                        seq + 1, NULL)))
++			BUG();
++	} else if (HYPERVISOR_update_va_mapping((unsigned long)pgd_base,
++			pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
++			UVMF_TLB_FLUSH))
 +		BUG();
++
++	put_cpu();
 +}
 +
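
Where the old walk issued one HYPERVISOR_update_va_mapping() per page-table page, it now queues up to PIN_BATCH entries in a per-CPU array and submits them as a single multicall, folding the TLB flush into the last entry; if the final batch would otherwise be empty, one direct call is made instead. The batch-and-flush control flow, simulated stand-alone:

#include <stdio.h>

#define PIN_BATCH 4

struct op { unsigned long va; int flush; };
static struct op batch[PIN_BATCH];

static void submit(unsigned int n)	/* plays HYPERVISOR_multicall_check */
{
	printf("multicall: %u ops%s\n", n,
	       batch[n - 1].flush ? " (+TLB flush)" : "");
}

static unsigned int queue(unsigned int seq, unsigned long va, int flush)
{
	batch[seq].va = va;
	batch[seq].flush = flush;
	if (++seq == PIN_BATCH) {	/* batch full: submit early */
		submit(PIN_BATCH);
		seq = 0;
	}
	return seq;
}

int main(void)
{
	unsigned int seq = 0;

	for (unsigned long va = 0; va < 10; va++)	/* ten pte pages */
		seq = queue(seq, 0xc0000000UL + (va << 12), 0);
	seq = queue(seq, 0xc0400000UL, 1);		/* the pgd, with flush */
	if (seq)
		submit(seq);				/* final partial batch */
	return 0;
}

Eleven updates thus cost three hypercalls instead of eleven.
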
 +static void __pgd_pin(pgd_t *pgd)
@@ -20588,18 +20895,18 @@
 +{
 +	if (xen_feature(XENFEAT_writable_page_tables))
 +		return;
-+	spin_lock(&mm->page_table_lock);
++	pin_lock(mm);
 +	__pgd_pin(mm->pgd);
-+	spin_unlock(&mm->page_table_lock);
++	pin_unlock(mm);
 +}
 +
 +void mm_unpin(struct mm_struct *mm)
 +{
 +	if (xen_feature(XENFEAT_writable_page_tables))
 +		return;
-+	spin_lock(&mm->page_table_lock);
++	pin_lock(mm);
 +	__pgd_unpin(mm->pgd);
-+	spin_unlock(&mm->page_table_lock);
++	pin_unlock(mm);
 +}
 +
 +void mm_pin_all(void)
@@ -20657,10 +20964,118 @@
 +	    !mm->context.has_foreign_mappings)
 +		mm_unpin(mm);
 +}
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/oprofile/Makefile tmp-linux-2.6-xen.patch/arch/i386/oprofile/Makefile
---- pristine-linux-2.6.18.2/arch/i386/oprofile/Makefile	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/oprofile/Makefile	2007-07-30 16:35:11.000000000 +0200
-@@ -6,7 +6,14 @@ DRIVER_OBJS = $(addprefix ../../../drive
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/mm/pgtable.c
+--- a/arch/i386/mm/pgtable.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/mm/pgtable.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -12,6 +12,7 @@
+ #include <linux/slab.h>
+ #include <linux/pagemap.h>
+ #include <linux/spinlock.h>
++#include <linux/module.h>
+ 
+ #include <asm/system.h>
+ #include <asm/pgtable.h>
+@@ -137,6 +138,10 @@
+ 	__flush_tlb_one(vaddr);
+ }
+ 
++static int nr_fixmaps = 0;
++unsigned long __FIXADDR_TOP = 0xfffff000;
++EXPORT_SYMBOL(__FIXADDR_TOP);
++
+ void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
+ {
+ 	unsigned long address = __fix_to_virt(idx);
+@@ -146,6 +151,13 @@
+ 		return;
+ 	}
+ 	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
++	nr_fixmaps++;
++}
++
++void set_fixaddr_top(unsigned long top)
++{
++	BUG_ON(nr_fixmaps > 0);
++	__FIXADDR_TOP = top - PAGE_SIZE;
+ }
+ 
+ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+@@ -214,9 +226,10 @@
+ 		spin_lock_irqsave(&pgd_lock, flags);
+ 	}
+ 
+-	clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
+-			swapper_pg_dir + USER_PTRS_PER_PGD,
+-			KERNEL_PGD_PTRS);
++	if (PTRS_PER_PMD == 1 || HAVE_SHARED_KERNEL_PMD)
++		clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
++				swapper_pg_dir + USER_PTRS_PER_PGD,
++				KERNEL_PGD_PTRS);
+ 	if (PTRS_PER_PMD > 1)
+ 		return;
+ 
+@@ -248,6 +261,30 @@
+ 			goto out_oom;
+ 		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
+ 	}
++
++	if (!HAVE_SHARED_KERNEL_PMD) {
++		unsigned long flags;
++
++		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++			pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
++			if (!pmd)
++				goto out_oom;
++			set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
++		}
++
++		spin_lock_irqsave(&pgd_lock, flags);
++		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++			unsigned long v = (unsigned long)i << PGDIR_SHIFT;
++			pgd_t *kpgd = pgd_offset_k(v);
++			pud_t *kpud = pud_offset(kpgd, v);
++			pmd_t *kpmd = pmd_offset(kpud, v);
++			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++			memcpy(pmd, kpmd, PAGE_SIZE);
++		}
++		pgd_list_add(pgd);
++		spin_unlock_irqrestore(&pgd_lock, flags);
++	}
++
+ 	return pgd;
+ 
+ out_oom:
+@@ -262,9 +299,23 @@
+ 	int i;
+ 
+ 	/* in the PAE case user pgd entries are overwritten before usage */
+-	if (PTRS_PER_PMD > 1)
+-		for (i = 0; i < USER_PTRS_PER_PGD; ++i)
+-			kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
++	if (PTRS_PER_PMD > 1) {
++		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
++			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++			kmem_cache_free(pmd_cache, pmd);
++		}
++		if (!HAVE_SHARED_KERNEL_PMD) {
++			unsigned long flags;
++			spin_lock_irqsave(&pgd_lock, flags);
++			pgd_list_del(pgd);
++			spin_unlock_irqrestore(&pgd_lock, flags);
++			for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++				pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++				memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
++				kmem_cache_free(pmd_cache, pmd);
++			}
++		}
++	}
+ 	/* in the non-PAE case, free_pgtables() clears user pgd entries */
+ 	kmem_cache_free(pgd_cache, pgd);
+ }
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/oprofile/Makefile
+--- a/arch/i386/oprofile/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/oprofile/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -6,7 +6,14 @@
  		oprofilefs.o oprofile_stats.o  \
  		timer_int.o )
  
@@ -20675,9 +21090,9 @@
  					   op_model_ppro.o op_model_p4.o
  oprofile-$(CONFIG_X86_IO_APIC)		+= nmi_timer_int.o
 +endif
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/oprofile/xenoprof.c tmp-linux-2.6-xen.patch/arch/i386/oprofile/xenoprof.c
---- pristine-linux-2.6.18.2/arch/i386/oprofile/xenoprof.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/oprofile/xenoprof.c	2007-07-30 16:35:11.000000000 +0200
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/oprofile/xenoprof.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/oprofile/xenoprof.c	Wed Sep 10 10:54:08 2008 +0100
 @@ -0,0 +1,179 @@
 +/**
 + * @file xenoprof.c
@@ -20733,8 +21148,8 @@
 +		counter.kernel    = (uint32_t)counter_config[i].kernel;
 +		counter.user      = (uint32_t)counter_config[i].user;
 +		counter.unit_mask = (uint64_t)counter_config[i].unit_mask;
-+		HYPERVISOR_xenoprof_op(XENOPROF_counter, 
-+				       &counter);
++		WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_counter,
++					       &counter));
 +	}
 +}
 +
@@ -20858,10 +21273,24 @@
 +{
 +	xenoprofile_exit();
 +}
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/pci/irq-xen.c tmp-linux-2.6-xen.patch/arch/i386/pci/irq-xen.c
---- pristine-linux-2.6.18.2/arch/i386/pci/irq-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/pci/irq-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,1205 @@
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/pci/Makefile
+--- a/arch/i386/pci/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/pci/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -3,6 +3,10 @@
+ obj-$(CONFIG_PCI_BIOS)		+= pcbios.o
+ obj-$(CONFIG_PCI_MMCONFIG)	+= mmconfig.o direct.o
+ obj-$(CONFIG_PCI_DIRECT)	+= direct.o
++
++# pcifront should be after pcbios.o, mmconfig.o, and direct.o as it should only
++# take over if direct access to the PCI bus is unavailable
++obj-$(CONFIG_XEN_PCIDEV_FRONTEND)	+= pcifront.o
+ 
+ pci-y				:= fixup.o
+ pci-$(CONFIG_ACPI)		+= acpi.o
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/pci/irq-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/pci/irq-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1211 @@
 +/*
 + *	Low-Level PCI Support for PC -- Routing of Interrupts
 + *
@@ -21123,13 +21552,13 @@
 + */
 +static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
 +{
-+	static const unsigned int pirqmap[4] = { 3, 2, 5, 1 };
++	static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };
 +	return read_config_nybble(router, 0x55, pirqmap[pirq-1]);
 +}
 +
 +static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
 +{
-+	static const unsigned int pirqmap[4] = { 3, 2, 5, 1 };
++	static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };
 +	write_config_nybble(router, 0x55, pirqmap[pirq-1], irq);
 +	return 1;
 +}
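
The VIA 586 tables grow from four entries to five: both functions index pirqmap with pirq-1, and link values of 5 evidently occur on these routers, so the old four-entry arrays were read one element past their end for pirq 5. The new fifth entry routes it through the same config nybble as pirq 1. Spelled out:

#include <stdio.h>

int main(void)
{
	/* nybble selector relative to config offset 0x55, per link */
	static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };

	for (int pirq = 1; pirq <= 5; pirq++)
		printf("pirq %d -> nybble %u\n", pirq, pirqmap[pirq - 1]);
	return 0;
}
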
@@ -21411,6 +21840,12 @@
 +		case PCI_DEVICE_ID_INTEL_ICH8_2:
 +		case PCI_DEVICE_ID_INTEL_ICH8_3:
 +		case PCI_DEVICE_ID_INTEL_ICH8_4:
++		case PCI_DEVICE_ID_INTEL_ICH9_0:
++		case PCI_DEVICE_ID_INTEL_ICH9_1:
++		case PCI_DEVICE_ID_INTEL_ICH9_2:
++		case PCI_DEVICE_ID_INTEL_ICH9_3:
++		case PCI_DEVICE_ID_INTEL_ICH9_4:
++		case PCI_DEVICE_ID_INTEL_ICH9_5:
 +			r->name = "PIIX/ICH";
 +			r->get = pirq_piix_get;
 +			r->set = pirq_piix_set;
@@ -22067,32 +22502,9 @@
 +
 +	return count;
 +}
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/pci/Makefile tmp-linux-2.6-xen.patch/arch/i386/pci/Makefile
---- pristine-linux-2.6.18.2/arch/i386/pci/Makefile	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/pci/Makefile	2007-07-30 16:35:11.000000000 +0200
-@@ -4,6 +4,10 @@ obj-$(CONFIG_PCI_BIOS)		+= pcbios.o
- obj-$(CONFIG_PCI_MMCONFIG)	+= mmconfig.o direct.o
- obj-$(CONFIG_PCI_DIRECT)	+= direct.o
- 
-+# pcifront should be after pcbios.o, mmconfig.o, and direct.o as it should only
-+# take over if direct access to the PCI bus is unavailable
-+obj-$(CONFIG_XEN_PCIDEV_FRONTEND)	+= pcifront.o
-+
- pci-y				:= fixup.o
- pci-$(CONFIG_ACPI)		+= acpi.o
- pci-y				+= legacy.o irq.o
-@@ -12,3 +16,8 @@ pci-$(CONFIG_X86_VISWS)		:= visws.o fixu
- pci-$(CONFIG_X86_NUMAQ)		:= numa.o irq.o
- 
- obj-y				+= $(pci-y) common.o
-+
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
-+obj-y := $(call cherrypickxen, $(obj-y))
-+endif
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/pci/pcifront.c tmp-linux-2.6-xen.patch/arch/i386/pci/pcifront.c
---- pristine-linux-2.6.18.2/arch/i386/pci/pcifront.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/pci/pcifront.c	2007-07-30 16:35:11.000000000 +0200
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/pci/pcifront.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/pci/pcifront.c	Wed Sep 10 10:54:08 2008 +0100
 @@ -0,0 +1,55 @@
 +/*
 + * PCI Frontend Stub - puts some "dummy" functions in to the Linux x86 PCI core
@@ -22149,68 +22561,48 @@
 +}
 +
 +arch_initcall(pcifront_x86_stub_init);
-diff -Nurp pristine-linux-2.6.18.2/arch/i386/power/Makefile tmp-linux-2.6-xen.patch/arch/i386/power/Makefile
---- pristine-linux-2.6.18.2/arch/i386/power/Makefile	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/i386/power/Makefile	2007-07-30 16:35:11.000000000 +0200
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/power/Makefile
+--- a/arch/i386/power/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/power/Makefile	Wed Sep 10 10:54:08 2008 +0100
 @@ -1,2 +1,4 @@
 -obj-$(CONFIG_PM)		+= cpu.o
-+obj-$(CONFIG_PM_LEGACY)		+= cpu.o
++obj-$(subst m,y,$(CONFIG_APM))	+= cpu.o
 +obj-$(CONFIG_SOFTWARE_SUSPEND)	+= cpu.o
 +obj-$(CONFIG_ACPI_SLEEP)	+= cpu.o
  obj-$(CONFIG_SOFTWARE_SUSPEND)	+= swsusp.o
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/dig/setup.c tmp-linux-2.6-xen.patch/arch/ia64/dig/setup.c
---- pristine-linux-2.6.18.2/arch/ia64/dig/setup.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/dig/setup.c	2007-07-30 16:35:11.000000000 +0200
-@@ -24,6 +24,8 @@
- #include <asm/machvec.h>
- #include <asm/system.h>
+diff -r d894e36cfc30 -r 0aa021803deb arch/i386/power/cpu.c
+--- a/arch/i386/power/cpu.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/i386/power/cpu.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -62,11 +62,12 @@
  
-+#include <xen/xencons.h>
-+
- void __init
- dig_setup (char **cmdline_p)
+ static void fix_processor_context(void)
  {
-@@ -67,4 +69,19 @@ dig_setup (char **cmdline_p)
- 	screen_info.orig_video_mode = 3;	/* XXX fake */
- 	screen_info.orig_video_isVGA = 1;	/* XXX fake */
- 	screen_info.orig_video_ega_bx = 3;	/* XXX fake */
-+#ifdef CONFIG_XEN
-+	if (!is_running_on_xen() || !is_initial_xendomain())
-+		return;
-+
-+	if (xen_start_info->console.dom0.info_size >=
-+	    sizeof(struct dom0_vga_console_info)) {
-+		const struct dom0_vga_console_info *info =
-+		        (struct dom0_vga_console_info *)(
-+		                (char *)xen_start_info +
-+		                xen_start_info->console.dom0.info_off);
-+		dom0_init_screen_info(info);
-+	}
-+	xen_start_info->console.domU.mfn = 0;
-+	xen_start_info->console.domU.evtchn = 0;
++#ifndef CONFIG_X86_NO_TSS
+ 	int cpu = smp_processor_id();
+ 	struct tss_struct * t = &per_cpu(init_tss, cpu);
+ 
+ 	set_tss_desc(cpu,t);	/* This just modifies memory; should not be necessary. But... This is necessary, because 386 hardware has concept of busy TSS or some similar stupidity. */
+-
 +#endif
- }
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/Kconfig tmp-linux-2.6-xen.patch/arch/ia64/Kconfig
---- pristine-linux-2.6.18.2/arch/ia64/Kconfig	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/Kconfig	2007-07-30 16:35:11.000000000 +0200
-@@ -58,6 +58,34 @@ config GENERIC_IOMAP
+ 	load_TR_desc();				/* This does ltr */
+ 	load_LDT(&current->active_mm->context);	/* This does lldt */
+ 
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/Kconfig
+--- a/arch/ia64/Kconfig	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/Kconfig	Wed Sep 10 10:54:08 2008 +0100
+@@ -57,6 +57,28 @@
+ config GENERIC_IOMAP
  	bool
  	default y
- 
++
 +config XEN
 +	bool "Xen hypervisor support"
 +	default y
++	select XEN_XENCOMM
 +	help
 +	  Enable Xen hypervisor support.  Resulting kernel runs
 +	  both as a guest OS on Xen and natively on hardware.
 +
-+config XEN_IA64_VDSO_PARAVIRT
-+	bool
-+	depends on XEN && !ITANIUM
-+	default y
-+	help
-+	  vDSO paravirtualization
-+
 +config XEN_IA64_EXPOSE_P2M
 +	bool "Xen/IA64 exposure p2m table"
 +	depends on XEN
@@ -22224,14 +22616,55 @@
 +	default y
 +	help
 +	  use dtr to map the exposed p2m table
-+
+ 
  config SCHED_NO_NO_OMIT_FRAME_POINTER
  	bool
- 	default y
-@@ -465,6 +493,21 @@ config PCI_DOMAINS
+@@ -132,6 +154,10 @@
+ 
+ config IA64_HP_SIM
+ 	bool "Ski-simulator"
++
++config IA64_XEN
++	bool "Xen guest"
++	depends on XEN
+ 
+ endchoice
+ 
+@@ -431,6 +457,29 @@
+ 
+ source "drivers/sn/Kconfig"
+ 
++config KEXEC
++	bool "kexec system call (EXPERIMENTAL)"
++	depends on EXPERIMENTAL && !IA64_HP_SIM && (!SMP || HOTPLUG_CPU) && !XEN_UNPRIVILEGED_GUEST
++	help
++	  kexec is a system call that implements the ability to shut down your
++	  current kernel, and to start another kernel.  It is like a reboot
++	  but it is independent of the system firmware.  And like a reboot
++	  you can start any kernel with it, not just Linux.
++
++	  The name comes from the similarity to the exec system call.
++
++	  It is an ongoing process to be certain the hardware in a machine
++	  is properly shut down, so do not be surprised if this code does not
++	  initially work for you.  It may help to enable device hotplugging
++	  support.  As of this writing the exact hardware interface is
++	  strongly in flux, so no good recommendation can be made.
++
++config CRASH_DUMP
++	  bool "kernel crash dumps (EXPERIMENTAL)"
++	  depends on EXPERIMENTAL && IA64_MCA_RECOVERY && !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
++	  help
++	    Generate crash dump after being started by kexec.
++
+ source "drivers/firmware/Kconfig"
+ 
+ source "fs/Kconfig.binfmt"
+@@ -464,6 +513,21 @@
+ config PCI_DOMAINS
  	bool
  	default PCI
- 
++
 +config XEN_PCIDEV_FRONTEND
 +	bool "Xen PCI Frontend"
 +	depends on PCI && XEN
@@ -22246,11 +22679,10 @@
 +	default n
 +	help
 +	  Enables some debug statements within the PCI Frontend.
-+
+ 
  source "drivers/pci/pcie/Kconfig"
  
- source "drivers/pci/Kconfig"
-@@ -528,3 +571,13 @@ source "arch/ia64/Kconfig.debug"
+@@ -528,3 +592,16 @@
  source "security/Kconfig"
  
  source "crypto/Kconfig"
@@ -22261,186 +22693,1210 @@
 +if XEN
 +config XEN_SMPBOOT
 +	default n
++
++config XEN_DEVMEM
++	default n
 +endif
 +
 +source "drivers/xen/Kconfig"
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/kernel/asm-offsets.c tmp-linux-2.6-xen.patch/arch/ia64/kernel/asm-offsets.c
---- pristine-linux-2.6.18.2/arch/ia64/kernel/asm-offsets.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/kernel/asm-offsets.c	2007-07-30 16:35:11.000000000 +0200
-@@ -268,4 +268,29 @@ void foo(void)
- 	DEFINE(IA64_TIME_SOURCE_MMIO64, TIME_SOURCE_MMIO64);
- 	DEFINE(IA64_TIME_SOURCE_MMIO32, TIME_SOURCE_MMIO32);
- 	DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET, offsetof (struct timespec, tv_nsec));
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/Makefile
+--- a/arch/ia64/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -36,6 +36,12 @@
+ endif
+ 
+ CFLAGS += $(cflags-y)
 +
-+#ifdef CONFIG_XEN
-+	BLANK();
++cppflags-$(CONFIG_XEN) += \
++	-D__XEN_INTERFACE_VERSION__=$(CONFIG_XEN_INTERFACE_VERSION)
 +
-+#define DEFINE_MAPPED_REG_OFS(sym, field) \
-+	DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(mapped_regs_t, field)))
++CPPFLAGS += $(cppflags-y)
 +
-+	DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr);
-+	DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr);
-+	DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip);
-+	DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs);
-+	DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs);
-+	DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr);
-+	DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa);
-+	DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa);
-+	DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim);
-+	DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha);
-+	DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir);
-+	DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
-+	DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
-+	DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]);
-+	DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
-+	DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat);
-+	DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat);    
-+#endif /* CONFIG_XEN */
- }
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/kernel/entry.S tmp-linux-2.6-xen.patch/arch/ia64/kernel/entry.S
---- pristine-linux-2.6.18.2/arch/ia64/kernel/entry.S	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/kernel/entry.S	2007-07-30 16:35:11.000000000 +0200
-@@ -180,7 +180,7 @@ END(sys_clone)
-  *	called.  The code starting at .map relies on this.  The rest of the code
-  *	doesn't care about the interrupt masking status.
-  */
--GLOBAL_ENTRY(ia64_switch_to)
-+GLOBAL_ENTRY(__ia64_switch_to)
- 	.prologue
- 	alloc r16=ar.pfs,1,0,0,0
- 	DO_SAVE_SWITCH_STACK
-@@ -234,7 +234,7 @@ GLOBAL_ENTRY(ia64_switch_to)
- 	;;
- 	srlz.d
- 	br.cond.sptk .done
--END(ia64_switch_to)
-+END(__ia64_switch_to)
+ head-y := arch/ia64/kernel/head.o arch/ia64/kernel/init_task.o
  
- /*
-  * Note that interrupts are enabled during save_switch_stack and load_switch_stack.  This
-@@ -375,7 +375,7 @@ END(save_switch_stack)
-  *	- b7 holds address to return to
-  *	- must not touch r8-r11
-  */
--ENTRY(load_switch_stack)
-+GLOBAL_ENTRY(load_switch_stack)
- 	.prologue
- 	.altrp b7
+ libs-y				+= arch/ia64/lib/
+@@ -45,7 +51,9 @@
+ core-$(CONFIG_IA64_GENERIC) 	+= arch/ia64/dig/
+ core-$(CONFIG_IA64_HP_ZX1)	+= arch/ia64/dig/
+ core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
++core-$(CONFIG_IA64_XEN)		+= arch/ia64/dig/
+ core-$(CONFIG_IA64_SGI_SN2)	+= arch/ia64/sn/
++core-$(CONFIG_XEN)		+= arch/ia64/xen/
  
-@@ -510,7 +510,7 @@ END(clone)
- 	 * because some system calls (such as ia64_execve) directly
- 	 * manipulate ar.pfs.
- 	 */
--GLOBAL_ENTRY(ia64_trace_syscall)
-+GLOBAL_ENTRY(__ia64_trace_syscall)
- 	PT_REGS_UNWIND_INFO(0)
- 	/*
- 	 * We need to preserve the scratch registers f6-f11 in case the system
-@@ -582,7 +582,7 @@ strace_error:
- (p6)	mov r10=-1
- (p6)	mov r8=r9
- 	br.cond.sptk .strace_save_retval
--END(ia64_trace_syscall)
-+END(__ia64_trace_syscall)
+ drivers-$(CONFIG_PCI)		+= arch/ia64/pci/
+ drivers-$(CONFIG_IA64_HP_SIM)	+= arch/ia64/hp/sim/
+@@ -75,8 +83,8 @@
+ boot:	lib/lib.a vmlinux
+ 	$(Q)$(MAKE) $(build)=$(boot) $@
  
- 	/*
- 	 * When traced and returning from sigreturn, we invoke syscall_trace but then
-@@ -601,7 +601,7 @@ GLOBAL_ENTRY(ia64_strace_leave_kernel)
- .ret4:	br.cond.sptk ia64_leave_kernel
- END(ia64_strace_leave_kernel)
+-install: vmlinux.gz
+-	sh $(srctree)/arch/ia64/install.sh $(KERNELRELEASE) $< System.map "$(INSTALL_PATH)"
++install:
++	-yes | sh $(srctree)/arch/ia64/install.sh $(KERNELRELEASE) vmlinux.gz System.map "$(INSTALL_PATH)"
  
--GLOBAL_ENTRY(ia64_ret_from_clone)
-+GLOBAL_ENTRY(__ia64_ret_from_clone)
- 	PT_REGS_UNWIND_INFO(0)
- {	/*
- 	 * Some versions of gas generate bad unwind info if the first instruction of a
-@@ -627,7 +627,7 @@ GLOBAL_ENTRY(ia64_ret_from_clone)
- 	cmp.ne p6,p0=r2,r0
- (p6)	br.cond.spnt .strace_check_retval
- 	;;					// added stop bits to prevent r8 dependency
--END(ia64_ret_from_clone)
-+END(__ia64_ret_from_clone)
- 	// fall through
- GLOBAL_ENTRY(ia64_ret_from_syscall)
- 	PT_REGS_UNWIND_INFO(0)
-@@ -635,8 +635,11 @@ GLOBAL_ENTRY(ia64_ret_from_syscall)
- 	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
- 	mov r10=r0				// clear error indication in r10
- (p7)	br.cond.spnt handle_syscall_error	// handle potential syscall failure
-+	;;
-+	// don't fall through, ia64_leave_syscall may be #define'd
-+	br.cond.sptk.few ia64_leave_syscall
-+	;;
- END(ia64_ret_from_syscall)
--	// fall through
- /*
-  * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
-  *	need to switch to bank 0 and doesn't restore the scratch registers.
-@@ -681,7 +684,7 @@ END(ia64_ret_from_syscall)
-  *	      ar.csd: cleared
-  *	      ar.ssd: cleared
-  */
--ENTRY(ia64_leave_syscall)
-+GLOBAL_ENTRY(__ia64_leave_syscall)
- 	PT_REGS_UNWIND_INFO(0)
- 	/*
- 	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
-@@ -789,7 +792,7 @@ ENTRY(ia64_leave_syscall)
- 	mov.m ar.ssd=r0			// M2   clear ar.ssd
- 	mov f11=f0			// F    clear f11
- 	br.cond.sptk.many rbs_switch	// B
--END(ia64_leave_syscall)
-+END(__ia64_leave_syscall)
+ define archhelp
+   echo '* compressed	- Build compressed kernel image'
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/hp/common/sba_iommu.c
+--- a/arch/ia64/hp/common/sba_iommu.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/hp/common/sba_iommu.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -42,6 +42,11 @@
+ #include <asm/system.h>		/* wmb() */
  
- #ifdef CONFIG_IA32_SUPPORT
- GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
-@@ -801,10 +804,13 @@ GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
- 	st8.spill [r2]=r8	// store return value in slot for r8 and set unat bit
- 	.mem.offset 8,0
- 	st8.spill [r3]=r0	// clear error indication in slot for r10 and set unat bit
-+	;;
-+	// don't fall through, ia64_leave_kernel may be #define'd
-+	br.cond.sptk.few ia64_leave_kernel
-+	;;
- END(ia64_ret_from_ia32_execve)
--	// fall through
- #endif /* CONFIG_IA32_SUPPORT */
--GLOBAL_ENTRY(ia64_leave_kernel)
-+GLOBAL_ENTRY(__ia64_leave_kernel)
- 	PT_REGS_UNWIND_INFO(0)
- 	/*
- 	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
-@@ -1135,7 +1141,7 @@ skip_rbs_switch:
- 	ld8 r10=[r3]
- 	br.cond.sptk.many .work_processed_syscall	// re-check
+ #include <asm/acpi-ext.h>
++#include <asm/maddr.h>		/* range_straddles_page_boundary() */
++#ifdef CONFIG_XEN
++#include <xen/gnttab.h>
++#include <asm/gnttab_dma.h>
++#endif
  
--END(ia64_leave_kernel)
-+END(__ia64_leave_kernel)
+ #define PFX "IOC: "
  
- ENTRY(handle_syscall_error)
- 	/*
-@@ -1175,7 +1181,7 @@ END(ia64_invoke_schedule_tail)
- 	 * be set up by the caller.  We declare 8 input registers so the system call
- 	 * args get preserved, in case we need to restart a system call.
- 	 */
--ENTRY(notify_resume_user)
-+GLOBAL_ENTRY(notify_resume_user)
- 	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
- 	alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
- 	mov r9=ar.unat
-@@ -1263,7 +1269,7 @@ ENTRY(sys_rt_sigreturn)
- 	adds sp=16,sp
- 	;;
- 	ld8 r9=[sp]				// load new ar.unat
--	mov.sptk b7=r8,ia64_leave_kernel
-+	mov.sptk b7=r8,__ia64_leave_kernel
- 	;;
+@@ -198,6 +203,9 @@
+ 	void __iomem	*ioc_hpa;	/* I/O MMU base address */
+ 	char		*res_map;	/* resource map, bit == pdir entry */
+ 	u64		*pdir_base;	/* physical base address */
++#ifdef CONFIG_XEN
++	u64		*xen_virt_cache;
++#endif
+ 	unsigned long	ibase;		/* pdir IOV Space base */
+ 	unsigned long	imask;		/* pdir IOV Space mask */
+ 
+@@ -762,14 +770,21 @@
+  * on the vba.
+  */
+ 
+-#if 1
+-#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL)	\
+-						      | 0x8000000000000000ULL)
++#ifndef CONFIG_XEN
++#define sba_io_pdir_entry(ioc, pdir_ptr, vba) *pdir_ptr =	\
++	((virt_to_bus((void *)vba) & ~0xFFFULL) | 0x8000000000000000ULL)
+ #else
+ void SBA_INLINE
+-sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
++sba_io_pdir_entry(struct ioc *ioc, u64 *pdir_ptr, unsigned long vba)
+ {
+-	*pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
++	*pdir_ptr = ((virt_to_bus((void *)vba) & ~0xFFFULL) |
++		    0x80000000000000FFULL);
++#ifdef CONFIG_XEN
++	if (is_running_on_xen()) {
++		int pide = ((u64)pdir_ptr - (u64)ioc->pdir_base) >> 3;
++		ioc->xen_virt_cache[pide] = vba;
++	}
++#endif
+ }
+ #endif
+ 
+@@ -783,6 +798,12 @@
+ mark_clean (void *addr, size_t size)
+ {
+ 	unsigned long pg_addr, end;
++
++#ifdef CONFIG_XEN
++	/* XXX: Bad things happen starting domUs when this is enabled. */
++	if (is_running_on_xen())
++		return;
++#endif
+ 
+ 	pg_addr = PAGE_ALIGN((unsigned long) addr);
+ 	end = (unsigned long) addr + size;
+@@ -850,6 +871,10 @@
+ 		*/
+ 		ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
+ #endif
++#ifdef CONFIG_XEN
++		if (is_running_on_xen())
++			ioc->xen_virt_cache[off] = 0UL;
++#endif
+ 	} else {
+ 		u32 t = get_iovp_order(byte_cnt) + iovp_shift;
+ 
+@@ -864,6 +889,10 @@
+ 			ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
+ #else
+ 			ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
++#endif
++#ifdef CONFIG_XEN
++			if (is_running_on_xen())
++				ioc->xen_virt_cache[off] = 0UL;
+ #endif
+ 			off++;
+ 			byte_cnt -= iovp_size;
+@@ -894,15 +923,29 @@
+ 	unsigned long flags;
+ #endif
+ #ifdef ALLOW_IOV_BYPASS
+-	unsigned long pci_addr = virt_to_phys(addr);
++	unsigned long pci_addr;
++#endif
++
++#ifdef CONFIG_XEN
++	if (is_running_on_xen()) {
++		void* tmp_addr = addr;
++		size_t tmp_size = size;
++		do {
++			gnttab_dma_use_page(virt_to_page(tmp_addr));
++			tmp_addr += PAGE_SIZE;
++			tmp_size -= min(tmp_size, PAGE_SIZE);
++		} while (tmp_size);
++	}
+ #endif
+ 
+ #ifdef ALLOW_IOV_BYPASS
++	pci_addr = virt_to_bus(addr);
+ 	ASSERT(to_pci_dev(dev)->dma_mask);
+ 	/*
+  	** Check if the PCI device can DMA to ptr... if so, just return ptr
+  	*/
+-	if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) {
++	if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0 &&
++                   !range_straddles_page_boundary(__pa(addr), size))) {
+ 		/*
+  		** Device is bit capable of DMA'ing to the buffer...
+ 		** just return the PCI address of ptr
+@@ -944,7 +987,7 @@
+ 
+ 	while (size > 0) {
+ 		ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
+-		sba_io_pdir_entry(pdir_start, (unsigned long) addr);
++		sba_io_pdir_entry(ioc, pdir_start, (unsigned long) addr);
+ 
+ 		DBG_RUN("     pdir 0x%p %lx\n", pdir_start, *pdir_start);
+ 
+@@ -973,14 +1016,58 @@
+ 	void	*addr;
+ 
+ 	if (size <= iovp_size) {
+-		addr = phys_to_virt(ioc->pdir_base[off] &
+-		                    ~0xE000000000000FFFULL);
++#ifdef CONFIG_XEN
++		if (is_running_on_xen())
++			addr = (void *)ioc->xen_virt_cache[off];
++		else
++			addr = bus_to_virt(ioc->pdir_base[off] &
++					   ~0xE000000000000FFFULL);
++#else
++		addr = bus_to_virt(ioc->pdir_base[off] &
++				   ~0xE000000000000FFFULL);
++#endif
+ 		mark_clean(addr, size);
+ 	} else {
+ 		do {
+-			addr = phys_to_virt(ioc->pdir_base[off] &
+-			                    ~0xE000000000000FFFULL);
++#ifdef CONFIG_XEN
++			if (is_running_on_xen())
++				addr = (void *)ioc->xen_virt_cache[off];
++			else
++				addr = bus_to_virt(ioc->pdir_base[off] &
++						   ~0xE000000000000FFFULL);
++#else
++			addr = bus_to_virt(ioc->pdir_base[off] &
++					   ~0xE000000000000FFFULL);
++#endif
+ 			mark_clean(addr, min(size, iovp_size));
++			off++;
++			size -= iovp_size;
++		} while (size > 0);
++	}
++}
++#endif
++
++#ifdef CONFIG_XEN
++static void
++sba_gnttab_dma_unmap_page(struct ioc *ioc, dma_addr_t iova, size_t size)
++{
++	u32 iovp = (u32) SBA_IOVP(ioc,iova);
++	int off = PDIR_INDEX(iovp);
++	struct page *page;
++
++	if (size <= iovp_size) {
++		BUG_ON(!ioc->xen_virt_cache[off]);
++		page = virt_to_page(ioc->xen_virt_cache[off]);
++		__gnttab_dma_unmap_page(page);
++	} else {
++		struct page *last_page = (struct page *)~0UL;
++		do {
++			BUG_ON(!ioc->xen_virt_cache[off]);
++			page = virt_to_page(ioc->xen_virt_cache[off]);
++			if (page != last_page) {
++				__gnttab_dma_unmap_page(page);
++				last_page = page;
++			}
+ 			off++;
+ 			size -= iovp_size;
+ 		} while (size > 0);
+@@ -1018,7 +1105,16 @@
+ 
+ #ifdef ENABLE_MARK_CLEAN
+ 		if (dir == DMA_FROM_DEVICE) {
+-			mark_clean(phys_to_virt(iova), size);
++			mark_clean(bus_to_virt(iova), size);
++		}
++#endif
++#ifdef CONFIG_XEN
++		if (is_running_on_xen()) {
++			do {
++				gnttab_dma_unmap_page(iova);
++				iova += PAGE_SIZE;
++				size -= min(size,PAGE_SIZE);
++			} while (size);
+ 		}
+ #endif
+ 		return;
+@@ -1036,6 +1132,10 @@
+ #ifdef ENABLE_MARK_CLEAN
+ 	if (dir == DMA_FROM_DEVICE)
+ 		sba_mark_clean(ioc, iova, size);
++#endif
++#ifdef CONFIG_XEN
++	if (is_running_on_xen())
++		sba_gnttab_dma_unmap_page(ioc, iova, size);
+ #endif
+ 
+ #if DELAYED_RESOURCE_CNT > 0
+@@ -1102,9 +1202,14 @@
+ 		return NULL;
+ 
+ 	memset(addr, 0, size);
+-	*dma_handle = virt_to_phys(addr);
+ 
+ #ifdef ALLOW_IOV_BYPASS
++#ifdef CONFIG_XEN
++	if (xen_create_contiguous_region((unsigned long)addr, get_order(size),
++					 fls64(dev->coherent_dma_mask)))
++		goto iommu_map;
++#endif
++	*dma_handle = virt_to_bus(addr);
+ 	ASSERT(dev->coherent_dma_mask);
+ 	/*
+  	** Check if the PCI device can DMA to ptr... if so, just return ptr
+@@ -1115,6 +1220,9 @@
+ 
+ 		return addr;
+ 	}
++#ifdef CONFIG_XEN
++iommu_map:
++#endif
+ #endif
+ 
+ 	/*
+@@ -1138,6 +1246,13 @@
+  */
+ void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
+ {
++#if defined(ALLOW_IOV_BYPASS) && defined(CONFIG_XEN)
++	struct ioc *ioc = GET_IOC(dev);
++
++	if (likely((dma_handle & ioc->imask) != ioc->ibase))
++		xen_destroy_contiguous_region((unsigned long)vaddr,
++					      get_order(size));
++#endif
+ 	sba_unmap_single(dev, dma_handle, size, 0);
+ 	free_pages((unsigned long) vaddr, get_order(size));
+ }
+@@ -1219,7 +1334,7 @@
+ 			dma_offset=0;	/* only want offset on first chunk */
+ 			cnt = ROUNDUP(cnt, iovp_size);
+ 			do {
+-				sba_io_pdir_entry(pdirp, vaddr);
++				sba_io_pdir_entry(ioc, pdirp, vaddr);
+ 				vaddr += iovp_size;
+ 				cnt -= iovp_size;
+ 				pdirp++;
+@@ -1406,7 +1521,11 @@
+ 	if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
+ 		for (sg = sglist ; filled < nents ; filled++, sg++){
+ 			sg->dma_length = sg->length;
+-			sg->dma_address = virt_to_phys(sba_sg_address(sg));
++#ifdef CONFIG_XEN
++			sg->dma_address = gnttab_dma_map_page(sg->page) + sg->offset;
++#else
++			sg->dma_address = virt_to_bus(sba_sg_address(sg));
++#endif
+ 		}
+ 		return filled;
+ 	}
+@@ -1429,6 +1548,15 @@
+ #endif
+ 
+ 	prefetch(ioc->res_hint);
++
++#ifdef CONFIG_XEN
++	if (is_running_on_xen()) {
++		int i;
++
++		for (i = 0; i < nents; i++)
++			gnttab_dma_use_page(sglist[i].page);
++	}
++#endif
+ 
+ 	/*
+ 	** First coalesce the chunks and allocate I/O pdir space
+@@ -1562,11 +1690,25 @@
+ 
+ 	memset(ioc->pdir_base, 0, ioc->pdir_size);
+ 
++#ifdef CONFIG_XEN
++	/* The page table needs to be pinned in Xen memory */
++	if (xen_create_contiguous_region((unsigned long)ioc->pdir_base,
++					 get_order(ioc->pdir_size), 0))
++		panic(PFX "Couldn't contiguously map I/O Page Table\n");
++
++	ioc->xen_virt_cache = (void *) __get_free_pages(
++					GFP_KERNEL, get_order(ioc->pdir_size));
++	if (!ioc->xen_virt_cache)
++		panic(PFX "Couldn't allocate Xen virtual address cache\n");
++
++	memset(ioc->xen_virt_cache, 0, ioc->pdir_size);
++#endif
++
+ 	DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __FUNCTION__,
+ 		iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);
+ 
+ 	ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
+-	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
++	WRITE_REG(virt_to_bus(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
+ 
+ 	/*
+ 	** If an AGP device is present, only use half of the IOV space
+@@ -1603,7 +1745,7 @@
+ 		for ( ; (u64) poison_addr < addr + iovp_size; poison_addr += poison_size)
+ 			memcpy(poison_addr, spill_poison, poison_size);
+ 
+-		prefetch_spill_page = virt_to_phys(addr);
++		prefetch_spill_page = virt_to_bus(addr);
+ 
+ 		DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __FUNCTION__, prefetch_spill_page);
+ 	}
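
For orientation on the sba_iommu.c hunks above: under Xen the I/O pdir entries hold machine (bus) addresses, so the unmap path can no longer recover a buffer's kernel virtual address arithmetically; that is what the new xen_virt_cache shadow array is for. A minimal user-space sketch of the shadow-array idea, with invented names (shadow_iommu, map_slot, unmap_slot) rather than the driver's real structures:

    #include <stdint.h>
    #include <stdio.h>

    struct shadow_iommu {
        uint64_t *pdir;       /* what the hardware walks: bus addresses */
        void    **virt_cache; /* parallel array: CPU virtual addresses */
        size_t    nslots;
    };

    static void map_slot(struct shadow_iommu *io, size_t idx,
                         uint64_t bus_addr, void *vaddr)
    {
        io->pdir[idx] = bus_addr | (1ULL << 63); /* valid-style bit */
        io->virt_cache[idx] = vaddr;             /* remembered for unmap */
    }

    static void *unmap_slot(struct shadow_iommu *io, size_t idx)
    {
        void *vaddr = io->virt_cache[idx]; /* what bus_to_virt() cannot give */
        io->pdir[idx] = 0;
        io->virt_cache[idx] = NULL;
        return vaddr;
    }

    int main(void)
    {
        uint64_t pdir[4] = { 0 };
        void *cache[4] = { 0 };
        struct shadow_iommu io = { pdir, cache, 4 };
        int buffer = 0;

        map_slot(&io, 2, 0xdead0000ULL, &buffer);
        printf("recovered %p\n", unmap_slot(&io, 2));
        return 0;
    }

The real driver sizes xen_virt_cache like the pdir itself and clears entries in the free path, as the hunks show.
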
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/kernel/Makefile
+--- a/arch/ia64/kernel/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/kernel/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -28,6 +28,8 @@
+ obj-$(CONFIG_CPU_FREQ)		+= cpufreq/
+ obj-$(CONFIG_IA64_MCA_RECOVERY)	+= mca_recovery.o
+ obj-$(CONFIG_KPROBES)		+= kprobes.o jprobes.o
++obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o crash.o
++obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
+ obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR)	+= uncached.o
+ obj-$(CONFIG_AUDIT)		+= audit.o
+ mca_recovery-y			+= mca_drv.o mca_drv_asm.o
+@@ -61,3 +63,61 @@
+ # We must build gate.so before we can assemble it.
+ # Note: kbuild does not track this dependency due to usage of .incbin
+ $(obj)/gate-data.o: $(obj)/gate.so
++
++#
++# gate page paravirtualization for xen
++#
++obj-$(CONFIG_XEN) += xengate-data.o
++
++ifeq ($(CONFIG_XEN), y)
++# The gate DSO image is built using a special linker script.
++targets += xengate.so xengate-syms.o
++endif
++
++extra-$(CONFIG_XEN) += xengate.so xengate.lds xengate.o
++
++AFLAGS_xengate.o += -D__XEN_IA64_VDSO_PARAVIRT
++$(obj)/xengate.o: $(src)/gate.S FORCE
++	$(call if_changed_dep,as_o_S)
++
++CPPFLAGS_xengate.lds := -P -C -U$(ARCH) -D__XEN_IA64_VDSO_PARAVIRT
++$(obj)/xengate.lds: $(src)/gate.lds.S
++	$(call if_changed_dep,cpp_lds_S)
++
++GATECFLAGS_xengate.so = -shared -s -Wl,-soname=linux-gate.so.1 \
++		     $(call ld-option, -Wl$(comma)--hash-style=sysv)
++$(obj)/xengate.so: $(obj)/xengate.lds $(obj)/xengate.o FORCE
++	$(call if_changed,gate)
++
++ifeq ($(CONFIG_XEN), y)
++$(obj)/built-in.o: $(obj)/xengate-syms.o
++$(obj)/built-in.o: ld_flags += -R $(obj)/xengate-syms.o
++$(obj)/mca_recovery.o: $(obj)/gate-syms.o $(obj)/xengate-syms.o
++endif
++
++GATECFLAGS_xengate-syms.o = -r
++$(obj)/xengate-syms.o: $(obj)/xengate.lds $(obj)/xengate.o FORCE
++	$(call if_changed,gate)
++$(obj)/xengate-data.o: $(obj)/xengate.so
++
++#
++# .tmp_gate.o to calculate padding size for __kernel_syscall_via_epc
++#
++extra-$(CONFIG_XEN) += gate-skip.s .tmp_gate.o
++
++ifeq ($(CONFIG_XEN), y)
++AFLAGS_gate.o += -D__KERNEL_SYSCALL_VIA_EPC_PADDING
++$(obj)/gate.o: $(obj)/gate-skip.s FORCE
++endif
++
++$(obj)/.tmp_gate.o: $(src)/gate.S FORCE
++	$(call if_changed_dep,as_o_S)
++
++quiet_cmd_gate_size = GATE_SIZE	$@
++      cmd_gate_size = $(NM) --extern-only --print-size $(obj)/xengate.o | \
++	$(AWK) '/__kernel_syscall_via_epc/{printf "\t.skip 0x"$$2" - "}' > $@; \
++	$(NM) --extern-only --print-size $(obj)/.tmp_gate.o | \
++	$(AWK) '/__kernel_syscall_via_epc/{printf "0x"$$2"\n"}' >> $@
++
++$(obj)/gate-skip.s: $(obj)/xengate.o $(obj)/.tmp_gate.o FORCE
++	$(call if_changed,gate_size)
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/kernel/acpi.c
+--- a/arch/ia64/kernel/acpi.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/kernel/acpi.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -109,6 +109,10 @@
+ 		return "hpzx1";
+ 	} else if (!strcmp(hdr->oem_id, "SGI")) {
+ 		return "sn2";
++#ifdef CONFIG_XEN
++	} else if (is_running_on_xen() && !strcmp(hdr->oem_id, "XEN")) {
++		return "xen";
++#endif
+ 	}
+ 
+ 	return "dig";
+@@ -123,6 +127,8 @@
+ 	return "sn2";
+ # elif defined (CONFIG_IA64_DIG)
+ 	return "dig";
++# elif defined (CONFIG_IA64_XEN)
++	return "xen";
+ # else
+ #	error Unknown platform.  Fix acpi.c.
+ # endif
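
The acpi.c change above makes the platform probe report "xen" only when both the ACPI OEM id says so and the kernel is actually running on Xen. A trivial model of that double check (the strings and the running_on_xen flag are stand-ins, not the kernel's real probe):

    #include <stdio.h>
    #include <string.h>

    static int running_on_xen = 1; /* stand-in for is_running_on_xen() */

    static const char *sysname_from_oem(const char *oem_id)
    {
        if (!strcmp(oem_id, "HP"))
            return "hpzx1";
        if (!strcmp(oem_id, "SGI"))
            return "sn2";
        if (running_on_xen && !strcmp(oem_id, "XEN"))
            return "xen"; /* both conditions must hold, as in the hunk */
        return "dig";     /* default platform */
    }

    int main(void)
    {
        printf("%s\n", sysname_from_oem("XEN"));
        return 0;
    }
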
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/kernel/asm-offsets.c
+--- a/arch/ia64/kernel/asm-offsets.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/kernel/asm-offsets.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -268,4 +268,29 @@
+ 	DEFINE(IA64_TIME_SOURCE_MMIO64, TIME_SOURCE_MMIO64);
+ 	DEFINE(IA64_TIME_SOURCE_MMIO32, TIME_SOURCE_MMIO32);
+ 	DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET, offsetof (struct timespec, tv_nsec));
++
++#ifdef CONFIG_XEN
++	BLANK();
++
++#define DEFINE_MAPPED_REG_OFS(sym, field) \
++	DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(mapped_regs_t, field)))
++
++	DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr);
++	DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr);
++	DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip);
++	DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs);
++	DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs);
++	DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr);
++	DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa);
++	DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa);
++	DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim);
++	DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha);
++	DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir);
++	DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
++	DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
++	DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]);
++	DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
++	DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat);
++	DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat);    
++#endif /* CONFIG_XEN */
+ }
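
Background on why these offsets are generated from C: asm-offsets.c is only ever compiled to assembly, and each DEFINE() plants a marker line that the build turns into a #define consumable from entry.S and the Xen gate code. A sketch of the usual mechanism, under the assumption that the 2.6.18 macros work the same way (compile with gcc -S and grep the .s file for "->"):

    #include <stddef.h>

    /* illustrative stand-in for Xen's mapped_regs_t */
    struct mapped_regs_demo { unsigned long ipsr, iip, ifs; };

    /* Emits "->SYM value" into the generated .s file; a later build
     * step rewrites each such line into "#define SYM value". */
    #define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0" : : "i" (val))

    void offsets(void)
    {
        DEFINE(XSI_IPSR_OFS_DEMO, offsetof(struct mapped_regs_demo, ipsr));
        DEFINE(XSI_IIP_OFS_DEMO, offsetof(struct mapped_regs_demo, iip));
    }
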
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/kernel/crash.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/kernel/crash.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,222 @@
++/*
++ * arch/ia64/kernel/crash.c
++ *
++ * Architecture specific (ia64) functions for kexec based crash dumps.
++ *
++ * Created by: Khalid Aziz <khalid.aziz at hp.com>
++ * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
++ * Copyright (C) 2005 Intel Corp	Zou Nan hai <nanhai.zou at intel.com>
++ *
++ */
++#include <linux/smp.h>
++#include <linux/delay.h>
++#include <linux/crash_dump.h>
++#include <linux/bootmem.h>
++#include <linux/kexec.h>
++#include <linux/elfcore.h>
++#include <linux/sysctl.h>
++#include <linux/init.h>
++
++#include <asm/kdebug.h>
++#include <asm/mca.h>
++
++int kdump_status[NR_CPUS];
++atomic_t kdump_cpu_freezed;
++atomic_t kdump_in_progress;
++int kdump_on_init = 1;
++
++static inline Elf64_Word
++*append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data,
++		size_t data_len)
++{
++	struct elf_note *note = (struct elf_note *)buf;
++	note->n_namesz = strlen(name) + 1;
++	note->n_descsz = data_len;
++	note->n_type   = type;
++	buf += (sizeof(*note) + 3)/4;
++	memcpy(buf, name, note->n_namesz);
++	buf += (note->n_namesz + 3)/4;
++	memcpy(buf, data, data_len);
++	buf += (data_len + 3)/4;
++	return buf;
++}
++
++static void
++final_note(void *buf)
++{
++	memset(buf, 0, sizeof(struct elf_note));
++}
++
++extern void ia64_dump_cpu_regs(void *);
++
++static DEFINE_PER_CPU(struct elf_prstatus, elf_prstatus);
++
++void
++crash_save_this_cpu()
++{
++	void *buf;
++	unsigned long cfm, sof, sol;
++
++	int cpu = smp_processor_id();
++	struct elf_prstatus *prstatus = &per_cpu(elf_prstatus, cpu);
++
++	elf_greg_t *dst = (elf_greg_t *)&(prstatus->pr_reg);
++	memset(prstatus, 0, sizeof(*prstatus));
++	prstatus->pr_pid = current->pid;
++
++	ia64_dump_cpu_regs(dst);
++	cfm = dst[43];
++	sol = (cfm >> 7) & 0x7f;
++	sof = cfm & 0x7f;
++	dst[46] = (unsigned long)ia64_rse_skip_regs((unsigned long *)dst[46],
++			sof - sol);
++
++	buf = (u64 *) per_cpu_ptr(crash_notes, cpu);
++	if (!buf)
++		return;
++	buf = append_elf_note(buf, "CORE", NT_PRSTATUS, prstatus,
++			sizeof(*prstatus));
++	final_note(buf);
++}
++
++static int
++kdump_wait_cpu_freeze(void)
++{
++	int cpu_num = num_online_cpus() - 1;
++	int timeout = 1000;
++	while(timeout-- > 0) {
++		if (atomic_read(&kdump_cpu_freezed) == cpu_num)
++			return 0;
++		udelay(1000);
++	}
++	return 1;
++}
++
++void
++machine_crash_shutdown(struct pt_regs *pt)
++{
++	/* This function is only called after the system
++	 * has panicked or is otherwise in a critical state.
++	 * The minimum amount of code to allow a kexec'd kernel
++	 * to run successfully needs to happen here.
++	 *
++	 * In practice this means shooting down the other cpus in
++	 * an SMP system.
++	 */
++	kexec_disable_iosapic();
++#ifdef CONFIG_SMP
++	kdump_smp_send_stop();
++	if (kdump_wait_cpu_freeze() && kdump_on_init) 	{
++		// not all CPUs responded to the IPI; send INIT to freeze them
++		kdump_smp_send_init();
++	}
++#endif
++}
++
++static void
++machine_kdump_on_init(void)
++{
++	local_irq_disable();
++	kexec_disable_iosapic();
++	machine_kexec(ia64_kimage);
++}
++
++void
++kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
++{
++	int cpuid;
++	local_irq_disable();
++	cpuid = smp_processor_id();
++	crash_save_this_cpu();
++	current->thread.ksp = (__u64)info->sw - 16;
++	atomic_inc(&kdump_cpu_freezed);
++	kdump_status[cpuid] = 1;
++	mb();
++	if (cpuid == 0) {
++		for (;;)
++			cpu_relax();
++	} else
++		ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]);
++}
++
++static int
++kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
++{
++	struct die_args *args = data;
++
++	if (!kdump_on_init)
++		return NOTIFY_DONE;
++
++	if (val != DIE_INIT_MONARCH_ENTER &&
++	    val != DIE_INIT_SLAVE_ENTER &&
++	    val != DIE_MCA_RENDZVOUS_LEAVE &&
++	    val != DIE_MCA_MONARCH_LEAVE)
++		return NOTIFY_DONE;
++
++	/* There really ought to be a check here to see if this
++	 * is a machine check rendezvous. The kexec code that
++	 * was merged around 2.6.20-rc1 includes such a check,
++	 * but the check relies on infrastructure that is not
++	 * available in 2.6.16. */
++
++	switch (val) {
++		case DIE_INIT_MONARCH_ENTER:
++			machine_kdump_on_init();
++			break;
++		case DIE_INIT_SLAVE_ENTER:
++			unw_init_running(kdump_cpu_freeze, NULL);
++			break;
++		case DIE_MCA_RENDZVOUS_LEAVE:
++			if (atomic_read(&kdump_in_progress))
++				unw_init_running(kdump_cpu_freeze, NULL);
++			break;
++		case DIE_MCA_MONARCH_LEAVE:
++		     /* die_register->signr indicate if MCA is recoverable */
++			if (!args->signr)
++				machine_kdump_on_init();
++			break;
++	}
++	return NOTIFY_DONE;
++}
++
++#ifdef CONFIG_SYSCTL
++static ctl_table kdump_on_init_table[] = {
++	{
++		.ctl_name = CTL_UNNUMBERED,
++		.procname = "kdump_on_init",
++		.data = &kdump_on_init,
++		.maxlen = sizeof(int),
++		.mode = 0644,
++		.proc_handler = &proc_dointvec,
++	},
++	{ .ctl_name = 0 }
++};
++
++static ctl_table sys_table[] = {
++	{
++	  .ctl_name = CTL_KERN,
++	  .procname = "kernel",
++	  .mode = 0555,
++	  .child = kdump_on_init_table,
++	},
++	{ .ctl_name = 0 }
++};
++#endif
++
++static int
++machine_crash_setup(void)
++{
++	static struct notifier_block kdump_init_notifier_nb = {
++		.notifier_call = kdump_init_notifier,
++	};
++	int ret;
++	if((ret = register_die_notifier(&kdump_init_notifier_nb)) != 0)
++		return ret;
++#ifdef CONFIG_SYSCTL
++	register_sysctl_table(sys_table, 0);
++#endif
++	return 0;
++}
++
++__initcall(machine_crash_setup);
++
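
The note layout used by append_elf_note() above is fixed by the ELF spec: a 12-byte header, then the NUL-terminated name padded to a 4-byte boundary, then the payload, also padded. A self-contained sketch of the same word-rounding arithmetic (struct and names are local stand-ins for the kernel's elf_note):

    #include <stdio.h>
    #include <string.h>

    struct note_hdr { unsigned int n_namesz, n_descsz, n_type; };

    /* Append one note at buf and return a pointer just past it; the
     * (len + 3) / 4 terms round each piece up to whole 4-byte words,
     * exactly as the patch's append_elf_note() does. */
    static unsigned int *append_note(unsigned int *buf, const char *name,
                                     unsigned int type, const void *data,
                                     unsigned int len)
    {
        struct note_hdr *note = (struct note_hdr *)buf;

        note->n_namesz = strlen(name) + 1;
        note->n_descsz = len;
        note->n_type = type;
        buf += (sizeof(*note) + 3) / 4;  /* header */
        memcpy(buf, name, note->n_namesz);
        buf += (note->n_namesz + 3) / 4; /* padded name */
        memcpy(buf, data, len);
        buf += (len + 3) / 4;            /* padded payload */
        return buf;
    }

    int main(void)
    {
        unsigned int buf[64] = { 0 };
        long dummy = 42;
        unsigned int *end = append_note(buf, "CORE", 1, &dummy, sizeof(dummy));

        printf("note occupies %d words\n", (int)(end - buf));
        return 0;
    }
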
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/kernel/crash_dump.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/kernel/crash_dump.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,48 @@
++/*
++ *	kernel/crash_dump.c - Memory preserving reboot related code.
++ *
++ *	Created by: Simon Horman <horms at verge.net.au>
++ *	Original code moved from kernel/crash.c
++ *	Original code comment copied from the i386 version of this file
++ */
++
++#include <linux/errno.h>
++#include <linux/types.h>
++
++#include <linux/uaccess.h>
++
++/**
++ * copy_oldmem_page - copy one page from "oldmem"
++ * @pfn: page frame number to be copied
++ * @buf: target memory address for the copy; this can be in kernel address
++ *	space or user address space (see @userbuf)
++ * @csize: number of bytes to copy
++ * @offset: offset in bytes into the page (based on pfn) to begin the copy
++ * @userbuf: if set, @buf is in user address space, use copy_to_user(),
++ *	otherwise @buf is in kernel address space, use memcpy().
++ *
++ * Copy a page from "oldmem". For this page, there is no pte mapped
++ * in the current kernel. We stitch up a pte, similar to kmap_atomic.
++ *
++ * Calling copy_to_user() in atomic context is not desirable. Hence we
++ * first copy the data to a pre-allocated kernel page and then copy it to
++ * user space in non-atomic context.
++ */
++ssize_t
++copy_oldmem_page(unsigned long pfn, char *buf,
++		size_t csize, unsigned long offset, int userbuf)
++{
++	void  *vaddr;
++
++	if (!csize)
++		return 0;
++	vaddr = __va(pfn<<PAGE_SHIFT);
++	if (userbuf) {
++		if (copy_to_user(buf, (vaddr + offset), csize)) {
++			return -EFAULT;
++		}
++	} else
++		memcpy(buf, (vaddr + offset), csize);
++	return csize;
++}
++
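
copy_oldmem_page() works because the crash kernel keeps the previous kernel's memory in the identity mapping, so locating a dump page is pure address arithmetic. A tiny illustration, assuming a 16KB page size (one common ia64 configuration):

    #include <stdio.h>

    #define PAGE_SHIFT_DEMO 14 /* assumed: 16KB pages */

    int main(void)
    {
        unsigned long pfn = 0x1234, offset = 0x80;

        /* the byte a /proc/vmcore reader asked for, inside old memory */
        unsigned long addr = (pfn << PAGE_SHIFT_DEMO) + offset;
        printf("old-memory address: 0x%lx\n", addr);
        return 0;
    }
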
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/kernel/efi.c
+--- a/arch/ia64/kernel/efi.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/kernel/efi.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -21,11 +21,13 @@
+  *	Skip non-WB memory and ignore empty memory ranges.
+  */
+ #include <linux/module.h>
++#include <linux/bootmem.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+ #include <linux/types.h>
+ #include <linux/time.h>
+ #include <linux/efi.h>
++#include <linux/kexec.h>
+ 
+ #include <asm/io.h>
+ #include <asm/kregs.h>
+@@ -34,6 +36,11 @@
+ #include <asm/processor.h>
+ #include <asm/mca.h>
+ 
++#ifdef CONFIG_PROC_IOMEM_MACHINE
++#include <xen/interface/memory.h>
++#include <asm/hypercall.h>
++#endif
++
+ #define EFI_DEBUG	0
+ 
+ extern efi_status_t efi_call_phys (void *, ...);
+@@ -41,7 +48,7 @@
+ struct efi efi;
+ EXPORT_SYMBOL(efi);
+ static efi_runtime_services_t *runtime;
+-static unsigned long mem_limit = ~0UL, max_addr = ~0UL;
++static unsigned long mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;
+ 
+ #define efi_call_virt(f, args...)	(*(f))(args)
+ 
+@@ -421,6 +428,8 @@
+ 			mem_limit = memparse(cp + 4, &cp);
+ 		} else if (memcmp(cp, "max_addr=", 9) == 0) {
+ 			max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
++		} else if (memcmp(cp, "min_addr=", 9) == 0) {
++			min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
+ 		} else {
+ 			while (*cp != ' ' && *cp)
+ 				++cp;
+@@ -428,6 +437,8 @@
+ 				++cp;
+ 		}
+ 	}
++	if (min_addr != 0UL)
++		printk(KERN_INFO "Ignoring memory below %luMB\n", min_addr >> 20);
+ 	if (max_addr != ~0UL)
+ 		printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20);
+ 
+@@ -894,7 +905,8 @@
+ 		as = max(contig_low, md->phys_addr);
+ 		ae = min(contig_high, efi_md_end(md));
+ 
+-		/* keep within max_addr= command line arg */
++		/* keep within the max_addr= and min_addr= command line args */
++		as = max(as, min_addr);
+ 		ae = min(ae, max_addr);
+ 		if (ae <= as)
+ 			continue;
+@@ -965,6 +977,11 @@
+ 		if (!is_available_memory(md))
+ 			continue;
+ 
++#ifdef CONFIG_CRASH_DUMP
++		/* saved_max_pfn should ignore max_addr= command line arg */
++		if (saved_max_pfn < (efi_md_end(md) >> PAGE_SHIFT))
++			saved_max_pfn = (efi_md_end(md) >> PAGE_SHIFT);
++#endif
+ 		/*
+ 		 * Round ends inward to granule boundaries
+ 		 * Give trimmings to uncached allocator
+@@ -1004,7 +1021,8 @@
+ 		} else
+ 			ae = efi_md_end(md);
+ 
+-		/* keep within max_addr= command line arg */
++		/* keep within the max_addr= and min_addr= command line args */
++		as = max(as, min_addr);
+ 		ae = min(ae, max_addr);
+ 		if (ae <= as)
+ 			continue;
+@@ -1033,20 +1051,21 @@
+ 	*e = (u64)++k;
+ }
+ 
+-void
+-efi_initialize_iomem_resources(struct resource *code_resource,
+-			       struct resource *data_resource)
++#define EFI_INITIALISE_PHYS 0x1
++#define EFI_INITIALISE_MACH 0x2
++#define EFI_INITIALISE_ALL  (EFI_INITIALISE_PHYS|EFI_INITIALISE_MACH)
++
++static void
++efi_initialize_resources(void *efi_map_start, void *efi_map_end,
++			 u64 efi_desc_size, struct resource *root_resource,
++			 struct resource *code_resource,
++			 struct resource *data_resource, unsigned flag)
+ {
+ 	struct resource *res;
+-	void *efi_map_start, *efi_map_end, *p;
++	void *p;
+ 	efi_memory_desc_t *md;
+-	u64 efi_desc_size;
+ 	char *name;
+ 	unsigned long flags;
+-
+-	efi_map_start = __va(ia64_boot_param->efi_memmap);
+-	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+-	efi_desc_size = ia64_boot_param->efi_memdesc_size;
+ 
+ 	res = NULL;
+ 
+@@ -1106,7 +1125,7 @@
+ 		res->end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
+ 		res->flags = flags;
+ 
+-		if (insert_resource(&iomem_resource, res) < 0)
++		if (insert_resource(root_resource, res) < 0)
+ 			kfree(res);
+ 		else {
+ 			/*
+@@ -1114,8 +1133,135 @@
+ 			 * kernel data so we try it repeatedly and
+ 			 * let the resource manager test it.
+ 			 */
+-			insert_resource(res, code_resource);
+-			insert_resource(res, data_resource);
++			if (flag & EFI_INITIALISE_PHYS) {
++				insert_resource(res, code_resource);
++				insert_resource(res, data_resource);
++			}
++#ifdef CONFIG_KEXEC
++			if (flag & EFI_INITIALISE_MACH) {
++				insert_resource(res, &efi_memmap_res);
++				insert_resource(res, &boot_param_res);
++				if (crashk_res.end > crashk_res.start)
++					insert_resource(res, &crashk_res);
++#ifdef CONFIG_XEN
++				if (is_initial_xendomain())
++					xen_machine_kexec_register_resources(
++								res);
++#endif
++			}
++#endif
+ 		}
+ 	}
+ }
++
++#ifdef CONFIG_PROC_IOMEM_MACHINE
++static void
++efi_initialize_iomem_machine_resources(void)
++{
++	unsigned long size;
++	xen_memory_map_t memmap;
++	xen_ia64_memmap_info_t *memmap_info = NULL;
++	void *efi_map_start, *efi_map_end;
++	u64 efi_desc_size;
++	int ret;
++
++	/* It would be nice if it weren't necessary to loop like this */
++	for (size = 1024; 1; size += 1024) {
++		memmap_info = kmalloc(size, GFP_KERNEL);
++		if (memmap_info == NULL)
++			return; /* -ENOMEM, but no way to return error */
++
++		memmap.nr_entries = size;
++		set_xen_guest_handle(memmap.buffer, memmap_info);
++		ret = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap);
++		if (!ret)
++			break;
++
++		kfree(memmap_info);
++	}
++
++	efi_map_start = &memmap_info->memdesc;
++	efi_map_end = efi_map_start + memmap_info->efi_memmap_size;
++	efi_desc_size = memmap_info->efi_memdesc_size;
++	efi_initialize_resources(efi_map_start, efi_map_end, efi_desc_size,
++				 &iomem_machine_resource, NULL, NULL,
++				 EFI_INITIALISE_MACH);
++
++	kfree(memmap_info);
++}
++#endif
++
++void
++efi_initialize_iomem_resources(struct resource *code_resource,
++			       struct resource *data_resource)
++{
++	void *efi_map_start, *efi_map_end;
++	u64 efi_desc_size;
++
++	efi_map_start = __va(ia64_boot_param->efi_memmap);
++	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
++	efi_desc_size = ia64_boot_param->efi_memdesc_size;
++
++#ifdef CONFIG_PROC_IOMEM_MACHINE
++	if (is_initial_xendomain()) {
++		efi_initialize_resources(efi_map_start, efi_map_end,
++					 efi_desc_size, &iomem_resource,
++					 code_resource, data_resource,
++					 EFI_INITIALISE_PHYS);
++		efi_initialize_iomem_machine_resources();
++	}
++	else
++#endif
++		efi_initialize_resources(efi_map_start, efi_map_end,
++					 efi_desc_size, &iomem_resource,
++					 code_resource, data_resource,
++					 EFI_INITIALISE_ALL);
++}
++
++
++
++#ifdef CONFIG_KEXEC
++/* Find a block of memory aligned to 64M, excluding the reserved regions;
++ * rsvd_regions are sorted.
++ */
++unsigned long
++kdump_find_rsvd_region (unsigned long size,
++		struct rsvd_region *r, int n)
++{
++  int i;
++  u64 start, end;
++  u64 alignment = 1UL << _PAGE_SIZE_64M;
++  void *efi_map_start, *efi_map_end, *p;
++  efi_memory_desc_t *md;
++  u64 efi_desc_size;
++
++  efi_map_start = __va(ia64_boot_param->efi_memmap);
++  efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
++  efi_desc_size = ia64_boot_param->efi_memdesc_size;
++
++  for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
++	  md = p;
++	  if (!efi_wb(md))
++		  continue;
++	  start = ALIGN(md->phys_addr, alignment);
++	  end = efi_md_end(md);
++	  for (i = 0; i < n; i++) {
++		if (__pa(r[i].start) >= start && __pa(r[i].end) < end) {
++			if (__pa(r[i].start) > start + size)
++				return start;
++			start = ALIGN(__pa(r[i].end), alignment);
++			if (i < n-1 && __pa(r[i+1].start) < start + size)
++				continue;
++			else
++				break;
++		}
++	  }
++	  if (end > start + size)
++		return start;
++  }
++
++  printk(KERN_WARNING "Cannot reserve 0x%lx byte of memory for crashdump\n",
++	size);
++  return ~0UL;
++}
++#endif
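
The min_addr= support added above mirrors the existing max_addr= clamp: each usable range [as, ae) is intersected with the allowed window and dropped when nothing survives. A stand-alone sketch of that clamping (values invented; the real code walks the EFI memory map):

    #include <stdio.h>

    static unsigned long min_addr = 256UL << 20; /* e.g. min_addr=256M */
    static unsigned long max_addr = 4UL << 30;   /* e.g. max_addr=4G */

    /* Clamp [*as, *ae) to the window; returns 1 if anything is left. */
    static int clamp_range(unsigned long *as, unsigned long *ae)
    {
        if (*as < min_addr)
            *as = min_addr; /* as = max(as, min_addr) in the hunk */
        if (*ae > max_addr)
            *ae = max_addr; /* ae = min(ae, max_addr) */
        return *as < *ae;
    }

    int main(void)
    {
        unsigned long as = 128UL << 20, ae = 8UL << 30;

        if (clamp_range(&as, &ae))
            printf("usable: [0x%lx, 0x%lx)\n", as, ae);
        return 0;
    }
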
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/kernel/entry.S
+--- a/arch/ia64/kernel/entry.S	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/kernel/entry.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -180,7 +180,7 @@
+  *	called.  The code starting at .map relies on this.  The rest of the code
+  *	doesn't care about the interrupt masking status.
+  */
+-GLOBAL_ENTRY(ia64_switch_to)
++GLOBAL_ENTRY(__ia64_switch_to)
+ 	.prologue
+ 	alloc r16=ar.pfs,1,0,0,0
+ 	DO_SAVE_SWITCH_STACK
+@@ -234,7 +234,7 @@
+ 	;;
+ 	srlz.d
+ 	br.cond.sptk .done
+-END(ia64_switch_to)
++END(__ia64_switch_to)
+ 
+ /*
+  * Note that interrupts are enabled during save_switch_stack and load_switch_stack.  This
+@@ -375,7 +375,7 @@
+  *	- b7 holds address to return to
+  *	- must not touch r8-r11
+  */
+-ENTRY(load_switch_stack)
++GLOBAL_ENTRY(load_switch_stack)
+ 	.prologue
+ 	.altrp b7
+ 
+@@ -510,7 +510,7 @@
+ 	 * because some system calls (such as ia64_execve) directly
+ 	 * manipulate ar.pfs.
+ 	 */
+-GLOBAL_ENTRY(ia64_trace_syscall)
++GLOBAL_ENTRY(__ia64_trace_syscall)
+ 	PT_REGS_UNWIND_INFO(0)
+ 	/*
+ 	 * We need to preserve the scratch registers f6-f11 in case the system
+@@ -582,7 +582,7 @@
+ (p6)	mov r10=-1
+ (p6)	mov r8=r9
+ 	br.cond.sptk .strace_save_retval
+-END(ia64_trace_syscall)
++END(__ia64_trace_syscall)
+ 
+ 	/*
+ 	 * When traced and returning from sigreturn, we invoke syscall_trace but then
+@@ -601,7 +601,7 @@
+ .ret4:	br.cond.sptk ia64_leave_kernel
+ END(ia64_strace_leave_kernel)
+ 
+-GLOBAL_ENTRY(ia64_ret_from_clone)
++GLOBAL_ENTRY(__ia64_ret_from_clone)
+ 	PT_REGS_UNWIND_INFO(0)
+ {	/*
+ 	 * Some versions of gas generate bad unwind info if the first instruction of a
+@@ -627,7 +627,7 @@
+ 	cmp.ne p6,p0=r2,r0
+ (p6)	br.cond.spnt .strace_check_retval
+ 	;;					// added stop bits to prevent r8 dependency
+-END(ia64_ret_from_clone)
++END(__ia64_ret_from_clone)
+ 	// fall through
+ GLOBAL_ENTRY(ia64_ret_from_syscall)
+ 	PT_REGS_UNWIND_INFO(0)
+@@ -635,8 +635,11 @@
+ 	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
+ 	mov r10=r0				// clear error indication in r10
+ (p7)	br.cond.spnt handle_syscall_error	// handle potential syscall failure
++	;;
++	// don't fall through, ia64_leave_syscall may be #define'd
++	br.cond.sptk.few ia64_leave_syscall
++	;;
+ END(ia64_ret_from_syscall)
+-	// fall through
+ /*
+  * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
+  *	need to switch to bank 0 and doesn't restore the scratch registers.
+@@ -681,7 +684,7 @@
+  *	      ar.csd: cleared
+  *	      ar.ssd: cleared
+  */
+-ENTRY(ia64_leave_syscall)
++GLOBAL_ENTRY(__ia64_leave_syscall)
+ 	PT_REGS_UNWIND_INFO(0)
+ 	/*
+ 	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
+@@ -789,7 +792,7 @@
+ 	mov.m ar.ssd=r0			// M2   clear ar.ssd
+ 	mov f11=f0			// F    clear f11
+ 	br.cond.sptk.many rbs_switch	// B
+-END(ia64_leave_syscall)
++END(__ia64_leave_syscall)
+ 
+ #ifdef CONFIG_IA32_SUPPORT
+ GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
+@@ -801,10 +804,13 @@
+ 	st8.spill [r2]=r8	// store return value in slot for r8 and set unat bit
+ 	.mem.offset 8,0
+ 	st8.spill [r3]=r0	// clear error indication in slot for r10 and set unat bit
++	;;
++	// don't fall through, ia64_leave_kernel may be #define'd
++	br.cond.sptk.few ia64_leave_kernel
++	;;
+ END(ia64_ret_from_ia32_execve)
+-	// fall through
+ #endif /* CONFIG_IA32_SUPPORT */
+-GLOBAL_ENTRY(ia64_leave_kernel)
++GLOBAL_ENTRY(__ia64_leave_kernel)
+ 	PT_REGS_UNWIND_INFO(0)
+ 	/*
+ 	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
+@@ -1135,7 +1141,7 @@
+ 	ld8 r10=[r3]
+ 	br.cond.sptk.many .work_processed_syscall	// re-check
+ 
+-END(ia64_leave_kernel)
++END(__ia64_leave_kernel)
+ 
+ ENTRY(handle_syscall_error)
+ 	/*
+@@ -1175,7 +1181,7 @@
+ 	 * be set up by the caller.  We declare 8 input registers so the system call
+ 	 * args get preserved, in case we need to restart a system call.
+ 	 */
+-ENTRY(notify_resume_user)
++GLOBAL_ENTRY(notify_resume_user)
+ 	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
+ 	alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
+ 	mov r9=ar.unat
+@@ -1263,7 +1269,7 @@
+ 	adds sp=16,sp
+ 	;;
+ 	ld8 r9=[sp]				// load new ar.unat
+-	mov.sptk b7=r8,ia64_leave_kernel
++	mov.sptk b7=r8,__ia64_leave_kernel
+ 	;;
  	mov ar.unat=r9
  	br.many b7
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/kernel/fsys.S tmp-linux-2.6-xen.patch/arch/ia64/kernel/fsys.S
---- pristine-linux-2.6.18.2/arch/ia64/kernel/fsys.S	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/kernel/fsys.S	2007-07-30 16:35:11.000000000 +0200
-@@ -516,11 +516,34 @@ ENTRY(fsys_fallback_syscall)
+@@ -1575,7 +1581,7 @@
+ 	data8 sys_mq_timedreceive		// 1265
+ 	data8 sys_mq_notify
+ 	data8 sys_mq_getsetattr
+-	data8 sys_ni_syscall			// reserved for kexec_load
++	data8 sys_kexec_load
+ 	data8 sys_ni_syscall			// reserved for vserver
+ 	data8 sys_waitid			// 1270
+ 	data8 sys_add_key
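
The common thread in the entry.S hunks: every exit path gets a reserved __ia64_ name, callers branch to the public name explicitly, and the old fall-throughs become branches, because a Xen build may #define the public name to a paravirtualized routine. In C terms the dispatch looks roughly like this (the _demo names are invented):

    #include <stdio.h>

    static void native_leave_kernel(void) { puts("native exit path"); }
    static void xen_leave_kernel(void)    { puts("paravirt exit path"); }

    /* The renamed symbol always carries the native implementation. */
    #define __ia64_leave_kernel_demo native_leave_kernel

    /* A Xen build overrides the public name; natively it aliases the
     * reserved one.  Build with -DXEN_BUILD_DEMO to flip it. */
    #ifdef XEN_BUILD_DEMO
    # define ia64_leave_kernel_demo xen_leave_kernel
    #else
    # define ia64_leave_kernel_demo __ia64_leave_kernel_demo
    #endif

    int main(void)
    {
        /* Callers must go through the public name; falling through
         * into whatever code follows would bypass the override. */
        ia64_leave_kernel_demo();
        if (0) /* keep both paths referenced in either build */
            xen_leave_kernel(), native_leave_kernel();
        return 0;
    }
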
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/kernel/fsys.S
+--- a/arch/ia64/kernel/fsys.S	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/kernel/fsys.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -516,11 +516,34 @@
  	adds r17=-1024,r15
  	movl r14=sys_call_table
  	;;
@@ -22475,7 +23931,7 @@
  	mov r27=ar.rsc
  	mov r21=ar.fpsr
  	mov r26=ar.pfs
-@@ -632,7 +655,25 @@ GLOBAL_ENTRY(fsys_bubble_down)
+@@ -632,7 +655,25 @@
  	mov rp=r14				// I0   set the real return addr
  	and r3=_TIF_SYSCALL_TRACEAUDIT,r3	// A
  	;;
@@ -22501,206 +23957,315 @@
  	cmp.eq p8,p0=r3,r0			// A
  (p10)	br.cond.spnt.many ia64_ret_from_syscall	// B    return if bad call-frame or r15 is a NaT
  
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/kernel/gate.lds.S tmp-linux-2.6-xen.patch/arch/ia64/kernel/gate.lds.S
---- pristine-linux-2.6.18.2/arch/ia64/kernel/gate.lds.S	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/kernel/gate.lds.S	2007-07-30 16:35:11.000000000 +0200
-@@ -43,6 +43,20 @@ SECTIONS
- 				    __start_gate_brl_fsys_bubble_down_patchlist = .;
- 				    *(.data.patch.brl_fsys_bubble_down)
- 				    __end_gate_brl_fsys_bubble_down_patchlist = .;
-+
-+#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
-+				    __start_gate_running_on_xen_patchlist = .;
-+				    *(.data.patch.running_on_xen)
-+				    __end_gate_running_on_xen_patchlist = .;
-+
-+				    __start_gate_brl_xen_ssm_i_0_patchlist = .;
-+				    *(.data.patch.brl_xen_ssm_i_0)
-+				    __end_gate_brl_xen_ssm_i_0_patchlist = .;
-+
-+				    __start_gate_brl_xen_ssm_i_1_patchlist = .;
-+				    *(.data.patch.brl_xen_ssm_i_1)
-+				    __end_gate_brl_xen_ssm_i_1_patchlist = .;
-+#endif
-   }									:readable
-   .IA_64.unwind_info		: { *(.IA_64.unwind_info*) }
-   .IA_64.unwind			: { *(.IA_64.unwind*) }			:readable :unwind
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/kernel/gate.S tmp-linux-2.6-xen.patch/arch/ia64/kernel/gate.S
---- pristine-linux-2.6.18.2/arch/ia64/kernel/gate.S	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/kernel/gate.S	2007-07-30 16:35:11.000000000 +0200
-@@ -13,6 +13,9 @@
- #include <asm/sigcontext.h>
- #include <asm/system.h>
- #include <asm/unistd.h>
-+#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
-+# include <asm/privop.h>
-+#endif
- 
- /*
-  * We can't easily refer to symbols inside the kernel.  To avoid full runtime relocation,
-@@ -32,6 +35,40 @@
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/kernel/gate.S
+--- a/arch/ia64/kernel/gate.S	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/kernel/gate.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -31,102 +31,6 @@
+ #define BRL_COND_FSYS_BUBBLE_DOWN(pr)			\
  [1:](pr)brl.cond.sptk 0;				\
  	.xdata4 ".data.patch.brl_fsys_bubble_down", 1b-.
- 
-+#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
-+	// The page in which hyperprivop lives must be pinned by ITR.
-+	// However vDSO area isn't pinned. So issuing hyperprivop
-+	// from vDSO page causes trouble that Kevin pointed out.
-+	// After clearing vpsr.ic, the vcpu is pre-empted and the itlb
-+	// is flushed. Then vcpu get cpu again, tlb miss fault occures.
-+	// However it results in nested dtlb fault because vpsr.ic is off.
-+	// To avoid such a situation, we jump into the kernel text area
-+	// which is pinned, and then issue hyperprivop and return back
-+	// to vDSO page.
-+	// This is Dan Magenheimer's idea.
-+
-+	// Currently is_running_on_xen() is defined as running_on_xen.
-+	// If is_running_on_xen() is a real function, we must update
-+	// according to it.
-+	.section ".data.patch.running_on_xen", "a"
-+	.previous
-+#define LOAD_RUNNING_ON_XEN(reg)			\
-+[1:]	movl reg=0;					\
-+	.xdata4 ".data.patch.running_on_xen", 1b-.
+-
+-GLOBAL_ENTRY(__kernel_syscall_via_break)
+-	.prologue
+-	.altrp b6
+-	.body
+-	/*
+-	 * Note: for (fast) syscall restart to work, the break instruction must be
+-	 *	 the first one in the bundle addressed by syscall_via_break.
+-	 */
+-{ .mib
+-	break 0x100000
+-	nop.i 0
+-	br.ret.sptk.many b6
+-}
+-END(__kernel_syscall_via_break)
+-
+-/*
+- * On entry:
+- *	r11 = saved ar.pfs
+- *	r15 = system call #
+- *	b0  = saved return address
+- *	b6  = return address
+- * On exit:
+- *	r11 = saved ar.pfs
+- *	r15 = system call #
+- *	b0  = saved return address
+- *	all other "scratch" registers:	undefined
+- *	all "preserved" registers:	same as on entry
+- */
+-
+-GLOBAL_ENTRY(__kernel_syscall_via_epc)
+-	.prologue
+-	.altrp b6
+-	.body
+-{
+-	/*
+-	 * Note: the kernel cannot assume that the first two instructions in this
+-	 * bundle get executed.  The remaining code must be safe even if
+-	 * they do not get executed.
+-	 */
+-	adds r17=-1024,r15			// A
+-	mov r10=0				// A    default to successful syscall execution
+-	epc					// B	causes split-issue
+-}
+-	;;
+-	rsm psr.be | psr.i			// M2 (5 cyc to srlz.d)
+-	LOAD_FSYSCALL_TABLE(r14)		// X
+-	;;
+-	mov r16=IA64_KR(CURRENT)		// M2 (12 cyc)
+-	shladd r18=r17,3,r14			// A
+-	mov r19=NR_syscalls-1			// A
+-	;;
+-	lfetch [r18]				// M0|1
+-	mov r29=psr				// M2 (12 cyc)
+-	// If r17 is a NaT, p6 will be zero
+-	cmp.geu p6,p7=r19,r17			// A    (sysnr > 0 && sysnr < 1024+NR_syscalls)?
+-	;;
+-	mov r21=ar.fpsr				// M2 (12 cyc)
+-	tnat.nz p10,p9=r15			// I0
+-	mov.i r26=ar.pfs			// I0 (would stall anyhow due to srlz.d...)
+-	;;
+-	srlz.d					// M0 (forces split-issue) ensure PSR.BE==0
+-(p6)	ld8 r18=[r18]				// M0|1
+-	nop.i 0
+-	;;
+-	nop.m 0
+-(p6)	tbit.z.unc p8,p0=r18,0			// I0 (dual-issues with "mov b7=r18"!)
+-	nop.i 0
+-	;;
+-(p8)	ssm psr.i
+-(p6)	mov b7=r18				// I0
+-(p8)	br.dptk.many b7				// B
+-
+-	mov r27=ar.rsc				// M2 (12 cyc)
+-/*
+- * brl.cond doesn't work as intended because the linker would convert this branch
+- * into a branch to a PLT.  Perhaps there will be a way to avoid this with some
+- * future version of the linker.  In the meantime, we just use an indirect branch
+- * instead.
+- */
+-#ifdef CONFIG_ITANIUM
+-(p6)	add r14=-8,r14				// r14 <- addr of fsys_bubble_down entry
+-	;;
+-(p6)	ld8 r14=[r14]				// r14 <- fsys_bubble_down
+-	;;
+-(p6)	mov b7=r14
+-(p6)	br.sptk.many b7
+-#else
+-	BRL_COND_FSYS_BUBBLE_DOWN(p6)
+-#endif
+-	ssm psr.i
+-	mov r10=-1
+-(p10)	mov r8=EINVAL
+-(p9)	mov r8=ENOSYS
+-	FSYS_RETURN
+-END(__kernel_syscall_via_epc)
+ 
+ #	define ARG0_OFF		(16 + IA64_SIGFRAME_ARG0_OFFSET)
+ #	define ARG1_OFF		(16 + IA64_SIGFRAME_ARG1_OFFSET)
+@@ -373,3 +277,154 @@
+ 	// invala not necessary as that will happen when returning to user-mode
+ 	br.cond.sptk back_from_restore_rbs
+ END(__kernel_sigtramp)
 +
-+	.section ".data.patch.brl_xen_ssm_i_0", "a"
-+	.previous
-+#define BRL_COND_XEN_SSM_I_0(pr)			\
-+[1:](pr)brl.cond.sptk 0;				\
-+	.xdata4 ".data.patch.brl_xen_ssm_i_0", 1b-.
++GLOBAL_ENTRY(__kernel_syscall_via_break)
++	.prologue
++	.altrp b6
++	.body
++	/*
++	 * Note: for (fast) syscall restart to work, the break instruction must be
++	 *	 the first one in the bundle addressed by syscall_via_break.
++	 */
++{ .mib
++	break 0x100000
++	nop.i 0
++	br.ret.sptk.many b6
++}
++END(__kernel_syscall_via_break)
 +
-+	.section ".data.patch.brl_xen_ssm_i_1", "a"
-+	.previous
-+#define BRL_COND_XEN_SSM_I_1(pr)			\
-+[1:](pr)brl.cond.sptk 0;				\
-+	.xdata4 ".data.patch.brl_xen_ssm_i_1", 1b-.
-+#endif
++/*
++ * On entry:
++ *	r11 = saved ar.pfs
++ *	r15 = system call #
++ *	b0  = saved return address
++ *	b6  = return address
++ * On exit:
++ *	r11 = saved ar.pfs
++ *	r15 = system call #
++ *	b0  = saved return address
++ *	all other "scratch" registers:	undefined
++ *	all "preserved" registers:	same as on entry
++ */
 +
- GLOBAL_ENTRY(__kernel_syscall_via_break)
- 	.prologue
- 	.altrp b6
-@@ -76,7 +113,42 @@ GLOBAL_ENTRY(__kernel_syscall_via_epc)
- 	epc					// B	causes split-issue
- }
- 	;;
-+#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
++GLOBAL_ENTRY(__kernel_syscall_via_epc)
++	.prologue
++	.altrp b6
++	.body
++{
++	/*
++	 * Note: the kernel cannot assume that the first two instructions in this
++	 * bundle get executed.  The remaining code must be safe even if
++	 * they do not get executed.
++	 */
++	adds r17=-1024,r15			// A
++	mov r10=0				// A    default to successful syscall execution
++	epc					// B	causes split-issue
++}
++	;;
++#ifdef __XEN_IA64_VDSO_PARAVIRT
 +	// r20 = 1
 +	// r22 = &vcpu->vcpu_info->evtchn_upcall_mask
-+	// r23 = &vpsr.ic
 +	// r24 = &vcpu->vcpu_info->evtchn_upcall_pending
 +	// r25 = tmp
-+	// r28 = &running_on_xen
-+	// r30 = running_on_xen
 +	// r31 = tmp
 +	// p11 = tmp
-+	// p12 = running_on_xen
-+	// p13 = !running_on_xen
 +	// p14 = tmp
-+	// p15 = tmp
-+#define isXen	p12
-+#define isRaw	p13
-+	LOAD_RUNNING_ON_XEN(r28)
++	mov r20=1
 +	movl r22=XSI_PSR_I_ADDR
 +	;;
 +	ld8 r22=[r22]
 +	;;
-+	movl r23=XSI_PSR_IC
++	st1 [r22]=r20
++	rum psr.be
 +	adds r24=-1,r22
-+	mov r20=1
-+	;;
-+	ld4 r30=[r28]
-+	;;
-+	cmp.ne isXen,isRaw=r0,r30
++#else
++	rsm psr.be | psr.i			// M2 (5 cyc to srlz.d)
++#endif
++	LOAD_FSYSCALL_TABLE(r14)		// X
 +	;;
-+(isRaw)	rsm psr.be | psr.i
-+(isXen)	st1 [r22]=r20
-+(isXen)	rum psr.be
++	mov r16=IA64_KR(CURRENT)		// M2 (12 cyc)
++	shladd r18=r17,3,r14			// A
++	mov r19=NR_syscalls-1			// A
++#ifdef __XEN_IA64_VDSO_PARAVIRT
++	XEN_HYPER_GET_PSR
 +	;;
++	lfetch [r18]				// M0|1
++	mov r29=r8
 +#else
- 	rsm psr.be | psr.i			// M2 (5 cyc to srlz.d)
++	;;
++	lfetch [r18]				// M0|1
++	mov r29=psr				// M2 (12 cyc)
 +#endif
- 	LOAD_FSYSCALL_TABLE(r14)		// X
- 	;;
- 	mov r16=IA64_KR(CURRENT)		// M2 (12 cyc)
-@@ -84,7 +156,14 @@ GLOBAL_ENTRY(__kernel_syscall_via_epc)
- 	mov r19=NR_syscalls-1			// A
- 	;;
- 	lfetch [r18]				// M0|1
-+#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
-+(isRaw)	mov r29=psr
-+(isXen)	XEN_HYPER_GET_PSR
++	// If r17 is a NaT, p6 will be zero
++	cmp.geu p6,p7=r19,r17			// A    (sysnr > 0 && sysnr < 1024+NR_syscalls)?
++	;;
++	mov r21=ar.fpsr				// M2 (12 cyc)
++	tnat.nz p10,p9=r15			// I0
++	mov.i r26=ar.pfs			// I0 (would stall anyhow due to srlz.d...)
++	;;
++	srlz.d					// M0 (forces split-issue) ensure PSR.BE==0
++(p6)	ld8 r18=[r18]				// M0|1
++	nop.i 0
 +	;;
-+(isXen)	mov r29=r8
++	nop.m 0
++(p6)	tbit.z.unc p8,p0=r18,0			// I0 (dual-issues with "mov b7=r18"!)
++#ifdef __XEN_IA64_VDSO_PARAVIRT
++	
++#define XEN_SET_PSR_I(pred)		\
++(pred)	ld1 r31=[r22];			\
++	;; ;				\
++(pred)	st1 [r22]=r0;			\
++(pred)	cmp.ne.unc p14,p0=r0,r31;	\
++	;; ;				\
++(p14)	ld1 r25=[r24];			\
++	;; ;				\
++(p14)	cmp.ne.unc p11,p0=r0,r25;	\
++	;; ;				\
++(p11)	XEN_HYPER_SSM_I;
++
++	;; 
++	XEN_SET_PSR_I(p8)
 +#else
- 	mov r29=psr				// M2 (12 cyc)
++	nop.i 0
++	;;
++(p8)	ssm psr.i
 +#endif
- 	// If r17 is a NaT, p6 will be zero
- 	cmp.geu p6,p7=r19,r17			// A    (sysnr > 0 && sysnr < 1024+NR_syscalls)?
- 	;;
-@@ -98,9 +177,21 @@ GLOBAL_ENTRY(__kernel_syscall_via_epc)
- 	;;
- 	nop.m 0
- (p6)	tbit.z.unc p8,p0=r18,0			// I0 (dual-issues with "mov b7=r18"!)
-+#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
-+	;;
-+	// p14 = running_on_xen && p8
-+	// p15 = !running_on_xen && p8
-+(p8)	cmp.ne.unc p14,p15=r0,r30
++(p6)	mov b7=r18				// I0
++(p8)	br.dptk.many b7				// B
++
++	mov r27=ar.rsc				// M2 (12 cyc)
++/*
++ * brl.cond doesn't work as intended because the linker would convert this branch
++ * into a branch to a PLT.  Perhaps there will be a way to avoid this with some
++ * future version of the linker.  In the meantime, we just use an indirect branch
++ * instead.
++ */
++#ifdef CONFIG_ITANIUM
++(p6)	add r14=-8,r14				// r14 <- addr of fsys_bubble_down entry
 +	;;
-+(p15)	ssm psr.i
-+	BRL_COND_XEN_SSM_I_0(p14)
-+	.global .vdso_ssm_i_0_ret
-+.vdso_ssm_i_0_ret:
++(p6)	ld8 r14=[r14]				// r14 <- fsys_bubble_down
++	;;
++(p6)	mov b7=r14
++(p6)	br.sptk.many b7
 +#else
- 	nop.i 0
- 	;;
- (p8)	ssm psr.i
++	BRL_COND_FSYS_BUBBLE_DOWN(p6)
 +#endif
- (p6)	mov b7=r18				// I0
- (p8)	br.dptk.many b7				// B
- 
-@@ -121,9 +212,21 @@ GLOBAL_ENTRY(__kernel_syscall_via_epc)
- #else
- 	BRL_COND_FSYS_BUBBLE_DOWN(p6)
- #endif
-+#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
-+(isRaw)	ssm psr.i
-+	BRL_COND_XEN_SSM_I_1(isXen)
-+	.global .vdso_ssm_i_1_ret
-+.vdso_ssm_i_1_ret:
-+#else
- 	ssm psr.i
-+#endif
- 	mov r10=-1
- (p10)	mov r8=EINVAL
-+#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
-+	dv_serialize_data // shut up gas warning.
-+		          // we know xen_hyper_ssm_i_0 or xen_hyper_ssm_i_1
-+		          // doesn't change p9 and p10
-+#endif
- (p9)	mov r8=ENOSYS
- 	FSYS_RETURN
- END(__kernel_syscall_via_epc)
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/kernel/head.S tmp-linux-2.6-xen.patch/arch/ia64/kernel/head.S
---- pristine-linux-2.6.18.2/arch/ia64/kernel/head.S	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/kernel/head.S	2007-07-30 16:35:11.000000000 +0200
-@@ -367,6 +367,12 @@ start_ap:
++#ifdef __XEN_IA64_VDSO_PARAVIRT
++	XEN_SET_PSR_I(p0)
++#else
++	ssm psr.i
++#endif
++	mov r10=-1
++(p10)	mov r8=EINVAL
++(p9)	mov r8=ENOSYS
++	FSYS_RETURN
++#ifdef __KERNEL_SYSCALL_VIA_EPC_PADDING
++	/*
++	 * The value/size of every __kernel_xxx symbol in gate.so and
++	 * xengate.so must match.
++	 * Adjust the symbol size in gate.so to match the one in xengate.so.
++	 */
++.include "arch/ia64/kernel/gate-skip.s"
++#endif
++END(__kernel_syscall_via_epc)
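
XEN_SET_PSR_I above is the gate-page version of Xen's virtual interrupt enable: clear the per-vcpu event mask, and only if events were masked and one is now pending, trap to the hypervisor to have it delivered. The same sequence in C, with a struct modelled loosely on the shared vcpu_info layout (field names assumed, not copied from the headers):

    #include <stdio.h>

    struct vcpu_info_demo {
        unsigned char evtchn_upcall_pending;
        unsigned char evtchn_upcall_mask; /* nonzero = "psr.i off" */
    };

    static void hyper_ssm_i(void)
    {
        /* stand-in for XEN_HYPER_SSM_I: ask Xen to deliver events */
        puts("hypercall: deliver pending events");
    }

    static void virtual_ssm_i(struct vcpu_info_demo *v)
    {
        unsigned char was_masked = v->evtchn_upcall_mask; /* ld1 r31=[r22] */

        v->evtchn_upcall_mask = 0;                        /* st1 [r22]=r0 */
        if (was_masked && v->evtchn_upcall_pending)       /* cmp.ne; ld1 */
            hyper_ssm_i();    /* trap only when there is work queued */
    }

    int main(void)
    {
        struct vcpu_info_demo v = { 1, 1 };

        virtual_ssm_i(&v);
        return 0;
    }
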
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/kernel/gate.lds.S
+--- a/arch/ia64/kernel/gate.lds.S	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/kernel/gate.lds.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -28,6 +28,24 @@
+   . = GATE_ADDR + 0x500;
+ 
+   .data.patch			: {
++#ifdef __XEN_IA64_VDSO_PARAVIRT
++#define __start_gate_mckinley_e9_patchlist \
++	__start_gate_mckinley_e9_patchlist_xen
++#define __end_gate_mckinley_e9_patchlist \
++	__end_gate_mckinley_e9_patchlist_xen
++#define __start_gate_vtop_patchlist \
++	__start_gate_vtop_patchlist_xen
++#define __end_gate_vtop_patchlist \
++	__end_gate_vtop_patchlist_xen
++#define __start_gate_fsyscall_patchlist \
++	__start_gate_fsyscall_patchlist_xen
++#define __end_gate_fsyscall_patchlist \
++	__end_gate_fsyscall_patchlist_xen
++#define __start_gate_brl_fsys_bubble_down_patchlist \
++	__start_gate_brl_fsys_bubble_down_patchlist_xen
++#define __end_gate_brl_fsys_bubble_down_patchlist \
++	__end_gate_brl_fsys_bubble_down_patchlist_xen
++#endif
+ 				    __start_gate_mckinley_e9_patchlist = .;
+ 				    *(.data.patch.mckinley_e9)
+ 				    __end_gate_mckinley_e9_patchlist = .;
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/kernel/head.S
+--- a/arch/ia64/kernel/head.S	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/kernel/head.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -366,6 +366,12 @@
+ (isBP)	movl r2=ia64_boot_param
  	;;
  (isBP)	st8 [r2]=r28		// save the address of the boot param area passed by the bootloader
- 
++
 +#ifdef CONFIG_XEN
 +	//  Note: isBP is used by the subprogram.
 +	br.call.sptk.many rp=early_xen_setup
 +	;;
 +#endif
-+
+ 
  #ifdef CONFIG_SMP
  (isAP)	br.call.sptk.many rp=start_secondary
- .ret0:
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/kernel/iosapic.c tmp-linux-2.6-xen.patch/arch/ia64/kernel/iosapic.c
---- pristine-linux-2.6.18.2/arch/ia64/kernel/iosapic.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/kernel/iosapic.c	2007-07-30 16:35:11.000000000 +0200
-@@ -159,6 +159,75 @@ static unsigned char pcat_compat __devin
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/kernel/iosapic.c
+--- a/arch/ia64/kernel/iosapic.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/kernel/iosapic.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -159,6 +159,75 @@
  static int iosapic_kmalloc_ok;
  static LIST_HEAD(free_rte_list);
  
@@ -22776,17 +24341,45 @@
  /*
   * Find an IOSAPIC associated with a GSI
   */
-@@ -653,6 +722,9 @@ register_intr (unsigned int gsi, int vec
+@@ -287,6 +356,27 @@
+ {
+ 	/* do nothing... */
+ }
++
++
++#ifdef CONFIG_KEXEC
++void
++kexec_disable_iosapic(void)
++{
++	struct iosapic_intr_info *info;
++	struct iosapic_rte_info *rte;
++	u8 vec = 0;
++	for (info = iosapic_intr_info; info <
++			iosapic_intr_info + IA64_NUM_VECTORS; ++info, ++vec) {
++		list_for_each_entry(rte, &info->rtes,
++				rte_list) {
++			iosapic_write(rte->addr,
++					IOSAPIC_RTE_LOW(rte->rte_index),
++					IOSAPIC_MASK|vec);
++			iosapic_eoi(rte->addr, vec);
++		}
++	}
++}
++#endif
+ 
+ static void
+ mask_irq (unsigned int irq)
+@@ -652,6 +742,9 @@
+ 	iosapic_intr_info[vector].polarity = polarity;
  	iosapic_intr_info[vector].dmode    = delivery;
  	iosapic_intr_info[vector].trigger  = trigger;
- 
++
 +	if (is_running_on_xen())
 +		return 0;
-+
+ 
  	if (trigger == IOSAPIC_EDGE)
  		irq_type = &irq_type_iosapic_edge;
- 	else
-@@ -1015,6 +1087,9 @@ iosapic_system_init (int system_pcat_com
+@@ -1015,6 +1108,9 @@
  	}
  
  	pcat_compat = system_pcat_compat;
@@ -22796,9 +24389,9 @@
  	if (pcat_compat) {
  		/*
  		 * Disable the compatibility mode interrupts (8259 style),
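
kexec_disable_iosapic() in the iosapic.c hunk above masks every routed RTE and EOIs its vector so the kexec'd kernel starts with quiet interrupt controllers. A toy model of the walk, flattened to one RTE per vector and with an assumed mask-bit position:

    #include <stdio.h>

    #define NVEC_DEMO 8
    #define RTE_MASK_DEMO (1u << 16) /* assumed position of IOSAPIC_MASK */

    static unsigned int rte_low[NVEC_DEMO]; /* stands in for iosapic_write() */

    int main(void)
    {
        unsigned int vec;

        for (vec = 0; vec < NVEC_DEMO; vec++) {
            rte_low[vec] |= RTE_MASK_DEMO;  /* mask the pin */
            printf("eoi vector %u\n", vec); /* stands in for iosapic_eoi() */
        }
        return 0;
    }
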
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/kernel/irq_ia64.c tmp-linux-2.6-xen.patch/arch/ia64/kernel/irq_ia64.c
---- pristine-linux-2.6.18.2/arch/ia64/kernel/irq_ia64.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/kernel/irq_ia64.c	2007-09-30 18:06:18.000000000 +0200
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/kernel/irq_ia64.c
+--- a/arch/ia64/kernel/irq_ia64.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/kernel/irq_ia64.c	Wed Sep 10 10:54:08 2008 +0100
 @@ -30,6 +30,9 @@
  #include <linux/smp_lock.h>
  #include <linux/threads.h>
@@ -22809,7 +24402,7 @@
  
  #include <asm/delay.h>
  #include <asm/intrinsics.h>
-@@ -69,6 +72,13 @@ int
+@@ -69,6 +72,13 @@
  assign_irq_vector (int irq)
  {
  	int pos, vector;
@@ -22823,7 +24416,7 @@
   again:
  	pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
  	vector = IA64_FIRST_DEVICE_VECTOR + pos;
-@@ -87,6 +97,13 @@ free_irq_vector (int vector)
+@@ -87,6 +97,13 @@
  	if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
  		return;
  
@@ -22837,7 +24430,7 @@
  	pos = vector - IA64_FIRST_DEVICE_VECTOR;
  	if (!test_and_clear_bit(pos, ia64_vector_mask))
  		printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
-@@ -240,12 +257,277 @@ static struct irqaction ipi_irqaction = 
+@@ -240,11 +257,341 @@
  };
  #endif
  
@@ -23098,72 +24691,45 @@
 +#endif /* CONFIG_SMP */
 +}
 +
-+#endif /* CONFIG_XEN */
++void
++xen_irq_init(void)
++{
++	struct callback_register event = {
++		.type = CALLBACKTYPE_event,
++		.address = (unsigned long)&xen_event_callback,
++	};
 +
- void
- register_percpu_irq (ia64_vector vec, struct irqaction *action)
- {
- 	irq_desc_t *desc;
- 	unsigned int irq;
- 
-+#ifdef CONFIG_XEN
-+	if (is_running_on_xen())
-+		return xen_register_percpu_irq(smp_processor_id(), 
-+					       vec, action, 1);
++	xen_init_IRQ();
++	BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
++	late_time_init = xen_bind_early_percpu_irq;
++#ifdef CONFIG_SMP
++	register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
 +#endif
++}
 +
- 	for (irq = 0; irq < NR_IRQS; ++irq)
- 		if (irq_to_vector(irq) == vec) {
- 			desc = irq_desc + irq;
-@@ -259,6 +541,21 @@ register_percpu_irq (ia64_vector vec, st
- void __init
- init_IRQ (void)
- {
-+#ifdef CONFIG_XEN
-+	/* Maybe put into platform_irq_init later */
-+	if (is_running_on_xen()) {
-+		struct callback_register event = {
-+			.type = CALLBACKTYPE_event,
-+			.address = (unsigned long)&xen_event_callback,
-+		};
-+		xen_init_IRQ();
-+		BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
-+		late_time_init = xen_bind_early_percpu_irq;
-+#ifdef CONFIG_SMP
-+		register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
-+#endif /* CONFIG_SMP */
-+	}
-+#endif /* CONFIG_XEN */
- 	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
- #ifdef CONFIG_SMP
- 	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
-@@ -276,6 +573,54 @@ ia64_send_ipi (int cpu, int vector, int 
- 	unsigned long ipi_data;
- 	unsigned long phys_cpu_id;
- 
-+#ifdef CONFIG_XEN
-+        if (is_running_on_xen()) {
-+		int irq = -1;
++void
++xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect)
++{
++	int irq = -1;
++	extern void xen_send_ipi(int cpu, int vec);
 +
 +#ifdef CONFIG_SMP
-+		/* TODO: we need to call vcpu_up here */
-+		if (unlikely(vector == ap_wakeup_vector)) {
-+			extern void xen_send_ipi (int cpu, int vec);
-+
-+			/* XXX
-+			 * This should be in __cpu_up(cpu) in ia64 smpboot.c
-+			 * like x86. But don't want to modify it,
-+			 * keep it untouched.
-+			 */
-+			xen_smp_intr_init_early(cpu);
++	/* TODO: we need to call vcpu_up here */
++	if (unlikely(vector == ap_wakeup_vector)) {
++		/* XXX
++		 * This should be in __cpu_up(cpu) in ia64 smpboot.c
++		 * like x86. But don't want to modify it,
++		 * keep it untouched.
++		 */
++		xen_smp_intr_init_early(cpu);
 +
-+			xen_send_ipi (cpu, vector);
-+			//vcpu_prepare_and_up(cpu);
-+			return;
-+		}
++		xen_send_ipi (cpu, vector);
++		//vcpu_prepare_and_up(cpu);
++		return;
++	}
 +#endif
 +
-+		switch(vector) {
++	switch (vector) {
 +		case IA64_IPI_VECTOR:
 +			irq = per_cpu(ipi_to_irq, cpu)[IPI_VECTOR];
 +			break;
@@ -23176,25 +24742,331 @@
 +		case IA64_CPEP_VECTOR:
 +			irq = per_cpu(ipi_to_irq, cpu)[CPEP_VECTOR];
 +			break;
++		case IA64_TIMER_VECTOR:
++			xen_send_ipi(cpu, vector);
++			return;
 +		default:
 +			printk(KERN_WARNING "Unsupported IPI type 0x%x\n",
 +			       vector);
 +			irq = 0;
 +			break;
-+		}		
++	}		
 +	
-+		BUG_ON(irq < 0);
-+		notify_remote_via_irq(irq);
-+		return;
-+        }
++	BUG_ON(irq < 0);
++	notify_remote_via_irq(irq);
++	return;
++}
 +#endif /* CONFIG_XEN */
 +
+ void
+ register_percpu_irq (ia64_vector vec, struct irqaction *action)
+ {
+ 	irq_desc_t *desc;
+ 	unsigned int irq;
++
++#ifdef CONFIG_XEN
++	if (is_running_on_xen())
++		return xen_register_percpu_irq(smp_processor_id(), 
++					       vec, action, 1);
++#endif
+ 
+ 	for (irq = 0; irq < NR_IRQS; ++irq)
+ 		if (irq_to_vector(irq) == vec) {
+@@ -267,6 +614,10 @@
+ 	pfm_init_percpu();
+ #endif
+ 	platform_irq_init();
++#ifdef CONFIG_XEN
++	if (is_running_on_xen() && !ia64_platform_is("xen"))
++		xen_irq_init();
++#endif
+ }
+ 
+ void
+@@ -275,6 +626,13 @@
+ 	void __iomem *ipi_addr;
+ 	unsigned long ipi_data;
+ 	unsigned long phys_cpu_id;
++
++#ifdef CONFIG_XEN
++	if (is_running_on_xen()) {
++		xen_platform_send_ipi(cpu, vector, delivery_mode, redirect);
++		return;
++	}
++#endif
+ 
  #ifdef CONFIG_SMP
  	phys_cpu_id = cpu_physical_id(cpu);
- #else
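
The irq_ia64.c changes above reroute classic IA-64 IPIs through Xen event channels: xen_platform_send_ipi() translates a vector into the irq that was bound for that purpose on the target cpu, then signals it with notify_remote_via_irq() instead of a memory-mapped write. A compact sketch of the table-driven dispatch (the table contents and _demo names are invented):

    #include <stdio.h>

    enum { IPI_DEMO, RESCHED_DEMO, CMCP_DEMO, NR_IPIS_DEMO };

    /* stand-in for per_cpu(ipi_to_irq, cpu): the event-channel irq each
     * IPI kind was bound to, per target cpu */
    static const int ipi_to_irq_demo[2][NR_IPIS_DEMO] = {
        { 10, 11, 12 },
        { 20, 21, 22 },
    };

    static void notify_remote_via_irq_demo(int irq)
    {
        printf("kick event channel behind irq %d\n", irq);
    }

    static void send_ipi_demo(int cpu, int kind)
    {
        int irq = ipi_to_irq_demo[cpu][kind];

        if (irq < 0) {
            fprintf(stderr, "unsupported IPI %d\n", kind);
            return;
        }
        notify_remote_via_irq_demo(irq); /* replaces the MMIO IPI write */
    }

    int main(void)
    {
        send_ipi_demo(1, RESCHED_DEMO);
        return 0;
    }
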
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/kernel/pal.S tmp-linux-2.6-xen.patch/arch/ia64/kernel/pal.S
---- pristine-linux-2.6.18.2/arch/ia64/kernel/pal.S	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/kernel/pal.S	2007-07-30 16:35:11.000000000 +0200
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/kernel/machine_kexec.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/kernel/machine_kexec.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,204 @@
++/*
++ * arch/ia64/kernel/machine_kexec.c
++ *
++ * Handle transition of Linux booting another kernel
++ * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
++ * Copyright (C) 2005 Khalid Aziz <khalid.aziz at hp.com>
++ * Copyright (C) 2006 Intel Corp, Zou Nan hai <nanhai.zou at intel.com>
++ *
++ * This source code is licensed under the GNU General Public License,
++ * Version 2.  See the file COPYING for more details.
++ */
++
++#include <linux/mm.h>
++#include <linux/kexec.h>
++#include <linux/cpu.h>
++#include <linux/irq.h>
++#include <asm/mmu_context.h>
++#include <asm/setup.h>
++#include <asm/delay.h>
++#include <asm/meminit.h>
++#ifdef CONFIG_XEN
++#include <xen/interface/kexec.h>
++#include <asm/kexec.h>
++#endif
++
++typedef void (*relocate_new_kernel_t)(unsigned long, unsigned long,
++		struct ia64_boot_param *, unsigned long);
++
++struct kimage *ia64_kimage;
++
++struct resource efi_memmap_res = {
++        .name  = "EFI Memory Map",
++        .start = 0,
++        .end   = 0,
++        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
++};
++
++struct resource boot_param_res = {
++        .name  = "Boot parameter",
++        .start = 0,
++        .end   = 0,
++        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
++};
++
++
++/*
++ * Do whatever setup is needed on the image and the
++ * reboot code buffer to allow us to avoid allocations
++ * later.
++ */
++int machine_kexec_prepare(struct kimage *image)
++{
++	void *control_code_buffer;
++	const unsigned long *func;
++
++	func = (unsigned long *)&relocate_new_kernel;
++	/* Pre-load control code buffer to minimize work in kexec path */
++	control_code_buffer = page_address(image->control_code_page);
++	memcpy((void *)control_code_buffer, (const void *)func[0],
++			relocate_new_kernel_size);
++	flush_icache_range((unsigned long)control_code_buffer,
++			(unsigned long)control_code_buffer + relocate_new_kernel_size);
++	ia64_kimage = image;
++
++	return 0;
++}
++
++void machine_kexec_cleanup(struct kimage *image)
++{
++}
++
++#ifndef CONFIG_XEN
++void machine_shutdown(void)
++{
++	int cpu;
++
++	for_each_online_cpu(cpu) {
++		if (cpu != smp_processor_id())
++			cpu_down(cpu);
++	}
++	kexec_disable_iosapic();
++}
++
++/*
++ * Do not allocate memory (or fail in any way) in machine_kexec().
++ * We are past the point of no return, committed to rebooting now.
++ */
++extern void *efi_get_pal_addr(void);
++static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
++{
++	struct kimage *image = arg;
++	relocate_new_kernel_t rnk;
++	void *pal_addr = efi_get_pal_addr();
++	unsigned long code_addr = (unsigned long)page_address(image->control_code_page);
++	unsigned long vector;
++	int ii;
++
++	if (image->type == KEXEC_TYPE_CRASH) {
++		crash_save_this_cpu();
++		current->thread.ksp = (__u64)info->sw - 16;
++	}
++
++	/* Interrupts aren't acceptable while we reboot */
++	local_irq_disable();
++
++	/* Mask CMC and Performance Monitor interrupts */
++	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
++	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);
++
++	/* Mask ITV and Local Redirect Registers */
++	ia64_set_itv(1 << 16);
++	ia64_set_lrr0(1 << 16);
++	ia64_set_lrr1(1 << 16);
++
++	/* terminate possible nested in-service interrupts */
++	for (ii = 0; ii < 16; ii++)
++		ia64_eoi();
++
++	/* unmask TPR and clear any pending interrupts */
++	ia64_setreg(_IA64_REG_CR_TPR, 0);
++	ia64_srlz_d();
++	vector = ia64_get_ivr();
++	while (vector != IA64_SPURIOUS_INT_VECTOR) {
++		ia64_eoi();
++		vector = ia64_get_ivr();
++	}
++	platform_kernel_launch_event();
++	rnk = (relocate_new_kernel_t)&code_addr;
++	(*rnk)(image->head, image->start, ia64_boot_param,
++		     GRANULEROUNDDOWN((unsigned long) pal_addr));
++	BUG();
++}
++
++void machine_kexec(struct kimage *image)
++{
++	unw_init_running(ia64_machine_kexec, image);
++	for(;;);
++}
++#else /* CONFIG_XEN */
++void machine_kexec_setup_load_arg(xen_kexec_image_t *xki, struct kimage *image)
++{
++	xki->reboot_code_buffer =
++		kexec_page_to_pfn(image->control_code_page) << PAGE_SHIFT;
++}
++
++static struct resource xen_hypervisor_heap_res;
++
++int __init machine_kexec_setup_resources(struct resource *hypervisor,
++					 struct resource *phys_cpus,
++					 int nr_phys_cpus)
++{
++	xen_kexec_range_t range;
++	int k;
++
++	/* fill in xen_hypervisor_heap_res with hypervisor heap
++	 * machine address range
++	 */
++
++	memset(&range, 0, sizeof(range));
++	range.range = KEXEC_RANGE_MA_XENHEAP;
++
++	if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
++		return -1;
++
++	xen_hypervisor_heap_res.name = "Hypervisor heap";
++	xen_hypervisor_heap_res.start = range.start;
++	xen_hypervisor_heap_res.end = range.start + range.size - 1;
++	xen_hypervisor_heap_res.flags = IORESOURCE_BUSY | IORESOURCE_MEM;
++
++	/* The per-cpu crash note  resources belong inside the
++	 * hypervisor heap resource */
++	for (k = 0; k < nr_phys_cpus; k++)
++		request_resource(&xen_hypervisor_heap_res, phys_cpus + k);
++
++	/* fill in efi_memmap_res with EFI memmap machine address range */
++
++	memset(&range, 0, sizeof(range));
++	range.range = KEXEC_RANGE_MA_EFI_MEMMAP;
++
++	if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
++		return -1;
++
++	efi_memmap_res.start = range.start;
++	efi_memmap_res.end = range.start + range.size - 1;
++
++	/* fill in boot_param_res with boot parameter machine address range */
++
++	memset(&range, 0, sizeof(range));
++	range.range = KEXEC_RANGE_MA_BOOT_PARAM;
++
++	if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
++		return -1;
++
++	boot_param_res.start = range.start;
++	boot_param_res.end = range.start + range.size - 1;
++
++	return 0;
++}
++
++void machine_kexec_register_resources(struct resource *res)
++{
++	request_resource(res, &xen_hypervisor_heap_res);
++}
++#endif /* CONFIG_XEN */
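
A subtlety in machine_kexec_prepare() above: on ia64 a C function pointer does not point at code but at a function descriptor (entry address plus gp), which is why the code reads func[0] to obtain the real first instruction of relocate_new_kernel before copying it into the control page. A minimal sketch of that convention; the struct name here is illustrative:

    /* ia64 software conventions: a function pointer addresses a
     * descriptor, not the code itself.  machine_kexec_prepare()
     * relies on this when it dereferences func[0]. */
    struct fdesc_sketch {
            unsigned long ip;       /* entry point of the function */
            unsigned long gp;       /* global pointer for its data */
    };

    static unsigned long entry_point_of(void *fn)
    {
            return ((struct fdesc_sketch *)fn)->ip;
    }
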
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/kernel/mca.c
+--- a/arch/ia64/kernel/mca.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/kernel/mca.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -79,6 +79,7 @@
+ #include <asm/system.h>
+ #include <asm/sal.h>
+ #include <asm/mca.h>
++#include <asm/kexec.h>
+ 
+ #include <asm/irq.h>
+ #include <asm/hw_irq.h>
+@@ -160,11 +161,33 @@
+ 
+ static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
+ 
++#ifdef CONFIG_XEN
++DEFINE_SPINLOCK(ia64_mca_xencomm_lock);
++LIST_HEAD(ia64_mca_xencomm_list);
++
++#define IA64_MCA_XENCOMM_ALLOCATE(rec, desc) \
++	if (is_running_on_xen()) { \
++		ia64_mca_xencomm_t *entry; \
++		entry = alloc_bootmem(sizeof(ia64_mca_xencomm_t)); \
++		entry->record = rec; \
++		entry->handle = desc; \
++		list_add(&entry->list, &ia64_mca_xencomm_list); \
++	}
++#define IA64_LOG_ALLOCATE(it, size) \
++	{ia64_err_rec_t *rec; \
++	ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = rec = \
++		(ia64_err_rec_t *)alloc_bootmem(size); \
++	IA64_MCA_XENCOMM_ALLOCATE(rec, xencomm_map(rec, size)); \
++	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = rec = \
++		(ia64_err_rec_t *)alloc_bootmem(size); \
++	IA64_MCA_XENCOMM_ALLOCATE(rec, xencomm_map(rec, size));}
++#else
+ #define IA64_LOG_ALLOCATE(it, size) \
+ 	{ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
+ 		(ia64_err_rec_t *)alloc_bootmem(size); \
+ 	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
+ 		(ia64_err_rec_t *)alloc_bootmem(size);}
++#endif
+ #define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
+ #define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
+ #define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
+@@ -1066,7 +1089,12 @@
+ 		rh->severity = sal_log_severity_corrected;
+ 		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
+ 		sos->os_status = IA64_MCA_CORRECTED;
+-	}
++ 	} else {
++#ifdef CONFIG_KEXEC
++		atomic_set(&kdump_in_progress, 1);
++		monarch_cpu = -1;
++#endif
++ 	}
+ 	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
+ 			== NOTIFY_STOP)
+ 		ia64_mca_spin(__FUNCTION__);
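
Background on the IA64_MCA_XENCOMM_ALLOCATE macro above: buffers that will later be handed to the hypervisor are registered through xencomm at allocation time, because Xen cannot walk ia64 guest page tables when an MCA fires. Written out as a function, the macro amounts to the following sketch (entry layout taken from the macro itself):

    /* Sketch: what IA64_MCA_XENCOMM_ALLOCATE(rec, desc) does.
     * Each SAL error record is paired with a pre-built xencomm
     * descriptor and remembered on ia64_mca_xencomm_list. */
    static void __init mca_xencomm_track(void *rec, unsigned long size)
    {
            ia64_mca_xencomm_t *entry;

            if (!is_running_on_xen())
                    return;
            entry = alloc_bootmem(sizeof(*entry));
            entry->record = rec;                    /* guest view */
            entry->handle = xencomm_map(rec, size); /* Xen view */
            list_add(&entry->list, &ia64_mca_xencomm_list);
    }
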
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/kernel/pal.S
+--- a/arch/ia64/kernel/pal.S	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/kernel/pal.S	Wed Sep 10 10:54:08 2008 +0100
 @@ -16,6 +16,7 @@
  #include <asm/processor.h>
  
@@ -23203,7 +25075,7 @@
  pal_entry_point:
  	data8 ia64_pal_default_handler
  	.text
-@@ -53,7 +54,7 @@ END(ia64_pal_default_handler)
+@@ -53,7 +54,7 @@
   * in4	       1 ==> clear psr.ic,  0 ==> don't clear psr.ic
   *
   */
@@ -23212,7 +25084,7 @@
  	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
  	alloc loc1 = ar.pfs,5,5,0,0
  	movl loc2 = pal_entry_point
-@@ -90,7 +91,7 @@ GLOBAL_ENTRY(ia64_pal_call_static)
+@@ -90,7 +91,7 @@
  	;;
  	srlz.d				// serialize restoration of psr.l
  	br.ret.sptk.many b0
@@ -23221,97 +25093,55 @@
  
  /*
   * Make a PAL call using the stacked registers calling convention.
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/kernel/patch.c tmp-linux-2.6-xen.patch/arch/ia64/kernel/patch.c
---- pristine-linux-2.6.18.2/arch/ia64/kernel/patch.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/kernel/patch.c	2007-07-30 16:35:11.000000000 +0200
-@@ -184,6 +184,69 @@ patch_brl_fsys_bubble_down (unsigned lon
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/kernel/patch.c
+--- a/arch/ia64/kernel/patch.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/kernel/patch.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -184,9 +184,37 @@
  	ia64_srlz_i();
  }
  
-+#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
-+extern char __start_gate_running_on_xen_patchlist[];
-+extern char __end_gate_running_on_xen_patchlist[];
-+
-+void
-+patch_running_on_xen(unsigned long start, unsigned long end)
-+{
-+	extern int running_on_xen;
-+	s32 *offp = (s32 *)start;
-+	u64 ip;
-+
-+	while (offp < (s32 *)end) {
-+		ip = (u64)ia64_imva((char *)offp + *offp);
-+		ia64_patch_imm64(ip, (u64)&running_on_xen);
-+		ia64_fc((void *)ip);
-+		++offp;
-+	}
-+	ia64_sync_i();
-+	ia64_srlz_i();
-+}
-+
-+static void
-+patch_brl_symaddr(unsigned long start, unsigned long end,
-+                  unsigned long symaddr)
++#ifdef CONFIG_XEN
++void __init
++ia64_patch_gate_xen (void)
 +{
-+	s32 *offp = (s32 *)start;
-+	u64 ip;
-+
-+	while (offp < (s32 *)end) {
-+		ip = (u64)offp + *offp;
-+		ia64_patch_imm60((u64)ia64_imva((void *)ip),
-+				 (u64)(symaddr - (ip & -16)) / 16);
-+		ia64_fc((void *)ip);
-+		++offp;
-+	}
-+	ia64_sync_i();
-+	ia64_srlz_i();
-+}
-+
-+#define EXTERN_PATCHLIST(name)					\
-+	extern char __start_gate_brl_##name##_patchlist[];	\
-+	extern char __end_gate_brl_##name##_patchlist[];	\
-+	extern char name[]
-+
-+#define PATCH_BRL_SYMADDR(name)						\
-+	patch_brl_symaddr((unsigned long)__start_gate_brl_##name##_patchlist, \
-+	                  (unsigned long)__end_gate_brl_##name##_patchlist,   \
-+	                  (unsigned long)name)
++	extern char __start_gate_mckinley_e9_patchlist_xen[], __end_gate_mckinley_e9_patchlist_xen[];
++	extern char __start_gate_vtop_patchlist_xen[], __end_gate_vtop_patchlist_xen[];
++	extern char __start_gate_fsyscall_patchlist_xen[], __end_gate_fsyscall_patchlist_xen[];
++	extern char __start_gate_brl_fsys_bubble_down_patchlist_xen[], __end_gate_brl_fsys_bubble_down_patchlist_xen[];
++#	define START(name)	((unsigned long) __start_gate_##name##_patchlist_xen)
++#	define END(name)	((unsigned long)__end_gate_##name##_patchlist_xen)
 +
-+static void
-+patch_brl_in_vdso(void)
-+{
-+	EXTERN_PATCHLIST(xen_ssm_i_0);
-+	EXTERN_PATCHLIST(xen_ssm_i_1);
++	patch_fsyscall_table(START(fsyscall), END(fsyscall));
++	patch_brl_fsys_bubble_down(START(brl_fsys_bubble_down), END(brl_fsys_bubble_down));
++	ia64_patch_vtop(START(vtop), END(vtop));
++	ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9));
 +
-+	PATCH_BRL_SYMADDR(xen_ssm_i_0);
-+	PATCH_BRL_SYMADDR(xen_ssm_i_1);
++#	undef START
++#	undef END
 +}
 +#else
-+#define patch_running_on_xen(start, end)	do { } while (0)
-+#define patch_brl_in_vdso()			do { } while (0)
++#define ia64_patch_gate_xen()	do { } while (0)
 +#endif
 +
  void __init
  ia64_patch_gate (void)
  {
-@@ -192,6 +255,10 @@ ia64_patch_gate (void)
++	if (is_running_on_xen()) {
++		ia64_patch_gate_xen();
++		return;
++	}
++	
+ #	define START(name)	((unsigned long) __start_gate_##name##_patchlist)
+ #	define END(name)	((unsigned long)__end_gate_##name##_patchlist)
  
- 	patch_fsyscall_table(START(fsyscall), END(fsyscall));
- 	patch_brl_fsys_bubble_down(START(brl_fsys_bubble_down), END(brl_fsys_bubble_down));
-+#ifdef CONFIG_XEN
-+	patch_running_on_xen(START(running_on_xen), END(running_on_xen));
-+	patch_brl_in_vdso();
-+#endif
- 	ia64_patch_vtop(START(vtop), END(vtop));
- 	ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9));
- }
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/kernel/perfmon.c tmp-linux-2.6-xen.patch/arch/ia64/kernel/perfmon.c
---- pristine-linux-2.6.18.2/arch/ia64/kernel/perfmon.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/kernel/perfmon.c	2007-07-30 16:35:11.000000000 +0200
-@@ -52,6 +52,28 @@
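
For the patch.c hunks above: a gate patchlist bracketed by __start_gate_*_patchlist/__end_gate_*_patchlist symbols is an array of s32 entries, each holding an offset from its own address to an instruction bundle that must be rewritten at boot. The removed patch_running_on_xen() shows the idiom; a generic sketch, with fixup() a hypothetical callback applying ia64_patch_imm64() or similar:

    /* Sketch: walk an ia64 patchlist bracketed by linker symbols.
     * Each s32 entry is self-relative, exactly as in the code
     * above; fixup() rewrites the bundle at the resolved address. */
    static void walk_patchlist(unsigned long start, unsigned long end,
                               void (*fixup)(u64 ip))
    {
            s32 *offp = (s32 *)start;
            u64 ip;

            while (offp < (s32 *)end) {
                    ip = (u64)ia64_imva((char *)offp + *offp);
                    fixup(ip);
                    ++offp;
            }
            ia64_sync_i();          /* make the new bundles visible */
            ia64_srlz_i();
    }
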
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/kernel/perfmon.c
+--- a/arch/ia64/kernel/perfmon.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/kernel/perfmon.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -52,6 +52,31 @@
  #include <asm/delay.h>
  
  #ifdef CONFIG_PERFMON
++#include <asm/hypervisor.h>
 +#ifdef CONFIG_XEN
 +//#include <xen/xenoprof.h>
 +#include <xen/interface/xenoprof.h>
@@ -23331,13 +25161,15 @@
 +#define init_xenoprof_primary(is_primary)	do { } while (0)
 +#define is_xenoprof_primary()			(0)
 +#define XEN_NOT_SUPPORTED_YET			do { } while (0)
-+#define HYPERVISOR_perfmon_op(cmd, arg, count)	do { } while (0)
++#define HYPERVISOR_perfmon_op(cmd, arg, count)	(0)
++#define HYPERVISOR_xenoprof_op(op, arg)		({(void)arg;0;})
++struct xenoprof_init { /* dummy */ };
 +#endif
 +
  /*
   * perfmon context state
   */
-@@ -1514,6 +1536,7 @@ pfm_read(struct file *filp, char __user 
+@@ -1514,6 +1539,7 @@
  	ssize_t ret;
  	unsigned long flags;
    	DECLARE_WAITQUEUE(wait, current);
@@ -23345,7 +25177,7 @@
  	if (PFM_IS_FILE(filp) == 0) {
  		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid);
  		return -EINVAL;
-@@ -2112,6 +2135,15 @@ doit:
+@@ -2112,6 +2138,15 @@
  	 */
  	if (free_possible) pfm_context_free(ctx);
  
@@ -23361,7 +25193,7 @@
  	return 0;
  }
  
-@@ -2735,6 +2767,23 @@ pfm_context_create(pfm_context_t *ctx, v
+@@ -2735,6 +2770,23 @@
  	 */
  	pfm_reset_pmu_state(ctx);
  
@@ -23385,7 +25217,7 @@
  	return 0;
  
  buffer_error:
-@@ -2871,6 +2920,12 @@ pfm_write_pmcs(pfm_context_t *ctx, void 
+@@ -2871,6 +2923,12 @@
  	pfm_reg_check_t	wr_func;
  #define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
  
@@ -23398,7 +25230,7 @@
  	state     = ctx->ctx_state;
  	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
  	is_system = ctx->ctx_fl_system;
-@@ -3111,6 +3166,12 @@ pfm_write_pmds(pfm_context_t *ctx, void 
+@@ -3111,6 +3169,12 @@
  	int ret = -EINVAL;
  	pfm_reg_check_t wr_func;
  
@@ -23411,7 +25243,7 @@
  
  	state     = ctx->ctx_state;
  	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
-@@ -3308,6 +3369,7 @@ pfm_read_pmds(pfm_context_t *ctx, void *
+@@ -3308,6 +3372,7 @@
  	int is_loaded, is_system, is_counting, expert_mode;
  	int ret = -EINVAL;
  	pfm_reg_check_t rd_func;
@@ -23419,7 +25251,7 @@
  
  	/*
  	 * access is possible when loaded only for
-@@ -3559,6 +3621,7 @@ pfm_restart(pfm_context_t *ctx, void *ar
+@@ -3559,6 +3624,7 @@
  	pfm_ovfl_ctrl_t rst_ctrl;
  	int state, is_system;
  	int ret = 0;
@@ -23427,7 +25259,7 @@
  
  	state     = ctx->ctx_state;
  	fmt       = ctx->ctx_buf_fmt;
-@@ -3708,6 +3771,7 @@ static int
+@@ -3708,6 +3774,7 @@
  pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
  {
  	unsigned int m = *(unsigned int *)arg;
@@ -23435,7 +25267,7 @@
  
  	pfm_sysctl.debug = m == 0 ? 0 : 1;
  
-@@ -3978,6 +4042,8 @@ pfm_get_features(pfm_context_t *ctx, voi
+@@ -3978,6 +4045,8 @@
  {
  	pfarg_features_t *req = (pfarg_features_t *)arg;
  
@@ -23444,20 +25276,20 @@
  	req->ft_version = PFM_VERSION;
  	return 0;
  }
-@@ -3989,6 +4055,12 @@ pfm_stop(pfm_context_t *ctx, void *arg, 
+@@ -3988,6 +4057,12 @@
+ 	struct pt_regs *tregs;
  	struct task_struct *task = PFM_CTX_TASK(ctx);
  	int state, is_system;
- 
++
 +  	if (is_running_on_xen()) {
 +		if (is_xenoprof_primary())
 +			return HYPERVISOR_perfmon_op(PFM_STOP, NULL, 0);
 +		return 0;
 +  	}
-+
+ 
  	state     = ctx->ctx_state;
  	is_system = ctx->ctx_fl_system;
- 
-@@ -4077,6 +4149,11 @@ pfm_start(pfm_context_t *ctx, void *arg,
+@@ -4077,6 +4152,11 @@
  	struct pt_regs *tregs;
  	int state, is_system;
  
@@ -23469,7 +25301,7 @@
  	state     = ctx->ctx_state;
  	is_system = ctx->ctx_fl_system;
  
-@@ -4159,6 +4236,7 @@ pfm_get_pmc_reset(pfm_context_t *ctx, vo
+@@ -4159,6 +4239,7 @@
  	unsigned int cnum;
  	int i;
  	int ret = -EINVAL;
@@ -23477,7 +25309,7 @@
  
  	for (i = 0; i < count; i++, req++) {
  
-@@ -4217,6 +4295,11 @@ pfm_context_load(pfm_context_t *ctx, voi
+@@ -4217,6 +4298,11 @@
  	int ret = 0;
  	int state, is_system, set_dbregs = 0;
  
@@ -23489,7 +25321,7 @@
  	state     = ctx->ctx_state;
  	is_system = ctx->ctx_fl_system;
  	/*
-@@ -4465,6 +4548,12 @@ pfm_context_unload(pfm_context_t *ctx, v
+@@ -4465,6 +4551,12 @@
  	int prev_state, is_system;
  	int ret;
  
@@ -23502,10 +25334,421 @@
  	DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1));
  
  	prev_state = ctx->ctx_state;
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/kernel/setup.c tmp-linux-2.6-xen.patch/arch/ia64/kernel/setup.c
---- pristine-linux-2.6.18.2/arch/ia64/kernel/setup.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/kernel/setup.c	2007-07-30 16:35:11.000000000 +0200
-@@ -60,6 +60,12 @@
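
All of the perfmon.c hunks above apply one rule: under Xen only the xenoprof primary domain may program the PMU, so each pfm_* entry point is gated before any hardware access. The recurring guard, condensed into a sketch:

    /* Sketch of the guard repeated in pfm_start/pfm_stop/
     * pfm_context_load/... above: the xenoprof primary forwards
     * the command to the hypervisor, secondaries succeed as a
     * no-op, and only native runs fall through to the PMU. */
    static int pfm_xen_gate(int cmd, int *handled)
    {
            *handled = 0;
            if (!is_running_on_xen())
                    return 0;       /* native path continues */
            *handled = 1;
            if (is_xenoprof_primary())
                    return HYPERVISOR_perfmon_op(cmd, NULL, 0);
            return 0;               /* not the profiling domain */
    }
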
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/kernel/relocate_kernel.S
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/kernel/relocate_kernel.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,338 @@
++/*
++ * arch/ia64/kernel/relocate_kernel.S
++ *
++ * Relocate kexec'able kernel and start it
++ *
++ * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
++ * Copyright (C) 2005 Khalid Aziz  <khalid.aziz at hp.com>
++ * Copyright (C) 2005 Intel Corp,  Zou Nan hai <nanhai.zou at intel.com>
++ *
++ * This source code is licensed under the GNU General Public License,
++ * Version 2.  See the file COPYING for more details.
++ */
++#include <asm/asmmacro.h>
++#include <asm/kregs.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++#include <asm/mca_asm.h>
++
++       /* Must be relocatable PIC code callable as a C function
++        */
++GLOBAL_ENTRY(relocate_new_kernel)
++	.prologue
++	alloc r31=ar.pfs,4,0,0,0
++        .body
++.reloc_entry:
++{
++	rsm psr.i| psr.ic
++	mov r2=ip
++}
++	;;
++{
++        flushrs                         // must be first insn in group
++        srlz.i
++}
++	;;
++	dep r2=0,r2,61,3		//to physical address
++	;;
++	//first switch to physical mode
++	add r3=1f-.reloc_entry, r2
++	movl r16 = IA64_PSR_AC|IA64_PSR_BN|IA64_PSR_IC
++	mov ar.rsc=0	          	// put RSE in enforced lazy mode
++	;;
++	add sp=(memory_stack_end - 16 - .reloc_entry),r2
++	add r8=(register_stack - .reloc_entry),r2
++	;;
++	mov r18=ar.rnat
++	mov ar.bspstore=r8
++	;;
++        mov cr.ipsr=r16
++        mov cr.iip=r3
++        mov cr.ifs=r0
++	srlz.i
++	;;
++	mov ar.rnat=r18
++	rfi
++	;;
++1:
++	//physical mode code begin
++	mov b6=in1
++#ifdef CONFIG_XEN
++	mov r28=in2		//already a physical address
++#else
++	dep r28=0,in2,61,3	//to physical address
++
++	// purge all TC entries
++#define O(member)       IA64_CPUINFO_##member##_OFFSET
++        GET_THIS_PADDR(r2, cpu_info)    // load phys addr of cpu_info into r2
++        ;;
++        addl r17=O(PTCE_STRIDE),r2
++        addl r2=O(PTCE_BASE),r2
++        ;;
++        ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));;    	// r18=ptce_base
++        ld4 r19=[r2],4                                  // r19=ptce_count[0]
++        ld4 r21=[r17],4                                 // r21=ptce_stride[0]
++        ;;
++        ld4 r20=[r2]                                    // r20=ptce_count[1]
++        ld4 r22=[r17]                                   // r22=ptce_stride[1]
++        mov r24=r0
++        ;;
++        adds r20=-1,r20
++        ;;
++#undef O
++2:
++        cmp.ltu p6,p7=r24,r19
++(p7)    br.cond.dpnt.few 4f
++        mov ar.lc=r20
++3:
++        ptc.e r18
++        ;;
++        add r18=r22,r18
++        br.cloop.sptk.few 3b
++        ;;
++        add r18=r21,r18
++        add r24=1,r24
++        ;;
++        br.sptk.few 2b
++4:
++        srlz.i
++        ;;
++	//purge TR entry for kernel text and data
++        movl r16=KERNEL_START
++        mov r18=KERNEL_TR_PAGE_SHIFT<<2
++        ;;
++        ptr.i r16, r18
++        ptr.d r16, r18
++        ;;
++        srlz.i
++        ;;
++
++	// purge TR entry for percpu data
++        movl r16=PERCPU_ADDR
++        mov r18=PERCPU_PAGE_SHIFT<<2
++        ;;
++        ptr.d r16,r18
++        ;;
++        srlz.d
++	;;
++
++        // purge TR entry for pal code
++        mov r16=in3
++        mov r18=IA64_GRANULE_SHIFT<<2
++        ;;
++        ptr.i r16,r18
++        ;;
++        srlz.i
++	;;
++
++        // purge TR entry for stack
++        mov r16=IA64_KR(CURRENT_STACK)
++        ;;
++        shl r16=r16,IA64_GRANULE_SHIFT
++        movl r19=PAGE_OFFSET
++        ;;
++        add r16=r19,r16
++        mov r18=IA64_GRANULE_SHIFT<<2
++        ;;
++        ptr.d r16,r18
++        ;;
++        srlz.i
++	;;
++#endif /* ! CONFIG_XEN */
++
++	//copy segments
++	movl r16=PAGE_MASK
++        mov  r30=in0                    // in0 is page_list
++        br.sptk.few .dest_page
++	;;
++.loop:
++	ld8  r30=[in0], 8;;
++.dest_page:
++	tbit.z p0, p6=r30, 0;;    	// 0x1 dest page
++(p6)	and r17=r30, r16
++(p6)	br.cond.sptk.few .loop;;
++
++	tbit.z p0, p6=r30, 1;;		// 0x2 indirect page
++(p6)	and in0=r30, r16
++(p6)	br.cond.sptk.few .loop;;
++
++	tbit.z p0, p6=r30, 2;;		// 0x4 end flag
++(p6)	br.cond.sptk.few .end_loop;;
++
++	tbit.z p6, p0=r30, 3;;		// 0x8 source page
++(p6)	br.cond.sptk.few .loop
++
++	and r18=r30, r16
++
++	// simple copy page, may optimize later
++	movl r14=PAGE_SIZE/8 - 1;;
++	mov ar.lc=r14;;
++1:
++	ld8 r14=[r18], 8;;
++	st8 [r17]=r14;;
++	fc.i r17
++	add r17=8, r17
++	br.ctop.sptk.few 1b
++	br.sptk.few .loop
++	;;
++
++.end_loop:
++	sync.i			// for fc.i
++	;;
++	srlz.i
++	;;
++	srlz.d
++	;;
++	br.call.sptk.many b0=b6;;
++
++.align  32
++memory_stack:
++	.fill           8192, 1, 0
++memory_stack_end:
++register_stack:
++	.fill           8192, 1, 0
++register_stack_end:
++relocate_new_kernel_end:
++END(relocate_new_kernel)
++
++.global relocate_new_kernel_size
++relocate_new_kernel_size:
++	data8	relocate_new_kernel_end - relocate_new_kernel
++
++GLOBAL_ENTRY(ia64_dump_cpu_regs)
++        .prologue
++        alloc loc0=ar.pfs,1,2,0,0
++        .body
++        mov     ar.rsc=0                // put RSE in enforced lazy mode
++        add     loc1=4*8, in0           // save r4 and r5 first
++        ;;
++{
++        flushrs                         // flush dirty regs to backing store
++        srlz.i
++}
++        st8 [loc1]=r4, 8
++        ;;
++        st8 [loc1]=r5, 8
++        ;;
++        add loc1=32*8, in0
++        mov r4=ar.rnat
++        ;;
++        st8 [in0]=r0, 8			// r0
++        st8 [loc1]=r4, 8		// rnat
++        mov r5=pr
++        ;;
++        st8 [in0]=r1, 8			// r1
++        st8 [loc1]=r5, 8		// pr
++        mov r4=b0
++        ;;
++        st8 [in0]=r2, 8			// r2
++        st8 [loc1]=r4, 8		// b0
++        mov r5=b1;
++        ;;
++        st8 [in0]=r3, 24		// r3
++        st8 [loc1]=r5, 8		// b1
++        mov r4=b2
++        ;;
++        st8 [in0]=r6, 8			// r6
++        st8 [loc1]=r4, 8		// b2
++	mov r5=b3
++        ;;
++        st8 [in0]=r7, 8			// r7
++        st8 [loc1]=r5, 8		// b3
++        mov r4=b4
++        ;;
++        st8 [in0]=r8, 8			// r8
++        st8 [loc1]=r4, 8		// b4
++        mov r5=b5
++        ;;
++        st8 [in0]=r9, 8			// r9
++        st8 [loc1]=r5, 8		// b5
++        mov r4=b6
++        ;;
++        st8 [in0]=r10, 8		// r10
++        st8 [loc1]=r4, 8		// b6
++        mov r5=b7
++        ;;
++        st8 [in0]=r11, 8		// r11
++        st8 [loc1]=r5, 8		// b7
++        mov r4=b0
++        ;;
++        st8 [in0]=r12, 8		// r12
++        st8 [loc1]=r4, 8		// ip
++        mov r5=loc0
++	;;
++        st8 [in0]=r13, 8		// r13
++        extr.u r5=r5, 0, 38		// ar.pfs.pfm
++	mov r4=r0			// user mask
++        ;;
++        st8 [in0]=r14, 8		// r14
++        st8 [loc1]=r5, 8		// cfm
++        ;;
++        st8 [in0]=r15, 8		// r15
++        st8 [loc1]=r4, 8        	// user mask
++	mov r5=ar.rsc
++        ;;
++        st8 [in0]=r16, 8		// r16
++        st8 [loc1]=r5, 8        	// ar.rsc
++        mov r4=ar.bsp
++        ;;
++        st8 [in0]=r17, 8		// r17
++        st8 [loc1]=r4, 8        	// ar.bsp
++        mov r5=ar.bspstore
++        ;;
++        st8 [in0]=r18, 8		// r18
++        st8 [loc1]=r5, 8        	// ar.bspstore
++        mov r4=ar.rnat
++        ;;
++        st8 [in0]=r19, 8		// r19
++        st8 [loc1]=r4, 8        	// ar.rnat
++        mov r5=ar.ccv
++        ;;
++        st8 [in0]=r20, 8		// r20
++	st8 [loc1]=r5, 8        	// ar.ccv
++        mov r4=ar.unat
++        ;;
++        st8 [in0]=r21, 8		// r21
++        st8 [loc1]=r4, 8        	// ar.unat
++        mov r5 = ar.fpsr
++        ;;
++        st8 [in0]=r22, 8		// r22
++        st8 [loc1]=r5, 8        	// ar.fpsr
++        mov r4 = ar.unat
++        ;;
++        st8 [in0]=r23, 8		// r23
++        st8 [loc1]=r4, 8        	// unat
++        mov r5 = ar.fpsr
++        ;;
++        st8 [in0]=r24, 8		// r24
++        st8 [loc1]=r5, 8        	// fpsr
++        mov r4 = ar.pfs
++        ;;
++        st8 [in0]=r25, 8		// r25
++        st8 [loc1]=r4, 8        	// ar.pfs
++        mov r5 = ar.lc
++        ;;
++        st8 [in0]=r26, 8		// r26
++        st8 [loc1]=r5, 8        	// ar.lc
++        mov r4 = ar.ec
++        ;;
++        st8 [in0]=r27, 8		// r27
++        st8 [loc1]=r4, 8        	// ar.ec
++        mov r5 = ar.csd
++        ;;
++        st8 [in0]=r28, 8		// r28
++        st8 [loc1]=r5, 8        	// ar.csd
++        mov r4 = ar.ssd
++        ;;
++        st8 [in0]=r29, 8		// r29
++        st8 [loc1]=r4, 8        	// ar.ssd
++        ;;
++        st8 [in0]=r30, 8		// r30
++        ;;
++	st8 [in0]=r31, 8		// r31
++        mov ar.pfs=loc0
++        ;;
++        br.ret.sptk.many rp
++END(ia64_dump_cpu_regs)
++
++
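
The segment-copy loop in relocate_new_kernel above decodes the generic kexec page-list encoding with tbit tests: bit 0 tags a destination page, bit 1 an indirection page, bit 2 the end marker and bit 3 a source page, with the page-aligned address in the remaining bits. The same walk in C, using the IND_* constants from <linux/kexec.h> (sketch only):

    #include <linux/kexec.h>    /* IND_DESTINATION, IND_SOURCE, ... */
    #include <linux/string.h>

    /* Sketch: C rendering of the assembly copy loop above. */
    static void copy_segments(unsigned long *entry)
    {
            unsigned long dest = 0;

            for (;; entry++) {
                    unsigned long e = *entry;
                    unsigned long addr = e & PAGE_MASK;

                    if (e & IND_DESTINATION)        /* 0x1 */
                            dest = addr;
                    else if (e & IND_INDIRECTION)   /* 0x2 */
                            entry = (unsigned long *)addr - 1;
                    else if (e & IND_DONE)          /* 0x4 */
                            break;
                    else if (e & IND_SOURCE) {      /* 0x8 */
                            memcpy((void *)dest, (void *)addr, PAGE_SIZE);
                            dest += PAGE_SIZE;
                    }
            }
    }
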
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/kernel/salinfo.c
+--- a/arch/ia64/kernel/salinfo.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/kernel/salinfo.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -375,6 +375,25 @@
+ 		data->open = 0;
+ 		return -ENOMEM;
+ 	}
++#ifdef CONFIG_XEN
++	if (is_running_on_xen()) {
++		ia64_mca_xencomm_t *entry;
++		unsigned long flags;
++
++		entry = vmalloc(sizeof(ia64_mca_xencomm_t));
++		if (!entry) {
++			data->open = 0;
++			vfree(data->log_buffer);
++			return -ENOMEM;
++		}
++		entry->record = data->log_buffer;
++		entry->handle = xencomm_map(data->log_buffer, 
++					ia64_sal_get_state_info_size(data->type));
++		spin_lock_irqsave(&ia64_mca_xencomm_lock, flags);
++		list_add(&entry->list, &ia64_mca_xencomm_list);
++		spin_unlock_irqrestore(&ia64_mca_xencomm_lock, flags);
++	}
++#endif
+ 
+ 	return 0;
+ }
+@@ -386,6 +405,30 @@
+ 	struct salinfo_data *data = entry->data;
+ 
+ 	if (data->state == STATE_NO_DATA) {
++#ifdef CONFIG_XEN
++		if (is_running_on_xen()) {
++			struct list_head *pos, *n;
++			ia64_mca_xencomm_t *found_entry = NULL;
++			unsigned long flags;
++
++			spin_lock_irqsave(&ia64_mca_xencomm_lock, flags);
++			list_for_each_safe(pos, n, &ia64_mca_xencomm_list) {
++				ia64_mca_xencomm_t *entry;
++
++				entry = list_entry(pos, ia64_mca_xencomm_t, list);
++				if (entry->record == data->log_buffer) {
++					list_del(&entry->list);
++					found_entry = entry;
++					break;
++				}
++			}
++			spin_unlock_irqrestore(&ia64_mca_xencomm_lock, flags);
++			if (found_entry) {
++				xencomm_free(found_entry->handle);
++				vfree(found_entry);
++			}
++		}
++#endif
+ 		vfree(data->log_buffer);
+ 		vfree(data->oemdata);
+ 		data->log_buffer = NULL;
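
The salinfo release path above must locate and destroy exactly the tracking entry created at open time: unlink under ia64_mca_xencomm_lock, then free the xencomm descriptor before the log buffer itself is vfreed. The pairing, condensed into a sketch:

    /* Sketch: teardown counterpart of the open-time tracking,
     * mirroring the list walk above: find the entry for a buffer,
     * unlink it, and free the xencomm handle first. */
    static void mca_xencomm_untrack(void *rec)
    {
            ia64_mca_xencomm_t *entry, *found = NULL;
            unsigned long flags;

            spin_lock_irqsave(&ia64_mca_xencomm_lock, flags);
            list_for_each_entry(entry, &ia64_mca_xencomm_list, list)
                    if (entry->record == rec) {
                            list_del(&entry->list);
                            found = entry;
                            break;
                    }
            spin_unlock_irqrestore(&ia64_mca_xencomm_lock, flags);
            if (found) {
                    xencomm_free(found->handle);
                    vfree(found);
            }
    }
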
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/kernel/setup.c
+--- a/arch/ia64/kernel/setup.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/kernel/setup.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -43,6 +43,8 @@
+ #include <linux/initrd.h>
+ #include <linux/pm.h>
+ #include <linux/cpufreq.h>
++#include <linux/kexec.h>
++#include <linux/crash_dump.h>
+ 
+ #include <asm/ia32.h>
+ #include <asm/machvec.h>
+@@ -60,6 +62,12 @@
  #include <asm/system.h>
  #include <asm/unistd.h>
  #include <asm/system.h>
@@ -23518,10 +25761,12 @@
  
  #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
  # error "struct cpuinfo_ia64 too big!"
-@@ -70,6 +76,34 @@ unsigned long __per_cpu_offset[NR_CPUS];
+@@ -68,6 +76,34 @@
+ #ifdef CONFIG_SMP
+ unsigned long __per_cpu_offset[NR_CPUS];
  EXPORT_SYMBOL(__per_cpu_offset);
- #endif
- 
++#endif
++
 +#ifdef CONFIG_XEN
 +static void
 +xen_panic_hypercall(struct unw_frame_info *info, void *arg)
@@ -23548,12 +25793,10 @@
 +	local_irq_disable();
 +	HYPERVISOR_shutdown(SHUTDOWN_poweroff);
 +}
-+#endif
-+
- extern void ia64_setup_printk_clock(void);
+ #endif
  
- DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
-@@ -242,6 +276,14 @@ reserve_memory (void)
+ extern void ia64_setup_printk_clock(void);
+@@ -242,6 +278,14 @@
  	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
  	n++;
  
@@ -23568,59 +25811,147 @@
  #ifdef CONFIG_BLK_DEV_INITRD
  	if (ia64_boot_param->initrd_start) {
  		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
-@@ -402,6 +444,19 @@ setup_arch (char **cmdline_p)
+@@ -253,6 +297,56 @@
+ 	efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
+ 	n++;
+ 
++#ifdef CONFIG_KEXEC
++	/* crashkernel=size@offset specifies the size to reserve for a crash
++	 * kernel. If offset is 0, then it is determined automatically.
++	 * By reserving this memory we guarantee that Linux never sets it
++	 * up as a DMA target. Useful for holding code to do something
++	 * appropriate after a kernel panic.
++	 */
++	{
++		char *from = strstr(saved_command_line, "crashkernel=");
++		unsigned long base, size;
++#ifdef CONFIG_XEN
++		if (is_initial_xendomain() && from)
++				printk("Ignoring crashkernel command line, "
++				       "parameter will be supplied by xen\n");
++		else {
++#endif
++		if (from) {
++			size = memparse(from + 12, &from);
++			if (*from == '@')
++				base = memparse(from+1, &from);
++			else
++				base = 0;
++			if (size) {
++				if (!base) {
++					sort_regions(rsvd_region, n);
++					base = kdump_find_rsvd_region(size,
++							      	rsvd_region, n);
++					}
++				if (base != ~0UL) {
++					rsvd_region[n].start =
++						(unsigned long)__va(base);
++					rsvd_region[n].end =
++						(unsigned long)__va(base + size);
++					n++;
++					crashk_res.start = base;
++					crashk_res.end = base + size - 1;
++				}
++			}
++		}
++		efi_memmap_res.start = ia64_boot_param->efi_memmap;
++                efi_memmap_res.end = efi_memmap_res.start +
++                        ia64_boot_param->efi_memmap_size;
++                boot_param_res.start = kexec_virt_to_phys(ia64_boot_param);
++                boot_param_res.end = boot_param_res.start +
++                        sizeof(*ia64_boot_param);
++#ifdef CONFIG_XEN
++		}
++#endif
++	}
++#endif
+ 	/* end of memory marker */
+ 	rsvd_region[n].start = ~0UL;
+ 	rsvd_region[n].end   = ~0UL;
+@@ -263,6 +357,7 @@
+ 
+ 	sort_regions(rsvd_region, num_rsvd_regions);
+ }
++
+ 
+ /**
+  * find_initrd - get initrd parameters from the boot parameter structure
+@@ -397,10 +492,48 @@
+ }
+ early_param("nomca", setup_nomca);
+ 
++#ifdef CONFIG_PROC_VMCORE
++/* elfcorehdr= specifies the location of elf core header
++ * stored by the crashed kernel.
++ */
++static int __init parse_elfcorehdr(char *arg)
++{
++	if (!arg)
++		return -EINVAL;
++
++        elfcorehdr_addr = memparse(arg, &arg);
++	return 0;
++}
++early_param("elfcorehdr", parse_elfcorehdr);
++#endif /* CONFIG_PROC_VMCORE */
++
+ void __init
+ setup_arch (char **cmdline_p)
  {
++#ifdef CONFIG_XEN
++	shared_info_t *s = NULL;
++	if (is_running_on_xen()) {
++		s = HYPERVISOR_shared_info;
++		xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT);
++	}
++#endif
++
  	unw_init();
- 
++
 +#ifdef CONFIG_XEN
 +	if (is_running_on_xen()) {
 +		/* Must be done before any hypercall.  */
-+		xencomm_init();
++		xencomm_initialize();
 +
 +		setup_xen_features();
 +		/* Register a call for panic conditions. */
 +		atomic_notifier_chain_register(&panic_notifier_list,
 +		                               &xen_panic_block);
 +		pm_power_off = xen_pm_power_off;
++
++		xen_ia64_enable_opt_feature();
 +	}
 +#endif
-+
+ 
  	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
  
- 	*cmdline_p = __va(ia64_boot_param->command_line);
-@@ -478,14 +533,79 @@ setup_arch (char **cmdline_p)
- 			conswitchp = &vga_con;
- # endif
- 	}
+@@ -464,6 +597,57 @@
+ 	acpi_boot_init();
+ #endif
+ 
 +#ifdef CONFIG_XEN
 +	if (is_running_on_xen()) {
-+		shared_info_t *s = HYPERVISOR_shared_info;
-+
-+		xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT);
-+
 +		printk("Running on Xen! start_info_pfn=0x%lx nr_pages=%ld "
 +		       "flags=0x%x\n", s->arch.start_info_pfn,
 +		       xen_start_info->nr_pages, xen_start_info->flags);
 +
-+		if (!is_initial_xendomain()) {
-+#if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE)
-+			conswitchp = NULL;
-+#endif
-+		}
-+		
 +		/*
 +		 * If a console= is NOT specified, we assume using the
-+		 * xencons console is desired.  By default, this is ttyS0
-+		 * for dom0 and tty0 for domU.
++		 * xencons console is desired.  By default, this is xvc0
++		 * for both dom0 and domU.
 +		 */
 +		if (!strstr(*cmdline_p, "console=")) {
-+			char *p, *q, name[5];
++			char *p, *q, name[5] = "xvc";
 +			int offset = 0;
 +
-+			if (is_initial_xendomain())
-+				strncpy(name, "ttyS", 4);
-+			else
++#if defined(CONFIG_VGA_CONSOLE)
++			/*
++			 * conswitchp might be set intelligently from the
++			 * PCDP code.  If set to VGA console, use it.
++			 */
++			if (is_initial_xendomain() && conswitchp == &vga_con)
 +				strncpy(name, "tty", 3);
++#endif
 +
 +			p = strstr(*cmdline_p, "xencons=");
 +
@@ -23647,47 +25978,114 @@
 +				add_preferred_console(name, offset, NULL);
 +		}
 +	}
-+	xencons_early_setup();
 +#endif
++
+ #ifdef CONFIG_VT
+ 	if (!conswitchp) {
+ # if defined(CONFIG_DUMMY_CONSOLE)
+@@ -483,11 +667,28 @@
  #endif
  
-+
  	/* enable IA-64 Machine Check Abort Handling unless disabled */
 +#ifdef CONFIG_XEN
-+	if (is_running_on_xen() && !is_initial_xendomain())
++	if (is_running_on_xen() && !is_initial_xendomain()) {
 +		nomca = 1;
++#if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE)
++		conswitchp = NULL;
++#endif
++	}
 +#endif
  	if (!nomca)
  		ia64_mca_init();
  
  	platform_setup(cmdline_p);
++#ifdef CONFIG_XEN
++	if (is_running_on_xen() && !ia64_platform_is("xen")) {
++		extern ia64_mv_setup_t xen_setup;
++		xen_setup(cmdline_p);
++	}
++#endif
  	paging_init();
 +#ifdef CONFIG_XEN
-+	contiguous_bitmap_init(max_pfn);
++	xen_contiguous_bitmap_init(max_pfn);
 +#endif
  }
  
  /*
-@@ -870,6 +990,15 @@ cpu_init (void)
+@@ -872,6 +1073,13 @@
  	/* size of physical stacked register partition plus 8 bytes: */
  	__get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
  	platform_cpu_init();
-+
 +#ifdef CONFIG_XEN
-+	/* Need to be moved into platform_cpu_init later */
-+	if (is_running_on_xen()) {
-+		extern void xen_smp_intr_init(void);
-+		xen_smp_intr_init();
++	if (is_running_on_xen() && !ia64_platform_is("xen")) {
++		extern ia64_mv_cpu_init_t xen_cpu_init;
++		xen_cpu_init();
 +	}
 +#endif
 +
  	pm_idle = default_idle;
  }
  
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/kernel/smp.c tmp-linux-2.6-xen.patch/arch/ia64/kernel/smp.c
---- pristine-linux-2.6.18.2/arch/ia64/kernel/smp.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/kernel/smp.c	2007-10-14 01:51:15.000000000 +0200
-@@ -328,10 +328,14 @@ int
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/kernel/smp.c
+--- a/arch/ia64/kernel/smp.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/kernel/smp.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -30,6 +30,7 @@
+ #include <linux/delay.h>
+ #include <linux/efi.h>
+ #include <linux/bitops.h>
++#include <linux/kexec.h>
+ 
+ #include <asm/atomic.h>
+ #include <asm/current.h>
+@@ -66,6 +67,7 @@
+ 
+ #define IPI_CALL_FUNC		0
+ #define IPI_CPU_STOP		1
++#define IPI_KDUMP_CPU_STOP	3
+ 
+ /* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
+ static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
+@@ -155,7 +157,11 @@
+ 			      case IPI_CPU_STOP:
+ 				stop_this_cpu();
+ 				break;
+-
++#ifdef CONFIG_KEXEC
++			      case IPI_KDUMP_CPU_STOP:
++				unw_init_running(kdump_cpu_freeze, NULL);
++				break;
++#endif
+ 			      default:
+ 				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
+ 				break;
+@@ -213,6 +219,26 @@
+ 	send_IPI_single(smp_processor_id(), op);
+ }
+ 
++#ifdef CONFIG_KEXEC
++void
++kdump_smp_send_stop()
++{
++ 	send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
++}
++
++void
++kdump_smp_send_init()
++{
++	unsigned int cpu, self_cpu;
++	self_cpu = smp_processor_id();
++	for_each_online_cpu(cpu) {
++		if (cpu != self_cpu) {
++			if(kdump_status[cpu] == 0)
++				platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
++		}
++	}
++}
++#endif
+ /*
+  * Called with preemption disabled.
+  */
+@@ -328,10 +354,14 @@
  smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
  {
  	struct call_data_struct data;
@@ -23704,22 +26102,23 @@
  
  	/* Can deadlock when called with interrupts disabled */
  	WARN_ON(irqs_disabled());
-@@ -343,8 +347,6 @@ smp_call_function (void (*func) (void *i
+@@ -342,8 +372,6 @@
+ 	data.wait = wait;
  	if (wait)
  		atomic_set(&data.finished, 0);
- 
--	spin_lock(&call_lock);
 -
+-	spin_lock(&call_lock);
+ 
  	call_data = &data;
  	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
- 	send_IPI_allbutself(IPI_CALL_FUNC);
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/kernel/time.c tmp-linux-2.6-xen.patch/arch/ia64/kernel/time.c
---- pristine-linux-2.6.18.2/arch/ia64/kernel/time.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/kernel/time.c	2007-09-30 18:06:18.000000000 +0200
-@@ -29,6 +29,13 @@
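
The smp.c hunks above add a two-stage stop for kdump: kdump_smp_send_stop() asks every other CPU to freeze via IPI_KDUMP_CPU_STOP (handled through unw_init_running(kdump_cpu_freeze, NULL)), and kdump_smp_send_init() then delivers an unmaskable INIT to any CPU whose kdump_status slot never flipped. A sketch of how a caller would combine the two; the grace period is illustrative:

    /* Sketch: orchestrating the two-stage stop added above.
     * kdump_status[] is assumed to be set by kdump_cpu_freeze()
     * on each CPU that handled the friendly IPI in time. */
    static void kdump_stop_cpus(void)
    {
            kdump_smp_send_stop();  /* stage 1: IPI_KDUMP_CPU_STOP */
            mdelay(100);            /* illustrative grace period */
            kdump_smp_send_init();  /* stage 2: INIT the stragglers */
    }
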
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/kernel/time.c
+--- a/arch/ia64/kernel/time.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/kernel/time.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -29,6 +29,14 @@
  #include <asm/sections.h>
  #include <asm/system.h>
  
++#include <asm/hypervisor.h>
 +#ifdef CONFIG_XEN
 +#include <linux/kernel_stat.h>
 +#include <linux/posix-timers.h>
@@ -23730,7 +26129,7 @@
  extern unsigned long wall_jiffies;
  
  volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */
-@@ -40,16 +47,109 @@ EXPORT_SYMBOL(last_cli_ip);
+@@ -40,16 +48,109 @@
  
  #endif
  
@@ -23840,30 +26239,30 @@
  
  	if (unlikely(cpu_is_offline(smp_processor_id()))) {
  		return IRQ_HANDLED;
-@@ -65,6 +165,13 @@ timer_interrupt (int irq, void *dev_id, 
+@@ -64,6 +165,13 @@
+ 		       ia64_get_itc(), new_itm);
  
  	profile_tick(CPU_PROFILING, regs);
- 
++
 +	if (is_running_on_xen()) {
 +		delta_itm = consider_steal_time(new_itm, regs);
 +		new_itm += delta_itm;
 +		if (time_after(new_itm, ia64_get_itc()) && delta_itm)
 +			goto skip_process_time_accounting;
 +	}
-+
+ 
  	while (1) {
  		update_process_times(user_mode(regs));
- 
-@@ -88,6 +195,8 @@ timer_interrupt (int irq, void *dev_id, 
+@@ -87,6 +195,8 @@
+ 		if (time_after(new_itm, ia64_get_itc()))
  			break;
  	}
- 
-+skip_process_time_accounting:	/* XEN */
 +
++skip_process_time_accounting:	/* XEN */
+ 
  	do {
  		/*
- 		 * If we're too close to the next clock tick for
-@@ -142,6 +251,85 @@ static int __init nojitter_setup(char *s
+@@ -142,6 +252,85 @@
  
  __setup("nojitter", nojitter_setup);
  
@@ -23949,7 +26348,7 @@
  
  void __devinit
  ia64_init_itm (void)
-@@ -225,6 +413,12 @@ ia64_init_itm (void)
+@@ -225,6 +414,12 @@
  		register_time_interpolator(&itc_interpolator);
  	}
  
@@ -23962,88 +26361,169 @@
  	/* Setup the CPU local timer tick */
  	ia64_cpu_local_tick();
  }
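
The timer_interrupt() changes above fold Xen steal-time accounting into the tick: consider_steal_time() (added by the @@ -142,6 +252,85 @@ hunk, body not shown in this excerpt) returns the ITC delta corresponding to ticks the vCPU never got to run, and the handler pushes new_itm forward by that amount instead of billing the lost periods to processes. The underlying idea, sketched against the Xen runstate interface; everything except the RUNSTATE_* fields is illustrative:

    #include <xen/interface/vcpu.h>     /* struct vcpu_runstate_info */

    /* Illustrative: time a vCPU spent runnable-but-preempted or
     * offline since the last tick is "stolen" and should not be
     * charged to the interrupted task. */
    static u64 stolen_ns(const struct vcpu_runstate_info *now,
                         const struct vcpu_runstate_info *last)
    {
            return (now->time[RUNSTATE_runnable]
                    - last->time[RUNSTATE_runnable])
                 + (now->time[RUNSTATE_offline]
                    - last->time[RUNSTATE_offline]);
    }
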
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/Makefile tmp-linux-2.6-xen.patch/arch/ia64/Makefile
---- pristine-linux-2.6.18.2/arch/ia64/Makefile	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/Makefile	2007-07-30 16:35:11.000000000 +0200
-@@ -45,6 +45,12 @@ ifeq ($(call cc-version),0304)
- endif
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/kernel/vmlinux.lds.S
+--- a/arch/ia64/kernel/vmlinux.lds.S	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/kernel/vmlinux.lds.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -183,6 +183,12 @@
+ 	  __start_gate_section = .;
+ 	  *(.data.gate)
+ 	  __stop_gate_section = .;
++#if defined(CONFIG_XEN)
++	  . = ALIGN(PAGE_SIZE);
++	  __start_xen_gate_section = .;
++	  *(.data.gate.xen)
++	  __stop_xen_gate_section = .;
++#endif
+ 	}
+   . = ALIGN(PAGE_SIZE);		/* make sure the gate page doesn't expose kernel data */
  
- CFLAGS += $(cflags-y)
-+
-+cppflags-$(CONFIG_XEN) += \
-+	-D__XEN_INTERFACE_VERSION__=$(CONFIG_XEN_INTERFACE_VERSION)
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/kernel/xengate-data.S
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/kernel/xengate-data.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,3 @@
++	.section .data.gate.xen, "aw"
 +
-+CPPFLAGS += $(cppflags-y)
++	.incbin "arch/ia64/kernel/xengate.so"
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/mm/contig.c
+--- a/arch/ia64/mm/contig.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/mm/contig.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -18,6 +18,9 @@
+ #include <linux/efi.h>
+ #include <linux/mm.h>
+ #include <linux/swap.h>
++#if defined(CONFIG_XEN) && defined(CONFIG_KEXEC)
++#include <linux/kexec.h>
++#endif
+ 
+ #include <asm/meminit.h>
+ #include <asm/pgalloc.h>
+@@ -172,8 +175,12 @@
+ 	/* Free all available memory, then mark bootmem-map as being in use. */
+ 	efi_memmap_walk(filter_rsvd_memory, free_bootmem);
+ 	reserve_bootmem(bootmap_start, bootmap_size);
++#if defined(CONFIG_XEN) && defined(CONFIG_KEXEC)
++	xen_machine_kexec_setup_resources();
++#endif
+ 
+ 	find_initrd();
 +
- head-y := arch/ia64/kernel/head.o arch/ia64/kernel/init_task.o
+ }
  
- libs-y				+= arch/ia64/lib/
-@@ -55,9 +61,15 @@ core-$(CONFIG_IA64_GENERIC) 	+= arch/ia6
- core-$(CONFIG_IA64_HP_ZX1)	+= arch/ia64/dig/
- core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
- core-$(CONFIG_IA64_SGI_SN2)	+= arch/ia64/sn/
-+core-$(CONFIG_XEN)		+= arch/ia64/xen/
+ #ifdef CONFIG_SMP
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/mm/discontig.c
+--- a/arch/ia64/mm/discontig.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/mm/discontig.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -21,6 +21,9 @@
+ #include <linux/acpi.h>
+ #include <linux/efi.h>
+ #include <linux/nodemask.h>
++#if defined(CONFIG_XEN) && defined(CONFIG_KEXEC)
++#include <linux/kexec.h>
++#endif
+ #include <asm/pgalloc.h>
+ #include <asm/tlb.h>
+ #include <asm/meminit.h>
+@@ -502,6 +505,9 @@
+ 	reserve_pernode_space();
+ 	memory_less_nodes();
+ 	initialize_pernode_data();
++#if defined(CONFIG_XEN) && defined(CONFIG_KEXEC)
++	xen_machine_kexec_setup_resources();
++#endif
  
- drivers-$(CONFIG_PCI)		+= arch/ia64/pci/
-+ifneq ($(CONFIG_XEN),y)
- drivers-$(CONFIG_IA64_HP_SIM)	+= arch/ia64/hp/sim/
-+endif
-+ifneq ($(CONFIG_IA64_GENERIC),y)
-+drivers-$(CONFIG_XEN)		+= arch/ia64/hp/sim/
-+endif
- drivers-$(CONFIG_IA64_HP_ZX1)	+= arch/ia64/hp/common/ arch/ia64/hp/zx1/
- drivers-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
- drivers-$(CONFIG_IA64_GENERIC)	+= arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ arch/ia64/sn/
-@@ -87,8 +99,8 @@ CLEAN_FILES += vmlinux.gz bootloader
- boot:	lib/lib.a vmlinux
- 	$(Q)$(MAKE) $(build)=$(boot) $@
+ 	max_pfn = max_low_pfn;
  
--install: vmlinux.gz
--	sh $(srctree)/arch/ia64/install.sh $(KERNELRELEASE) $< System.map "$(INSTALL_PATH)"
-+install:
-+	-yes | sh $(srctree)/arch/ia64/install.sh $(KERNELRELEASE) vmlinux.gz System.map "$(INSTALL_PATH)"
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/mm/init.c
+--- a/arch/ia64/mm/init.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/mm/init.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -303,16 +303,34 @@
+ setup_gate (void)
+ {
+ 	struct page *page;
++	void *gate_page_addr = __start_gate_section;
++	
++#ifdef CONFIG_XEN
++	unsigned long unused_gate;
++	extern char __start_xen_gate_section[];
++	if (is_running_on_xen()) {
++		gate_page_addr = __start_xen_gate_section;
++		unused_gate = (unsigned long)ia64_imva(__start_gate_section);
++	} else
++		unused_gate =
++			(unsigned long)ia64_imva(__start_xen_gate_section);
++#ifndef HAVE_BUGGY_SEGREL
++	ClearPageReserved(virt_to_page(unused_gate));
++	init_page_count(virt_to_page(unused_gate));
++	free_page(unused_gate);
++	++totalram_pages;
++#endif
++#endif
  
- define archhelp
-   echo '* compressed	- Build compressed kernel image'
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/mm/ioremap.c tmp-linux-2.6-xen.patch/arch/ia64/mm/ioremap.c
---- pristine-linux-2.6.18.2/arch/ia64/mm/ioremap.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/mm/ioremap.c	2007-07-30 16:35:11.000000000 +0200
+ 	/*
+ 	 * Map the gate page twice: once read-only to export the ELF
+ 	 * headers etc. and once execute-only page to enable
+ 	 * privilege-promotion via "epc":
+ 	 */
+-	page = virt_to_page(ia64_imva(__start_gate_section));
++	page = virt_to_page(ia64_imva(gate_page_addr));
+ 	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
+ #ifdef HAVE_BUGGY_SEGREL
+-	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
++	page = virt_to_page(ia64_imva(gate_page_addr + PAGE_SIZE));
+ 	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
+ #else
+ 	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
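
The setup_gate() rework above links two gate images into the kernel (the native one and the Xen one pulled in via xengate-data.S), selects one at boot, and gives the unused page back to the page allocator. The release idiom, as a standalone sketch:

    /* Sketch: returning a reserved boot-time page to the buddy
     * allocator, as setup_gate() does for the gate image that is
     * not selected. */
    static void free_unused_gate_page(unsigned long addr)
    {
            struct page *pg = virt_to_page(addr);

            ClearPageReserved(pg);  /* page may be freed again */
            init_page_count(pg);    /* reset refcount to one */
            free_page(addr);        /* hand it to the allocator */
            ++totalram_pages;       /* account the recovered page */
    }
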
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/mm/ioremap.c
+--- a/arch/ia64/mm/ioremap.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/mm/ioremap.c	Wed Sep 10 10:54:08 2008 +0100
 @@ -16,6 +16,9 @@
  static inline void __iomem *
  __ioremap (unsigned long offset, unsigned long size)
  {
 +	offset = HYPERVISOR_ioremap(offset, size);
 +	if (IS_ERR_VALUE(offset))
-+		return (void __iomem*)offset;
++		return NULL;
  	return (void __iomem *) (__IA64_UNCACHED_OFFSET | offset);
  }
  
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/oprofile/init.c tmp-linux-2.6-xen.patch/arch/ia64/oprofile/init.c
---- pristine-linux-2.6.18.2/arch/ia64/oprofile/init.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/oprofile/init.c	2007-07-30 16:35:11.000000000 +0200
-@@ -11,6 +11,7 @@
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/oprofile/Makefile
+--- a/arch/ia64/oprofile/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/oprofile/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -8,3 +8,7 @@
+ 
+ oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
+ oprofile-$(CONFIG_PERFMON) += perfmon.o
++ifeq ($(CONFIG_XEN), y)
++oprofile-$(CONFIG_PERFMON) += xenoprof.o \
++	../../../drivers/xen/xenoprof/xenoprofile.o
++endif
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/oprofile/init.c
+--- a/arch/ia64/oprofile/init.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/oprofile/init.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -11,6 +11,8 @@
  #include <linux/oprofile.h>
  #include <linux/init.h>
  #include <linux/errno.h>
++#include <asm/hypervisor.h>
 +#include "oprofile_perfmon.h"
   
  extern int perfmon_init(struct oprofile_operations * ops);
  extern void perfmon_exit(void);
-@@ -20,6 +21,13 @@ int __init oprofile_arch_init(struct opr
+@@ -19,6 +21,13 @@
+ int __init oprofile_arch_init(struct oprofile_operations * ops)
  {
  	int ret = -ENODEV;
- 
++
 +	if (is_running_on_xen()) {
 +		ret = xen_perfmon_init();
 +		if (ret)
 +			return ret;
 +		return xenoprofile_init(ops);
 +	}
-+
+ 
  #ifdef CONFIG_PERFMON
  	/* perfmon_init() can fail, but we have no way to report it */
- 	ret = perfmon_init(ops);
-@@ -32,6 +40,12 @@ int __init oprofile_arch_init(struct opr
+@@ -32,6 +41,12 @@
  
  void oprofile_arch_exit(void)
  {
@@ -24056,29 +26536,20 @@
  #ifdef CONFIG_PERFMON
  	perfmon_exit();
  #endif
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/oprofile/Makefile tmp-linux-2.6-xen.patch/arch/ia64/oprofile/Makefile
---- pristine-linux-2.6.18.2/arch/ia64/oprofile/Makefile	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/oprofile/Makefile	2007-07-30 16:35:11.000000000 +0200
-@@ -8,3 +8,7 @@ DRIVER_OBJS := $(addprefix ../../../driv
- 
- oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
- oprofile-$(CONFIG_PERFMON) += perfmon.o
-+ifeq ($(CONFIG_XEN), y)
-+oprofile-$(CONFIG_PERFMON) += xenoprof.o \
-+	../../../drivers/xen/xenoprof/xenoprofile.o
-+endif
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/oprofile/oprofile_perfmon.h tmp-linux-2.6-xen.patch/arch/ia64/oprofile/oprofile_perfmon.h
---- pristine-linux-2.6.18.2/arch/ia64/oprofile/oprofile_perfmon.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/oprofile/oprofile_perfmon.h	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,28 @@
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/oprofile/oprofile_perfmon.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/oprofile/oprofile_perfmon.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,30 @@
 +#ifndef OPROFILE_PERFMON_H
 +#define OPROFILE_PERFMON_H
 +
 +#ifdef CONFIG_PERFMON
++#ifdef CONFIG_XEN
 +int __perfmon_init(void);
 +void __perfmon_exit(void);
 +int perfmon_start(void);
 +void perfmon_stop(void);
++#endif
 +#else
 +#define __perfmon_init()	(-ENOSYS)
 +#define __perfmon_exit()	do {} while (0)
@@ -24094,14 +26565,14 @@
 +#define STATIC_IF_NO_XEN	static
 +#define xen_perfmon_init()	(-ENOSYS)
 +#define xen_perfmon_exit()	do {} while (0)
-+#define xenoprofile_init()	(-ENOSYS)
++#define xenoprofile_init(ops)	(-ENOSYS)
 +#define xenoprofile_exit()	do {} while (0)
 +#endif /* CONFIG_XEN */
 +
 +#endif /* OPROFILE_PERFMON_H */
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/oprofile/perfmon.c tmp-linux-2.6-xen.patch/arch/ia64/oprofile/perfmon.c
---- pristine-linux-2.6.18.2/arch/ia64/oprofile/perfmon.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/oprofile/perfmon.c	2007-07-30 16:35:11.000000000 +0200
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/oprofile/perfmon.c
+--- a/arch/ia64/oprofile/perfmon.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/oprofile/perfmon.c	Wed Sep 10 10:54:08 2008 +0100
 @@ -13,6 +13,7 @@
  #include <asm/perfmon.h>
  #include <asm/ptrace.h>
@@ -24110,7 +26581,7 @@
  
  static int allow_ints;
  
-@@ -33,14 +34,16 @@ perfmon_handler(struct task_struct *task
+@@ -33,14 +34,16 @@
  }
  
  
@@ -24129,18 +26600,17 @@
  {
  	allow_ints = 0;
  }
-@@ -75,16 +78,35 @@ static char * get_cpu_type(void)
+@@ -75,16 +78,35 @@
  
  static int using_perfmon;
  
--int perfmon_init(struct oprofile_operations * ops)
 +STATIC_IF_NO_XEN
 +int __perfmon_init(void)
- {
- 	int ret = pfm_register_buffer_fmt(&oprofile_fmt);
- 	if (ret)
- 		return -ENODEV;
- 
++{
++	int ret = pfm_register_buffer_fmt(&oprofile_fmt);
++	if (ret)
++		return -ENODEV;
++
 +	using_perfmon = 1;
 +	return 0;
 +}
@@ -24154,12 +26624,13 @@
 +	pfm_unregister_buffer_fmt(oprofile_fmt.fmt_uuid);
 +}
 +
-+int perfmon_init(struct oprofile_operations * ops)
-+{
+ int perfmon_init(struct oprofile_operations * ops)
+ {
+-	int ret = pfm_register_buffer_fmt(&oprofile_fmt);
 +	int ret = __perfmon_init();
-+	if (ret)
-+		return -ENODEV;
-+
+ 	if (ret)
+ 		return -ENODEV;
+ 
  	ops->cpu_type = get_cpu_type();
  	ops->start = perfmon_start;
  	ops->stop = perfmon_stop;
@@ -24167,7 +26638,7 @@
  	printk(KERN_INFO "oprofile: using perfmon.\n");
  	return 0;
  }
-@@ -92,8 +114,5 @@ int perfmon_init(struct oprofile_operati
+@@ -92,8 +114,5 @@
  
  void perfmon_exit(void)
  {
@@ -24177,9 +26648,9 @@
 -	pfm_unregister_buffer_fmt(oprofile_fmt.fmt_uuid);
 +	__perfmon_exit();
  }
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/oprofile/xenoprof.c tmp-linux-2.6-xen.patch/arch/ia64/oprofile/xenoprof.c
---- pristine-linux-2.6.18.2/arch/ia64/oprofile/xenoprof.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/oprofile/xenoprof.c	2007-07-30 16:35:11.000000000 +0200
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/oprofile/xenoprof.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/oprofile/xenoprof.c	Wed Sep 10 10:54:08 2008 +0100
 @@ -0,0 +1,142 @@
 +/******************************************************************************
 + * xenoprof ia64 specific part
@@ -24234,9 +26705,9 @@
 +}
 +
 +/* XXX move them to an appropriate header file. */
-+struct resource* xen_ia64_allocate_resource(unsigned long size); 
-+void xen_ia64_release_resource(struct resource* res); 
-+void xen_ia64_unmap_resource(struct resource* res); 
++struct resource* xen_ia64_allocate_resource(unsigned long size);
++void xen_ia64_release_resource(struct resource *res);
++void xen_ia64_unmap_resource(struct resource *res);
 +
 +struct resource*
 +xenoprof_ia64_allocate_resource(int32_t max_samples)
@@ -24256,7 +26727,7 @@
 +	return xen_ia64_allocate_resource(bufsize);
 +}
 +
-+void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer* sbuf)
++void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer *sbuf)
 +{
 +	if (sbuf->buffer) {
 +		xen_ia64_unmap_resource(sbuf->arch.res);
@@ -24265,11 +26736,11 @@
 +	}
 +}
 +
-+int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer* get_buffer,
-+                                    struct xenoprof_shared_buffer* sbuf)
++int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer *get_buffer,
++                                    struct xenoprof_shared_buffer *sbuf)
 +{
 +	int ret;
-+	struct resource* res;
++	struct resource *res;
 +
 +	sbuf->buffer = NULL;
 +	sbuf->arch.res = NULL;
@@ -24295,11 +26766,11 @@
 +	return ret;
 +}
 +
-+int xenoprof_arch_set_passive(struct xenoprof_passive* pdomain,
-+                              struct xenoprof_shared_buffer* sbuf)
++int xenoprof_arch_set_passive(struct xenoprof_passive *pdomain,
++                              struct xenoprof_shared_buffer *sbuf)
 +{
 +	int ret;
-+	struct resource* res;
++	struct resource *res;
 +
 +	sbuf->buffer = NULL;
 +	sbuf->arch.res = NULL;
@@ -24323,52 +26794,227 @@
 +
 +	return ret;
 +}
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/pci/pci.c tmp-linux-2.6-xen.patch/arch/ia64/pci/pci.c
---- pristine-linux-2.6.18.2/arch/ia64/pci/pci.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/pci/pci.c	2007-09-30 18:06:18.000000000 +0200
-@@ -165,6 +165,11 @@ new_space (u64 phys_base, int sparse)
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/pci/pci.c
+--- a/arch/ia64/pci/pci.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/pci/pci.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -29,6 +29,15 @@
+ #include <asm/smp.h>
+ #include <asm/irq.h>
+ #include <asm/hw_irq.h>
++
++#ifdef CONFIG_XEN
++struct ioremap_issue_list {
++	struct list_head		listp;
++	unsigned long			start;
++	unsigned long			end;
++};
++typedef struct ioremap_issue_list	ioremap_issue_list_t;
++#endif /* CONFIG_XEN */
+ 
+ /*
+  * Low-level SAL-based PCI configuration access functions. Note that SAL
+@@ -164,6 +173,11 @@
+ 	i = num_io_spaces++;
  	io_space[i].mmio_base = mmio_base;
  	io_space[i].sparse = sparse;
- 
++
 +#ifdef CONFIG_XEN
 +	if (is_initial_xendomain())
 +		HYPERVISOR_add_io_space(phys_base, sparse, i);
 +#endif
-+
+ 
  	return i;
  }
+@@ -332,6 +346,169 @@
+ 	}
+ }
  
-@@ -607,6 +612,14 @@ pci_mmap_page_range (struct pci_dev *dev
- 	else
- 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- 
-+	if (is_initial_xendomain()) {
-+		unsigned long addr = vma->vm_pgoff << PAGE_SHIFT;
-+		size_t size = vma->vm_end - vma->vm_start;
-+		unsigned long offset = HYPERVISOR_ioremap(addr, size);
-+		if (IS_ERR_VALUE(offset))
-+			return offset;
++#ifdef CONFIG_XEN
++static void __devinit
++__cleanup_issue_list(struct list_head *top)
++{
++	ioremap_issue_list_t *ptr, *tmp_ptr;
++
++	list_for_each_entry_safe(ptr, tmp_ptr, top, listp) {
++		list_del(&(ptr->listp));
++		kfree(ptr);
 +	}
++}
 +
- 	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- 			     vma->vm_end - vma->vm_start, vma->vm_page_prot))
- 		return -EAGAIN;
-@@ -664,6 +677,14 @@ pci_mmap_legacy_page_range(struct pci_bu
- 	vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
- 	vma->vm_page_prot = prot;
- 
-+	if (is_initial_xendomain()) {
-+		unsigned long addr = vma->vm_pgoff << PAGE_SHIFT;
-+		size_t size = vma->vm_end - vma->vm_start;
-+		unsigned long offset = HYPERVISOR_ioremap(addr, size);
-+		if (IS_ERR_VALUE(offset))
-+			return offset;
++static int __devinit
++__add_issue_list(unsigned long start, unsigned long end, struct list_head *top)
++{
++	ioremap_issue_list_t *ptr, *new;
++
++	if (start > end) {
++		printk(KERN_ERR "%s: Internal error (start addr > end addr)\n",
++		       __FUNCTION__);
++		return 0;
++	}
++
++	/*
++	 * The head of the resource structure list contains
++	 * dummy values (start = 0, end = ~0), so skip it
++	 */
++	if ((start == 0) && (end == ~0))
++		return 0;
++
++	start &= PAGE_MASK;
++	end |= ~PAGE_MASK;
++
++	/* We can merge specified address range into existing entry */
++	list_for_each_entry(ptr, top, listp) {
++		if ((ptr->start > end + 1) || (ptr->end + 1 < start))
++			continue;
++		ptr->start = min(start, ptr->start);
++		ptr->end = max(end, ptr->end);
++		return 0;
++	}
++
++	/* We could not merge, so create new entry */
++	new = kmalloc(sizeof(ioremap_issue_list_t), GFP_KERNEL);
++	if (new == NULL) {
++		printk(KERN_ERR "%s: Could not allocate memory. "
++		       "HYPERVISOR_ioremap will not be issued\n",
++		       __FUNCTION__);
++		return -ENOMEM;
++	}
++
++	new->start = start;
++	new->end = end;
++
++	/* Insert the new entry to the list by ascending order */
++	if (list_empty(top)) {
++		list_add_tail(&(new->listp), top);
++		return 0;
++	}
++	list_for_each_entry(ptr, top, listp) {
++		if (new->start > ptr->start)
++			continue;
++		list_add(&(new->listp), ((struct list_head *)ptr)->prev);
++		return 0;
++	}
++	list_add_tail(&(new->listp), top);
++
++	return 0;
++}
++
++static int __devinit
++__make_issue_list(struct resource *ptr, struct list_head *top)
++{
++	int ret;
++
++	if (ptr->child) {
++		ret = __make_issue_list(ptr->child, top);
++		if (ret)
++			return ret;
++	}
++	if (ptr->sibling) {
++		ret = __make_issue_list(ptr->sibling, top);
++		if (ret)
++			return ret;
++	}
++
++	if (ptr->flags & IORESOURCE_MEM) {
++		ret = __add_issue_list(ptr->start, ptr->end, top);
++		if (ret)
++			return ret;
++	}
++
++	return 0;
++}
++
++static void __devinit
++__compress_issue_list(struct list_head *top)
++{
++	ioremap_issue_list_t *ptr, *tmp_ptr, *next;
++	int compressed;
++
++	/*
++	 * Merge adjacent entries, if overlapped
++	 * (entries are sorted by ascending order)
++	 */
++	list_for_each_entry_safe(ptr, tmp_ptr, top, listp) {
++		if (list_is_last((struct list_head *)ptr, top))
++			continue;
++
++		next = (ioremap_issue_list_t *)
++			(((struct list_head *)ptr)->next);
++		if (next->start <= (ptr->end) + 1) {
++			next->start = min(ptr->start, next->start);
++			next->end   = max(ptr->end, next->end);
++
++			list_del(&(ptr->listp));
++			kfree(ptr);
++		}
++	}
++}
++
++static int __devinit
++__issue_ioremap(struct list_head *top)
++{
++	ioremap_issue_list_t *ptr, *tmp_ptr;
++	unsigned int offset;
++
++	list_for_each_entry_safe(ptr, tmp_ptr, top, listp) {
++		offset = HYPERVISOR_ioremap(ptr->start,
++					    ptr->end - ptr->start + 1);
++		if (offset == ~0) {
++			printk(KERN_ERR "%s: HYPERVISOR_ioremap() failed. "
++			       "Address Range: 0x%016lx-0x%016lx\n",
++			       __FUNCTION__, ptr->start, ptr->end);
++		}
++
++		list_del(&(ptr->listp));
++		kfree(ptr);
++	}
++	
++	return 0;
++}
++
++static int __devinit
++do_ioremap_on_resource_list(struct resource *top)
++{
++	LIST_HEAD(ioremap_issue_list_top);
++	int ret;
++
++	ret = __make_issue_list(top, &ioremap_issue_list_top);
++	if (ret) {
++		__cleanup_issue_list(&ioremap_issue_list_top);
++		return ret;
 +	}
 +
- 	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- 			    size, vma->vm_page_prot))
- 		return -EAGAIN;
-@@ -818,3 +839,31 @@ int pci_vector_resources(int last, int n
++	__compress_issue_list(&ioremap_issue_list_top);
++
++	(void)__issue_ioremap(&ioremap_issue_list_top);
++
++	return 0;
++}
++#endif /* CONFIG_XEN */
++
+ struct pci_bus * __devinit
+ pci_acpi_scan_root(struct acpi_device *device, int domain, int bus)
+ {
+@@ -374,6 +551,18 @@
+ 	pbus = pci_scan_bus_parented(NULL, bus, &pci_root_ops, controller);
+ 	if (pbus)
+ 		pcibios_setup_root_windows(pbus, controller);
++
++#ifdef CONFIG_XEN
++	if (is_initial_xendomain()) {
++		if (do_ioremap_on_resource_list(&iomem_resource) != 0) {
++			printk(KERN_ERR
++			       "%s: Counld not issue HYPERVISOR_ioremap "
++			       "due to lack of memory or hypercall failure\n",
++			       __FUNCTION__);
++			goto out3;
++		}
++	}
++#endif /* CONFIG_XEN */
+ 
+ 	return pbus;
+ 
+@@ -818,3 +1007,31 @@
  
  	return count;
  }
@@ -24400,23 +27046,112 @@
 +}
 +EXPORT_SYMBOL(xen_pcibios_setup_root_windows);
 +#endif
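++/*
++ * Coalescing sketch for the issue list above (example values, 16KB
++ * pages assumed): ranges are widened to page granularity first, so
++ *	__add_issue_list(0x4000, 0x7fff, &top);
++ *	__add_issue_list(0x8000, 0xbfff, &top);
++ * leaves a single [0x4000, 0xbfff] entry, the second range being
++ * adjacent (ptr->end + 1 == start) and merged on insertion.
++ * __compress_issue_list() then merges any neighbours the insertion
++ * order missed, so __issue_ioremap() makes one HYPERVISOR_ioremap()
++ * call per disjoint region.
++ */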
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/xen/hypercall.S tmp-linux-2.6-xen.patch/arch/ia64/xen/hypercall.S
---- pristine-linux-2.6.18.2/arch/ia64/xen/hypercall.S	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/xen/hypercall.S	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,170 @@
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/sn/kernel/setup.c
+--- a/arch/ia64/sn/kernel/setup.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/sn/kernel/setup.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -763,5 +763,13 @@
+ 		return 0;
+ 	return test_bit(id, sn_prom_features);
+ }
++
++void
++sn_kernel_launch_event(void)
++{
++	/* ignore status until we understand possible failure, if any */
++	if (ia64_sn_kernel_launch_event())
++		printk(KERN_ERR "KEXEC is not supported in this PROM; please update the PROM.\n");
++}
+ EXPORT_SYMBOL(sn_prom_feature_available);
+ 
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/sn/pci/pcibr/pcibr_provider.c
+--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -15,6 +15,7 @@
+ #include <asm/sn/pcibus_provider_defs.h>
+ #include <asm/sn/pcidev.h>
+ #include <asm/sn/sn_sal.h>
++#include <asm/sn/pic.h>
+ #include <asm/sn/sn2/sn_hwperf.h>
+ #include "xtalk/xwidgetdev.h"
+ #include "xtalk/hubdev.h"
+@@ -129,9 +130,9 @@
+ 	}
+ 
+ 	memcpy(soft, prom_bussoft, sizeof(struct pcibus_info));
+-	soft->pbi_buscommon.bs_base =
+-	    (((u64) soft->pbi_buscommon.
+-	      bs_base << 4) >> 4) | __IA64_UNCACHED_OFFSET;
++	soft->pbi_buscommon.bs_base = (unsigned long)
++		ioremap(REGION_OFFSET(soft->pbi_buscommon.bs_base),
++			sizeof(struct pic));
+ 
+ 	spin_lock_init(&soft->pbi_lock);
+ 
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/sn/pci/tioca_provider.c
+--- a/arch/ia64/sn/pci/tioca_provider.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/sn/pci/tioca_provider.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -611,7 +611,9 @@
+ 		return NULL;
+ 
+ 	memcpy(tioca_common, prom_bussoft, sizeof(struct tioca_common));
+-	tioca_common->ca_common.bs_base |= __IA64_UNCACHED_OFFSET;
++	tioca_common->ca_common.bs_base = (unsigned long)
++		ioremap(REGION_OFFSET(tioca_common->ca_common.bs_base),
++			sizeof(struct tioca_common));
+ 
+ 	/* init kernel-private area */
+ 
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/sn/pci/tioce_provider.c
+--- a/arch/ia64/sn/pci/tioce_provider.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/ia64/sn/pci/tioce_provider.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -1006,7 +1006,9 @@
+ 		return NULL;
+ 
+ 	memcpy(tioce_common, prom_bussoft, sizeof(struct tioce_common));
+-	tioce_common->ce_pcibus.bs_base |= __IA64_UNCACHED_OFFSET;
++	tioce_common->ce_pcibus.bs_base = (unsigned long)
++		ioremap(REGION_OFFSET(tioce_common->ce_pcibus.bs_base),
++			sizeof(struct tioce_common));
+ 
+ 	tioce_kern = tioce_kern_init(tioce_common);
+ 	if (tioce_kern == NULL) {
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/xen/Makefile
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/xen/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,9 @@
++#
++# Makefile for Xen components
++#
++
++obj-y := hypercall.o xenivt.o xenentry.o xensetup.o xenpal.o \
++	 hypervisor.o util.o xencomm.o xcom_hcall.o \
++	 xcom_privcmd.o xcom_asm.o xen_dma.o
++
++obj-$(CONFIG_IA64_GENERIC) += machvec.o
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/xen/hypercall.S
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/xen/hypercall.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,141 @@
 +/*
 + * Support routines for Xen hypercalls
 + *
 + * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer at hp.com>
 + */
 +
-+#include <asm/processor.h>
 +#include <asm/asmmacro.h>
++#include <asm/intrinsics.h>
++
++#ifdef __INTEL_COMPILER
++# undef ASM_SUPPORTED
++#else
++# define ASM_SUPPORTED
++#endif
 +
++#ifndef ASM_SUPPORTED
 +GLOBAL_ENTRY(xen_get_psr)
 +	XEN_HYPER_GET_PSR
 +	br.ret.sptk.many rp
-+    ;;
++	;;
 +END(xen_get_psr)
 +
 +GLOBAL_ENTRY(xen_get_ivr)
@@ -24519,65 +27254,29 @@
 +	XEN_HYPER_SET_EFLAG
 +	br.ret.sptk.many rp
 +END(xen_set_eflag)
-+#endif
++#endif /* CONFIG_IA32_SUPPORT */
++#endif /* ASM_SUPPORTED */
 +
 +GLOBAL_ENTRY(xen_send_ipi)
-+        mov r14=r32
-+        mov r15=r33
-+        mov r2=0x400
-+        break 0x1000
-+        ;;
-+        br.ret.sptk.many rp
-+        ;;
++	mov r14=r32
++	mov r15=r33
++	mov r2=0x400
++	break 0x1000
++	;;
++	br.ret.sptk.many rp
++	;;
 +END(xen_send_ipi)
 +
-+#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
-+// Those are vdso specialized.
-+// In fsys mode, call, ret can't be used.
-+
-+	// see xen_ssm_i() in privop.h
-+	// r22 = &vcpu->vcpu_info->evtchn_upcall_mask
-+	// r23 = &vpsr.ic
-+	// r24 = &vcpu->vcpu_info->evtchn_upcall_pending
-+	// r25 = tmp
-+	// r31 = tmp
-+	// p11 = tmp
-+	// p14 = tmp
-+#define XEN_SET_PSR_I			\
-+	ld1 r31=[r22];			\
-+	ld1 r25=[r24];			\
-+	;;				\
-+	st1 [r22]=r0;			\
-+	cmp.ne.unc p14,p0=r0,r31;	\
-+	;;				\
-+(p14)	cmp.ne.unc p11,p0=r0,r25;	\
-+	;;				\
-+(p11)	st1 [r22]=r20;			\
-+(p11)	XEN_HYPER_SSM_I;
-+		
-+GLOBAL_ENTRY(xen_ssm_i_0)
-+	XEN_SET_PSR_I
-+	brl.cond.sptk	.vdso_ssm_i_0_ret
-+	;; 
-+END(xen_ssm_i_0)
-+
-+GLOBAL_ENTRY(xen_ssm_i_1)
-+	XEN_SET_PSR_I
-+	brl.cond.sptk	.vdso_ssm_i_1_ret
-+	;; 
-+END(xen_ssm_i_1)
-+
 +GLOBAL_ENTRY(__hypercall)
 +	mov r2=r37
 +	break 0x1000
 +	br.ret.sptk.many b0
 +	;; 
 +END(__hypercall)
-+#endif
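++/*
++ * C-side sketch (an assumption; the real prototype lives in the Xen
++ * headers): the stub moves its sixth argument (r37) into r2, the
++ * hypercall-number register, and traps with break 0x1000, so it
++ * corresponds to
++ *	unsigned long __hypercall(unsigned long a1, unsigned long a2,
++ *				  unsigned long a3, unsigned long a4,
++ *				  unsigned long a5, unsigned long cmd);
++ */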
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/xen/hypervisor.c tmp-linux-2.6-xen.patch/arch/ia64/xen/hypervisor.c
---- pristine-linux-2.6.18.2/arch/ia64/xen/hypervisor.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/xen/hypervisor.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,1234 @@
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/xen/hypervisor.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/xen/hypervisor.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1526 @@
 +/******************************************************************************
 + * include/asm-ia64/shadow.h
 + *
@@ -24600,7 +27299,6 @@
 + *
 + */
 +
-+//#include <linux/kernel.h>
 +#include <linux/spinlock.h>
 +#include <linux/bootmem.h>
 +#include <linux/module.h>
@@ -24612,15 +27310,16 @@
 +#include <asm/hypervisor.h>
 +#include <asm/hypercall.h>
 +#include <xen/interface/memory.h>
++#include <xen/xencons.h>
 +#include <xen/balloon.h>
 +
-+shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)XSI_BASE;
++shared_info_t *HYPERVISOR_shared_info __read_mostly =
++	(shared_info_t *)XSI_BASE;
 +EXPORT_SYMBOL(HYPERVISOR_shared_info);
 +
 +start_info_t *xen_start_info;
 +EXPORT_SYMBOL(xen_start_info);
 +
-+int running_on_xen;
 +EXPORT_SYMBOL(running_on_xen);
 +
 +#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
@@ -24632,15 +27331,44 @@
 +
 +EXPORT_SYMBOL(__hypercall);
 +
-+//XXX same as i386, x86_64 contiguous_bitmap_set(), contiguous_bitmap_clear()
-+// move those to lib/contiguous_bitmap?
-+//XXX discontigmem/sparsemem
++void __init
++xen_setup(char **cmdline_p)
++{
++	struct dom0_vga_console_info *info;
++	extern void dig_setup(char **cmdline_p);
++
++	if (ia64_platform_is("xen"))
++		dig_setup(cmdline_p);
++
++	if (!is_running_on_xen() || !is_initial_xendomain())
++		return;
++
++	info = (void *)((char *)xen_start_info +
++			xen_start_info->console.dom0.info_off);
++	dom0_init_screen_info(info, xen_start_info->console.dom0.info_size);
++
++	xen_start_info->console.domU.mfn = 0;
++	xen_start_info->console.domU.evtchn = 0;
++}
++
++void __cpuinit
++xen_cpu_init(void)
++{
++	extern void xen_smp_intr_init(void);
++	xen_smp_intr_init();
++}
++
++/*
++ * XXX same as i386, x86_64 contiguous_bitmap_set(), contiguous_bitmap_clear();
++ * move those to lib/contiguous_bitmap?
++ * XXX discontigmem/sparsemem
++ */
 +
 +/*
 + * Bitmap is indexed by page number. If bit is set, the page is part of a
 + * xen_create_contiguous_region() area of memory.
 + */
-+unsigned long *contiguous_bitmap;
++unsigned long *contiguous_bitmap __read_mostly;
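++/*
++ * Membership sketch (assumed caller): code can test whether a page
++ * belongs to such an area by its pfn, e.g.
++ *	if (test_bit(__pa(vaddr) >> PAGE_SHIFT, contiguous_bitmap))
++ *		...
++ */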
 +
 +#ifdef CONFIG_VIRTUAL_MEM_MAP
 +/* Following logic is stolen from create_mem_map_table() for virtual memmap */
@@ -24657,16 +27385,16 @@
 +	pte_t *pte;
 +
 +	bitmap_start = (unsigned long)contiguous_bitmap +
-+	               ((__pa(start) >> PAGE_SHIFT) >> 3);
++		       ((__pa(start) >> PAGE_SHIFT) >> 3);
 +	bitmap_end = (unsigned long)contiguous_bitmap +
-+	             (((__pa(end) >> PAGE_SHIFT) + 2 * BITS_PER_LONG) >> 3);
++		     (((__pa(end) >> PAGE_SHIFT) + 2 * BITS_PER_LONG) >> 3);
 +
 +	start_page = bitmap_start & PAGE_MASK;
 +	end_page = PAGE_ALIGN(bitmap_end);
 +	node = paddr_to_nid(__pa(start));
 +
 +	bitmap = alloc_bootmem_pages_node(NODE_DATA(node),
-+	                                  end_page - start_page);
++					  end_page - start_page);
 +	BUG_ON(!bitmap);
 +	memset(bitmap, 0, end_page - start_page);
 +
@@ -24674,26 +27402,26 @@
 +		pgd = pgd_offset_k(address);
 +		if (pgd_none(*pgd))
 +			pgd_populate(&init_mm, pgd,
-+			             alloc_bootmem_pages_node(NODE_DATA(node),
-+			                                      PAGE_SIZE));
++				     alloc_bootmem_pages_node(NODE_DATA(node),
++							      PAGE_SIZE));
 +		pud = pud_offset(pgd, address);
 +
 +		if (pud_none(*pud))
 +			pud_populate(&init_mm, pud,
-+			             alloc_bootmem_pages_node(NODE_DATA(node),
-+			                                      PAGE_SIZE));
++				     alloc_bootmem_pages_node(NODE_DATA(node),
++							      PAGE_SIZE));
 +		pmd = pmd_offset(pud, address);
 +
 +		if (pmd_none(*pmd))
 +			pmd_populate_kernel(&init_mm, pmd,
-+			                    alloc_bootmem_pages_node
-+			                    (NODE_DATA(node), PAGE_SIZE));
++					    alloc_bootmem_pages_node
++					    (NODE_DATA(node), PAGE_SIZE));
 +		pte = pte_offset_kernel(pmd, address);
 +
 +		if (pte_none(*pte))
 +			set_pte(pte,
-+			        pfn_pte(__pa(bitmap + (address - start_page))
-+			                >> PAGE_SHIFT, PAGE_KERNEL));
++				pfn_pte(__pa(bitmap + (address - start_page))
++					>> PAGE_SHIFT, PAGE_KERNEL));
 +	}
 +	return 0;
 +}
@@ -24708,7 +27436,7 @@
 +}
 +
 +void
-+contiguous_bitmap_init(unsigned long end_pfn)
++xen_contiguous_bitmap_init(unsigned long end_pfn)
 +{
 +	unsigned long size = (end_pfn + 2 * BITS_PER_LONG) >> 3;
 +#ifndef CONFIG_VIRTUAL_MEM_MAP
@@ -24778,9 +27506,11 @@
 +	}
 +}
 +
-+// __xen_create_contiguous_region(), __xen_destroy_contiguous_region()
-+// are based on i386 xen_create_contiguous_region(),
-+// xen_destroy_contiguous_region()
++/*
++ * __xen_create_contiguous_region(), __xen_destroy_contiguous_region()
++ * are based on i386 xen_create_contiguous_region(),
++ * xen_destroy_contiguous_region()
++ */
 +
 +/* Protected by balloon_lock. */
 +#define MAX_CONTIG_ORDER 7
@@ -24826,9 +27556,8 @@
 +	balloon_lock(flags);
 +
 +	/* Get a new contiguous memory extent. */
-+	for (i = 0; i < num_gpfn; i++) {
++	for (i = 0; i < num_gpfn; i++)
 +		in_frames[i] = start_gpfn + i;
-+	}
 +	out_frame = start_gpfn;
 +	error = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
 +	success = (exchange.nr_exchanged == num_gpfn);
@@ -24910,7 +27639,7 @@
 +			 .domid        = DOMID_SELF
 +		 },
 +		.nr_exchanged = 0
-+        };
++	};
 +	
 +
 +	if (!test_bit(start_gpfn, contiguous_bitmap))
@@ -24928,17 +27657,16 @@
 +
 +	contiguous_bitmap_clear(start_gpfn, num_gpfn);
 +
-+        /* Do the exchange for non-contiguous MFNs. */
++	/* Do the exchange for non-contiguous MFNs. */
 +	in_frame = start_gpfn;
-+	for (i = 0; i < num_gpfn; i++) {
++	for (i = 0; i < num_gpfn; i++)
 +		out_frames[i] = start_gpfn + i;
-+	}
 +	error = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
 +	success = (exchange.nr_exchanged == 1);
 +	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (error == 0)));
 +	BUG_ON(success && (error != 0));
 +	if (unlikely(error == -ENOSYS)) {
-+                /* Compatibility when XENMEM_exchange is unsupported. */
++		/* Compatibility when XENMEM_exchange is unsupported. */
 +		error = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
 +					     &exchange.in);
 +		BUG_ON(error != 1);
@@ -24950,15 +27678,27 @@
 +	balloon_unlock(flags);
 +}
 +
++int
++xen_limit_pages_to_max_mfn(struct page *pages, unsigned int order,
++			   unsigned int address_bits)
++{
++	return xen_create_contiguous_region((unsigned long)page_address(pages),
++					    order, address_bits);
++}
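++/*
++ * Usage sketch (assumed caller): a driver that needs machine-contiguous
++ * memory below a given address width would call, e.g.
++ *	error = xen_create_contiguous_region((unsigned long)buf, order, 32);
++ * and fall back to swiotlb bounce buffering when this fails.
++ */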
 +
-+///////////////////////////////////////////////////////////////////////////
-+// grant table hack
-+// cmd: GNTTABOP_xxx
-+
++/****************************************************************************
++ * grant table hack
++ * cmd: GNTTABOP_xxx
++ */
 +#include <linux/mm.h>
 +#include <xen/interface/xen.h>
 +#include <xen/gnttab.h>
 +
++void *arch_gnttab_alloc_shared(unsigned long *frames)
++{
++	return __va(frames[0] << PAGE_SHIFT);
++}
++
 +static void
 +gnttab_map_grant_ref_pre(struct gnttab_map_grant_ref *uop)
 +{
@@ -24968,16 +27708,19 @@
 +
 +	if (flags & GNTMAP_host_map) {
 +		if (flags & GNTMAP_application_map) {
-+			xprintd("GNTMAP_application_map is not supported yet: flags 0x%x\n", flags);
++			xprintd("GNTMAP_application_map is not supported yet:"
++				" flags 0x%x\n", flags);
 +			BUG();
 +		}
 +		if (flags & GNTMAP_contains_pte) {
-+			xprintd("GNTMAP_contains_pte is not supported yet flags 0x%x\n", flags);
++			xprintd("GNTMAP_contains_pte is not supported yet"
++				" flags 0x%x\n", flags);
 +			BUG();
 +		}
 +	} else if (flags & GNTMAP_device_map) {
-+		xprintd("GNTMAP_device_map is not supported yet 0x%x\n", flags);
-+		BUG();//XXX not yet. actually this flag is not used.
++		xprintd("GNTMAP_device_map is not supported yet 0x%x\n",
++			flags);
++		BUG(); /* XXX not supported yet; this flag is actually unused. */
 +	} else {
 +		BUG();
 +	}
@@ -24993,19 +27736,21 @@
 +				(struct gnttab_map_grant_ref*)uop + i);
 +		}
 +	}
-+	return xencomm_mini_hypercall_grant_table_op(cmd, uop, count);
++	return xencomm_hypercall_grant_table_op(cmd, uop, count);
 +}
 +EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
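++/*
++ * Usage sketch (assuming gnttab_set_map_op() from <xen/gnttab.h>): a
++ * host mapping of a granted page would be requested with
++ *	struct gnttab_map_grant_ref op;
++ *	gnttab_set_map_op(&op, addr, GNTMAP_host_map, ref, domid);
++ *	HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
++ * The pre-check above rejects the flag combinations this port does not
++ * support yet before forwarding the call through xencomm.
++ */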
 +
-+///////////////////////////////////////////////////////////////////////////
-+// foreign mapping
++/**************************************************************************
++ * foreign mapping
++ */
 +#include <linux/efi.h>
-+#include <asm/meminit.h> // for IA64_GRANULE_SIZE, GRANULEROUND{UP,DOWN}()
++#include <asm/meminit.h> /* for IA64_GRANULE_SIZE, GRANULEROUND{UP,DOWN}() */
 +
 +static unsigned long privcmd_resource_min = 0;
-+// Xen/ia64 currently can handle pseudo physical address bits up to
-+// (PAGE_SHIFT * 3)
-+static unsigned long privcmd_resource_max = GRANULEROUNDDOWN((1UL << (PAGE_SHIFT * 3)) - 1);
++/* Xen/ia64 currently can handle pseudo physical address bits up to
++ * (PAGE_SHIFT * 3) */
++static unsigned long privcmd_resource_max =
++	GRANULEROUNDDOWN((1UL << (PAGE_SHIFT * 3)) - 1);
 +static unsigned long privcmd_resource_align = IA64_GRANULE_SIZE;
 +
 +static unsigned long
@@ -25040,18 +27785,18 @@
 +	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
 +	efi_desc_size = ia64_boot_param->efi_memdesc_size;
 +
-+	// at first check the used highest address
++	/* first, find the highest address in use */
 +	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
-+		// nothing
++		/* nothing */;
 +	}
 +	md = p - efi_desc_size;
 +	privcmd_resource_min = GRANULEROUNDUP(md_end_addr(md));
 +	if (xen_ia64_privcmd_check_size(privcmd_resource_min,
-+					privcmd_resource_max)) {
++					privcmd_resource_max))
 +		goto out;
-+	}
 +
-+	// the used highest address is too large. try to find the largest gap.
++	/* The highest address in use is too large.
++	 * Try to find the largest gap instead. */
 +	tmp_min = privcmd_resource_max;
 +	tmp_max = 0;
 +	gap_size = 0;
@@ -25065,23 +27810,21 @@
 +
 +		md = p;
 +		end = md_end_addr(md);
-+		if (end > privcmd_resource_max) {
++		if (end > privcmd_resource_max)
 +			break;
-+		}
 +		if (end < prev_end) {
-+			// work around. 
-+			// Xen may pass incompletely sorted memory
-+			// descriptors like
-+			// [x, x + length]
-+			// [x, x]
-+			// this order should be reversed.
++			/* Work around: Xen may pass incompletely
++			 * sorted memory descriptors like
++			 * [x, x + length]
++			 * [x, x]
++			 * whose order should be reversed. */
 +			continue;
 +		}
 +		next = p + efi_desc_size;
 +		next_start = next->phys_addr;
-+		if (next_start > privcmd_resource_max) {
++		if (next_start > privcmd_resource_max)
 +			next_start = privcmd_resource_max;
-+		}
 +		if (end < next_start && gap_size < (next_start - end)) {
 +			tmp_min = end;
 +			tmp_max = next_start;
@@ -25100,19 +27843,21 @@
 +	privcmd_resource_max = tmp_max;
 +	if (!xen_ia64_privcmd_check_size(privcmd_resource_min,
 +					 privcmd_resource_max)) {
-+		// Any large enough gap isn't found.
-+		// go ahead anyway with the warning hoping that large region
-+		// won't be requested.
-+		printk(KERN_WARNING "xen privcmd: large enough region for privcmd mmap is not found.\n");
++		/* No large enough gap was found.
++		 * Go ahead anyway, with a warning, hoping that no large
++		 * region will be requested. */
++		printk(KERN_WARNING "xen privcmd: "
++		       "large enough region for privcmd mmap is not found.\n");
 +	}
 +
 +out:
-+	printk(KERN_INFO "xen privcmd uses pseudo physical addr range [0x%lx, 0x%lx] (%ldMB)\n",
++	printk(KERN_INFO "xen privcmd uses pseudo physical addr range "
++	       "[0x%lx, 0x%lx] (%ldMB)\n",
 +	       privcmd_resource_min, privcmd_resource_max, 
 +	       (privcmd_resource_max - privcmd_resource_min) >> 20);
 +	BUG_ON(privcmd_resource_min >= privcmd_resource_max);
 +
-+	// XXX this should be somewhere appropriate
++	/* XXX this should be somewhere appropriate */
 +	(void)p2m_expose_init();
 +
 +	return 0;
@@ -25127,8 +27872,12 @@
 +
 +struct xen_ia64_privcmd_range {
 +	atomic_t			ref_count;
-+	unsigned long			pgoff; // in PAGE_SIZE
-+	struct resource*		res;
++	unsigned long			pgoff; /* in PAGE_SIZE */
++	struct resource			*res;
++
++	/* for foreign domain p2m mapping */
++	void				*private;
++	void (*callback)(struct xen_ia64_privcmd_range *range, void *arg);
 +
 +	unsigned long			num_entries;
 +	struct xen_ia64_privcmd_entry	entries[0];
@@ -25136,30 +27885,30 @@
 +
 +struct xen_ia64_privcmd_vma {
 +	int				is_privcmd_mmapped;
-+	struct xen_ia64_privcmd_range*	range;
++	struct xen_ia64_privcmd_range	*range;
 +
 +	unsigned long			num_entries;
-+	struct xen_ia64_privcmd_entry*	entries;
++	struct xen_ia64_privcmd_entry	*entries;
 +};
 +
 +static void
-+xen_ia64_privcmd_init_entry(struct xen_ia64_privcmd_entry* entry)
++xen_ia64_privcmd_init_entry(struct xen_ia64_privcmd_entry *entry)
 +{
 +	atomic_set(&entry->map_count, 0);
 +	entry->gpfn = INVALID_GPFN;
 +}
 +
 +static int
-+xen_ia64_privcmd_entry_mmap(struct vm_area_struct* vma,
++xen_ia64_privcmd_entry_mmap(struct vm_area_struct *vma,
 +			    unsigned long addr,
-+			    struct xen_ia64_privcmd_range* privcmd_range,
++			    struct xen_ia64_privcmd_range *privcmd_range,
 +			    int i,
 +			    unsigned long gmfn,
 +			    pgprot_t prot,
 +			    domid_t domid)
 +{
 +	int error = 0;
-+	struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
++	struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
 +	unsigned long gpfn;
 +	unsigned long flags;
 +
@@ -25175,21 +27924,24 @@
 +	gpfn = (privcmd_range->res->start >> PAGE_SHIFT) + i;
 +
 +	flags = ASSIGN_writable;
-+	if (pgprot_val(prot) == PROT_READ) {
++	if (pgprot_val(prot) == PROT_READ)
 +		flags = ASSIGN_readonly;
-+	}
 +	error = HYPERVISOR_add_physmap_with_gmfn(gpfn, gmfn, flags, domid);
-+	if (error != 0) {
++	if (error != 0)
 +		goto out;
-+	}
 +
 +	prot = vma->vm_page_prot;
 +	error = remap_pfn_range(vma, addr, gpfn, 1 << PAGE_SHIFT, prot);
++	/*
++	 * VM_PFNMAP is set in remap_pfn_range().
++	 * Reset the flag to avoid BUG_ON() in do_no_page().
++	 */
++	vma->vm_flags &= ~VM_PFNMAP;
++
 +	if (error != 0) {
 +		error = HYPERVISOR_zap_physmap(gpfn, 0);
-+		if (error) {
-+			BUG();//XXX
-+		}
++		if (error)
++			BUG(); /* XXX */
 +	} else {
 +		atomic_inc(&entry->map_count);
 +		entry->gpfn = gpfn;
@@ -25200,60 +27952,67 @@
 +}
 +
 +static void
-+xen_ia64_privcmd_entry_munmap(struct xen_ia64_privcmd_range* privcmd_range,
++xen_ia64_privcmd_entry_munmap(struct xen_ia64_privcmd_range *privcmd_range,
 +			      int i)
 +{
-+	struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
++	struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
 +	unsigned long gpfn = entry->gpfn;
-+	//gpfn = (privcmd_range->res->start >> PAGE_SHIFT) +
-+	//	(vma->vm_pgoff - privcmd_range->pgoff);
++	/* gpfn = (privcmd_range->res->start >> PAGE_SHIFT) +
++	 *	(vma->vm_pgoff - privcmd_range->pgoff); */
 +	int error;
 +
 +	error = HYPERVISOR_zap_physmap(gpfn, 0);
-+	if (error) {
-+		BUG();//XXX
-+	}
++	if (error)
++		BUG(); /* XXX */
 +	entry->gpfn = INVALID_GPFN;
 +}
 +
 +static void
-+xen_ia64_privcmd_entry_open(struct xen_ia64_privcmd_range* privcmd_range,
++xen_ia64_privcmd_entry_open(struct xen_ia64_privcmd_range *privcmd_range,
 +			    int i)
 +{
-+	struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
-+	if (entry->gpfn != INVALID_GPFN) {
++	struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
++	if (entry->gpfn != INVALID_GPFN)
 +		atomic_inc(&entry->map_count);
-+	} else {
++	else
 +		BUG_ON(atomic_read(&entry->map_count) != 0);
-+	}
 +}
 +
 +static void
-+xen_ia64_privcmd_entry_close(struct xen_ia64_privcmd_range* privcmd_range,
++xen_ia64_privcmd_entry_close(struct xen_ia64_privcmd_range *privcmd_range,
 +			     int i)
 +{
-+	struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
++	struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
 +	if (entry->gpfn != INVALID_GPFN &&
-+	    atomic_dec_and_test(&entry->map_count)) {
++	    atomic_dec_and_test(&entry->map_count))
 +		xen_ia64_privcmd_entry_munmap(privcmd_range, i);
-+	}
 +}
 +
-+static void xen_ia64_privcmd_vma_open(struct vm_area_struct* vma);
-+static void xen_ia64_privcmd_vma_close(struct vm_area_struct* vma);
++static void xen_ia64_privcmd_vma_open(struct vm_area_struct *vma);
++static void xen_ia64_privcmd_vma_close(struct vm_area_struct *vma);
++
++static struct page *
++xen_ia64_privcmd_vma_nopage(struct vm_area_struct *vma,
++			    unsigned long address,
++			    int *type)
++{
++	return NOPAGE_SIGBUS;
++}
 +
 +struct vm_operations_struct xen_ia64_privcmd_vm_ops = {
-+	.open = &xen_ia64_privcmd_vma_open,
-+	.close = &xen_ia64_privcmd_vma_close,
++	.open = xen_ia64_privcmd_vma_open,
++	.close = xen_ia64_privcmd_vma_close,
++	.nopage = xen_ia64_privcmd_vma_nopage
 +};
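++/*
++ * Note: the .nopage handler always returns NOPAGE_SIGBUS; every valid
++ * page of a privcmd mapping is installed eagerly via remap_pfn_range(),
++ * so a fault here can only mean an access outside the mapped range.
++ */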
 +
 +static void
-+__xen_ia64_privcmd_vma_open(struct vm_area_struct* vma,
-+			    struct xen_ia64_privcmd_vma* privcmd_vma,
-+			    struct xen_ia64_privcmd_range* privcmd_range)
++__xen_ia64_privcmd_vma_open(struct vm_area_struct *vma,
++			    struct xen_ia64_privcmd_vma *privcmd_vma,
++			    struct xen_ia64_privcmd_range *privcmd_range)
 +{
 +	unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
-+	unsigned long num_entries = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++	unsigned long num_entries =
++		(vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 +	unsigned long i;
 +
 +	BUG_ON(entry_offset < 0);
@@ -25263,41 +28022,43 @@
 +	privcmd_vma->num_entries = num_entries;
 +	privcmd_vma->entries = &privcmd_range->entries[entry_offset];
 +	vma->vm_private_data = privcmd_vma;
-+	for (i = 0; i < privcmd_vma->num_entries; i++) {
++	for (i = 0; i < privcmd_vma->num_entries; i++)
 +		xen_ia64_privcmd_entry_open(privcmd_range, entry_offset + i);
-+	}
 +
 +	vma->vm_private_data = privcmd_vma;
 +	vma->vm_ops = &xen_ia64_privcmd_vm_ops;
 +}
 +
 +static void
-+xen_ia64_privcmd_vma_open(struct vm_area_struct* vma)
++xen_ia64_privcmd_vma_open(struct vm_area_struct *vma)
 +{
-+	struct xen_ia64_privcmd_vma* old_privcmd_vma = (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
-+	struct xen_ia64_privcmd_vma* privcmd_vma = (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
-+	struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
++	struct xen_ia64_privcmd_vma *old_privcmd_vma =
++		(struct xen_ia64_privcmd_vma*)vma->vm_private_data;
++	struct xen_ia64_privcmd_vma *privcmd_vma =
++		(struct xen_ia64_privcmd_vma*)vma->vm_private_data;
++	struct xen_ia64_privcmd_range *privcmd_range = privcmd_vma->range;
 +
 +	atomic_inc(&privcmd_range->ref_count);
-+	// vm_op->open() can't fail.
++	/* vm_op->open() can't fail. */
 +	privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL | __GFP_NOFAIL);
-+	// copy original value if necessary
++	/* copy original value if necessary */
 +	privcmd_vma->is_privcmd_mmapped = old_privcmd_vma->is_privcmd_mmapped;
 +
 +	__xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
 +}
 +
 +static void
-+xen_ia64_privcmd_vma_close(struct vm_area_struct* vma)
++xen_ia64_privcmd_vma_close(struct vm_area_struct *vma)
 +{
-+	struct xen_ia64_privcmd_vma* privcmd_vma =
++	struct xen_ia64_privcmd_vma *privcmd_vma =
 +		(struct xen_ia64_privcmd_vma*)vma->vm_private_data;
-+	struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
++	struct xen_ia64_privcmd_range *privcmd_range = privcmd_vma->range;
 +	unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
 +	unsigned long i;
 +
 +	for (i = 0; i < privcmd_vma->num_entries; i++) {
 +		xen_ia64_privcmd_entry_close(privcmd_range, entry_offset + i);
++		cond_resched();
 +	}
 +	vma->vm_private_data = NULL;
 +	kfree(privcmd_vma);
@@ -25305,12 +28066,15 @@
 +	if (atomic_dec_and_test(&privcmd_range->ref_count)) {
 +#if 1
 +		for (i = 0; i < privcmd_range->num_entries; i++) {
-+			struct xen_ia64_privcmd_entry* entry =
++			struct xen_ia64_privcmd_entry *entry =
 +				&privcmd_range->entries[i];
 +			BUG_ON(atomic_read(&entry->map_count) != 0);
 +			BUG_ON(entry->gpfn != INVALID_GPFN);
 +		}
 +#endif
++		if (privcmd_range->callback)
++			(*privcmd_range->callback)(privcmd_range,
++						   privcmd_range->private);
 +		release_resource(privcmd_range->res);
 +		kfree(privcmd_range->res);
 +		vfree(privcmd_range);
@@ -25320,7 +28084,7 @@
 +int
 +privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
 +{
-+	struct xen_ia64_privcmd_vma* privcmd_vma =
++	struct xen_ia64_privcmd_vma *privcmd_vma =
 +		(struct xen_ia64_privcmd_vma *)vma->vm_private_data;
 +	return (xchg(&privcmd_vma->is_privcmd_mmapped, 1) == 0);
 +}
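++/*
++ * The xchg() makes the check-and-set atomic: the first caller reads 0
++ * and sets the flag, every later caller reads 1, so a privcmd region
++ * can be mapped only once, e.g.
++ *	if (!privcmd_enforce_singleshot_mapping(vma))
++ *		return -EINVAL;
++ */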
@@ -25331,9 +28095,9 @@
 +	int error;
 +	unsigned long size = vma->vm_end - vma->vm_start;
 +	unsigned long num_entries = size >> PAGE_SHIFT;
-+	struct xen_ia64_privcmd_range* privcmd_range = NULL;
-+	struct xen_ia64_privcmd_vma* privcmd_vma = NULL;
-+	struct resource* res = NULL;
++	struct xen_ia64_privcmd_range *privcmd_range = NULL;
++	struct xen_ia64_privcmd_vma *privcmd_vma = NULL;
++	struct resource *res = NULL;
 +	unsigned long i;
 +	BUG_ON(!is_running_on_xen());
 +
@@ -25343,37 +28107,34 @@
 +	privcmd_range =
 +		vmalloc(sizeof(*privcmd_range) +
 +			sizeof(privcmd_range->entries[0]) * num_entries);
-+	if (privcmd_range == NULL) {
++	if (privcmd_range == NULL)
 +		goto out_enomem0;
-+	}
 +	privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL);
-+	if (privcmd_vma == NULL) {
++	if (privcmd_vma == NULL)
 +		goto out_enomem1;
-+	}
 +	privcmd_vma->is_privcmd_mmapped = 0;
 +
 +	res = kzalloc(sizeof(*res), GFP_KERNEL);
-+	if (res == NULL) {
++	if (res == NULL)
 +		goto out_enomem1;
-+	}
 +	res->name = "Xen privcmd mmap";
 +	error = allocate_resource(&iomem_resource, res, size,
 +				  privcmd_resource_min, privcmd_resource_max,
 +				  privcmd_resource_align, NULL, NULL);
-+	if (error) {
++	if (error)
 +		goto out_enomem1;
-+	}
 +	privcmd_range->res = res;
 +
 +	/* DONTCOPY is essential for Xen as copy_page_range is broken. */
-+	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
++	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
 +
 +	atomic_set(&privcmd_range->ref_count, 1);
 +	privcmd_range->pgoff = vma->vm_pgoff;
 +	privcmd_range->num_entries = num_entries;
-+	for (i = 0; i < privcmd_range->num_entries; i++) {
++	privcmd_range->private = NULL;
++	privcmd_range->callback = NULL;
++	for (i = 0; i < privcmd_range->num_entries; i++)
 +		xen_ia64_privcmd_init_entry(&privcmd_range->entries[i]);
-+	}
 +
 +	__xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
 +	return 0;
@@ -25388,15 +28149,15 @@
 +
 +int
 +direct_remap_pfn_range(struct vm_area_struct *vma,
-+		       unsigned long address,	// process virtual address
-+		       unsigned long gmfn,	// gmfn, gmfn + 1, ... gmfn + size/PAGE_SIZE
++		       unsigned long address,	/* process virtual address */
++		       unsigned long gmfn,	/* gmfn, gmfn + 1, ... gmfn + size/PAGE_SIZE */
 +		       unsigned long size,
 +		       pgprot_t prot,
-+		       domid_t  domid)		// target domain
++		       domid_t  domid)		/* target domain */
 +{
-+	struct xen_ia64_privcmd_vma* privcmd_vma =
++	struct xen_ia64_privcmd_vma *privcmd_vma =
 +		(struct xen_ia64_privcmd_vma*)vma->vm_private_data;
-+	struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
++	struct xen_ia64_privcmd_range *privcmd_range = privcmd_vma->range;
 +	unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
 +
 +	unsigned long i;
@@ -25405,28 +28166,27 @@
 +	BUG_ON(!is_running_on_xen());
 +
 +#if 0
-+	if (prot != vm->vm_page_prot) {
++	if (prot != vm->vm_page_prot)
 +		return -EINVAL;
-+	}
 +#endif
 +
 +	i = (address - vma->vm_start) >> PAGE_SHIFT;
 +	for (offset = 0; offset < size; offset += PAGE_SIZE) {
 +		error = xen_ia64_privcmd_entry_mmap(vma, (address + offset) & PAGE_MASK, privcmd_range, entry_offset + i, gmfn, prot, domid);
-+		if (error != 0) {
++		if (error != 0)
 +			break;
-+		}
 +
 +		i++;
 +		gmfn++;
-+        }
++	}
 +
 +	return error;
 +}
 +
 +
-+///////////////////////////////////////////////////////////////////////////
-+// expose p2m table
++/**************************************************************************
++ * expose p2m table
++ */
 +#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
 +#include <linux/cpu.h>
 +#include <asm/uaccess.h>
@@ -25444,12 +28204,13 @@
 +};
 +static unsigned long p2m_assign_start_pfn __read_mostly;
 +static unsigned long p2m_assign_end_pfn __read_mostly;
-+static unsigned long p2m_expose_size;	// this is referenced only when resume.
-+					// so __read_mostly doesn't make sense.
-+volatile const pte_t* p2m_pte __read_mostly;
++static unsigned long p2m_expose_size;	/* this is referenced only on resume,
++					 * so __read_mostly doesn't make sense.
++					 */
++volatile const pte_t *p2m_pte __read_mostly;
 +
-+#define GRNULE_PFN	PTRS_PER_PTE
-+static unsigned long p2m_granule_pfn __read_mostly = GRNULE_PFN;
++#define GRANULE_PFN	PTRS_PER_PTE
++static unsigned long p2m_granule_pfn __read_mostly = GRANULE_PFN;
 +
 +#define ROUNDDOWN(x, y)  ((x) & ~((y) - 1))
 +#define ROUNDUP(x, y)    (((x) + (y) - 1) & ~((y) - 1))
@@ -25459,13 +28220,13 @@
 +static int xen_ia64_p2m_expose __read_mostly = 1;
 +module_param(xen_ia64_p2m_expose, int, 0);
 +MODULE_PARM_DESC(xen_ia64_p2m_expose,
-+                 "enable/disable xen/ia64 p2m exposure optimization\n");
++		 "enable/disable xen/ia64 p2m exposure optimization\n");
 +
 +#ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
 +static int xen_ia64_p2m_expose_use_dtr __read_mostly = 1;
 +module_param(xen_ia64_p2m_expose_use_dtr, int, 0);
 +MODULE_PARM_DESC(xen_ia64_p2m_expose_use_dtr,
-+                 "use/unuse dtr to map exposed p2m table\n");
++		 "use/unuse dtr to map exposed p2m table\n");
 +
 +static const int p2m_page_shifts[] = {
 +	_PAGE_SIZE_4K,
@@ -25487,21 +28248,21 @@
 +};
 +static struct p2m_itr_arg p2m_itr_arg __read_mostly;
 +
-+// This should be in asm-ia64/kregs.h
++/* This should be in asm-ia64/kregs.h */
 +#define IA64_TR_P2M_TABLE	3
 +
 +static void
-+p2m_itr(void* info)
++p2m_itr(void *info)
 +{
-+	struct p2m_itr_arg* arg = (struct p2m_itr_arg*)info;
++	struct p2m_itr_arg *arg = (struct p2m_itr_arg*)info;
 +	ia64_itr(0x2, IA64_TR_P2M_TABLE,
-+	         arg->vaddr, arg->pteval, arg->log_page_size);
++		 arg->vaddr, arg->pteval, arg->log_page_size);
 +	ia64_srlz_d();
 +}
 +
 +static int
 +p2m_expose_dtr_call(struct notifier_block *self,
-+                    unsigned long event, void* ptr)
++		    unsigned long event, void *ptr)
 +{
 +	unsigned int cpu = (unsigned int)(long)ptr;
 +	if (event != CPU_ONLINE)
@@ -25525,6 +28286,12 @@
 +};
 +#endif
 +
++static inline unsigned long
++p2m_table_size(unsigned long num_pfn)
++{
++	return ((num_pfn + PTRS_PER_PTE - 1) / PTRS_PER_PTE) << PAGE_SHIFT;
++}
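++/*
++ * Worked example (assuming 16KB pages and 8-byte PTEs, so
++ * PTRS_PER_PTE == 2048): one table page covers 2048 frames, hence
++ * p2m_table_size(4096) == 2 << PAGE_SHIFT == 32KB of table.
++ */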
++
 +static int
 +p2m_expose_init(void)
 +{
@@ -25534,7 +28301,6 @@
 +	int error = 0;
 +#ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
 +	int i;
-+	unsigned long page_size;
 +	unsigned long log_page_size = 0;
 +#endif
 +
@@ -25563,8 +28329,9 @@
 +
 +#ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
 +	if (xen_ia64_p2m_expose_use_dtr) {
++		unsigned long page_size = 0;
 +		unsigned long granule_pfn = 0;
-+		p2m_size = p2m_max_low_pfn - p2m_min_low_pfn;
++		p2m_size = p2m_table_size(p2m_max_low_pfn - p2m_min_low_pfn);
 +		for (i = 0;
 +		     i < sizeof(p2m_page_shifts)/sizeof(p2m_page_shifts[0]);
 +		     i++) {
@@ -25574,15 +28341,16 @@
 +				continue;
 +
 +			granule_pfn = max(page_size >> PAGE_SHIFT,
-+			                  p2m_granule_pfn);
++					  p2m_granule_pfn);
 +			p2m_convert_min_pfn = ROUNDDOWN(p2m_min_low_pfn,
-+			                                granule_pfn);
++							granule_pfn);
 +			p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn,
-+			                              granule_pfn);
++						      granule_pfn);
 +			num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn;
 +			p2m_expose_size = num_pfn << PAGE_SHIFT;
-+			p2m_size = num_pfn / PTRS_PER_PTE;
-+			p2m_size = ROUNDUP(p2m_size, granule_pfn << PAGE_SHIFT);
++			p2m_size = p2m_table_size(num_pfn);
++			p2m_size = ROUNDUP(p2m_size,
++					   granule_pfn << PAGE_SHIFT);
 +			if (p2m_size == page_size)
 +				break;
 +		}
@@ -25597,24 +28365,25 @@
 +	{
 +		BUG_ON(p2m_granule_pfn & (p2m_granule_pfn - 1));
 +		p2m_convert_min_pfn = ROUNDDOWN(p2m_min_low_pfn,
-+		                                p2m_granule_pfn);
-+		p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn, p2m_granule_pfn);
++						p2m_granule_pfn);
++		p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn,
++					      p2m_granule_pfn);
 +		num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn;
 +		p2m_expose_size = num_pfn << PAGE_SHIFT;
-+		p2m_size = num_pfn / PTRS_PER_PTE;
++		p2m_size = p2m_table_size(num_pfn);
 +		p2m_size = ROUNDUP(p2m_size, p2m_granule_pfn << PAGE_SHIFT);
 +		align = max(privcmd_resource_align,
-+		            p2m_granule_pfn << PAGE_SHIFT);
++			    p2m_granule_pfn << PAGE_SHIFT);
 +	}
 +	
-+	// use privcmd region
++	/* use privcmd region */
 +	error = allocate_resource(&iomem_resource, &p2m_resource, p2m_size,
-+	                          privcmd_resource_min, privcmd_resource_max,
-+	                          align, NULL, NULL);
++				  privcmd_resource_min, privcmd_resource_max,
++				  align, NULL, NULL);
 +	if (error) {
 +		printk(KERN_ERR P2M_PREFIX
 +		       "can't allocate region for p2m exposure "
-+		       "[0x%016lx, 0x%016lx) 0x%016lx\n",
++		       "[0x%016lx, 0x%016lx] 0x%016lx\n",
 +		       p2m_convert_min_pfn, p2m_convert_max_pfn, p2m_size);
 +		goto out;
 +	}
@@ -25623,8 +28392,8 @@
 +	p2m_assign_end_pfn = p2m_resource.end >> PAGE_SHIFT;
 +	
 +	error = HYPERVISOR_expose_p2m(p2m_convert_min_pfn,
-+	                              p2m_assign_start_pfn,
-+	                              p2m_expose_size, p2m_granule_pfn);
++				      p2m_assign_start_pfn,
++				      p2m_expose_size, p2m_granule_pfn);
 +	if (error) {
 +		printk(KERN_ERR P2M_PREFIX "failed expose p2m hypercall %d\n",
 +		       error);
@@ -25639,9 +28408,9 @@
 +#ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
 +	if (xen_ia64_p2m_expose_use_dtr) {
 +		p2m_itr_arg.vaddr = (unsigned long)__va(p2m_assign_start_pfn
-+		                                        << PAGE_SHIFT);
++							<< PAGE_SHIFT);
 +		p2m_itr_arg.pteval = pte_val(pfn_pte(p2m_assign_start_pfn,
-+		                                     PAGE_KERNEL));
++						     PAGE_KERNEL));
 +		p2m_itr_arg.log_page_size = log_page_size;
 +		smp_mb();
 +		smp_call_function(&p2m_itr, &p2m_itr_arg, 1, 1);
@@ -25652,10 +28421,10 @@
 +	p2m_initialized = 1;
 +	printk(P2M_PREFIX "assign p2m table of [0x%016lx, 0x%016lx)\n",
 +	       p2m_convert_min_pfn << PAGE_SHIFT,
-+	       p2m_convert_max_pfn << PAGE_SHIFT);
++	       (p2m_convert_max_pfn << PAGE_SHIFT) + PAGE_SIZE);
 +	printk(P2M_PREFIX "to [0x%016lx, 0x%016lx) (%ld KBytes)\n",
 +	       p2m_assign_start_pfn << PAGE_SHIFT,
-+	       p2m_assign_end_pfn << PAGE_SHIFT,
++	       (p2m_assign_end_pfn << PAGE_SHIFT) + PAGE_SIZE,
 +	       p2m_size / 1024);
 +out:
 +	unlock_cpu_hotplug();
@@ -25689,8 +28458,8 @@
 +	 * interrupts are masked when resume.
 +	 */
 +	error = HYPERVISOR_expose_p2m(p2m_convert_min_pfn,
-+	                              p2m_assign_start_pfn,
-+	                              p2m_expose_size, p2m_granule_pfn);
++				      p2m_assign_start_pfn,
++				      p2m_expose_size, p2m_granule_pfn);
 +	if (error) {
 +		printk(KERN_ERR P2M_PREFIX "failed expose p2m hypercall %d\n",
 +		       error);
@@ -25717,11 +28486,11 @@
 +	}
 +}
 +
-+//XXX inlinize?
++/* XXX make this inline? */
 +unsigned long
 +p2m_phystomach(unsigned long gpfn)
 +{
-+	volatile const pte_t* pte;
++	volatile const pte_t *pte;
 +	unsigned long mfn;
 +	unsigned long pteval;
 +	
@@ -25733,8 +28502,8 @@
 +
 +	mfn = INVALID_MFN;
 +	if (likely(__get_user(pteval, (unsigned long __user *)pte) == 0 &&
-+	           pte_present(__pte(pteval)) &&
-+	           pte_pfn(__pte(pteval)) != (INVALID_MFN >> PAGE_SHIFT)))
++		   pte_present(__pte(pteval)) &&
++		   pte_pfn(__pte(pteval)) != (INVALID_MFN >> PAGE_SHIFT)))
 +		mfn = (pteval & _PFN_MASK) >> PAGE_SHIFT;
 +
 +	return mfn;
@@ -25747,26 +28516,230 @@
 +EXPORT_SYMBOL_GPL(p2m_convert_max_pfn);
 +EXPORT_SYMBOL_GPL(p2m_pte);
 +EXPORT_SYMBOL_GPL(p2m_phystomach);
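++/*
++ * Lookup sketch (assumed caller): translate a guest pfn into a machine
++ * frame and check for failure, e.g.
++ *	unsigned long mfn = p2m_phystomach(gpfn);
++ *	if (mfn == INVALID_MFN)
++ *		...
++ */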
-+#endif
 +
-+///////////////////////////////////////////////////////////////////////////
-+// for xenoprof
++/**************************************************************************
++ * foreign domain p2m mapping
++ */
++#include <asm/xen/xencomm.h>
++#include <xen/public/privcmd.h>
++
++struct foreign_p2m_private {
++	unsigned long	gpfn;
++	domid_t		domid;
++};
++
++static void
++xen_foreign_p2m_unexpose(struct xen_ia64_privcmd_range *privcmd_range,
++			 void *arg)
++{
++	struct foreign_p2m_private *private = (struct foreign_p2m_private*)arg;
++	int ret;
++
++	privcmd_range->private = NULL;
++	privcmd_range->callback = NULL;
++
++	ret = HYPERVISOR_unexpose_foreign_p2m(private->gpfn, private->domid);
++	if (ret)
++		printk(KERN_WARNING
++		       "unexpose_foreign_p2m hypercall failed.\n");
++	kfree(private);
++}
++
++int
++xen_foreign_p2m_expose(privcmd_hypercall_t *hypercall)
++{
++	/*
++	 * hypercall->
++	 * arg0: cmd = IA64_DOM0VP_expose_foreign_p2m
++	 * arg1: va
++	 * arg2: domid
++	 * arg3: __user* memmap_info
++	 * arg4: flags
++	 */
++
++	int ret = 0;
++	struct mm_struct *mm = current->mm;
++
++	unsigned long vaddr = hypercall->arg[1];
++	domid_t domid = hypercall->arg[2];
++	struct xen_ia64_memmap_info __user *u_memmap_info =
++		(struct xen_ia64_memmap_info __user *)hypercall->arg[3];
++
++	struct xen_ia64_memmap_info memmap_info;
++	size_t memmap_size;
++	struct xen_ia64_memmap_info *k_memmap_info = NULL;
++	long max_gpfn;	/* signed: the hypercall may return a negative error */
++	unsigned long p2m_size;
++	struct resource *res;
++	unsigned long gpfn;
++
++	struct vm_area_struct *vma;
++	void *p;
++	unsigned long prev_src_gpfn_end;
++
++	struct xen_ia64_privcmd_vma *privcmd_vma;
++	struct xen_ia64_privcmd_range *privcmd_range;
++	struct foreign_p2m_private *private = NULL;
++
++	BUG_ON(hypercall->arg[0] != IA64_DOM0VP_expose_foreign_p2m);
++
++	private = kmalloc(sizeof(*private), GFP_KERNEL);
++	if (private == NULL) {
++		ret = -ENOMEM;
++		goto kfree_out;
++	}
++
++	if (copy_from_user(&memmap_info, u_memmap_info, sizeof(memmap_info))) {
++		ret = -EFAULT;
++		goto kfree_out;
++	}
++	/* memmap_info integrity check */
++	if (memmap_info.efi_memdesc_size < sizeof(efi_memory_desc_t) ||
++	    memmap_info.efi_memmap_size < memmap_info.efi_memdesc_size ||
++	    (memmap_info.efi_memmap_size % memmap_info.efi_memdesc_size)
++	    != 0) {
++		ret = -EINVAL;
++		goto kfree_out;
++	}
++
++	memmap_size = sizeof(*k_memmap_info) + memmap_info.efi_memmap_size;
++	k_memmap_info = kmalloc(memmap_size, GFP_KERNEL);
++	if (k_memmap_info == NULL) {
++		ret = -ENOMEM;
++		goto kfree_out;
++	}
++	if (copy_from_user(k_memmap_info, u_memmap_info, memmap_size)) {
++		ret = -EFAULT;
++		goto kfree_out;
++	}
++	/* k_memmap_info integrity check is done by the expose foreign p2m
++	   hypercall */
++
++	max_gpfn = HYPERVISOR_memory_op(XENMEM_maximum_gpfn, &domid);
++	if (max_gpfn < 0) {
++		ret = max_gpfn;
++		goto kfree_out;
++	}
++	p2m_size = p2m_table_size(max_gpfn + 1);
++
++	down_write(&mm->mmap_sem);
++
++	vma = find_vma(mm, vaddr);
++	if (vma == NULL || vma->vm_ops != &xen_ia64_privcmd_vm_ops ||
++	    vaddr != vma->vm_start ||
++	    (vma->vm_flags & VM_WRITE) || (vma->vm_flags & VM_EXEC) ||
++	    !privcmd_enforce_singleshot_mapping(vma)) {
++		ret = -EINVAL;
++		goto mmap_out;
++	}
++
++	privcmd_vma = (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
++	res = privcmd_vma->range->res;
++	if (p2m_size > (res->end - res->start + 1) ||
++	    p2m_size > vma->vm_end - vma->vm_start) {
++		ret = -EINVAL;
++		goto mmap_out;
++	}
++	
++	gpfn = res->start >> PAGE_SHIFT;
++	/*
++	 * arg0: dest_gpfn
++	 * arg1: domid
++	 * arg2: XEN_GUEST_HANDLE(char) buffer: memmap_info
++	 * arg3: flags
++	 * The hypercall checks its integrity, simplifies it, and
++	 * copies it back for us.
++	 */
++	ret = xencomm_arch_expose_foreign_p2m(gpfn, domid,
++	      xencomm_map_no_alloc(k_memmap_info, memmap_size),
++	      hypercall->arg[4]);
++	if (ret)
++		goto mmap_out;
++
++	privcmd_range = (struct xen_ia64_privcmd_range*)privcmd_vma->range;
++	prev_src_gpfn_end = 0;
++	for (p = k_memmap_info->memdesc;
++	     p < (void*)&k_memmap_info->memdesc[0] +
++		     k_memmap_info->efi_memmap_size;
++	     p += k_memmap_info->efi_memdesc_size) {
++		efi_memory_desc_t *md = p;
++		unsigned long src_gpfn = md->phys_addr >> PAGE_SHIFT;
++		unsigned long src_gpfn_end =
++			(md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >>
++			PAGE_SHIFT;
++		unsigned long num_src_gpfn;
++		unsigned long gpfn_offset;
++		unsigned long size;
++		unsigned int i;
++
++		if (src_gpfn <= prev_src_gpfn_end)
++			src_gpfn = prev_src_gpfn_end + 1;
++		if (src_gpfn_end <= prev_src_gpfn_end)
++			continue;
++
++		src_gpfn &= ~(PTRS_PER_PTE - 1);
++		src_gpfn_end = (src_gpfn_end + PTRS_PER_PTE - 1) &
++			~(PTRS_PER_PTE - 1);
++		num_src_gpfn = src_gpfn_end - src_gpfn;
++		gpfn_offset = src_gpfn / PTRS_PER_PTE;
++		size = p2m_table_size(num_src_gpfn);
++
++		prev_src_gpfn_end = src_gpfn_end;
++		ret = remap_pfn_range(vma,
++				      vaddr + (gpfn_offset << PAGE_SHIFT), 
++				      gpfn + gpfn_offset, size,
++				      vma->vm_page_prot);
++		if (ret) {
++			/* roll back the entries mapped so far */
++			for (i = 0; i < gpfn_offset; i++) {
++				struct xen_ia64_privcmd_entry *entry =
++					&privcmd_range->entries[i];
++				BUG_ON(atomic_read(&entry->map_count) != 1 &&
++				       atomic_read(&entry->map_count) != 0);
++				atomic_set(&entry->map_count, 0);
++				entry->gpfn = INVALID_GPFN;
++			}
++			(void)HYPERVISOR_unexpose_foreign_p2m(gpfn, domid);
++			goto mmap_out;
++		}
++
++		for (i = gpfn_offset;
++		     i < gpfn_offset + (size >> PAGE_SHIFT);
++		     i++) {
++			struct xen_ia64_privcmd_entry *entry =
++				&privcmd_range->entries[i];
++			BUG_ON(atomic_read(&entry->map_count) != 0);
++			BUG_ON(entry->gpfn != INVALID_GPFN);
++			atomic_inc(&entry->map_count);
++			entry->gpfn = gpfn + i;
++		}
++	}
++
++	private->gpfn = gpfn;
++	private->domid = domid;
++
++	privcmd_range->callback = &xen_foreign_p2m_unexpose;
++	privcmd_range->private = private;
++
++mmap_out:
++	up_write(&mm->mmap_sem);
++kfree_out:
++	kfree(k_memmap_info);
++	if (ret != 0)
++		kfree(private);
++	return ret;
++}
++#endif
 +
++/**************************************************************************
++ * for xenoprof
++ */
 +struct resource*
 +xen_ia64_allocate_resource(unsigned long size)
 +{
-+	struct resource* res;
++	struct resource *res;
 +	int error;
 +	
-+	res = kmalloc(sizeof(*res), GFP_KERNEL);
++	res = kzalloc(sizeof(*res), GFP_KERNEL);
 +	if (res == NULL)
 +		return ERR_PTR(-ENOMEM);
 +
 +	res->name = "Xen";
 +	res->flags = IORESOURCE_MEM;
 +	error = allocate_resource(&iomem_resource, res, PAGE_ALIGN(size),
-+	                          privcmd_resource_min, privcmd_resource_max,
-+	                          IA64_GRANULE_SIZE, NULL, NULL);
++				  privcmd_resource_min, privcmd_resource_max,
++				  IA64_GRANULE_SIZE, NULL, NULL);
 +	if (error) {
 +		kfree(res);
 +		return ERR_PTR(error);
@@ -25776,7 +28749,7 @@
 +EXPORT_SYMBOL_GPL(xen_ia64_allocate_resource);
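++/*
++ * Pairing sketch (assumed caller): the return value is ERR_PTR-encoded,
++ * so check it with IS_ERR(), e.g.
++ *	struct resource *res = xen_ia64_allocate_resource(size);
++ *	if (IS_ERR(res))
++ *		return PTR_ERR(res);
++ *	...
++ *	xen_ia64_release_resource(res);
++ */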
 +
 +void
-+xen_ia64_release_resource(struct resource* res)
++xen_ia64_release_resource(struct resource *res)
 +{
 +	release_resource(res);
 +	kfree(res);
@@ -25784,7 +28757,7 @@
 +EXPORT_SYMBOL_GPL(xen_ia64_release_resource);
 +
 +void
-+xen_ia64_unmap_resource(struct resource* res)
++xen_ia64_unmap_resource(struct resource *res)
 +{
 +	unsigned long gpfn = res->start >> PAGE_SHIFT;
 +	unsigned long nr_pages = (res->end - res->start) >> PAGE_SHIFT;
@@ -25801,8 +28774,25 @@
 +}
 +EXPORT_SYMBOL_GPL(xen_ia64_unmap_resource);
 +
-+///////////////////////////////////////////////////////////////////////////
-+// suspend/resume
++/**************************************************************************
++ * opt feature
++ */
++void
++xen_ia64_enable_opt_feature(void)
++{
++	/* Enable region 7 identity map optimizations in Xen */
++	struct xen_ia64_opt_feature optf;
++
++	optf.cmd = XEN_IA64_OPTF_IDENT_MAP_REG7;
++	optf.on = XEN_IA64_OPTF_ON;
++	optf.pgprot = pgprot_val(PAGE_KERNEL);
++	optf.key = 0;	/* No key on linux. */
++	HYPERVISOR_opt_feature(&optf);
++}
++
++/**************************************************************************
++ * suspend/resume
++ */
 +void
 +xen_post_suspend(int suspend_cancelled)
 +{
@@ -25810,1060 +28800,1068 @@
 +		return;
 +	
 +	p2m_expose_resume();
++	xen_ia64_enable_opt_feature();
 +	/* add more if necessary */
 +}
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/xen/Makefile tmp-linux-2.6-xen.patch/arch/ia64/xen/Makefile
---- pristine-linux-2.6.18.2/arch/ia64/xen/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/xen/Makefile	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,9 @@
-+#
-+# Makefile for Xen components
-+#
-+
-+obj-y := hypercall.o xenivt.o xenentry.o xensetup.o xenpal.o xenhpski.o \
-+	 hypervisor.o pci-dma-xen.o util.o xencomm.o xcom_hcall.o \
-+	 xcom_mini.o xcom_privcmd.o mem.o
-+
-+pci-dma-xen-y := ../../i386/kernel/pci-dma-xen.o
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/xen/mem.c tmp-linux-2.6-xen.patch/arch/ia64/xen/mem.c
---- pristine-linux-2.6.18.2/arch/ia64/xen/mem.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/xen/mem.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,75 @@
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/xen/machvec.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/xen/machvec.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,4 @@
++#define MACHVEC_PLATFORM_NAME           xen
++#define MACHVEC_PLATFORM_HEADER         <asm/machvec_xen.h>
++#include <asm/machvec_init.h>
++
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/xen/swiotlb.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/xen/swiotlb.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,906 @@
 +/*
-+ *  Originally from linux/drivers/char/mem.c
++ * Dynamic DMA mapping support.
 + *
-+ *  Copyright (C) 1991, 1992  Linus Torvalds
++ * This implementation is for IA-64 and EM64T platforms that do not support
++ * I/O TLBs (aka DMA address translation hardware).
++ * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick at intel.com>
++ * Copyright (C) 2000 Goutham Rao <goutham.rao at intel.com>
++ * Copyright (C) 2000, 2003 Hewlett-Packard Co
++ *	David Mosberger-Tang <davidm at hpl.hp.com>
 + *
-+ *  Added devfs support. 
-+ *    Jan-11-1998, C. Scott Ananian <cananian at alumni.princeton.edu>
-+ *  Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj at sgi.com>
-+ */
-+/*
-+ * taken from
-+ * linux/drivers/char/mem.c and linux-2.6-xen-sparse/drivers/xen/char/mem.c.
-+ * adjusted for IA64 and made transparent.
-+ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
-+ *                    VA Linux Systems Japan K.K.
++ * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
++ * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
++ *			unnecessary i-cache flushing.
++ * 04/07/.. ak		Better overflow handling. Assorted fixes.
++ * 05/09/10 linville	Add support for syncing ranges, support syncing for
++ *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 + */
 +
++#include <linux/cache.h>
++#include <linux/dma-mapping.h>
 +#include <linux/mm.h>
-+#include <linux/efi.h>
++#include <linux/module.h>
++#include <linux/spinlock.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ctype.h>
 +
-+/*
-+ * Architectures vary in how they handle caching for addresses
-+ * outside of main memory.
-+ *
-+ */
-+static inline int uncached_access(struct file *file, unsigned long addr)
-+{
-+	/*
-+	 * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
-+	 */
-+	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
-+}
++#include <asm/io.h>
++#include <asm/dma.h>
++#include <asm/scatterlist.h>
 +
-+int xen_mmap_mem(struct file * file, struct vm_area_struct * vma)
-+{
-+	unsigned long addr = vma->vm_pgoff << PAGE_SHIFT;
-+	size_t size = vma->vm_end - vma->vm_start;
++#include <linux/init.h>
++#include <linux/bootmem.h>
 +
-+
-+#if 0
-+	/*
-+	 *XXX FIXME: linux-2.6.16.29, linux-2.6.17
-+	 *    valid_mmap_phys_addr_range() in linux/arch/ia64/kernel/efi.c
-+	 *    fails checks.
-+	 *    linux-2.6.18.1's returns always 1. 
-+	 *    Its comments says
-+	 *
-+         * MMIO regions are often missing from the EFI memory map.
-+         * We must allow mmap of them for programs like X, so we
-+         * currently can't do any useful validation.
-+         */
-+	if (!valid_mmap_phys_addr_range(addr, &size))
-+		return -EINVAL;
-+	if (size < vma->vm_end - vma->vm_start)
-+		return -EINVAL;
++#ifdef CONFIG_XEN
++#include <xen/gnttab.h>
++#include <asm/gnttab_dma.h>
++/*
++ * What DMA mask should Xen use to remap the bounce buffer pool?  Most
++ * reports seem to indicate 30 bits is sufficient, except maybe for old
++ * sound cards that we probably don't care about anyway.  If we need to,
++ * we could put in some smarts to try to lower, but hopefully it's not
++ * necessary.
++ */
++#define DMA_BITS	(30)
 +#endif
 +
-+	if (is_running_on_xen()) {
-+		unsigned long offset = HYPERVISOR_ioremap(addr, size);
-+		if (IS_ERR_VALUE(offset))
-+			return offset;
-+	}
++#define OFFSET(val,align) ((unsigned long)	\
++	                   ( (val) & ( (align) - 1)))
 +
-+	if (uncached_access(file, vma->vm_pgoff << PAGE_SHIFT))
-+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++#define SG_ENT_VIRT_ADDRESS(sg)	(page_address((sg)->page) + (sg)->offset)
++#define SG_ENT_PHYS_ADDRESS(SG)	virt_to_bus(SG_ENT_VIRT_ADDRESS(SG))
 +
-+        /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
-+        if (remap_pfn_range(vma,
-+                            vma->vm_start,
-+                            vma->vm_pgoff,
-+                            size,
-+                            vma->vm_page_prot))
-+                return -EAGAIN;
-+        return 0;
-+}
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/xen/util.c tmp-linux-2.6-xen.patch/arch/ia64/xen/util.c
---- pristine-linux-2.6.18.2/arch/ia64/xen/util.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/xen/util.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,105 @@
-+/******************************************************************************
-+ * arch/ia64/xen/util.c
-+ * This file is the ia64 counterpart of drivers/xen/util.c
-+ *
-+ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
-+ *                    VA Linux Systems Japan K.K.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-+ *
++/*
++ * Maximum allowable number of contiguous slabs to map,
++ * must be a power of 2.  What is the appropriate value?
++ * The complexity of {map,unmap}_single is linearly dependent on this value.
 + */
++#define IO_TLB_SEGSIZE	128
 +
-+#include <linux/mm.h>
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/vmalloc.h>
-+#include <asm/uaccess.h>
-+#include <xen/driver_util.h>
-+#include <xen/interface/memory.h>
-+#include <asm/hypercall.h>
++/*
++ * log of the size of each IO TLB slab.  The number of slabs is command line
++ * controllable.
++ */
++#define IO_TLB_SHIFT 11
 +
-+struct vm_struct *alloc_vm_area(unsigned long size)
-+{
-+	int order;
-+	unsigned long virt;
-+	unsigned long nr_pages;
-+	struct vm_struct* area;
-+	
-+	order = get_order(size);
-+	virt = __get_free_pages(GFP_KERNEL, order);
-+	if (virt == 0) {
-+		goto err0;
-+	}
-+	nr_pages = 1 << order;
-+	scrub_pages(virt, nr_pages);
-+	
-+	area = kmalloc(sizeof(*area), GFP_KERNEL);
-+	if (area == NULL) {
-+		goto err1;
-+	}
-+	
-+        area->flags = VM_IOREMAP;//XXX
-+        area->addr = (void*)virt;
-+        area->size = size;
-+        area->pages = NULL; //XXX
-+        area->nr_pages = nr_pages;
-+        area->phys_addr = 0; 	/* xenbus_map_ring_valloc uses this field!  */
++#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
 +
-+	return area;
++/*
++ * Minimum IO TLB size to bother booting with.  Systems with mainly
++ * 64-bit capable cards will only lightly use the swiotlb.  If we can't
++ * allocate a contiguous 1MB, we're probably in trouble anyway.
++ */
++#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
 +
-+err1:
-+	free_pages(virt, order);
-+err0:
-+	return NULL;
-+	
-+}
-+EXPORT_SYMBOL_GPL(alloc_vm_area);
++/*
++ * Enumeration for sync targets
++ */
++enum dma_sync_target {
++	SYNC_FOR_CPU = 0,
++	SYNC_FOR_DEVICE = 1,
++};
 +
-+void free_vm_area(struct vm_struct *area)
-+{
-+	unsigned int order = get_order(area->size);
-+	unsigned long i;
-+	unsigned long phys_addr = __pa(area->addr);
++int swiotlb_force;
 +
-+	// This area is used for foreign page mappping.
-+	// So underlying machine page may not be assigned.
-+	for (i = 0; i < (1 << order); i++) {
-+		unsigned long ret;
-+		unsigned long gpfn = (phys_addr >> PAGE_SHIFT) + i;
-+		struct xen_memory_reservation reservation = {
-+			.nr_extents   = 1,
-+			.address_bits = 0,
-+			.extent_order = 0,
-+			.domid        = DOMID_SELF
-+		};
-+		set_xen_guest_handle(reservation.extent_start, &gpfn);
-+		ret = HYPERVISOR_memory_op(XENMEM_populate_physmap,
-+					   &reservation);
-+		BUG_ON(ret != 1);
-+	}
-+	free_pages((unsigned long)area->addr, order);
-+	kfree(area);
-+}
-+EXPORT_SYMBOL_GPL(free_vm_area);
++/*
++ * Used to do a quick range check in swiotlb_unmap_single and
++ * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
++ * API.
++ */
++static char *io_tlb_start, *io_tlb_end;
 +
 +/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
++ * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
++ * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 + */
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/xen/xcom_hcall.c tmp-linux-2.6-xen.patch/arch/ia64/xen/xcom_hcall.c
---- pristine-linux-2.6.18.2/arch/ia64/xen/xcom_hcall.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/xen/xcom_hcall.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,383 @@
++static unsigned long io_tlb_nslabs;
++
 +/*
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
-+ *
-+ *          Tristan Gingold <tristan.gingold at bull.net>
++ * When the IOMMU overflows we return a fallback buffer. This sets the size.
 + */
-+#include <linux/types.h>
-+#include <linux/errno.h>
-+#include <linux/kernel.h>
-+#include <linux/gfp.h>
-+#include <linux/module.h>
-+#include <xen/interface/xen.h>
-+#include <xen/interface/platform.h>
-+#include <xen/interface/memory.h>
-+#include <xen/interface/xencomm.h>
-+#include <xen/interface/version.h>
-+#include <xen/interface/sched.h>
-+#include <xen/interface/event_channel.h>
-+#include <xen/interface/physdev.h>
-+#include <xen/interface/grant_table.h>
-+#include <xen/interface/callback.h>
-+#include <xen/interface/acm_ops.h>
-+#include <xen/interface/hvm/params.h>
-+#include <xen/interface/xenoprof.h>
-+#include <xen/interface/vcpu.h>
-+#include <asm/hypercall.h>
-+#include <asm/page.h>
-+#include <asm/uaccess.h>
-+#include <asm/xen/xencomm.h>
-+#include <asm/perfmon.h>
++static unsigned long io_tlb_overflow = 32*1024;
 +
-+/* Xencomm notes:
-+ * This file defines hypercalls to be used by xencomm.  The hypercalls simply
-+ * create inlines descriptors for pointers and then call the raw arch hypercall
-+ * xencomm_arch_hypercall_XXX
-+ *
-+ * If the arch wants to directly use these hypercalls, simply define macros
-+ * in asm/hypercall.h, eg:
-+ *  #define HYPERVISOR_sched_op xencomm_hypercall_sched_op
-+ * 
-+ * The arch may also define HYPERVISOR_xxx as a function and do more operations
-+ * before/after doing the hypercall.
-+ *
-+ * Note: because only inline descriptors are created these functions must only
-+ * be called with in kernel memory parameters.
++void *io_tlb_overflow_buffer;
++
++/*
++ * This is a free list describing the number of free entries available from
++ * each index
 + */
++static unsigned int *io_tlb_list;
++static unsigned int io_tlb_index;
 +
-+int
-+xencomm_hypercall_console_io(int cmd, int count, char *str)
-+{
-+	return xencomm_arch_hypercall_console_io
-+		(cmd, count, xencomm_create_inline(str));
-+}
++/*
++ * We need to save away the original address corresponding to a mapped entry
++ * for the sync operations.
++ */
++static unsigned char **io_tlb_orig_addr;
 +
-+int
-+xencomm_hypercall_event_channel_op(int cmd, void *op)
-+{
-+	return xencomm_arch_hypercall_event_channel_op
-+		(cmd, xencomm_create_inline(op));
-+}
++/*
++ * Protect the above data structures in the map and unmap calls
++ */
++static DEFINE_SPINLOCK(io_tlb_lock);
 +
-+int
-+xencomm_hypercall_xen_version(int cmd, void *arg)
++static int __init
++setup_io_tlb_npages(char *str)
 +{
-+	switch (cmd) {
-+	case XENVER_version:
-+	case XENVER_extraversion:
-+	case XENVER_compile_info:
-+	case XENVER_capabilities:
-+	case XENVER_changeset:
-+	case XENVER_platform_parameters:
-+	case XENVER_pagesize:
-+	case XENVER_get_features:
-+		break;
-+	default:
-+		printk("%s: unknown version cmd %d\n", __func__, cmd);
-+		return -ENOSYS;
++	if (isdigit(*str)) {
++		io_tlb_nslabs = simple_strtoul(str, &str, 0);
++		/* avoid tail segment of size < IO_TLB_SEGSIZE */
++		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
 +	}
-+
-+	return xencomm_arch_hypercall_xen_version
-+		(cmd, xencomm_create_inline(arg));
++	if (*str == ',')
++		++str;
++	if (!strcmp(str, "force"))
++		swiotlb_force = 1;
++	return 1;
 +}
++__setup("swiotlb=", setup_io_tlb_npages);
++/* make io_tlb_overflow tunable too? */
 +
-+int
-+xencomm_hypercall_physdev_op(int cmd, void *op)
++/*
++ * Statically reserve bounce buffer space and initialize bounce buffer data
++ * structures for the software IO TLB used to implement the DMA API.
++ */
++void
++swiotlb_init_with_default_size (size_t default_size)
 +{
-+	return xencomm_arch_hypercall_physdev_op
-+		(cmd, xencomm_create_inline(op));
-+}
++	unsigned long i;
 +
-+static void *
-+xencommize_grant_table_op(unsigned int cmd, void *op, unsigned int count)
-+{
-+	switch (cmd) {
-+	case GNTTABOP_map_grant_ref:
-+	case GNTTABOP_unmap_grant_ref:
-+		break;
-+	case GNTTABOP_setup_table:
-+	{
-+		struct gnttab_setup_table *setup = op;
-+		struct xencomm_handle *frame_list;
++	if (!io_tlb_nslabs) {
++		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
++		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
++	}
 +
-+		frame_list = xencomm_create_inline
-+			(xen_guest_handle(setup->frame_list));
++#ifdef CONFIG_XEN
++	if (is_running_on_xen())
++		io_tlb_nslabs = roundup_pow_of_two(io_tlb_nslabs);
++#endif
++	/*
++	 * Get IO TLB memory from the low pages
++	 */
++	io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
++	if (!io_tlb_start)
++		panic("Cannot allocate SWIOTLB buffer");
++	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
 +
-+		set_xen_guest_handle(setup->frame_list, (void *)frame_list);
-+		break;
-+	}
-+	case GNTTABOP_dump_table:
-+	case GNTTABOP_transfer:
-+	case GNTTABOP_copy:
-+		break;
-+	default:
-+		printk("%s: unknown grant table op %d\n", __func__, cmd);
-+		BUG();
++#ifdef CONFIG_XEN
++	for (i = 0 ; i < io_tlb_nslabs ; i += IO_TLB_SEGSIZE) {
++		if (xen_create_contiguous_region(
++				(unsigned long)io_tlb_start +
++				(i << IO_TLB_SHIFT),
++				get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT),
++				DMA_BITS))
++			panic("Failed to setup Xen contiguous region");
 +	}
++#endif
 +
-+	return  xencomm_create_inline(op);
++	/*
++	 * Allocate and initialize the free list array.  This array is used
++	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
++	 * between io_tlb_start and io_tlb_end.
++	 */
++	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
++	for (i = 0; i < io_tlb_nslabs; i++)
++ 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
++	io_tlb_index = 0;
++	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
++
++	/*
++	 * Get the overflow emergency buffer
++	 */
++	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
++#ifdef CONFIG_XEN
++	if (xen_create_contiguous_region((unsigned long)io_tlb_overflow_buffer,
++					 get_order(io_tlb_overflow), DMA_BITS))
++		panic("Failed to setup Xen contiguous region for overflow");
++#endif
++	printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
++	       virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
 +}
 +
-+int
-+xencomm_hypercall_grant_table_op(unsigned int cmd, void *op, unsigned int count)
++void
++swiotlb_init (void)
 +{
-+	void *desc = xencommize_grant_table_op (cmd, op, count);
-+
-+	return xencomm_arch_hypercall_grant_table_op(cmd, desc, count);
++	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
 +}
 +
++/*
++ * Systems with larger DMA zones (those that don't support ISA) can
++ * initialize the swiotlb later using the slab allocator if needed.
++ * This should be just like above, but with some error catching.
++ */
 +int
-+xencomm_hypercall_sched_op(int cmd, void *arg)
++swiotlb_late_init_with_default_size (size_t default_size)
 +{
-+	switch (cmd) {
-+	case SCHEDOP_yield:
-+	case SCHEDOP_block:
-+	case SCHEDOP_shutdown:
-+	case SCHEDOP_remote_shutdown:
-+		break;
-+	case SCHEDOP_poll:
-+	{
-+		sched_poll_t *poll = arg;
-+		struct xencomm_handle *ports;
-+
-+		ports = xencomm_create_inline(xen_guest_handle(poll->ports));
++	unsigned long i, req_nslabs = io_tlb_nslabs;
++	unsigned int order;
 +
-+		set_xen_guest_handle(poll->ports, (void *)ports);
-+		break;
++	if (!io_tlb_nslabs) {
++		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
++		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
 +	}
-+	default:
-+		printk("%s: unknown sched op %d\n", __func__, cmd);
-+		return -ENOSYS;
-+	}
-+	
-+	return xencomm_arch_hypercall_sched_op(cmd, xencomm_create_inline(arg));
-+}
-+
-+int
-+xencomm_hypercall_multicall(void *call_list, int nr_calls)
-+{
-+	int i;
-+	multicall_entry_t *mce;
 +
-+	for (i = 0; i < nr_calls; i++) {
-+		mce = (multicall_entry_t *)call_list + i;
++#ifdef CONFIG_XEN
++	if (is_running_on_xen())
++		io_tlb_nslabs = roundup_pow_of_two(io_tlb_nslabs);
++#endif
++	/*
++	 * Get IO TLB memory from the low pages
++	 */
++	order = get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
++	io_tlb_nslabs = SLABS_PER_PAGE << order;
 +
-+		switch (mce->op) {
-+		case __HYPERVISOR_update_va_mapping:
-+		case __HYPERVISOR_mmu_update:
-+			/* No-op on ia64.  */
++	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
++		io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
++		                                        order);
++		if (io_tlb_start)
 +			break;
-+		case __HYPERVISOR_grant_table_op:
-+			mce->args[1] = (unsigned long)xencommize_grant_table_op
-+				(mce->args[0], (void *)mce->args[1],
-+				 mce->args[2]);
-+			break;
-+		case __HYPERVISOR_memory_op:
-+		default:
-+			printk("%s: unhandled multicall op entry op %lu\n",
-+			       __func__, mce->op);
-+			return -ENOSYS;
-+		}
++		order--;
 +	}
 +
-+	return xencomm_arch_hypercall_multicall
-+		(xencomm_create_inline(call_list), nr_calls);
-+}
++	if (!io_tlb_start)
++		goto cleanup1;
 +
-+int
-+xencomm_hypercall_callback_op(int cmd, void *arg)
-+{
-+	switch (cmd)
-+	{
-+	case CALLBACKOP_register:
-+	case CALLBACKOP_unregister:
-+		break;
-+	default:
-+		printk("%s: unknown callback op %d\n", __func__, cmd);
-+		return -ENOSYS;
++	if (order != get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT))) {
++		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
++		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
++		io_tlb_nslabs = SLABS_PER_PAGE << order;
 +	}
++	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
++	memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT));
 +
-+	return xencomm_arch_hypercall_callback_op
-+		(cmd, xencomm_create_inline(arg));
-+}
-+
-+static void
-+xencommize_memory_reservation (xen_memory_reservation_t *mop)
-+{
-+	struct xencomm_handle *desc;
-+
-+	desc = xencomm_create_inline(xen_guest_handle(mop->extent_start));
-+	set_xen_guest_handle(mop->extent_start, (void *)desc);
-+}
-+
-+int
-+xencomm_hypercall_memory_op(unsigned int cmd, void *arg)
-+{
-+	XEN_GUEST_HANDLE(xen_pfn_t) extent_start_va[2];
-+	xen_memory_reservation_t *xmr = NULL, *xme_in = NULL, *xme_out = NULL;
-+	int rc;
++#ifdef CONFIG_XEN
++	for (i = 0 ; i < io_tlb_nslabs ; i += IO_TLB_SEGSIZE) {
++		if (xen_create_contiguous_region(
++				(unsigned long)io_tlb_start +
++				(i << IO_TLB_SHIFT),
++				get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT),
++				DMA_BITS))
++			panic("Failed to setup Xen contiguous region");
++	}
++#endif
++	/*
++	 * Allocate and initialize the free list array.  This array is used
++	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
++	 * between io_tlb_start and io_tlb_end.
++	 */
++	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
++	                              get_order(io_tlb_nslabs * sizeof(int)));
++	if (!io_tlb_list)
++		goto cleanup2;
 +
-+	switch (cmd) {
-+	case XENMEM_increase_reservation:
-+	case XENMEM_decrease_reservation:
-+	case XENMEM_populate_physmap:
-+		xmr = (xen_memory_reservation_t *)arg;
-+		xen_guest_handle(extent_start_va[0]) =
-+			xen_guest_handle(xmr->extent_start);
-+		xencommize_memory_reservation((xen_memory_reservation_t *)arg);
-+		break;
-+		
-+	case XENMEM_maximum_ram_page:
-+		break;
++	for (i = 0; i < io_tlb_nslabs; i++)
++ 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
++	io_tlb_index = 0;
 +
-+	case XENMEM_exchange:
-+		xme_in  = &((xen_memory_exchange_t *)arg)->in;
-+		xme_out = &((xen_memory_exchange_t *)arg)->out;
-+		xen_guest_handle(extent_start_va[0]) =
-+			xen_guest_handle(xme_in->extent_start);
-+		xen_guest_handle(extent_start_va[1]) =
-+			xen_guest_handle(xme_out->extent_start);
-+		xencommize_memory_reservation
-+			(&((xen_memory_exchange_t *)arg)->in);
-+		xencommize_memory_reservation
-+			(&((xen_memory_exchange_t *)arg)->out);
-+		break;
++	io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
++	                           get_order(io_tlb_nslabs * sizeof(char *)));
++	if (!io_tlb_orig_addr)
++		goto cleanup3;
 +
-+	default:
-+		printk("%s: unknown memory op %d\n", __func__, cmd);
-+		return -ENOSYS;
-+	}
++	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));
 +
-+	rc =  xencomm_arch_hypercall_memory_op(cmd, xencomm_create_inline(arg));
++	/*
++	 * Get the overflow emergency buffer
++	 */
++	io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
++	                                          get_order(io_tlb_overflow));
++	if (!io_tlb_overflow_buffer)
++		goto cleanup4;
 +
-+	switch (cmd) {
-+	case XENMEM_increase_reservation:
-+	case XENMEM_decrease_reservation:
-+	case XENMEM_populate_physmap:
-+		xen_guest_handle(xmr->extent_start) =
-+			xen_guest_handle(extent_start_va[0]);
-+		break;
++#ifdef CONFIG_XEN
++	if (xen_create_contiguous_region((unsigned long)io_tlb_overflow_buffer,
++					 get_order(io_tlb_overflow), DMA_BITS))
++		panic("Failed to setup Xen contiguous region for overflow");
++#endif
++	printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - "
++	       "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20,
++	       virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
 +
-+	case XENMEM_exchange:
-+		xen_guest_handle(xme_in->extent_start) =
-+			xen_guest_handle(extent_start_va[0]);
-+		xen_guest_handle(xme_out->extent_start) =
-+			xen_guest_handle(extent_start_va[1]);
-+		break;
-+	}
++	return 0;
 +
-+	return rc;
++cleanup4:
++	free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
++	                                                      sizeof(char *)));
++	io_tlb_orig_addr = NULL;
++cleanup3:
++	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
++	                                                 sizeof(int)));
++	io_tlb_list = NULL;
++	io_tlb_end = NULL;
++cleanup2:
++	free_pages((unsigned long)io_tlb_start, order);
++	io_tlb_start = NULL;
++cleanup1:
++	io_tlb_nslabs = req_nslabs;
++	return -ENOMEM;
 +}
 +
-+unsigned long
-+xencomm_hypercall_hvm_op(int cmd, void *arg)
++static inline int
++address_needs_mapping(struct device *hwdev, dma_addr_t addr)
 +{
-+	switch (cmd) {
-+	case HVMOP_set_param:
-+	case HVMOP_get_param:
-+		break;
-+	default:
-+		printk("%s: unknown hvm op %d\n", __func__, cmd);
-+		return -ENOSYS;
-+	}
-+
-+	return xencomm_arch_hypercall_hvm_op(cmd, xencomm_create_inline(arg));
++	dma_addr_t mask = 0xffffffff;
++	/* If the device has a mask, use it; otherwise default to 32 bits */
++	if (hwdev && hwdev->dma_mask)
++		mask = *hwdev->dma_mask;
++	return (addr & ~mask) != 0;
 +}
 +
-+int
-+xencomm_hypercall_suspend(unsigned long srec)
++/*
++ * Allocates bounce buffer and returns its kernel virtual address.
++ */
++static void *
++map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 +{
-+	struct sched_shutdown arg;
++	unsigned long flags;
++	char *dma_addr;
++	unsigned int nslots, stride, index, wrap;
++	char *slot_buf;
++	int i;
 +
-+	arg.reason = SHUTDOWN_suspend;
++	/*
++	 * For mappings greater than a page, we limit the stride (and
++	 * hence alignment) to a page size.
++	 */
++	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
++	if (size > PAGE_SIZE)
++		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
++	else
++		stride = 1;
 +
-+	return xencomm_arch_hypercall_suspend(xencomm_create_inline(&arg));
-+}
++	BUG_ON(!nslots);
 +
-+int
-+xencomm_hypercall_xenoprof_op(int op, void *arg)
-+{
-+	switch (op) {
-+	case XENOPROF_init:
-+	case XENOPROF_set_active:
-+	case XENOPROF_set_passive:
-+	case XENOPROF_counter:
-+	case XENOPROF_get_buffer:
-+		break;
++	/*
++	 * Find a suitable number of IO TLB entries that will fit this
++	 * request and allocate a buffer from that IO TLB pool.
++	 */
++	spin_lock_irqsave(&io_tlb_lock, flags);
++	{
++		wrap = index = ALIGN(io_tlb_index, stride);
 +
-+	case XENOPROF_reset_active_list:
-+	case XENOPROF_reset_passive_list:
-+	case XENOPROF_reserve_counters:
-+	case XENOPROF_setup_events:
-+	case XENOPROF_enable_virq:
-+	case XENOPROF_start:
-+	case XENOPROF_stop:
-+	case XENOPROF_disable_virq:
-+	case XENOPROF_release_counters:
-+	case XENOPROF_shutdown:
-+		return xencomm_arch_hypercall_xenoprof_op(op, arg);
-+		break;
++		if (index >= io_tlb_nslabs)
++			wrap = index = 0;
 +
-+	default:
-+		printk("%s: op %d isn't supported\n", __func__, op);
-+		return -ENOSYS;
-+	}
-+	return xencomm_arch_hypercall_xenoprof_op(op,
-+						  xencomm_create_inline(arg));
-+}
++		do {
++			/*
++			 * If we find a slot that indicates we have 'nslots'
++			 * number of contiguous buffers, we allocate the
++			 * buffers from that slot and mark the entries as '0'
++			 * indicating unavailable.
++			 */
++			if (io_tlb_list[index] >= nslots) {
++				int count = 0;
 +
-+int
-+xencomm_hypercall_perfmon_op(unsigned long cmd, void* arg, unsigned long count)
-+{
-+	switch (cmd) {
-+	case PFM_GET_FEATURES:
-+	case PFM_CREATE_CONTEXT:
-+	case PFM_WRITE_PMCS:
-+	case PFM_WRITE_PMDS:
-+	case PFM_LOAD_CONTEXT:
-+		break;
++				for (i = index; i < (int) (index + nslots); i++)
++					io_tlb_list[i] = 0;
++				for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
++					io_tlb_list[i] = ++count;
++				dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
 +
-+	case PFM_DESTROY_CONTEXT:
-+	case PFM_UNLOAD_CONTEXT:
-+	case PFM_START:
-+	case PFM_STOP:
-+		return xencomm_arch_hypercall_perfmon_op(cmd, arg, count);
++				/*
++				 * Update the indices to avoid searching in
++				 * the next round.
++				 */
++				io_tlb_index = ((index + nslots) < io_tlb_nslabs
++						? (index + nslots) : 0);
 +
-+	default:
-+		printk("%s:%d cmd %ld isn't supported\n",
-+		       __func__,__LINE__, cmd);
-+		BUG();
-+	}
++				goto found;
++			}
++			index += stride;
++			if (index >= io_tlb_nslabs)
++				index = 0;
++		} while (index != wrap);
 +
-+	return xencomm_arch_hypercall_perfmon_op(cmd,
-+	                                         xencomm_create_inline(arg),
-+	                                         count);
-+}
-+
-+long
-+xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg)
-+{
-+	switch (cmd) {
-+	case VCPUOP_register_runstate_memory_area:
-+		xencommize_memory_reservation((xen_memory_reservation_t *)arg);
-+		break;
++		spin_unlock_irqrestore(&io_tlb_lock, flags);
++		return NULL;
++	}
++  found:
++	spin_unlock_irqrestore(&io_tlb_lock, flags);
 +
-+	default:
-+		printk("%s: unknown vcpu op %d\n", __func__, cmd);
-+		return -ENOSYS;
++	/*
++	 * Save away the mapping from the original address to the DMA address.
++	 * This is needed when we sync the memory.  Then we sync the buffer if
++	 * needed.
++	 */
++	slot_buf = buffer;
++	for (i = 0; i < nslots; i++) {
++		io_tlb_orig_addr[index + i] = slot_buf;
++		slot_buf += 1 << IO_TLB_SHIFT;
 +	}
++	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
++		memcpy(dma_addr, buffer, size);
 +
-+	return xencomm_arch_hypercall_vcpu_op(cmd, cpu,
-+					      xencomm_create_inline(arg));
++	return dma_addr;
 +}
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/xen/xcom_mini.c tmp-linux-2.6-xen.patch/arch/ia64/xen/xcom_mini.c
---- pristine-linux-2.6.18.2/arch/ia64/xen/xcom_mini.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/xen/xcom_mini.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,456 @@
++
 +/*
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
-+ *
-+ *          Tristan Gingold <tristan.gingold at bull.net>
++ * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 + */
-+#include <linux/types.h>
-+#include <linux/errno.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <xen/interface/xen.h>
-+#include <xen/interface/platform.h>
-+#include <xen/interface/memory.h>
-+#include <xen/interface/xencomm.h>
-+#include <xen/interface/version.h>
-+#include <xen/interface/event_channel.h>
-+#include <xen/interface/physdev.h>
-+#include <xen/interface/grant_table.h>
-+#include <xen/interface/hvm/params.h>
-+#include <xen/interface/xenoprof.h>
-+#ifdef CONFIG_VMX_GUEST
-+#include <asm/hypervisor.h>
-+#else
-+#include <asm/hypercall.h>
-+#endif
-+#include <asm/xen/xencomm.h>
-+#include <asm/perfmon.h>
-+
-+int
-+xencomm_mini_hypercall_event_channel_op(int cmd, void *op)
++static void
++unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 +{
-+	struct xencomm_mini xc_area[2];
-+	int nbr_area = 2;
-+	struct xencomm_handle *desc;
-+	int rc;
++	unsigned long flags;
++	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
++	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
++	char *buffer = io_tlb_orig_addr[index];
 +
-+	rc = xencomm_create_mini(xc_area, &nbr_area,
-+	                         op, sizeof(evtchn_op_t), &desc);
-+	if (rc)
-+		return rc;
++	/*
++	 * First, sync the memory before unmapping the entry
++	 */
++	if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
++		/*
++		 * bounce... copy the data back into the original buffer and
++		 * delete the bounce buffer.
++		 */
++		memcpy(buffer, dma_addr, size);
 +
-+	return xencomm_arch_hypercall_event_channel_op(cmd, desc);
++	/*
++	 * Return the buffer to the free list by setting the corresponding
++	 * entries to indicate the number of contiguous entries available.
++	 * While returning the entries to the free list, we merge the entries
++	 * with slots below and above the pool being returned.
++	 */
++	spin_lock_irqsave(&io_tlb_lock, flags);
++	{
++		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
++			 io_tlb_list[index + nslots] : 0);
++		/*
++		 * Step 1: return the slots to the free list, merging the
++		 * slots with succeeding slots
++		 */
++		for (i = index + nslots - 1; i >= index; i--)
++			io_tlb_list[i] = ++count;
++		/*
++		 * Step 2: merge the returned slots with the preceding slots,
++		 * if available (non-zero)
++		 */
++		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
++			io_tlb_list[i] = ++count;
++	}
++	spin_unlock_irqrestore(&io_tlb_lock, flags);
 +}
-+EXPORT_SYMBOL(xencomm_mini_hypercall_event_channel_op);
 +
-+static int
-+xencommize_mini_grant_table_op(struct xencomm_mini *xc_area, int *nbr_area,
-+                               unsigned int cmd, void *op, unsigned int count,
-+                               struct xencomm_handle **desc)
++static void
++sync_single(struct device *hwdev, char *dma_addr, size_t size,
++	    int dir, int target)
 +{
-+	struct xencomm_handle *desc1;
-+	unsigned int argsize;
-+	int rc;
++	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
++	char *buffer = io_tlb_orig_addr[index];
 +
-+	switch (cmd) {
-+	case GNTTABOP_map_grant_ref:
-+		argsize = sizeof(struct gnttab_map_grant_ref);
++	switch (target) {
++	case SYNC_FOR_CPU:
++		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
++			memcpy(buffer, dma_addr, size);
++		else
++			BUG_ON(dir != DMA_TO_DEVICE);
 +		break;
-+	case GNTTABOP_unmap_grant_ref:
-+		argsize = sizeof(struct gnttab_unmap_grant_ref);
++	case SYNC_FOR_DEVICE:
++		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
++			memcpy(dma_addr, buffer, size);
++		else
++			BUG_ON(dir != DMA_FROM_DEVICE);
 +		break;
-+	case GNTTABOP_setup_table:
-+	{
-+		struct gnttab_setup_table *setup = op;
++	default:
++		BUG();
++	}
++}
 +
-+		argsize = sizeof(*setup);
++void *
++swiotlb_alloc_coherent(struct device *hwdev, size_t size,
++		       dma_addr_t *dma_handle, gfp_t flags)
++{
++	unsigned long dev_addr;
++	void *ret;
++	int order = get_order(size);
 +
-+		if (count != 1)
-+			return -EINVAL;
-+		rc = xencomm_create_mini
-+			(xc_area, nbr_area,
-+			 xen_guest_handle(setup->frame_list),
-+			 setup->nr_frames 
-+			 * sizeof(*xen_guest_handle(setup->frame_list)),
-+			 &desc1);
-+		if (rc)
-+			return rc;
-+		set_xen_guest_handle(setup->frame_list, (void *)desc1);
-+		break;
++	/*
++	 * XXX fix me: the DMA API should pass us an explicit DMA mask
++	 * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32
++	 * bit range instead of a 16MB one).
++	 */
++	flags |= GFP_DMA;
++
++	ret = (void *)__get_free_pages(flags, order);
++#ifdef CONFIG_XEN
++	if (ret && is_running_on_xen()) {
++		if (xen_create_contiguous_region((unsigned long)ret, order,
++					fls64(hwdev->coherent_dma_mask))) {
++			free_pages((unsigned long)ret, order);
++			ret = NULL;
++		} else {
++			/*
++			 * Short circuit the rest, xen_create_contiguous_region
++			 * should fail if it didn't give us an address within
++			 * the mask requested.  
++			 */
++			memset(ret, 0, size);
++			*dma_handle = virt_to_bus(ret);
++			return ret;
++		}
 +	}
-+	case GNTTABOP_dump_table:
-+		argsize = sizeof(struct gnttab_dump_table);
-+		break;
-+	case GNTTABOP_transfer:
-+		argsize = sizeof(struct gnttab_transfer);
-+		break;
-+	case GNTTABOP_copy:
-+		argsize = sizeof(struct gnttab_copy);
-+		break;
-+	case GNTTABOP_query_size:
-+		argsize = sizeof(struct gnttab_query_size);
-+		break;
-+	default:
-+		printk("%s: unknown mini grant table op %d\n", __func__, cmd);
-+		BUG();
++#endif
++	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
++		/*
++		 * The allocated memory isn't reachable by the device.
++		 * Fall back on swiotlb_map_single().
++		 */
++		free_pages((unsigned long) ret, order);
++		ret = NULL;
 +	}
++	if (!ret) {
++		/*
++		 * We are either out of memory or the device can't DMA
++		 * to GFP_DMA memory; fall back on
++		 * swiotlb_map_single(), which will grab memory from
++		 * the lowest available address range.
++		 */
++		dma_addr_t handle;
++		handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
++		if (swiotlb_dma_mapping_error(handle))
++			return NULL;
 +
-+	rc = xencomm_create_mini(xc_area, nbr_area, op, count * argsize, desc);
-+	if (rc)
-+		return rc;
++		ret = bus_to_virt(handle);
++	}
 +
-+	return 0;
++	memset(ret, 0, size);
++	dev_addr = virt_to_bus(ret);
++
++	/* Confirm address can be DMA'd by device */
++	if (address_needs_mapping(hwdev, dev_addr)) {
++		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016lx\n",
++		       (unsigned long long)*hwdev->dma_mask, dev_addr);
++		panic("swiotlb_alloc_coherent: allocated memory is out of "
++		      "range for device");
++	}
++	*dma_handle = dev_addr;
++	return ret;
 +}
 +
-+int
-+xencomm_mini_hypercall_grant_table_op(unsigned int cmd, void *op,
-+                                      unsigned int count)
++void
++swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
++		      dma_addr_t dma_handle)
 +{
-+	int rc;
-+	struct xencomm_handle *desc;
-+	int nbr_area = 2;
-+	struct xencomm_mini xc_area[2];
++	if (!(vaddr >= (void *)io_tlb_start
++                    && vaddr < (void *)io_tlb_end)) {
++#ifdef CONFIG_XEN
++		xen_destroy_contiguous_region((unsigned long)vaddr,
++					      get_order(size));
++#endif
++		free_pages((unsigned long) vaddr, get_order(size));
++	} else
++		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
++		swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
++}
 +
-+	rc = xencommize_mini_grant_table_op(xc_area, &nbr_area,
-+	                                    cmd, op, count, &desc);
-+	if (rc)
-+		return rc;
++static void
++swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
++{
++	/*
++	 * Ran out of IOMMU space for this operation. This is very bad.
++	 * Unfortunately the drivers cannot handle this operation properly
++	 * unless they check for dma_mapping_error (most don't).
++	 * When the mapping is small enough, return a static buffer to limit
++	 * the damage, or panic when the transfer is too big.
++	 */
++	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %lu bytes at "
++	       "device %s\n", size, dev ? dev->bus_id : "?");
 +
-+	return xencomm_arch_hypercall_grant_table_op(cmd, desc, count);
++	if (size > io_tlb_overflow && do_panic) {
++		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
++			panic("DMA: Memory would be corrupted\n");
++		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
++			panic("DMA: Random memory would be DMAed\n");
++	}
 +}
-+EXPORT_SYMBOL(xencomm_mini_hypercall_grant_table_op);
 +
-+int
-+xencomm_mini_hypercall_multicall(void *call_list, int nr_calls)
++/*
++ * Map a single buffer of the indicated size for DMA in streaming mode.  The
++ * physical address to use is returned.
++ *
++ * Once the device is given the dma address, the device owns this memory until
++ * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
++ */
++dma_addr_t
++swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 +{
-+	int i;
-+	multicall_entry_t *mce;
-+	int nbr_area = 2 + nr_calls * 3;
-+	struct xencomm_mini xc_area[nbr_area];
-+	struct xencomm_handle *desc;
-+	int rc;
++	unsigned long dev_addr = gnttab_dma_map_virt(ptr);
++	void *map;
 +
-+	for (i = 0; i < nr_calls; i++) {
-+		mce = (multicall_entry_t *)call_list + i;
++	BUG_ON(dir == DMA_NONE);
++	/*
++	 * If the pointer passed in happens to be in the device's DMA window,
++	 * we can safely return the device addr and not worry about bounce
++	 * buffering it.
++	 */
++	if (!range_straddles_page_boundary(__pa(ptr), size) &&
++	    !address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
++		return dev_addr;
 +
-+		switch (mce->op) {
-+		case __HYPERVISOR_update_va_mapping:
-+		case __HYPERVISOR_mmu_update:
-+			/* No-op on ia64.  */
-+			break;
-+		case __HYPERVISOR_grant_table_op:
-+			rc = xencommize_mini_grant_table_op
-+				(xc_area, &nbr_area,
-+				 mce->args[0], (void *)mce->args[1],
-+				 mce->args[2], &desc);
-+			if (rc)
-+				return rc;
-+			mce->args[1] = (unsigned long)desc;
-+			break;
-+		case __HYPERVISOR_memory_op:
-+		default:
-+			printk("%s: unhandled multicall op entry op %lu\n",
-+			       __func__, mce->op);
-+			return -ENOSYS;
-+		}
++	__gnttab_dma_unmap_page(virt_to_page(ptr));
++	/*
++	 * Oh well, have to allocate and map a bounce buffer.
++	 */
++	map = map_single(hwdev, ptr, size, dir);
++	if (!map) {
++		swiotlb_full(hwdev, size, dir, 1);
++		map = io_tlb_overflow_buffer;
 +	}
 +
-+	rc = xencomm_create_mini(xc_area, &nbr_area, call_list,
-+	                         nr_calls * sizeof(multicall_entry_t), &desc);
-+	if (rc)
-+		return rc;
++	dev_addr = virt_to_bus(map);
 +
-+	return xencomm_arch_hypercall_multicall(desc, nr_calls);
++	/*
++	 * Ensure that the address returned is DMA'ble
++	 */
++	if (address_needs_mapping(hwdev, dev_addr))
++		panic("map_single: bounce buffer is not DMA'ble");
++
++	return dev_addr;
 +}
-+EXPORT_SYMBOL(xencomm_mini_hypercall_multicall);
 +
-+static int
-+xencommize_mini_memory_reservation(struct xencomm_mini *area, int *nbr_area,
-+                                   xen_memory_reservation_t *mop)
++/*
++ * Since DMA is i-cache coherent, any (complete) pages that were written via
++ * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
++ * flush them when they get mapped into an executable vm-area.
++ */
++static void
++mark_clean(void *addr, size_t size)
 +{
-+	struct xencomm_handle *desc;
-+	int rc;
-+
-+	rc = xencomm_create_mini
-+		(area, nbr_area,
-+		 xen_guest_handle(mop->extent_start),
-+		 mop->nr_extents 
-+		 * sizeof(*xen_guest_handle(mop->extent_start)),
-+		 &desc);
-+	if (rc)
-+		return rc;
++	unsigned long pg_addr, end;
 +
-+	set_xen_guest_handle(mop->extent_start, (void *)desc);
++#ifdef CONFIG_XEN
++	/* XXX: Bad things happen when starting domUs if this is enabled. */
++	if (is_running_on_xen())
++		return;
++#endif
 +
-+	return 0;
++	pg_addr = PAGE_ALIGN((unsigned long) addr);
++	end = (unsigned long) addr + size;
++	while (pg_addr + PAGE_SIZE <= end) {
++		struct page *page = virt_to_page(pg_addr);
++		set_bit(PG_arch_1, &page->flags);
++		pg_addr += PAGE_SIZE;
++	}
 +}
 +
-+int
-+xencomm_mini_hypercall_memory_op(unsigned int cmd, void *arg)
++/*
++ * Unmap a single streaming mode DMA translation.  The dma_addr and size must
++ * match what was provided in a previous swiotlb_map_single call.  All
++ * other usages are undefined.
++ *
++ * After this call, reads by the cpu to the buffer are guaranteed to see
++ * whatever the device wrote there.
++ */
++void
++swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
++		     int dir)
 +{
-+	int nbr_area = 4;
-+	struct xencomm_mini xc_area[4];
-+	struct xencomm_handle *desc;
-+	int rc;
-+	unsigned int argsize;
-+
-+	switch (cmd) {
-+	case XENMEM_increase_reservation:
-+	case XENMEM_decrease_reservation:
-+	case XENMEM_populate_physmap:
-+		argsize = sizeof(xen_memory_reservation_t);
-+		rc = xencommize_mini_memory_reservation
-+			(xc_area, &nbr_area, (xen_memory_reservation_t *)arg);
-+		if (rc)
-+			return rc;
-+		break;
-+		
-+	case XENMEM_maximum_ram_page:
-+		argsize = 0;
-+		break;
++	char *dma_addr = bus_to_virt(dev_addr);
 +
-+	case XENMEM_exchange:
-+		argsize = sizeof(xen_memory_exchange_t);
-+		rc = xencommize_mini_memory_reservation
-+			(xc_area, &nbr_area,
-+			 &((xen_memory_exchange_t *)arg)->in);
-+		if (rc)
-+			return rc;
-+		rc = xencommize_mini_memory_reservation
-+			(xc_area, &nbr_area,
-+			 &((xen_memory_exchange_t *)arg)->out);
-+		if (rc)
-+			return rc;
-+		break;
++	BUG_ON(dir == DMA_NONE);
++	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
++		unmap_single(hwdev, dma_addr, size, dir);
++	else {
++		__gnttab_dma_unmap_page(virt_to_page(dma_addr));
++		if (dir == DMA_FROM_DEVICE)
++			mark_clean(dma_addr, size);
++	}
++}
 +
-+	case XENMEM_add_to_physmap:
-+		argsize = sizeof (xen_add_to_physmap_t);
-+		break;
++/*
++ * Make physical memory consistent for a single streaming mode DMA translation
++ * after a transfer.
++ *
++ * If you perform a swiotlb_map_single() but wish to interrogate the buffer
++ * using the cpu, yet do not wish to tear down the dma mapping, you must
++ * call this function before doing so.  At the next point you give the dma
++ * address back to the card, you must first perform a
++ * swiotlb_sync_single_for_device, and then the device again owns the buffer.
++ */
++static inline void
++swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
++		    size_t size, int dir, int target)
++{
++	char *dma_addr = bus_to_virt(dev_addr);
 +
-+	default:
-+		printk("%s: unknown mini memory op %d\n", __func__, cmd);
-+		return -ENOSYS;
-+	}
++	BUG_ON(dir == DMA_NONE);
++	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
++		sync_single(hwdev, dma_addr, size, dir, target);
++	else if (dir == DMA_FROM_DEVICE)
++		mark_clean(dma_addr, size);
++}
 +
-+	rc = xencomm_create_mini(xc_area, &nbr_area, arg, argsize, &desc);
-+	if (rc)
-+		return rc;
++void
++swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
++			    size_t size, int dir)
++{
++	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
++}
 +
-+	return xencomm_arch_hypercall_memory_op(cmd, desc);
++void
++swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
++			       size_t size, int dir)
++{
++	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
 +}
-+EXPORT_SYMBOL(xencomm_mini_hypercall_memory_op);
 +
-+unsigned long
-+xencomm_mini_hypercall_hvm_op(int cmd, void *arg)
++/*
++ * Same as above, but for a sub-range of the mapping.
++ */
++static inline void
++swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
++			  unsigned long offset, size_t size,
++			  int dir, int target)
 +{
-+	struct xencomm_handle *desc;
-+	int nbr_area = 2;
-+	struct xencomm_mini xc_area[2];
-+	unsigned int argsize;
-+	int rc;
++	char *dma_addr = bus_to_virt(dev_addr) + offset;
 +
-+	switch (cmd) {
-+	case HVMOP_get_param:
-+	case HVMOP_set_param:
-+		argsize = sizeof(xen_hvm_param_t);
-+		break;
-+	default:
-+		printk("%s: unknown HVMOP %d\n", __func__, cmd);
-+		return -EINVAL;
-+	}
++	BUG_ON(dir == DMA_NONE);
++	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
++		sync_single(hwdev, dma_addr, size, dir, target);
++	else if (dir == DMA_FROM_DEVICE)
++		mark_clean(dma_addr, size);
++}
 +
-+	rc = xencomm_create_mini(xc_area, &nbr_area, arg, argsize, &desc);
-+	if (rc)
-+		return rc;
++void
++swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
++				  unsigned long offset, size_t size, int dir)
++{
++	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
++				  SYNC_FOR_CPU);
++}
 +
-+	return xencomm_arch_hypercall_hvm_op(cmd, desc);
++void
++swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
++				     unsigned long offset, size_t size, int dir)
++{
++	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
++				  SYNC_FOR_DEVICE);
 +}
-+EXPORT_SYMBOL(xencomm_mini_hypercall_hvm_op);
 +
++/*
++ * Map a set of buffers described by scatterlist in streaming mode for DMA.
++ * This is the scatter-gather version of the above swiotlb_map_single
++ * interface.  Here the scatter gather list elements are each tagged with the
++ * appropriate dma address and length.  They are obtained via
++ * sg_dma_{address,length}(SG).
++ *
++ * NOTE: An implementation may be able to use a smaller number of
++ *       DMA address/length pairs than there are SG table elements
++ *       (for example, via virtual mapping capabilities).
++ *       The routine returns the number of addr/length pairs actually
++ *       used, at most nents.
++ *
++ * Device ownership issues as mentioned above for swiotlb_map_single are the
++ * same here.
++ */
 +int
-+xencomm_mini_hypercall_xen_version(int cmd, void *arg)
++swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
++	       int dir)
 +{
-+	struct xencomm_handle *desc;
-+	int nbr_area = 2;
-+	struct xencomm_mini xc_area[2];
-+	unsigned int argsize;
-+	int rc;
++	void *addr;
++	unsigned long dev_addr;
++	int i;
 +
-+	switch (cmd) {
-+	case XENVER_version:
-+		/* do not actually pass an argument */
-+		return xencomm_arch_hypercall_xen_version(cmd, 0);
-+	case XENVER_extraversion:
-+		argsize = sizeof(xen_extraversion_t);
-+		break;
-+	case XENVER_compile_info:
-+		argsize = sizeof(xen_compile_info_t);
-+		break;
-+	case XENVER_capabilities:
-+		argsize = sizeof(xen_capabilities_info_t);
-+		break;
-+	case XENVER_changeset:
-+		argsize = sizeof(xen_changeset_info_t);
-+		break;
-+	case XENVER_platform_parameters:
-+		argsize = sizeof(xen_platform_parameters_t);
-+		break;
-+	case XENVER_pagesize:
-+		argsize = (arg == NULL) ? 0 : sizeof(void *);
-+		break;
-+	case XENVER_get_features:
-+		argsize = (arg == NULL) ? 0 : sizeof(xen_feature_info_t);
-+		break;
++	BUG_ON(dir == DMA_NONE);
 +
-+	default:
-+		printk("%s: unknown version op %d\n", __func__, cmd);
-+		return -ENOSYS;
++	for (i = 0; i < nelems; i++, sg++) {
++		addr = SG_ENT_VIRT_ADDRESS(sg);
++		dev_addr = gnttab_dma_map_virt(addr);
++		if (swiotlb_force ||
++		    range_straddles_page_boundary(page_to_pseudophys(sg->page)
++						  + sg->offset, sg->length) ||
++		    address_needs_mapping(hwdev, dev_addr)) {
++			void *map;
++			__gnttab_dma_unmap_page(sg->page);
++			map = map_single(hwdev, addr, sg->length, dir);
++			sg->dma_address = virt_to_bus(map);
++			if (!map) {
++				/* Don't panic here, we expect map_sg users
++				   to do proper error handling. */
++				swiotlb_full(hwdev, sg->length, dir, 0);
++				swiotlb_unmap_sg(hwdev, sg - i, i, dir);
++				sg[0].dma_length = 0;
++				return 0;
++			}
++		} else
++			sg->dma_address = dev_addr;
++		sg->dma_length = sg->length;
 +	}
++	return nelems;
++}
 +
-+	rc = xencomm_create_mini(xc_area, &nbr_area, arg, argsize, &desc);
-+	if (rc)
-+		return rc;
++/*
++ * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
++ * concerning calls here are the same as for swiotlb_unmap_single() above.
++ */
++void
++swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
++		 int dir)
++{
++	int i;
 +
-+	return xencomm_arch_hypercall_xen_version(cmd, desc);
++	BUG_ON(dir == DMA_NONE);
++
++	for (i = 0; i < nelems; i++, sg++)
++		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
++			unmap_single(hwdev, (void *) bus_to_virt(sg->dma_address), sg->dma_length, dir);
++		else {
++			__gnttab_dma_unmap_page(sg->page);
++			if (dir == DMA_FROM_DEVICE)
++				mark_clean(SG_ENT_VIRT_ADDRESS(sg),
++					   sg->dma_length);
++		}
 +}
-+EXPORT_SYMBOL(xencomm_mini_hypercall_xen_version);
 +
-+int
-+xencomm_mini_hypercall_xenoprof_op(int op, void *arg)
++/*
++ * Make physical memory consistent for a set of streaming mode DMA translations
++ * after a transfer.
++ *
++ * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
++ * and usage.
++ */
++static inline void
++swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
++		int nelems, int dir, int target)
 +{
-+	unsigned int argsize;
-+	struct xencomm_mini xc_area[2];
-+	int nbr_area = 2;
-+	struct xencomm_handle *desc;
-+	int rc;
++	int i;
 +
-+	switch (op) {
-+	case XENOPROF_init:
-+		argsize = sizeof(xenoprof_init_t);
-+		break;
-+	case XENOPROF_set_active:
-+		argsize = sizeof(domid_t);
-+		break;
-+	case XENOPROF_set_passive:
-+		argsize = sizeof(xenoprof_passive_t);
-+		break;
-+	case XENOPROF_counter:
-+		argsize = sizeof(xenoprof_counter_t);
-+		break;
-+	case XENOPROF_get_buffer:
-+		argsize = sizeof(xenoprof_get_buffer_t);
-+		break;
++	BUG_ON(dir == DMA_NONE);
 +
-+	case XENOPROF_reset_active_list:
-+	case XENOPROF_reset_passive_list:
-+	case XENOPROF_reserve_counters:
-+	case XENOPROF_setup_events:
-+	case XENOPROF_enable_virq:
-+	case XENOPROF_start:
-+	case XENOPROF_stop:
-+	case XENOPROF_disable_virq:
-+	case XENOPROF_release_counters:
-+	case XENOPROF_shutdown:
-+		return xencomm_arch_hypercall_xenoprof_op(op, arg);
++	for (i = 0; i < nelems; i++, sg++)
++		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
++			sync_single(hwdev, (void *) sg->dma_address,
++				    sg->dma_length, dir, target);
++}
 +
-+	default:
-+		printk("%s: op %d isn't supported\n", __func__, op);
-+		return -ENOSYS;
-+	}
-+	rc = xencomm_create_mini(xc_area, &nbr_area, arg, argsize, &desc);
-+	if (rc)
-+		return rc;
-+	return xencomm_arch_hypercall_xenoprof_op(op, desc);
++void
++swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
++			int nelems, int dir)
++{
++	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
++}
++
++void
++swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
++			   int nelems, int dir)
++{
++	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
 +}
-+EXPORT_SYMBOL_GPL(xencomm_mini_hypercall_xenoprof_op);
 +
 +int
-+xencomm_mini_hypercall_perfmon_op(unsigned long cmd, void* arg,
-+                                  unsigned long count)
++swiotlb_dma_mapping_error(dma_addr_t dma_addr)
 +{
-+	unsigned int argsize;
-+	struct xencomm_mini xc_area[2];
-+	int nbr_area = 2;
-+	struct xencomm_handle *desc;
-+	int rc;
++	return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
++}
 +
-+	switch (cmd) {
-+	case PFM_GET_FEATURES:
-+		argsize = sizeof(pfarg_features_t);
-+		break;
-+	case PFM_CREATE_CONTEXT:
-+		argsize = sizeof(pfarg_context_t);
-+		break;
-+	case PFM_LOAD_CONTEXT:
-+		argsize = sizeof(pfarg_load_t);
-+		break;
-+	case PFM_WRITE_PMCS:
-+	case PFM_WRITE_PMDS:
-+		argsize = sizeof(pfarg_reg_t) * count;
-+		break;
++/*
++ * Return whether the given device DMA address mask can be supported
++ * properly.  For example, if your device can only drive the low 24 bits
++ * during bus mastering, then you would pass 0x00ffffff as the mask to
++ * this function.
++ */
++int
++swiotlb_dma_supported (struct device *hwdev, u64 mask)
++{
++#ifdef CONFIG_XEN
++	return (virt_to_bus(io_tlb_end - 1)) <= mask;
++#else
++	return (virt_to_bus(io_tlb_end) - 1) <= mask;
++#endif
++}
 +
-+	case PFM_DESTROY_CONTEXT:
-+	case PFM_UNLOAD_CONTEXT:
-+	case PFM_START:
-+	case PFM_STOP:
-+		return xencomm_arch_hypercall_perfmon_op(cmd, arg, count);
++EXPORT_SYMBOL(swiotlb_init);
++EXPORT_SYMBOL(swiotlb_map_single);
++EXPORT_SYMBOL(swiotlb_unmap_single);
++EXPORT_SYMBOL(swiotlb_map_sg);
++EXPORT_SYMBOL(swiotlb_unmap_sg);
++EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
++EXPORT_SYMBOL(swiotlb_sync_single_for_device);
++EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
++EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
++EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
++EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
++EXPORT_SYMBOL(swiotlb_dma_mapping_error);
++EXPORT_SYMBOL(swiotlb_alloc_coherent);
++EXPORT_SYMBOL(swiotlb_free_coherent);
++EXPORT_SYMBOL(swiotlb_dma_supported);
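The ownership rules spelled out in the comments above boil down to the usual
streaming-DMA call pattern.  A minimal sketch for orientation only (this is
hypothetical driver code, not part of the patch; dev, buf and len are
placeholders):

	/* Map a buffer the device will write into (DMA_FROM_DEVICE). */
	dma_addr_t handle = swiotlb_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (swiotlb_dma_mapping_error(handle))
		return -ENOMEM;	/* mapping fell back to the overflow buffer */

	/* ... device DMAs into the buffer ... */

	/* Let the CPU look at the data without tearing down the mapping. */
	swiotlb_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* Hand the buffer back to the device before the next transfer. */
	swiotlb_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);

	/* Done: for DMA_FROM_DEVICE this copies any bounce buffer back. */
	swiotlb_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
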
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/xen/util.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/xen/util.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,102 @@
++/******************************************************************************
++ * arch/ia64/xen/util.c
++ * This file is the ia64 counterpart of drivers/xen/util.c
++ *
++ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
++ *                    VA Linux Systems Japan K.K.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ *
++ */
 +
-+	default:
-+		printk("%s:%d cmd %ld isn't supported\n",
-+		       __func__, __LINE__, cmd);
-+		BUG();
-+	}
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <asm/uaccess.h>
++#include <xen/driver_util.h>
++#include <xen/interface/memory.h>
++#include <asm/hypercall.h>
 +
-+	rc = xencomm_create_mini(xc_area, &nbr_area, arg, argsize, &desc);
-+	if (rc)
-+		return rc;
-+	return xencomm_arch_hypercall_perfmon_op(cmd, desc, count);
++struct vm_struct *alloc_vm_area(unsigned long size)
++{
++	int order;
++	unsigned long virt;
++	unsigned long nr_pages;
++	struct vm_struct *area;
++
++	order = get_order(size);
++	virt = __get_free_pages(GFP_KERNEL, order);
++	if (virt == 0)
++		goto err0;
++	nr_pages = 1 << order;
++	scrub_pages(virt, nr_pages);
++
++	area = kmalloc(sizeof(*area), GFP_KERNEL);
++	if (area == NULL)
++		goto err1;
++
++	area->flags = VM_IOREMAP; /* XXX */
++	area->addr = (void *)virt;
++	area->size = size;
++	area->pages = NULL; /* XXX */
++	area->nr_pages = nr_pages;
++	area->phys_addr = 0;	/* xenbus_map_ring_valloc uses this field!  */
++
++	return area;
++
++err1:
++	free_pages(virt, order);
++err0:
++	return NULL;
 +}
-+EXPORT_SYMBOL_GPL(xencomm_mini_hypercall_perfmon_op);
++EXPORT_SYMBOL_GPL(alloc_vm_area);
 +
-+int
-+xencomm_mini_hypercall_sched_op(int cmd, void *arg)
++void free_vm_area(struct vm_struct *area)
 +{
-+	int rc, nbr_area = 2;
-+	struct xencomm_mini xc_area[2];
-+	struct xencomm_handle *desc;
-+	unsigned int argsize;
-+
-+	switch (cmd) {
-+	case SCHEDOP_yield:
-+	case SCHEDOP_block:
-+		argsize = 0;
-+		break;
-+	case SCHEDOP_shutdown:
-+		argsize = sizeof(sched_shutdown_t);
-+		break;
-+	case SCHEDOP_poll:
-+		argsize = sizeof(sched_poll_t);
-+		break;
-+	case SCHEDOP_remote_shutdown:
-+		argsize = sizeof(sched_remote_shutdown_t);
-+		break;
++	unsigned int order = get_order(area->size);
++	unsigned long i;
++	unsigned long phys_addr = __pa(area->addr);
 +
-+	default:
-+		printk("%s: unknown sched op %d\n", __func__, cmd);
-+		return -ENOSYS;
++	/* This area is used for foreign page mapping,
++	 * so the underlying machine page may not be assigned. */
++	for (i = 0; i < (1 << order); i++) {
++		unsigned long ret;
++		unsigned long gpfn = (phys_addr >> PAGE_SHIFT) + i;
++		struct xen_memory_reservation reservation = {
++			.nr_extents   = 1,
++			.address_bits = 0,
++			.extent_order = 0,
++			.domid        = DOMID_SELF
++		};
++		set_xen_guest_handle(reservation.extent_start, &gpfn);
++		ret = HYPERVISOR_memory_op(XENMEM_populate_physmap,
++					   &reservation);
++		BUG_ON(ret != 1);
 +	}
++	free_pages((unsigned long)area->addr, order);
++	kfree(area);
++}
++EXPORT_SYMBOL_GPL(free_vm_area);
 +
-+	rc = xencomm_create_mini(xc_area, &nbr_area, arg, argsize, &desc);
-+	if (rc)
-+		return rc;
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
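
For context, alloc_vm_area()/free_vm_area() are intended to be used the way
xenbus_map_ring_valloc() uses them on other architectures.  A rough sketch of
a caller (hypothetical, not part of the patch):

	struct vm_struct *area;

	area = alloc_vm_area(PAGE_SIZE);	/* scrubbed, page-backed range */
	if (area == NULL)
		return -ENOMEM;

	/* area->addr is the kernel virtual range handed to the grant-table
	 * mapping code; area->phys_addr is reserved for the caller, as the
	 * comment in alloc_vm_area() above notes. */

	free_vm_area(area);	/* repopulates the physmap, then frees pages */
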
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/xen/xcom_asm.S
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/xen/xcom_asm.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,27 @@
++/*
++ * xencomm suspend support
++ * Support routines for Xen
++ *
++ * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer at hp.com>
++ */
++#include <asm/asmmacro.h>
++#include <xen/interface/xen.h>
 +
-+	return xencomm_arch_hypercall_sched_op(cmd, desc);
-+}
-+EXPORT_SYMBOL_GPL(xencomm_mini_hypercall_sched_op);
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/xen/xcom_privcmd.c tmp-linux-2.6-xen.patch/arch/ia64/xen/xcom_privcmd.c
---- pristine-linux-2.6.18.2/arch/ia64/xen/xcom_privcmd.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/xen/xcom_privcmd.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,663 @@
++/*
++ * Stub for suspend.
++ * Just force the stacked registers to be written in memory.
++ */
++GLOBAL_ENTRY(xencomm_arch_hypercall_suspend)
++	;; 
++	alloc r20=ar.pfs,0,0,6,0
++	mov r2=__HYPERVISOR_sched_op
++	;; 
++	/* We don't want to deal with RSE.  */
++	flushrs
++	mov r33=r32
++	mov r32=2 // SCHEDOP_shutdown
++	;;
++	break 0x1000
++	;; 
++	br.ret.sptk.many b0
++END(xencomm_arch_hypercall_suspend)
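
The matching C caller (shown in its inline form earlier in this mail; the
consolidated xcom_hcall.c below uses the same scheme) wraps a sched_shutdown
argument and passes the descriptor, which the stub above moves into r33
before loading SCHEDOP_shutdown into r32.  Roughly:

	int xencomm_hypercall_suspend(unsigned long srec)
	{
		struct sched_shutdown arg = { .reason = SHUTDOWN_suspend };

		/* descriptor arrives in r32; 'break 0x1000' enters Xen */
		return xencomm_arch_hypercall_suspend(
			xencomm_create_inline(&arg));
	}
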
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/xen/xcom_hcall.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/xen/xcom_hcall.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,671 @@
 +/*
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License as published by
@@ -26879,8 +29877,12 @@
 + * along with this program; if not, write to the Free Software
 + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 + *
-+ * Authors: Hollis Blanchard <hollisb at us.ibm.com>
 + *          Tristan Gingold <tristan.gingold at bull.net>
++ *
++ *          Copyright (c) 2007
++ *          Isaku Yamahata <yamahata at valinux co jp>
++ *                          VA Linux Systems Japan K.K.
++ *          consolidate mini and inline version.
 + */
 +#include <linux/types.h>
 +#include <linux/errno.h>
@@ -26889,989 +29891,1939 @@
 +#include <linux/module.h>
 +#include <xen/interface/xen.h>
 +#include <xen/interface/platform.h>
-+#define __XEN__
-+#include <xen/interface/domctl.h>
-+#include <xen/interface/sysctl.h>
 +#include <xen/interface/memory.h>
++#include <xen/interface/xencomm.h>
 +#include <xen/interface/version.h>
++#include <xen/interface/sched.h>
 +#include <xen/interface/event_channel.h>
-+#include <xen/interface/acm_ops.h>
++#include <xen/interface/physdev.h>
++#include <xen/interface/grant_table.h>
++#include <xen/interface/callback.h>
++#include <xen/interface/xsm/acm_ops.h>
 +#include <xen/interface/hvm/params.h>
-+#include <xen/public/privcmd.h>
-+#include <asm/hypercall.h>
++#include <xen/interface/xenoprof.h>
++#include <xen/interface/vcpu.h>
++#include <xen/interface/kexec.h>
++#include <asm/hypervisor.h>
 +#include <asm/page.h>
 +#include <asm/uaccess.h>
 +#include <asm/xen/xencomm.h>
++#include <asm/perfmon.h>
 +
-+#define ROUND_DIV(v,s) (((v) + (s) - 1) / (s))
++/* Xencomm notes:
++ * This file defines the hypercalls to be used by xencomm.  The hypercalls
++ * simply create inline or mini descriptors for pointers and then call the
++ * raw arch hypercall xencomm_arch_hypercall_XXX.
++ *
++ * If the arch wants to use these hypercalls directly, it simply defines
++ * macros in asm/hypercall.h, e.g.:
++ *  #define HYPERVISOR_sched_op xencomm_hypercall_sched_op
++ *
++ * The arch may also define HYPERVISOR_xxx as a function and do more
++ * operations before/after making the hypercall.
++ *
++ * Note: because only inline or mini descriptors are created, these
++ * functions must only be called with in-kernel memory parameters.
++ */
 +
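++/* Editor's sketch (hypothetical, not part of the original patch): as the
++ * note above describes, an arch can also wrap an entry point in a
++ * function when it needs extra work around the hypercall; the name and
++ * the hook comment here are placeholders, and xencomm_hypercall_sched_op
++ * is defined later in this file. */
++static inline int example_HYPERVISOR_sched_op(int cmd, void *arg)
++{
++	/* arch-specific preparation could go here */
++	return xencomm_hypercall_sched_op(cmd, arg);
++}
++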
-+static int
-+xencomm_privcmd_platform_op(privcmd_hypercall_t *hypercall)
++int
++xencomm_hypercall_console_io(int cmd, int count, char *str)
 +{
-+	struct xen_platform_op kern_op;
-+	struct xen_platform_op __user *user_op = (struct xen_platform_op __user *)hypercall->arg[0];
-+	struct xencomm_handle *op_desc;
-+	struct xencomm_handle *desc = NULL;
-+	int ret = 0;
++	return xencomm_arch_hypercall_console_io
++		(cmd, count, xencomm_map_no_alloc(str, count));
++}
++EXPORT_SYMBOL_GPL(xencomm_hypercall_console_io);
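++
++/* Editor's example (illustration only, not part of the original patch):
++ * writing a kernel-memory string to the Xen console through the wrapper
++ * above; CONSOLEIO_write comes from xen/interface/xen.h. */
++static void example_console_write(void)
++{
++	static char msg[] = "xencomm: hello\n";
++
++	xencomm_hypercall_console_io(CONSOLEIO_write, sizeof(msg) - 1, msg);
++}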
 +
-+	if (copy_from_user(&kern_op, user_op, sizeof(struct xen_platform_op)))
-+		return -EFAULT;
++int
++xencomm_hypercall_event_channel_op(int cmd, void *op)
++{
++	struct xencomm_handle *desc;
++	desc = xencomm_map_no_alloc(op, sizeof(evtchn_op_t));
++	if (desc == NULL)
++		return -EINVAL;
 +
-+	if (kern_op.interface_version != XENPF_INTERFACE_VERSION)
-+		return -EACCES;
++	return xencomm_arch_hypercall_event_channel_op(cmd, desc);
++}
++EXPORT_SYMBOL_GPL(xencomm_hypercall_event_channel_op);
 +
-+	op_desc = xencomm_create_inline(&kern_op);
++int
++xencomm_hypercall_xen_version(int cmd, void *arg)
++{
++	struct xencomm_handle *desc;
++	unsigned int argsize;
++
++	switch (cmd) {
++	case XENVER_version:
++		/* do not actually pass an argument */
++		return xencomm_arch_hypercall_xen_version(cmd, 0);
++	case XENVER_extraversion:
++		argsize = sizeof(xen_extraversion_t);
++		break;
++	case XENVER_compile_info:
++		argsize = sizeof(xen_compile_info_t);
++		break;
++	case XENVER_capabilities:
++		argsize = sizeof(xen_capabilities_info_t);
++		break;
++	case XENVER_changeset:
++		argsize = sizeof(xen_changeset_info_t);
++		break;
++	case XENVER_platform_parameters:
++		argsize = sizeof(xen_platform_parameters_t);
++		break;
++	case XENVER_pagesize:
++		argsize = (arg == NULL) ? 0 : sizeof(void *);
++		break;
++	case XENVER_get_features:
++		argsize = (arg == NULL) ? 0 : sizeof(xen_feature_info_t);
++		break;
 +
-+	switch (kern_op.cmd) {
 +	default:
-+		printk("%s: unknown platform cmd %d\n", __func__, kern_op.cmd);
++		printk("%s: unknown version op %d\n", __func__, cmd);
 +		return -ENOSYS;
 +	}
 +
-+	if (ret) {
-+		/* error mapping the nested pointer */
-+		return ret;
-+	}
++	desc = xencomm_map_no_alloc(arg, argsize);
++	if (desc == NULL)
++		return -EINVAL;
 +
-+	ret = xencomm_arch_hypercall_platform_op(op_desc);
++	return xencomm_arch_hypercall_xen_version(cmd, desc);
++}
++EXPORT_SYMBOL_GPL(xencomm_hypercall_xen_version);
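++
++/* Editor's example (illustration only, not part of the original patch):
++ * querying the feature submap through the wrapper above; the argument
++ * struct lives in kernel memory, as the notes at the top of the file
++ * require. */
++static int example_get_features(void)
++{
++	xen_feature_info_t fi = { .submap_idx = 0 };
++	int rc;
++
++	rc = xencomm_hypercall_xen_version(XENVER_get_features, &fi);
++	return rc ? rc : (int)fi.submap;
++}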
 +
-+	/* FIXME: should we restore the handle?  */
-+	if (copy_to_user(user_op, &kern_op, sizeof(struct xen_platform_op)))
-+		ret = -EFAULT;
++int
++xencomm_hypercall_physdev_op(int cmd, void *op)
++{
++	unsigned int argsize;
++	
++	switch (cmd) {
++	case PHYSDEVOP_apic_read:
++	case PHYSDEVOP_apic_write:
++		argsize = sizeof(physdev_apic_t);
++		break;
++	case PHYSDEVOP_alloc_irq_vector:
++	case PHYSDEVOP_free_irq_vector:
++		argsize = sizeof(physdev_irq_t);
++		break;
++	case PHYSDEVOP_irq_status_query:
++		argsize = sizeof(physdev_irq_status_query_t);
++		break;
++	case PHYSDEVOP_manage_pci_add:
++	case PHYSDEVOP_manage_pci_remove:
++		argsize = sizeof(physdev_manage_pci_t);
++		break;
++	case PHYSDEVOP_map_pirq:
++		argsize = sizeof(physdev_map_pirq_t);
++		break;
++	case PHYSDEVOP_unmap_pirq:
++		argsize = sizeof(physdev_unmap_pirq_t);
++		break;
 +
-+	if (desc)
-+		xencomm_free(desc);
-+	return ret;
++	default:
++		printk("%s: unknown physdev op %d\n", __func__, cmd);
++		return -ENOSYS;
++	}
++	
++	return xencomm_arch_hypercall_physdev_op
++		(cmd, xencomm_map_no_alloc(op, argsize));
 +}
 +
-+/*
-+ * Temporarily disable the NUMA PHYSINFO code until the rest of the
-+ * changes are upstream.
-+ */
-+#undef IA64_NUMA_PHYSINFO
-+
 +static int
-+xencomm_privcmd_sysctl(privcmd_hypercall_t *hypercall)
++xencommize_grant_table_op(struct xencomm_mini **xc_area,
++			  unsigned int cmd, void *op, unsigned int count,
++			  struct xencomm_handle **desc)
 +{
-+	xen_sysctl_t kern_op;
-+	xen_sysctl_t __user *user_op;
-+	struct xencomm_handle *op_desc;
-+	struct xencomm_handle *desc = NULL;
-+	struct xencomm_handle *desc1 = NULL;
-+	int ret = 0;
-+
-+	user_op = (xen_sysctl_t __user *)hypercall->arg[0];
-+
-+	if (copy_from_user(&kern_op, user_op, sizeof(xen_sysctl_t)))
-+		return -EFAULT;
-+
-+	if (kern_op.interface_version != XEN_SYSCTL_INTERFACE_VERSION)
-+		return -EACCES;
-+
-+	op_desc = xencomm_create_inline(&kern_op);
++	struct xencomm_handle *desc1;
++	unsigned int argsize;
 +
-+	switch (kern_op.cmd) {
-+	case XEN_SYSCTL_readconsole:
-+		ret = xencomm_create(
-+			xen_guest_handle(kern_op.u.readconsole.buffer),
-+			kern_op.u.readconsole.count,
-+			&desc, GFP_KERNEL);
-+		set_xen_guest_handle(kern_op.u.readconsole.buffer,
-+		                     (void *)desc);
++	switch (cmd) {
++	case GNTTABOP_map_grant_ref:
++		argsize = sizeof(struct gnttab_map_grant_ref);
 +		break;
-+	case XEN_SYSCTL_tbuf_op:
-+#ifndef IA64_NUMA_PHYSINFO
-+	case XEN_SYSCTL_physinfo:
-+#endif
-+	case XEN_SYSCTL_sched_id:
++	case GNTTABOP_unmap_grant_ref:
++		argsize = sizeof(struct gnttab_unmap_grant_ref);
 +		break;
-+	case XEN_SYSCTL_perfc_op:
++	case GNTTABOP_unmap_and_replace:
++		argsize = sizeof(struct gnttab_unmap_and_replace);
++		break;
++	case GNTTABOP_setup_table:
 +	{
-+		struct xencomm_handle *tmp_desc;
-+		xen_sysctl_t tmp_op = {
-+			.cmd = XEN_SYSCTL_perfc_op,
-+			.interface_version = XEN_SYSCTL_INTERFACE_VERSION,
-+			.u.perfc_op = {
-+				.cmd = XEN_SYSCTL_PERFCOP_query,
-+				// .desc.p = NULL,
-+				// .val.p = NULL,
-+			},
-+		};
-+
-+		if (xen_guest_handle(kern_op.u.perfc_op.desc) == NULL) {
-+			if (xen_guest_handle(kern_op.u.perfc_op.val) != NULL)
-+				return -EINVAL;
-+			break;
-+		}
-+
-+		/* query the buffer size for xencomm */
-+		tmp_desc = xencomm_create_inline(&tmp_op);
-+		ret = xencomm_arch_hypercall_sysctl(tmp_desc);
-+		if (ret)
-+			return ret;
-+
-+		ret = xencomm_create(xen_guest_handle(kern_op.u.perfc_op.desc),
-+		                     tmp_op.u.perfc_op.nr_counters *
-+		                     sizeof(xen_sysctl_perfc_desc_t),
-+		                     &desc, GFP_KERNEL);
-+		if (ret)
-+			return ret;
-+
-+		set_xen_guest_handle(kern_op.u.perfc_op.desc, (void *)desc);
++		struct gnttab_setup_table *setup = op;
 +
-+		ret = xencomm_create(xen_guest_handle(kern_op.u.perfc_op.val),
-+		                     tmp_op.u.perfc_op.nr_vals *
-+		                     sizeof(xen_sysctl_perfc_val_t),
-+		                     &desc1, GFP_KERNEL);
-+		if (ret)
-+			xencomm_free(desc);
++		argsize = sizeof(*setup);
 +
-+		set_xen_guest_handle(kern_op.u.perfc_op.val, (void *)desc1);
++		if (count != 1)
++			return -EINVAL;
++		desc1 = __xencomm_map_no_alloc
++			(xen_guest_handle(setup->frame_list),
++			 setup->nr_frames *
++			 sizeof(*xen_guest_handle(setup->frame_list)),
++			 *xc_area);
++		if (desc1 == NULL)
++			return -EINVAL;
++		(*xc_area)++;
++		set_xen_guest_handle(setup->frame_list, (void *)desc1);
 +		break;
 +	}
-+	case XEN_SYSCTL_getdomaininfolist:
-+		ret = xencomm_create(
-+			xen_guest_handle(kern_op.u.getdomaininfolist.buffer),
-+			kern_op.u.getdomaininfolist.max_domains *
-+			sizeof(xen_domctl_getdomaininfo_t),
-+			&desc, GFP_KERNEL);
-+		set_xen_guest_handle(kern_op.u.getdomaininfolist.buffer,
-+				     (void *)desc);
++	case GNTTABOP_dump_table:
++		argsize = sizeof(struct gnttab_dump_table);
 +		break;
-+#ifdef IA64_NUMA_PHYSINFO
-+	case XEN_SYSCTL_physinfo:
-+		ret = xencomm_create(
-+			xen_guest_handle(kern_op.u.physinfo.memory_chunks),
-+			PUBLIC_MAXCHUNKS * sizeof(node_data_t),
-+			&desc, GFP_KERNEL);
-+		if (ret)
-+			return ret;
-+		set_xen_guest_handle(kern_op.u.physinfo.memory_chunks,
-+		                     (void *)desc);
-+
-+		ret = xencomm_create(
-+			xen_guest_handle(kern_op.u.physinfo.cpu_to_node),
-+			PUBLIC_MAX_NUMNODES * sizeof(u64),
-+			&desc1, GFP_KERNEL);
-+		if (ret)
-+			xencomm_free(desc);
-+		set_xen_guest_handle(kern_op.u.physinfo.cpu_to_node,
-+		                     (void *)desc1);
++	case GNTTABOP_transfer:
++		argsize = sizeof(struct gnttab_transfer);
++		break;
++	case GNTTABOP_copy:
++		argsize = sizeof(struct gnttab_copy);
++		break;
++	case GNTTABOP_query_size:
++		argsize = sizeof(struct gnttab_query_size);
 +		break;
-+#endif
 +	default:
-+		printk("%s: unknown sysctl cmd %d\n", __func__, kern_op.cmd);
-+		return -ENOSYS;
-+	}
-+
-+	if (ret) {
-+		/* error mapping the nested pointer */
-+		return ret;
++		printk("%s: unknown hypercall grant table op %d\n",
++		       __func__, cmd);
++		BUG();
 +	}
 +
-+	ret = xencomm_arch_hypercall_sysctl(op_desc);
-+
-+	/* FIXME: should we restore the handles?  */
-+	if (copy_to_user(user_op, &kern_op, sizeof(xen_sysctl_t)))
-+		ret = -EFAULT;
++	*desc = __xencomm_map_no_alloc(op, count * argsize, *xc_area);
++	if (*desc == NULL)
++		return -EINVAL;
++	(*xc_area)++;
 +
-+	if (desc)
-+		xencomm_free(desc);
-+	if (desc1)
-+		xencomm_free(desc1);
-+	return ret;
++	return 0;
 +}
 +
-+static int
-+xencomm_privcmd_domctl(privcmd_hypercall_t *hypercall)
++int
++xencomm_hypercall_grant_table_op(unsigned int cmd, void *op,
++                                 unsigned int count)
 +{
-+	xen_domctl_t kern_op;
-+	xen_domctl_t __user *user_op;
-+	struct xencomm_handle *op_desc;
-+	struct xencomm_handle *desc = NULL;
-+	int ret = 0;
-+
-+	user_op = (xen_domctl_t __user *)hypercall->arg[0];
++	int rc;
++	struct xencomm_handle *desc;
++	XENCOMM_MINI_ALIGNED(xc_area, 2);
 +
-+	if (copy_from_user(&kern_op, user_op, sizeof(xen_domctl_t)))
-+		return -EFAULT;
++	rc = xencommize_grant_table_op(&xc_area, cmd, op, count, &desc);
++	if (rc)
++		return rc;
 +
-+	if (kern_op.interface_version != XEN_DOMCTL_INTERFACE_VERSION)
-+		return -EACCES;
++	return xencomm_arch_hypercall_grant_table_op(cmd, desc, count);
++}
++EXPORT_SYMBOL_GPL(xencomm_hypercall_grant_table_op);
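++
++/* Editor's example (illustration only, not part of the original patch):
++ * a GNTTABOP_setup_table call through the wrapper above.  The wrapper
++ * requires count == 1 for this sub-op, and frame_list must point at
++ * kernel memory; xen_pfn_t is assumed to be unsigned long on ia64. */
++static int example_setup_table(void)
++{
++	unsigned long frame;
++	struct gnttab_setup_table setup = {
++		.dom       = DOMID_SELF,
++		.nr_frames = 1,
++	};
++
++	set_xen_guest_handle(setup.frame_list, &frame);
++	return xencomm_hypercall_grant_table_op(GNTTABOP_setup_table,
++						&setup, 1);
++}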
 +
-+	op_desc = xencomm_create_inline(&kern_op);
++int
++xencomm_hypercall_sched_op(int cmd, void *arg)
++{
++	struct xencomm_handle *desc;
++	unsigned int argsize;
 +
-+	switch (kern_op.cmd) {
-+	case XEN_DOMCTL_createdomain:
-+	case XEN_DOMCTL_destroydomain:
-+	case XEN_DOMCTL_pausedomain:
-+	case XEN_DOMCTL_unpausedomain:
-+	case XEN_DOMCTL_getdomaininfo:
++	switch (cmd) {
++	case SCHEDOP_yield:
++	case SCHEDOP_block:
++		argsize = 0;
 +		break;
-+	case XEN_DOMCTL_getmemlist:
++	case SCHEDOP_shutdown:
++		argsize = sizeof(sched_shutdown_t);
++		break;
++	case SCHEDOP_remote_shutdown:
++		argsize = sizeof(sched_remote_shutdown_t);
++		break;
++	case SCHEDOP_poll:
 +	{
-+		unsigned long nr_pages = kern_op.u.getmemlist.max_pfns;
++		sched_poll_t *poll = arg;
++		struct xencomm_handle *ports;
 +
-+		ret = xencomm_create(
-+			xen_guest_handle(kern_op.u.getmemlist.buffer),
-+			nr_pages * sizeof(unsigned long),
-+			&desc, GFP_KERNEL);
-+		set_xen_guest_handle(kern_op.u.getmemlist.buffer,
-+		                     (void *)desc);
++		argsize = sizeof(sched_poll_t);
++		ports = xencomm_map_no_alloc(xen_guest_handle(poll->ports),
++				     sizeof(*xen_guest_handle(poll->ports)));
++
++		set_xen_guest_handle(poll->ports, (void *)ports);
 +		break;
 +	}
-+	case XEN_DOMCTL_getpageframeinfo:
-+		break;
-+	case XEN_DOMCTL_getpageframeinfo2:
-+		ret = xencomm_create(
-+			xen_guest_handle(kern_op.u.getpageframeinfo2.array),
-+			kern_op.u.getpageframeinfo2.num,
-+			&desc, GFP_KERNEL);
-+		set_xen_guest_handle(kern_op.u.getpageframeinfo2.array,
-+		                     (void *)desc);
-+		break;
-+	case XEN_DOMCTL_shadow_op:
-+		ret = xencomm_create(
-+			xen_guest_handle(kern_op.u.shadow_op.dirty_bitmap),
-+			ROUND_DIV(kern_op.u.shadow_op.pages, 8),
-+			&desc, GFP_KERNEL);
-+		set_xen_guest_handle(kern_op.u.shadow_op.dirty_bitmap,
-+		                     (void *)desc);
-+		break;
-+	case XEN_DOMCTL_max_mem:
-+		break;
-+	case XEN_DOMCTL_setvcpucontext:
-+	case XEN_DOMCTL_getvcpucontext:
-+		ret = xencomm_create(
-+			xen_guest_handle(kern_op.u.vcpucontext.ctxt),
-+			sizeof(vcpu_guest_context_t),
-+			&desc, GFP_KERNEL);
-+		set_xen_guest_handle(kern_op.u.vcpucontext.ctxt, (void *)desc);
-+		break;
-+	case XEN_DOMCTL_getvcpuinfo:
-+		break;
-+	case XEN_DOMCTL_setvcpuaffinity:
-+	case XEN_DOMCTL_getvcpuaffinity:
-+		ret = xencomm_create(
-+			xen_guest_handle(kern_op.u.vcpuaffinity.cpumap.bitmap),
-+			ROUND_DIV(kern_op.u.vcpuaffinity.cpumap.nr_cpus, 8),
-+			&desc, GFP_KERNEL);
-+		set_xen_guest_handle(kern_op.u.vcpuaffinity.cpumap.bitmap,
-+		                     (void *)desc);
-+		break;
-+	case XEN_DOMCTL_max_vcpus:
-+	case XEN_DOMCTL_scheduler_op:
-+	case XEN_DOMCTL_setdomainhandle:
-+	case XEN_DOMCTL_setdebugging:
-+	case XEN_DOMCTL_irq_permission:
-+	case XEN_DOMCTL_iomem_permission:
-+	case XEN_DOMCTL_ioport_permission:
-+	case XEN_DOMCTL_hypercall_init:
-+	case XEN_DOMCTL_arch_setup:
-+	case XEN_DOMCTL_settimeoffset:
-+	case XEN_DOMCTL_sendtrigger:
-+		break;
 +	default:
-+		printk("%s: unknown domctl cmd %d\n", __func__, kern_op.cmd);
++		printk("%s: unknown sched op %d\n", __func__, cmd);
 +		return -ENOSYS;
 +	}
++	
++	desc = xencomm_map_no_alloc(arg, argsize);
++	if (desc == NULL)
++		return -EINVAL;
 +
-+	if (ret) {
-+		/* error mapping the nested pointer */
-+		return ret;
-+	}
-+
-+	ret = xencomm_arch_hypercall_domctl (op_desc);
++	return xencomm_arch_hypercall_sched_op(cmd, desc);
++}
++EXPORT_SYMBOL_GPL(xencomm_hypercall_sched_op);
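++
++/* Editor's example (illustration only, not part of the original patch):
++ * polling a single event channel.  The SCHEDOP_poll case above maps only
++ * one port's worth of memory for the ports handle, so nr_ports stays 1. */
++static int example_poll_port(evtchn_port_t port)
++{
++	sched_poll_t poll = {
++		.nr_ports = 1,
++		.timeout  = 0,
++	};
++
++	set_xen_guest_handle(poll.ports, &port);
++	return xencomm_hypercall_sched_op(SCHEDOP_poll, &poll);
++}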
 +
-+	/* FIXME: should we restore the handle?  */
-+	if (copy_to_user(user_op, &kern_op, sizeof(xen_domctl_t)))
-+		ret = -EFAULT;
-+
-+	if (desc)
-+		xencomm_free(desc);
-+	return ret;
-+}
-+
-+static int
-+xencomm_privcmd_acm_op(privcmd_hypercall_t *hypercall)
++int
++xencomm_hypercall_multicall(void *call_list, int nr_calls)
 +{
-+	void __user *arg = (void __user *)hypercall->arg[0];
-+	xen_acmctl_t kern_arg;
-+	struct xencomm_handle *op_desc;
-+	struct xencomm_handle *desc = NULL;
-+	int ret;
-+
-+	if (copy_from_user(&kern_arg, arg, sizeof(kern_arg)))
-+		return -EFAULT;
-+	if (kern_arg.interface_version != ACM_INTERFACE_VERSION)
-+		return -ENOSYS;
-+	
-+	switch (kern_arg.cmd) {
-+	case ACMOP_getssid: {
-+		op_desc = xencomm_create_inline(&kern_arg);
++	int rc;
++	int i;
++	multicall_entry_t *mce;
++	struct xencomm_handle *desc;
++	XENCOMM_MINI_ALIGNED(xc_area, nr_calls * 2);
 +
-+		ret = xencomm_create(
-+			xen_guest_handle(kern_arg.u.getssid.ssidbuf),
-+			kern_arg.u.getssid.ssidbuf_size, &desc, GFP_KERNEL);
-+		if (ret)
-+			return ret;
++	for (i = 0; i < nr_calls; i++) {
++		mce = (multicall_entry_t *)call_list + i;
 +
-+		set_xen_guest_handle(kern_arg.u.getssid.ssidbuf, (void *)desc);
++		switch (mce->op) {
++		case __HYPERVISOR_update_va_mapping:
++		case __HYPERVISOR_mmu_update:
++			/* No-op on ia64.  */
++			break;
++		case __HYPERVISOR_grant_table_op:
++			rc = xencommize_grant_table_op
++				(&xc_area,
++				 mce->args[0], (void *)mce->args[1],
++				 mce->args[2], &desc);
++			if (rc)
++				return rc;
++			mce->args[1] = (unsigned long)desc;
++			break;
++		case __HYPERVISOR_memory_op:
++		default:
++			printk("%s: unhandled multicall entry op %lu\n",
++			       __func__, mce->op);
++			return -ENOSYS;
++		}
++	}
 +
-+		ret = xencomm_arch_hypercall_acm_op(op_desc);
++	desc = xencomm_map_no_alloc(call_list,
++				    nr_calls * sizeof(multicall_entry_t));
++	if (desc == NULL)
++		return -EINVAL;
 +
-+		xencomm_free(desc);
++	return xencomm_arch_hypercall_multicall(desc, nr_calls);
++}
++EXPORT_SYMBOL_GPL(xencomm_hypercall_multicall);
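++
++/* Editor's example (illustration only, not part of the original patch):
++ * batching a single grant-table query through the wrapper above.  Only
++ * grant-table, mmu_update and update_va_mapping entries are accepted,
++ * per the switch in the wrapper. */
++static int example_multicall(void)
++{
++	struct gnttab_query_size query = { .dom = DOMID_SELF };
++	multicall_entry_t call = { .op = __HYPERVISOR_grant_table_op };
++
++	call.args[0] = GNTTABOP_query_size;
++	call.args[1] = (unsigned long)&query;
++	call.args[2] = 1;	/* count */
++	return xencomm_hypercall_multicall(&call, 1);
++}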
 +
-+		if (copy_to_user(arg, &kern_arg, sizeof(kern_arg)))
-+			return -EFAULT;
-+		return ret;
-+	}
++int
++xencomm_hypercall_callback_op(int cmd, void *arg)
++{
++	unsigned int argsize;
++	switch (cmd)
++	{
++	case CALLBACKOP_register:
++		argsize = sizeof(struct callback_register);
++		break;
++	case CALLBACKOP_unregister:
++		argsize = sizeof(struct callback_unregister);
++		break;
 +	default:
-+		printk("%s: unknown acm_op cmd %d\n", __func__, kern_arg.cmd);
++		printk("%s: unknown callback op %d\n", __func__, cmd);
 +		return -ENOSYS;
 +	}
 +
-+	return ret;
++	return xencomm_arch_hypercall_callback_op
++		(cmd, xencomm_map_no_alloc(arg, argsize));
 +}
 +
 +static int
-+xencomm_privcmd_memory_op(privcmd_hypercall_t *hypercall)
++xencommize_memory_reservation(struct xencomm_mini *xc_area,
++			      xen_memory_reservation_t *mop)
 +{
-+	const unsigned long cmd = hypercall->arg[0];
-+	int ret = 0;
++	struct xencomm_handle *desc;
++
++	desc = __xencomm_map_no_alloc(xen_guest_handle(mop->extent_start),
++			mop->nr_extents *
++			sizeof(*xen_guest_handle(mop->extent_start)),
++			xc_area);
++	if (desc == NULL)
++		return -EINVAL;
++
++	set_xen_guest_handle(mop->extent_start, (void *)desc);
++	return 0;
++}
++
++int
++xencomm_hypercall_memory_op(unsigned int cmd, void *arg)
++{
++	XEN_GUEST_HANDLE(xen_pfn_t) extent_start_va[2];
++	xen_memory_reservation_t *xmr = NULL, *xme_in = NULL, *xme_out = NULL;
++	xen_memory_map_t *memmap = NULL;
++	XEN_GUEST_HANDLE(void) buffer;
++	int rc;
++	struct xencomm_handle *desc;
++	unsigned int argsize;
++	XENCOMM_MINI_ALIGNED(xc_area, 2);
 +
 +	switch (cmd) {
 +	case XENMEM_increase_reservation:
 +	case XENMEM_decrease_reservation:
 +	case XENMEM_populate_physmap:
-+	{
-+		xen_memory_reservation_t kern_op;
-+		xen_memory_reservation_t __user *user_op;
-+		struct xencomm_handle *desc = NULL;
-+		struct xencomm_handle *desc_op;
++		xmr = (xen_memory_reservation_t *)arg;
++		set_xen_guest_handle(extent_start_va[0],
++				     xen_guest_handle(xmr->extent_start));
 +
-+		user_op = (xen_memory_reservation_t __user *)hypercall->arg[1];
-+		if (copy_from_user(&kern_op, user_op,
-+		                   sizeof(xen_memory_reservation_t)))
-+			return -EFAULT;
-+		desc_op = xencomm_create_inline(&kern_op);
++		argsize = sizeof(*xmr);
++		rc = xencommize_memory_reservation(xc_area, xmr);
++		if (rc)
++			return rc;
++		xc_area++;
++		break;
 +
-+		if (xen_guest_handle(kern_op.extent_start)) {
-+			void * addr;
++	case XENMEM_maximum_gpfn:
++		argsize = 0;
++		break;
 +
-+			addr = xen_guest_handle(kern_op.extent_start);
-+			ret = xencomm_create
-+				(addr,
-+				 kern_op.nr_extents *
-+				 sizeof(*xen_guest_handle
-+					(kern_op.extent_start)),
-+				 &desc, GFP_KERNEL);
-+			if (ret)
-+				return ret;
-+			set_xen_guest_handle(kern_op.extent_start,
-+			                     (void *)desc);
-+		}
++	case XENMEM_maximum_ram_page:
++		argsize = 0;
++		break;
 +
-+		ret = xencomm_arch_hypercall_memory_op(cmd, desc_op);
++	case XENMEM_exchange:
++		xme_in  = &((xen_memory_exchange_t *)arg)->in;
++		xme_out = &((xen_memory_exchange_t *)arg)->out;
++		set_xen_guest_handle(extent_start_va[0],
++				     xen_guest_handle(xme_in->extent_start));
++		set_xen_guest_handle(extent_start_va[1],
++				     xen_guest_handle(xme_out->extent_start));
 +
-+		if (desc)
-+			xencomm_free(desc);
++		argsize = sizeof(xen_memory_exchange_t);
++		rc = xencommize_memory_reservation(xc_area, xme_in);
++		if (rc)
++			return rc;
++		xc_area++;
++		rc = xencommize_memory_reservation(xc_area, xme_out);
++		if (rc)
++			return rc;
++		xc_area++;
++		break;
 +
-+		if (ret != 0)
-+			return ret;
++	case XENMEM_add_to_physmap:
++		argsize = sizeof(xen_add_to_physmap_t);
++		break;
 +
-+		if (copy_to_user(user_op, &kern_op,
-+		                 sizeof(xen_memory_reservation_t)))
-+			return -EFAULT;
++	case XENMEM_machine_memory_map:
++		argsize = sizeof(*memmap);
++		memmap = (xen_memory_map_t *)arg;
++		set_xen_guest_handle(buffer, xen_guest_handle(memmap->buffer));
++		desc = xencomm_map_no_alloc(xen_guest_handle(memmap->buffer),
++					      memmap->nr_entries);
++		if (desc == NULL)
++			return -EINVAL;
++		set_xen_guest_handle(memmap->buffer, (void *)desc);
++		break;
 +
-+		return ret;
++	default:
++		printk("%s: unknown memory op %d\n", __func__, cmd);
++		return -ENOSYS;
 +	}
-+	case XENMEM_translate_gpfn_list:
-+	{
-+		xen_translate_gpfn_list_t kern_op;
-+		xen_translate_gpfn_list_t __user *user_op;
-+		struct xencomm_handle *desc_gpfn = NULL;
-+		struct xencomm_handle *desc_mfn = NULL;
-+		struct xencomm_handle *desc_op;
-+		void *addr;
-+
-+		user_op = (xen_translate_gpfn_list_t __user *)
-+			hypercall->arg[1];
-+		if (copy_from_user(&kern_op, user_op,
-+		                   sizeof(xen_translate_gpfn_list_t)))
-+			return -EFAULT;
-+		desc_op = xencomm_create_inline(&kern_op);
-+
-+		if (kern_op.nr_gpfns) {
-+			/* gpfn_list.  */
-+			addr = xen_guest_handle(kern_op.gpfn_list);
-+
-+			ret = xencomm_create(addr, kern_op.nr_gpfns *
-+			                     sizeof(*xen_guest_handle
-+			                            (kern_op.gpfn_list)),
-+			                     &desc_gpfn, GFP_KERNEL);
-+			if (ret)
-+				return ret;
-+			set_xen_guest_handle(kern_op.gpfn_list,
-+			                     (void *)desc_gpfn);
-+
-+			/* mfn_list.  */
-+			addr = xen_guest_handle(kern_op.mfn_list);
 +
-+			ret = xencomm_create(addr, kern_op.nr_gpfns *
-+			                     sizeof(*xen_guest_handle
-+			                            (kern_op.mfn_list)),
-+			                     &desc_mfn, GFP_KERNEL);
-+			if (ret)
-+				return ret;
-+			set_xen_guest_handle(kern_op.mfn_list,
-+			                     (void *)desc_mfn);
-+		}
-+
-+		ret = xencomm_arch_hypercall_memory_op(cmd, desc_op);
++	desc = xencomm_map_no_alloc(arg, argsize);
++	if (desc == NULL)
++		return -EINVAL;
 +
-+		if (desc_gpfn)
-+			xencomm_free(desc_gpfn);
++	rc = xencomm_arch_hypercall_memory_op(cmd, desc);
 +
-+		if (desc_mfn)
-+			xencomm_free(desc_mfn);
++	switch (cmd) {
++	case XENMEM_increase_reservation:
++	case XENMEM_decrease_reservation:
++	case XENMEM_populate_physmap:
++		set_xen_guest_handle(xmr->extent_start,
++				     xen_guest_handle(extent_start_va[0]));
++		break;
 +
-+		if (ret != 0)
-+			return ret;
++	case XENMEM_exchange:
++		set_xen_guest_handle(xme_in->extent_start,
++				     xen_guest_handle(extent_start_va[0]));
++		set_xen_guest_handle(xme_out->extent_start,
++				     xen_guest_handle(extent_start_va[1]));
++		break;
 +
-+		return ret;
-+	}
-+	default:
-+		printk("%s: unknown memory op %lu\n", __func__, cmd);
-+		ret = -ENOSYS;
++	case XENMEM_machine_memory_map:
++		set_xen_guest_handle(memmap->buffer, xen_guest_handle(buffer));
++		break;
 +	}
-+	return ret;
++
++	return rc;
 +}
++EXPORT_SYMBOL_GPL(xencomm_hypercall_memory_op);
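++
++/* Editor's example (illustration only, not part of the original patch):
++ * populating a single frame through the wrapper above, mirroring the
++ * free_vm_area() loop earlier in this patch; both the reservation and
++ * the extent live in kernel memory. */
++static int example_populate_one(unsigned long gpfn)
++{
++	struct xen_memory_reservation reservation = {
++		.nr_extents   = 1,
++		.extent_order = 0,
++		.address_bits = 0,
++		.domid        = DOMID_SELF,
++	};
++
++	set_xen_guest_handle(reservation.extent_start, &gpfn);
++	return xencomm_hypercall_memory_op(XENMEM_populate_physmap,
++					   &reservation);
++}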
 +
-+static int
-+xencomm_privcmd_xen_version(privcmd_hypercall_t *hypercall)
++unsigned long
++xencomm_hypercall_hvm_op(int cmd, void *arg)
 +{
-+	int cmd = hypercall->arg[0];
-+	void __user *arg = (void __user *)hypercall->arg[1];
 +	struct xencomm_handle *desc;
-+	size_t argsize;
-+	int rc;
++	unsigned int argsize;
 +
 +	switch (cmd) {
-+	case XENVER_version:
-+		/* do not actually pass an argument */
-+		return xencomm_arch_hypercall_xen_version(cmd, 0);
-+	case XENVER_extraversion:
-+		argsize = sizeof(xen_extraversion_t);
-+		break;
-+	case XENVER_compile_info:
-+		argsize = sizeof(xen_compile_info_t);
-+		break;
-+	case XENVER_capabilities:
-+		argsize = sizeof(xen_capabilities_info_t);
-+		break;
-+	case XENVER_changeset:
-+		argsize = sizeof(xen_changeset_info_t);
-+		break;
-+	case XENVER_platform_parameters:
-+		argsize = sizeof(xen_platform_parameters_t);
-+		break;
-+	case XENVER_pagesize:
-+		argsize = (arg == NULL) ? 0 : sizeof(void *);
-+		break;
-+	case XENVER_get_features:
-+		argsize = (arg == NULL) ? 0 : sizeof(xen_feature_info_t);
++	case HVMOP_get_param:
++	case HVMOP_set_param:
++		argsize = sizeof(xen_hvm_param_t);
 +		break;
-+
 +	default:
-+		printk("%s: unknown version op %d\n", __func__, cmd);
-+		return -ENOSYS;
++		printk("%s: unknown HVMOP %d\n", __func__, cmd);
++		return -EINVAL;
 +	}
 +
-+	rc = xencomm_create(arg, argsize, &desc, GFP_KERNEL);
-+	if (rc)
-+		return rc;
++	desc = xencomm_map_no_alloc(arg, argsize);
++	if (desc == NULL)
++		return -EINVAL;
 +
-+	rc = xencomm_arch_hypercall_xen_version(cmd, desc);
++	return xencomm_arch_hypercall_hvm_op(cmd, desc);
++}
++EXPORT_SYMBOL_GPL(xencomm_hypercall_hvm_op);
 +
-+	xencomm_free(desc);
++int
++xencomm_hypercall_suspend(unsigned long srec)
++{
++	struct sched_shutdown arg;
 +
-+	return rc;
++	arg.reason = SHUTDOWN_suspend;
++
++	return xencomm_arch_hypercall_suspend(
++		xencomm_map_no_alloc(&arg, sizeof(arg)));
 +}
 +
-+static int
-+xencomm_privcmd_event_channel_op(privcmd_hypercall_t *hypercall)
++int
++xencomm_hypercall_xenoprof_op(int op, void *arg)
 +{
-+	int cmd = hypercall->arg[0];
-+	struct xencomm_handle *desc;
 +	unsigned int argsize;
-+	int ret;
++	struct xencomm_handle *desc;
 +
-+	switch (cmd) {
-+	case EVTCHNOP_alloc_unbound:
-+		argsize = sizeof(evtchn_alloc_unbound_t);
++	switch (op) {
++	case XENOPROF_init:
++		argsize = sizeof(xenoprof_init_t);
 +		break;
-+
-+	case EVTCHNOP_status:
-+		argsize = sizeof(evtchn_status_t);
++	case XENOPROF_set_active:
++		argsize = sizeof(domid_t);
++		break;
++	case XENOPROF_set_passive:
++		argsize = sizeof(xenoprof_passive_t);
 +		break;
++	case XENOPROF_counter:
++		argsize = sizeof(xenoprof_counter_t);
++		break;
++	case XENOPROF_get_buffer:
++		argsize = sizeof(xenoprof_get_buffer_t);
++		break;
++
++	case XENOPROF_reset_active_list:
++	case XENOPROF_reset_passive_list:
++	case XENOPROF_reserve_counters:
++	case XENOPROF_setup_events:
++	case XENOPROF_enable_virq:
++	case XENOPROF_start:
++	case XENOPROF_stop:
++	case XENOPROF_disable_virq:
++	case XENOPROF_release_counters:
++	case XENOPROF_shutdown:
++		return xencomm_arch_hypercall_xenoprof_op(op, arg);
 +
 +	default:
-+		printk("%s: unknown EVTCHNOP %d\n", __func__, cmd);
-+		return -EINVAL;
++		printk("%s: op %d isn't supported\n", __func__, op);
++		return -ENOSYS;
 +	}
 +
-+	ret = xencomm_create((void *)hypercall->arg[1], argsize,
-+	                     &desc, GFP_KERNEL);
-+	if (ret)
-+		return ret;
-+
-+	ret = xencomm_arch_hypercall_event_channel_op(cmd, desc);
++	desc = xencomm_map_no_alloc(arg, argsize);
++	if (desc == NULL)
++		return -EINVAL;
 +
-+	xencomm_free(desc);
-+	return ret;
++	return xencomm_arch_hypercall_xenoprof_op(op, desc);
 +}
++EXPORT_SYMBOL_GPL(xencomm_hypercall_xenoprof_op);
 +
-+static int
-+xencomm_privcmd_hvm_op(privcmd_hypercall_t *hypercall)
++int
++xencomm_hypercall_perfmon_op(unsigned long cmd, void *arg,
++                             unsigned long count)
 +{
-+	int cmd = hypercall->arg[0];
-+	struct xencomm_handle *desc;
 +	unsigned int argsize;
-+	int ret;
++	struct xencomm_handle *desc;
 +
 +	switch (cmd) {
-+	case HVMOP_get_param:
-+	case HVMOP_set_param:
-+		argsize = sizeof(xen_hvm_param_t);
++	case PFM_GET_FEATURES:
++		argsize = sizeof(pfarg_features_t);
 +		break;
-+	case HVMOP_set_pci_intx_level:
-+		argsize = sizeof(xen_hvm_set_pci_intx_level_t);
++	case PFM_CREATE_CONTEXT:
++		argsize = sizeof(pfarg_context_t);
 +		break;
-+	case HVMOP_set_isa_irq_level:
-+		argsize = sizeof(xen_hvm_set_isa_irq_level_t);
++	case PFM_LOAD_CONTEXT:
++		argsize = sizeof(pfarg_load_t);
 +		break;
-+	case HVMOP_set_pci_link_route:
-+		argsize = sizeof(xen_hvm_set_pci_link_route_t);
++	case PFM_WRITE_PMCS:
++	case PFM_WRITE_PMDS:
++		argsize = sizeof(pfarg_reg_t) * count;
 +		break;
 +
++	case PFM_DESTROY_CONTEXT:
++	case PFM_UNLOAD_CONTEXT:
++	case PFM_START:
++	case PFM_STOP:
++		return xencomm_arch_hypercall_perfmon_op(cmd, arg, count);
++
 +	default:
-+		printk("%s: unknown HVMOP %d\n", __func__, cmd);
-+		return -EINVAL;
++		printk("%s:%d cmd %ld isn't supported\n",
++		       __func__, __LINE__, cmd);
++		BUG();
 +	}
 +
-+	ret = xencomm_create((void *)hypercall->arg[1], argsize,
-+	                     &desc, GFP_KERNEL);
-+	if (ret)
-+		return ret;
-+
-+	ret = xencomm_arch_hypercall_hvm_op(cmd, desc);
++	desc = xencomm_map_no_alloc(arg, argsize);
++	if (desc == NULL)
++		return -EINVAL;
 +
-+	xencomm_free(desc);
-+	return ret;
++	return xencomm_arch_hypercall_perfmon_op(cmd, desc, count);
 +}
++EXPORT_SYMBOL_GPL(xencomm_hypercall_perfmon_op);
 +
-+static int
-+xencomm_privcmd_sched_op(privcmd_hypercall_t *hypercall)
++long
++xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg)
 +{
-+	int cmd = hypercall->arg[0];
-+	struct xencomm_handle *desc;
 +	unsigned int argsize;
-+	int ret;
-+
 +	switch (cmd) {
-+	case SCHEDOP_remote_shutdown:
-+		argsize = sizeof(sched_remote_shutdown_t);
++	case VCPUOP_register_runstate_memory_area: {
++		vcpu_register_runstate_memory_area_t *area =
++			(vcpu_register_runstate_memory_area_t *)arg;
++		argsize = sizeof(*area);
++		set_xen_guest_handle(area->addr.h,
++		     (void *)xencomm_map_no_alloc(area->addr.v,
++						  sizeof(*area->addr.v)));
 +		break;
++	}
++
 +	default:
-+		printk("%s: unknown SCHEDOP %d\n", __func__, cmd);
-+		return -EINVAL;
++		printk("%s: unknown vcpu op %d\n", __func__, cmd);
++		return -ENOSYS;
 +	}
 +
-+	ret = xencomm_create((void *)hypercall->arg[1], argsize,
-+	                     &desc, GFP_KERNEL);
-+	if (ret)
-+		return ret;
++	return xencomm_arch_hypercall_vcpu_op(cmd, cpu,
++					xencomm_map_no_alloc(arg, argsize));
++}
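++
++/* Editor's example (illustration only, not part of the original patch):
++ * registering a runstate area through the wrapper above; the buffer is
++ * static so it remains valid after the hypercall returns. */
++static long example_register_runstate(int cpu)
++{
++	static struct vcpu_runstate_info runstate;
++	vcpu_register_runstate_memory_area_t area;
++
++	area.addr.v = &runstate;
++	return xencomm_hypercall_vcpu_op(VCPUOP_register_runstate_memory_area,
++					 cpu, &area);
++}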
 +
-+	ret = xencomm_arch_hypercall_sched_op(cmd, desc);
++long
++xencomm_hypercall_opt_feature(void *arg)
++{
++	return xencomm_arch_hypercall_opt_feature(
++		xencomm_map_no_alloc(arg,
++				     sizeof(struct xen_ia64_opt_feature)));
++}
 +
-+	xencomm_free(desc);
-+	return ret;
++int
++xencomm_hypercall_fpswa_revision(unsigned int *revision)
++{
++	struct xencomm_handle *desc;
++
++	desc = xencomm_map_no_alloc(revision, sizeof(*revision));
++	if (desc == NULL)
++		return -EINVAL;
++
++	return xencomm_arch_hypercall_fpswa_revision(desc);
 +}
++EXPORT_SYMBOL_GPL(xencomm_hypercall_fpswa_revision);
 +
 +int
-+privcmd_hypercall(privcmd_hypercall_t *hypercall)
++xencomm_hypercall_kexec_op(int cmd, void *arg)
 +{
-+	switch (hypercall->op) {
-+	case __HYPERVISOR_platform_op:
-+		return xencomm_privcmd_platform_op(hypercall);
-+	case __HYPERVISOR_domctl:
-+		return xencomm_privcmd_domctl(hypercall);
-+	case __HYPERVISOR_sysctl:
-+		return xencomm_privcmd_sysctl(hypercall);
-+        case __HYPERVISOR_acm_op:
-+		return xencomm_privcmd_acm_op(hypercall);
-+	case __HYPERVISOR_xen_version:
-+		return xencomm_privcmd_xen_version(hypercall);
-+	case __HYPERVISOR_memory_op:
-+		return xencomm_privcmd_memory_op(hypercall);
-+	case __HYPERVISOR_event_channel_op:
-+		return xencomm_privcmd_event_channel_op(hypercall);
-+	case __HYPERVISOR_hvm_op:
-+		return xencomm_privcmd_hvm_op(hypercall);
-+	case __HYPERVISOR_sched_op:
-+		return xencomm_privcmd_sched_op(hypercall);
++	unsigned int argsize;
++	struct xencomm_handle *desc;
++
++	switch (cmd) {
++	case KEXEC_CMD_kexec_get_range:
++		argsize = sizeof(xen_kexec_range_t);
++		break;
++	case KEXEC_CMD_kexec_load:
++	case KEXEC_CMD_kexec_unload:
++		argsize = sizeof(xen_kexec_load_t);
++		break;
++	case KEXEC_CMD_kexec:
++		argsize = sizeof(xen_kexec_exec_t);
++		break;
 +	default:
-+		printk("%s: unknown hcall (%ld)\n", __func__, hypercall->op);
-+		return -ENOSYS;
++		printk("%s:%d cmd %d isn't supported\n",
++		       __func__, __LINE__, cmd);
++		BUG();
 +	}
-+}
 +
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/xen/xencomm.c tmp-linux-2.6-xen.patch/arch/ia64/xen/xencomm.c
---- pristine-linux-2.6.18.2/arch/ia64/xen/xencomm.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/xen/xencomm.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,263 @@
++	desc = xencomm_map_no_alloc(arg, argsize);
++	if (desc == NULL)
++		return -EINVAL;
++
++	return xencomm_arch_hypercall_kexec_op(cmd, desc);
++}
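++
++/* Editor's example (illustration only, not part of the original patch):
++ * asking Xen for the crash-kernel range through the wrapper above;
++ * xen_kexec_range_t and KEXEC_RANGE_MA_CRASH come from
++ * xen/interface/kexec.h. */
++static int example_get_crash_range(xen_kexec_range_t *range)
++{
++	range->range = KEXEC_RANGE_MA_CRASH;
++	return xencomm_hypercall_kexec_op(KEXEC_CMD_kexec_get_range, range);
++}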
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/xen/xcom_privcmd.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/xen/xcom_privcmd.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,902 @@
 +/*
-+ * Copyright (C) 2006 Hollis Blanchard <hollisb at us.ibm.com>, IBM Corporation
-+ *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License as published by
 + * the Free Software Foundation; either version 2 of the License, or
 + * (at your option) any later version.
-+ * 
++ *
 + * This program is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 + * GNU General Public License for more details.
-+ * 
++ *
 + * You should have received a copy of the GNU General Public License
 + * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
++ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
++ *
++ * Authors: Hollis Blanchard <hollisb at us.ibm.com>
++ *          Tristan Gingold <tristan.gingold at bull.net>
 + */
-+
++#include <linux/types.h>
++#include <linux/errno.h>
++#include <linux/kernel.h>
 +#include <linux/gfp.h>
-+#include <linux/mm.h>
++#include <linux/module.h>
 +#include <xen/interface/xen.h>
++#include <xen/interface/platform.h>
++#define __XEN__
++#include <xen/interface/domctl.h>
++#include <xen/interface/sysctl.h>
++#include <xen/interface/memory.h>
++#include <xen/interface/version.h>
++#include <xen/interface/event_channel.h>
++#include <xen/interface/xsm/acm_ops.h>
++#include <xen/interface/hvm/params.h>
++#include <xen/interface/arch-ia64/debug_op.h>
++#include <xen/public/privcmd.h>
++#include <asm/hypercall.h>
 +#include <asm/page.h>
-+
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
-+
++#include <asm/uaccess.h>
 +#include <asm/xen/xencomm.h>
 +
-+static int xencomm_debug = 0;
-+
-+static unsigned long kernel_start_pa;
-+
-+void
-+xencomm_init (void)
-+{
-+	kernel_start_pa = KERNEL_START - ia64_tpa(KERNEL_START);
-+}
++#define ROUND_DIV(v,s) (((v) + (s) - 1) / (s))
 +
-+/* Translate virtual address to physical address.  */
-+unsigned long
-+xencomm_vaddr_to_paddr(unsigned long vaddr)
++static int
++xencomm_privcmd_platform_op(privcmd_hypercall_t *hypercall)
 +{
-+#ifndef CONFIG_VMX_GUEST
-+	struct page *page;
-+	struct vm_area_struct *vma;
-+#endif
-+
-+	if (vaddr == 0)
-+		return 0;
-+
-+#ifdef __ia64__
-+	if (REGION_NUMBER(vaddr) == 5) {
-+		pgd_t *pgd;
-+		pud_t *pud;
-+		pmd_t *pmd;
-+		pte_t *ptep;
-+
-+		/* On ia64, TASK_SIZE refers to current.  It is not initialized
-+		   during boot.
-+		   Furthermore the kernel is relocatable and __pa() doesn't
-+		   work on  addresses.  */
-+		if (vaddr >= KERNEL_START
-+		    && vaddr < (KERNEL_START + KERNEL_TR_PAGE_SIZE)) {
-+			return vaddr - kernel_start_pa;
-+		}
-+
-+		/* In kernel area -- virtually mapped.  */
-+		pgd = pgd_offset_k(vaddr);
-+		if (pgd_none(*pgd) || pgd_bad(*pgd))
-+			return ~0UL;
++	struct xen_platform_op kern_op;
++	struct xen_platform_op __user *user_op = (struct xen_platform_op __user *)hypercall->arg[0];
++	struct xencomm_handle *op_desc;
++	struct xencomm_handle *desc = NULL;
++	int ret = 0;
 +
-+		pud = pud_offset(pgd, vaddr);
-+		if (pud_none(*pud) || pud_bad(*pud))
-+			return ~0UL;
++	if (copy_from_user(&kern_op, user_op, sizeof(struct xen_platform_op)))
++		return -EFAULT;
 +
-+		pmd = pmd_offset(pud, vaddr);
-+		if (pmd_none(*pmd) || pmd_bad(*pmd))
-+			return ~0UL;
++	if (kern_op.interface_version != XENPF_INTERFACE_VERSION)
++		return -EACCES;
 +
-+		ptep = pte_offset_kernel(pmd, vaddr);
-+		if (!ptep)
-+			return ~0UL;
++	op_desc = xencomm_map_no_alloc(&kern_op, sizeof(kern_op));
 +
-+		return (pte_val(*ptep) & _PFN_MASK) | (vaddr & ~PAGE_MASK);
++	switch (kern_op.cmd) {
++	default:
++		printk("%s: unknown platform cmd %d\n", __func__, kern_op.cmd);
++		return -ENOSYS;
 +	}
-+#endif
 +
-+	if (vaddr > TASK_SIZE) {
-+		/* kernel address */
-+		return __pa(vaddr);
++	if (ret) {
++		/* error mapping the nested pointer */
++		return ret;
 +	}
 +
++	ret = xencomm_arch_hypercall_platform_op(op_desc);
 +
-+#ifdef CONFIG_VMX_GUEST
-+	/* No privcmd within vmx guest.  */
-+	return ~0UL;
-+#else
-+	/* XXX double-check (lack of) locking */
-+	vma = find_extend_vma(current->mm, vaddr);
-+	if (!vma)
-+		return ~0UL;
-+
-+	/* We assume the page is modified.  */
-+	page = follow_page(vma, vaddr, FOLL_WRITE | FOLL_TOUCH);
-+	if (!page)
-+		return ~0UL;
++	/* FIXME: should we restore the handle?  */
++	if (copy_to_user(user_op, &kern_op, sizeof(struct xen_platform_op)))
++		ret = -EFAULT;
 +
-+	return (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
-+#endif
++	xencomm_free(desc);
++	return ret;
 +}
 +
 +static int
-+xencomm_init_desc(struct xencomm_desc *desc, void *buffer, unsigned long bytes)
++xencomm_privcmd_sysctl(privcmd_hypercall_t *hypercall)
 +{
-+	unsigned long recorded = 0;
-+	int i = 0;
++	xen_sysctl_t kern_op;
++	xen_sysctl_t __user *user_op;
++	struct xencomm_handle *op_desc;
++	struct xencomm_handle *desc = NULL;
++	struct xencomm_handle *desc1 = NULL;
++	int ret = 0;
 +
-+	BUG_ON((buffer == NULL) && (bytes > 0));
++	user_op = (xen_sysctl_t __user *)hypercall->arg[0];
 +
-+	/* record the physical pages used */
-+	if (buffer == NULL)
-+		desc->nr_addrs = 0;
++	if (copy_from_user(&kern_op, user_op, sizeof(xen_sysctl_t)))
++		return -EFAULT;
 +
-+	while ((recorded < bytes) && (i < desc->nr_addrs)) {
-+		unsigned long vaddr = (unsigned long)buffer + recorded;
-+		unsigned long paddr;
-+		int offset;
-+		int chunksz;
++	if (kern_op.interface_version != XEN_SYSCTL_INTERFACE_VERSION)
++		return -EACCES;
 +
-+		offset = vaddr % PAGE_SIZE; /* handle partial pages */
-+		chunksz = min(PAGE_SIZE - offset, bytes - recorded);
++	op_desc = xencomm_map_no_alloc(&kern_op, sizeof(kern_op));
 +
-+		paddr = xencomm_vaddr_to_paddr(vaddr);
-+		if (paddr == ~0UL) {
-+			printk("%s: couldn't translate vaddr %lx\n",
-+			       __func__, vaddr);
-+			return -EINVAL;
++	switch (kern_op.cmd) {
++	case XEN_SYSCTL_readconsole:
++		desc = xencomm_map(
++			xen_guest_handle(kern_op.u.readconsole.buffer),
++			kern_op.u.readconsole.count);
++		if (xen_guest_handle(kern_op.u.readconsole.buffer) != NULL &&
++		    kern_op.u.readconsole.count > 0 && desc == NULL)
++			return -ENOMEM;
++		set_xen_guest_handle(kern_op.u.readconsole.buffer,
++		                     (void *)desc);
++		break;
++	case XEN_SYSCTL_tbuf_op:
++	case XEN_SYSCTL_sched_id:
++	case XEN_SYSCTL_availheap:
++		break;
++	case XEN_SYSCTL_perfc_op:
++	{
++		struct xencomm_handle *tmp_desc;
++		xen_sysctl_t tmp_op = {
++			.cmd = XEN_SYSCTL_perfc_op,
++			.interface_version = XEN_SYSCTL_INTERFACE_VERSION,
++			.u.perfc_op = {
++				.cmd = XEN_SYSCTL_PERFCOP_query,
++				/* .desc.p = NULL, */
++				/* .val.p = NULL, */
++			},
++		};
++
++		if (xen_guest_handle(kern_op.u.perfc_op.desc) == NULL) {
++			if (xen_guest_handle(kern_op.u.perfc_op.val) != NULL)
++				return -EINVAL;
++			break;
 +		}
 +
-+		desc->address[i++] = paddr;
-+		recorded += chunksz;
++		/* query the buffer size for xencomm */
++		tmp_desc = xencomm_map_no_alloc(&tmp_op, sizeof(tmp_op));
++		ret = xencomm_arch_hypercall_sysctl(tmp_desc);
++		if (ret)
++			return ret;
++
++		desc = xencomm_map(xen_guest_handle(kern_op.u.perfc_op.desc),
++				   tmp_op.u.perfc_op.nr_counters *
++				   sizeof(xen_sysctl_perfc_desc_t));
++		if (xen_guest_handle(kern_op.u.perfc_op.desc) != NULL &&
++		    tmp_op.u.perfc_op.nr_counters > 0 && desc == NULL)
++			return -ENOMEM;
++
++		set_xen_guest_handle(kern_op.u.perfc_op.desc, (void *)desc);
++
++		desc1 = xencomm_map(xen_guest_handle(kern_op.u.perfc_op.val),
++				    tmp_op.u.perfc_op.nr_vals *
++				    sizeof(xen_sysctl_perfc_val_t));
++		if (xen_guest_handle(kern_op.u.perfc_op.val) != NULL &&
++		    tmp_op.u.perfc_op.nr_vals > 0 && desc1 == NULL) {
++			xencomm_free(desc);
++			return -ENOMEM;
++		}
++
++		set_xen_guest_handle(kern_op.u.perfc_op.val, (void *)desc1);
++		break;
++	}
++	case XEN_SYSCTL_getdomaininfolist:
++		desc = xencomm_map(
++			xen_guest_handle(kern_op.u.getdomaininfolist.buffer),
++			kern_op.u.getdomaininfolist.max_domains *
++			sizeof(xen_domctl_getdomaininfo_t));
++		if (xen_guest_handle(kern_op.u.getdomaininfolist.buffer) !=
++		    NULL && kern_op.u.getdomaininfolist.max_domains > 0 &&
++		    desc == NULL)
++			return -ENOMEM;
++		set_xen_guest_handle(kern_op.u.getdomaininfolist.buffer,
++				     (void *)desc);
++		break;
++	case XEN_SYSCTL_debug_keys:
++		desc = xencomm_map(
++			xen_guest_handle(kern_op.u.debug_keys.keys),
++			kern_op.u.debug_keys.nr_keys);
++		if (xen_guest_handle(kern_op.u.debug_keys.keys) != NULL &&
++		    kern_op.u.debug_keys.nr_keys > 0 && desc == NULL)
++			return -ENOMEM;
++		set_xen_guest_handle(kern_op.u.debug_keys.keys,
++				     (void *)desc);
++		break;
++
++	case XEN_SYSCTL_physinfo:
++		desc = xencomm_map(
++			xen_guest_handle(kern_op.u.physinfo.cpu_to_node),
++			kern_op.u.physinfo.max_cpu_id * sizeof(uint32_t));
++		if (xen_guest_handle(kern_op.u.physinfo.cpu_to_node) != NULL &&
++		    kern_op.u.physinfo.max_cpu_id > 0 && desc == NULL)
++			return -ENOMEM;
++
++		set_xen_guest_handle(kern_op.u.physinfo.cpu_to_node,
++		                     (void *)desc);
++		break;
++	default:
++		printk("%s: unknown sysctl cmd %d\n", __func__, kern_op.cmd);
++		return -ENOSYS;
 +	}
 +
-+	if (recorded < bytes) {
-+		printk("%s: could only translate %ld of %ld bytes\n",
-+		       __func__, recorded, bytes);
-+		return -ENOSPC;
++	if (ret) {
++		/* error mapping the nested pointer */
++		return ret;
 +	}
 +
-+	/* mark remaining addresses invalid (just for safety) */
-+	while (i < desc->nr_addrs)
-+		desc->address[i++] = XENCOMM_INVALID;
++	ret = xencomm_arch_hypercall_sysctl(op_desc);
 +
-+	desc->magic = XENCOMM_MAGIC;
++	/* FIXME: should we restore the handles?  */
++	if (copy_to_user(user_op, &kern_op, sizeof(xen_sysctl_t)))
++		ret = -EFAULT;
 +
-+	return 0;
++	xencomm_free(desc);
++	xencomm_free(desc1);
++	return ret;
 +}
 +
-+static struct xencomm_desc *
-+xencomm_alloc(gfp_t gfp_mask)
++static int
++xencomm_privcmd_domctl(privcmd_hypercall_t *hypercall)
 +{
-+	struct xencomm_desc *desc;
++	xen_domctl_t kern_op;
++	xen_domctl_t __user *user_op;
++	struct xencomm_handle *op_desc;
++	struct xencomm_handle *desc = NULL;
++	int ret = 0;
 +
-+	desc = (struct xencomm_desc *)__get_free_page(gfp_mask);
-+	if (desc == NULL)
-+		panic("%s: page allocation failed\n", __func__);
++	user_op = (xen_domctl_t __user *)hypercall->arg[0];
 +
-+	desc->nr_addrs = (PAGE_SIZE - sizeof(struct xencomm_desc)) /
-+	                 sizeof(*desc->address);
++	if (copy_from_user(&kern_op, user_op, sizeof(xen_domctl_t)))
++		return -EFAULT;
 +
-+	return desc;
++	if (kern_op.interface_version != XEN_DOMCTL_INTERFACE_VERSION)
++		return -EACCES;
++
++	op_desc = xencomm_map_no_alloc(&kern_op, sizeof(kern_op));
++
++	switch (kern_op.cmd) {
++	case XEN_DOMCTL_createdomain:
++	case XEN_DOMCTL_destroydomain:
++	case XEN_DOMCTL_pausedomain:
++	case XEN_DOMCTL_unpausedomain:
++	case XEN_DOMCTL_resumedomain:
++	case XEN_DOMCTL_getdomaininfo:
++		break;
++	case XEN_DOMCTL_getmemlist:
++	{
++		unsigned long nr_pages = kern_op.u.getmemlist.max_pfns;
++
++		desc = xencomm_map(
++			xen_guest_handle(kern_op.u.getmemlist.buffer),
++			nr_pages * sizeof(unsigned long));
++		if (xen_guest_handle(kern_op.u.getmemlist.buffer) != NULL &&
++		    nr_pages > 0 && desc == NULL)
++			return -ENOMEM;
++		set_xen_guest_handle(kern_op.u.getmemlist.buffer,
++		                     (void *)desc);
++		break;
++	}
++	case XEN_DOMCTL_getpageframeinfo:
++		break;
++	case XEN_DOMCTL_getpageframeinfo2:
++		desc = xencomm_map(
++			xen_guest_handle(kern_op.u.getpageframeinfo2.array),
++			kern_op.u.getpageframeinfo2.num);
++		if (xen_guest_handle(kern_op.u.getpageframeinfo2.array) !=
++		    NULL && kern_op.u.getpageframeinfo2.num > 0 &&
++		    desc == NULL)
++			return -ENOMEM;
++		set_xen_guest_handle(kern_op.u.getpageframeinfo2.array,
++		                     (void *)desc);
++		break;
++	case XEN_DOMCTL_shadow_op:
++		desc = xencomm_map(
++			xen_guest_handle(kern_op.u.shadow_op.dirty_bitmap),
++			ROUND_DIV(kern_op.u.shadow_op.pages, 8));
++		if (xen_guest_handle(kern_op.u.shadow_op.dirty_bitmap) != NULL
++		    && kern_op.u.shadow_op.pages > 0 && desc == NULL)
++			return -ENOMEM;
++		set_xen_guest_handle(kern_op.u.shadow_op.dirty_bitmap,
++		                     (void *)desc);
++		break;
++	case XEN_DOMCTL_max_mem:
++		break;
++	case XEN_DOMCTL_setvcpucontext:
++	case XEN_DOMCTL_getvcpucontext:
++		desc = xencomm_map(
++			xen_guest_handle(kern_op.u.vcpucontext.ctxt),
++			sizeof(vcpu_guest_context_t));
++		if (xen_guest_handle(kern_op.u.vcpucontext.ctxt) != NULL &&
++		    desc == NULL)
++			return -ENOMEM;
++		set_xen_guest_handle(kern_op.u.vcpucontext.ctxt, (void *)desc);
++		break;
++	case XEN_DOMCTL_getvcpuinfo:
++		break;
++	case XEN_DOMCTL_setvcpuaffinity:
++	case XEN_DOMCTL_getvcpuaffinity:
++		desc = xencomm_map(
++			xen_guest_handle(kern_op.u.vcpuaffinity.cpumap.bitmap),
++			ROUND_DIV(kern_op.u.vcpuaffinity.cpumap.nr_cpus, 8));
++		if (xen_guest_handle(kern_op.u.vcpuaffinity.cpumap.bitmap) !=
++		    NULL && kern_op.u.vcpuaffinity.cpumap.nr_cpus > 0 &&
++		    desc == NULL)
++			return -ENOMEM;
++		set_xen_guest_handle(kern_op.u.vcpuaffinity.cpumap.bitmap,
++		                     (void *)desc);
++		break;
++	case XEN_DOMCTL_gethvmcontext:
++	case XEN_DOMCTL_sethvmcontext:
++		if (kern_op.u.hvmcontext.size > 0)
++			desc = xencomm_map(
++				xen_guest_handle(kern_op.u.hvmcontext.buffer),
++				kern_op.u.hvmcontext.size);
++		if (xen_guest_handle(kern_op.u.hvmcontext.buffer) != NULL &&
++		    kern_op.u.hvmcontext.size > 0 && desc == NULL)
++			return -ENOMEM;
++		set_xen_guest_handle(kern_op.u.hvmcontext.buffer, (void*)desc);
++		break;
++	case XEN_DOMCTL_max_vcpus:
++	case XEN_DOMCTL_scheduler_op:
++	case XEN_DOMCTL_setdomainhandle:
++	case XEN_DOMCTL_setdebugging:
++	case XEN_DOMCTL_irq_permission:
++	case XEN_DOMCTL_iomem_permission:
++	case XEN_DOMCTL_ioport_permission:
++	case XEN_DOMCTL_hypercall_init:
++	case XEN_DOMCTL_arch_setup:
++	case XEN_DOMCTL_settimeoffset:
++	case XEN_DOMCTL_sendtrigger:
++	case XEN_DOMCTL_set_opt_feature:
++	case XEN_DOMCTL_assign_device:
++	case XEN_DOMCTL_subscribe:
++		break;
++	case XEN_DOMCTL_pin_mem_cacheattr:
++		return -ENOSYS;
++	default:
++		printk("%s: unknown domctl cmd %d\n", __func__, kern_op.cmd);
++		return -ENOSYS;
++	}
++
++	if (ret) {
++		/* error mapping the nested pointer */
++		return ret;
++	}
++
++	ret = xencomm_arch_hypercall_domctl (op_desc);
++
++	/* FIXME: should we restore the handle?  */
++	if (copy_to_user(user_op, &kern_op, sizeof(xen_domctl_t)))
++		ret = -EFAULT;
++
++	xencomm_free(desc);
++	return ret;
 +}
 +
-+void
-+xencomm_free(struct xencomm_handle *desc)
++static int
++xencomm_privcmd_xsm_op(privcmd_hypercall_t *hypercall)
 +{
-+	if (desc)
-+		free_page((unsigned long)__va(desc));
++	void __user *arg = (void __user *)hypercall->arg[0];
++	xen_acmctl_t kern_arg;
++	struct xencomm_handle *op_desc;
++	struct xencomm_handle *desc = NULL;
++	int ret;
++
++	if (copy_from_user(&kern_arg, arg, sizeof(kern_arg)))
++		return -EFAULT;
++	if (kern_arg.interface_version != ACM_INTERFACE_VERSION)
++		return -ENOSYS;
++	
++	switch (kern_arg.cmd) {
++	case ACMOP_getssid: {
++		op_desc = xencomm_map_no_alloc(&kern_arg, sizeof(kern_arg));
++
++		desc = xencomm_map(
++			xen_guest_handle(kern_arg.u.getssid.ssidbuf),
++			kern_arg.u.getssid.ssidbuf_size);
++		if (xen_guest_handle(kern_arg.u.getssid.ssidbuf) != NULL &&
++		    kern_arg.u.getssid.ssidbuf_size > 0 && desc == NULL)
++			return -ENOMEM;
++
++		set_xen_guest_handle(kern_arg.u.getssid.ssidbuf, (void *)desc);
++
++		ret = xencomm_arch_hypercall_xsm_op(op_desc);
++
++		xencomm_free(desc);
++
++		if (copy_to_user(arg, &kern_arg, sizeof(kern_arg)))
++			return -EFAULT;
++		return ret;
++	}
++	default:
++		printk("%s: unknown acm_op cmd %d\n", __func__, kern_arg.cmd);
++		return -ENOSYS;
++	}
++
++	return ret;
 +}
 +
-+int
-+xencomm_create(void *buffer, unsigned long bytes,
-+               struct xencomm_handle **ret, gfp_t gfp_mask)
++static int
++xencomm_privcmd_memory_reservation_op(privcmd_hypercall_t *hypercall)
 +{
-+	struct xencomm_desc *desc;
-+	struct xencomm_handle *handle;
-+	int rc;
++	const unsigned long cmd = hypercall->arg[0];
++	int ret = 0;
++	xen_memory_reservation_t kern_op;
++	xen_memory_reservation_t __user *user_op;
++	struct xencomm_handle *desc = NULL;
++	struct xencomm_handle *desc_op;
 +
-+	if (xencomm_debug)
-+		printk("%s: %p[%ld]\n", __func__, buffer, bytes);
++	user_op = (xen_memory_reservation_t __user *)hypercall->arg[1];
++	if (copy_from_user(&kern_op, user_op,
++			   sizeof(xen_memory_reservation_t)))
++		return -EFAULT;
++	desc_op = xencomm_map_no_alloc(&kern_op, sizeof(kern_op));
 +
-+	if (buffer == NULL || bytes == 0) {
-+		*ret = (struct xencomm_handle *)NULL;
-+		return 0;
-+	}
++	if (!xen_guest_handle(kern_op.extent_start)) {
++		ret = xencomm_arch_hypercall_memory_op(cmd, desc_op);
++		if (ret < 0)
++			return ret;
++	} else {
++		xen_ulong_t nr_done = 0;
++		xen_ulong_t nr_extents = kern_op.nr_extents;
++		void *addr = xen_guest_handle(kern_op.extent_start);
++
++		/*
++		 * Work-around:
++		 *   Xencomm has a single-page size limit, caused by
++		 *   xencomm_alloc()/xencomm_free(), so we have to
++		 *   repeat the hypercall.  This limitation could be
++		 *   removed.
++		 */
++#define MEMORYOP_XENCOMM_LIMIT						\
++		(((((PAGE_SIZE - sizeof(struct xencomm_desc)) /		\
++		    sizeof(uint64_t)) - 2) * PAGE_SIZE) /		\
++		 sizeof(*xen_guest_handle(kern_op.extent_start)))
 +
-+	desc = xencomm_alloc(gfp_mask);
-+	if (!desc) {
-+		printk("%s failure\n", "xencomm_alloc");
-+		return -ENOMEM;
-+	}
-+	handle = (struct xencomm_handle *)__pa(desc);
++		/*
++		 * Work-around:
++		 *   Even if the above limitation is removed, a hypercall
++		 *   with a large number of extents may trigger the soft
++		 *   lockup warning.  To avoid the warning, we limit the
++		 *   number of extents per hypercall and repeat the
++		 *   hypercall.  The following value was determined by
++		 *   measurement: the time for one hypercall should be
++		 *   smaller than a vcpu time slice, and with the current
++		 *   MEMORYOP_MAX_EXTENTS it is around 5 msec.  If this
++		 *   limit causes problems, we should decrease the value.
++		 *
++		 *   Another approach would be to start with a small value
++		 *   and increase it adaptively while measuring hypercall
++		 *   time, but that is probably overkill.
++		 */
++#define MEMORYOP_MAX_EXTENTS	(MEMORYOP_XENCOMM_LIMIT / 512)
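++
++		/*
++		 * Editor's note (worked example, not part of the original
++		 * patch): assuming 16 KB ia64 pages and an 8-byte
++		 * struct xencomm_desc header, one descriptor holds
++		 * (16384 - 8) / 8 = 2047 addresses, so the limit above is
++		 * (2047 - 2) * 16384 / 8 = 4,188,160 extents, and
++		 * MEMORYOP_MAX_EXTENTS = 4,188,160 / 512 = 8180 extents,
++		 * i.e. roughly 128 MB of 16 KB frames per chunk.
++		 */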
 +
-+	rc = xencomm_init_desc(desc, buffer, bytes);
-+	if (rc) {
-+		printk("%s failure: %d\n", "xencomm_init_desc", rc);
-+		xencomm_free(handle);
-+		return rc;
++		while (nr_extents > 0) {
++			xen_ulong_t nr_tmp = nr_extents;
++			if (nr_tmp > MEMORYOP_MAX_EXTENTS)
++				nr_tmp = MEMORYOP_MAX_EXTENTS;
++
++			kern_op.nr_extents = nr_tmp;
++			desc = xencomm_map
++				(addr + nr_done * sizeof(*xen_guest_handle(kern_op.extent_start)),
++				 nr_tmp * sizeof(*xen_guest_handle(kern_op.extent_start)));
++			if (addr != NULL && nr_tmp > 0 && desc == NULL)
++				return nr_done > 0 ? nr_done : -ENOMEM;
++
++			set_xen_guest_handle(kern_op.extent_start,
++					     (void *)desc);
++
++			ret = xencomm_arch_hypercall_memory_op(cmd, desc_op);
++			xencomm_free(desc);
++			if (ret < 0)
++				return nr_done > 0 ? nr_done : ret;
++
++			nr_done += ret;
++			nr_extents -= ret;
++			if (ret < nr_tmp)
++				break;
++
++			/*
++			 * Prevent the soft-lockup message:
++			 * give the CPU to the soft-lockup kernel thread.
++			 */
++			if (nr_extents > 0)
++				schedule();
++		}
++		ret = nr_done;
++		set_xen_guest_handle(kern_op.extent_start, addr);
 +	}
 +
-+	*ret = handle;
-+	return 0;
-+}
++	if (copy_to_user(user_op, &kern_op, sizeof(xen_memory_reservation_t)))
++		return -EFAULT;
 +
-+/* "mini" routines, for stack-based communications: */
++	return ret;
++}
 +
-+static void *
-+xencomm_alloc_mini(struct xencomm_mini *area, int *nbr_area)
++static int
++xencomm_privcmd_memory_op(privcmd_hypercall_t *hypercall)
 +{
-+	unsigned long base;
-+	unsigned int pageoffset;
++	const unsigned long cmd = hypercall->arg[0];
++	int ret = 0;
++
++	switch (cmd) {
++	case XENMEM_increase_reservation:
++	case XENMEM_decrease_reservation:
++	case XENMEM_populate_physmap:
++		return xencomm_privcmd_memory_reservation_op(hypercall);
++	case XENMEM_maximum_gpfn:
++	{
++		domid_t kern_domid;
++		domid_t __user *user_domid;
++		struct xencomm_handle *desc;
 +
-+	while (*nbr_area >= 0) {
-+		/* Allocate an area.  */
-+		(*nbr_area)--;
++		user_domid = (domid_t __user *)hypercall->arg[1];
++		if (copy_from_user(&kern_domid, user_domid, sizeof(domid_t)))
++			return -EFAULT;
++		desc = xencomm_map_no_alloc(&kern_domid, sizeof(kern_domid));
 +
-+		base = (unsigned long)(area + *nbr_area);
-+		pageoffset = base % PAGE_SIZE;
++		ret = xencomm_arch_hypercall_memory_op(cmd, desc);
 +
-+		/* If the area does not cross a page, use it.  */
-+		if ((PAGE_SIZE - pageoffset) >= sizeof(struct xencomm_mini))
-+			return &area[*nbr_area];
++		return ret;
 +	}
-+	/* No more area.  */
-+	return NULL;
++	case XENMEM_translate_gpfn_list:
++	{
++		xen_translate_gpfn_list_t kern_op;
++		xen_translate_gpfn_list_t __user *user_op;
++		struct xencomm_handle *desc_gpfn = NULL;
++		struct xencomm_handle *desc_mfn = NULL;
++		struct xencomm_handle *desc_op;
++		void *addr;
++
++		user_op = (xen_translate_gpfn_list_t __user *)
++			hypercall->arg[1];
++		if (copy_from_user(&kern_op, user_op,
++		                   sizeof(xen_translate_gpfn_list_t)))
++			return -EFAULT;
++		desc_op = xencomm_map_no_alloc(&kern_op, sizeof(kern_op));
++
++		if (kern_op.nr_gpfns) {
++			/* gpfn_list.  */
++			addr = xen_guest_handle(kern_op.gpfn_list);
++
++			desc_gpfn = xencomm_map(addr, kern_op.nr_gpfns *
++			                     sizeof(*xen_guest_handle
++			                            (kern_op.gpfn_list)));
++			if (addr != NULL && kern_op.nr_gpfns > 0 &&
++			    desc_gpfn == NULL)
++				return -ENOMEM;
++			set_xen_guest_handle(kern_op.gpfn_list,
++			                     (void *)desc_gpfn);
++
++			/* mfn_list.  */
++			addr = xen_guest_handle(kern_op.mfn_list);
++
++			desc_mfn = xencomm_map(addr, kern_op.nr_gpfns *
++			                     sizeof(*xen_guest_handle
++			                            (kern_op.mfn_list)));
++			if (addr != NULL && kern_op.nr_gpfns > 0 &&
++			    desc_mfn == NULL) {
++				xencomm_free(desc_gpfn);
++				return -ENOMEM;
++			}
++
++			set_xen_guest_handle(kern_op.mfn_list,
++			                     (void *)desc_mfn);
++		}
++
++		ret = xencomm_arch_hypercall_memory_op(cmd, desc_op);
++
++		xencomm_free(desc_gpfn);
++		xencomm_free(desc_mfn);
++
++		if (ret != 0)
++			return ret;
++
++		return ret;
++	}
++	default:
++		printk("%s: unknown memory op %lu\n", __func__, cmd);
++		ret = -ENOSYS;
++	}
++	return ret;
 +}
 +
-+int
-+xencomm_create_mini(struct xencomm_mini *area, int *nbr_area,
-+                    void *buffer, unsigned long bytes,
-+                    struct xencomm_handle **ret)
++static int
++xencomm_privcmd_xen_version(privcmd_hypercall_t *hypercall)
 +{
-+	struct xencomm_desc *desc;
++	int cmd = hypercall->arg[0];
++	void __user *arg = (void __user *)hypercall->arg[1];
++	struct xencomm_handle *desc;
++	size_t argsize;
 +	int rc;
-+	unsigned long res;
 +
-+	desc = xencomm_alloc_mini(area, nbr_area);
-+	if (!desc)
++	switch (cmd) {
++	case XENVER_version:
++		/* do not actually pass an argument */
++		return xencomm_arch_hypercall_xen_version(cmd, 0);
++	case XENVER_extraversion:
++		argsize = sizeof(xen_extraversion_t);
++		break;
++	case XENVER_compile_info:
++		argsize = sizeof(xen_compile_info_t);
++		break;
++	case XENVER_capabilities:
++		argsize = sizeof(xen_capabilities_info_t);
++		break;
++	case XENVER_changeset:
++		argsize = sizeof(xen_changeset_info_t);
++		break;
++	case XENVER_platform_parameters:
++		argsize = sizeof(xen_platform_parameters_t);
++		break;
++	case XENVER_pagesize:
++		argsize = (arg == NULL) ? 0 : sizeof(void *);
++		break;
++	case XENVER_get_features:
++		argsize = (arg == NULL) ? 0 : sizeof(xen_feature_info_t);
++		break;
++
++	default:
++		printk("%s: unknown version op %d\n", __func__, cmd);
++		return -ENOSYS;
++	}
++
++	desc = xencomm_map(arg, argsize);
++	if (arg != NULL && argsize > 0 && desc == NULL)
 +		return -ENOMEM;
-+	desc->nr_addrs = XENCOMM_MINI_ADDRS;
 +
-+	rc = xencomm_init_desc(desc, buffer, bytes);
-+	if (rc)
-+		return rc;
++	rc = xencomm_arch_hypercall_xen_version(cmd, desc);
 +
-+	res = xencomm_vaddr_to_paddr((unsigned long)desc);
-+	if (res == ~0UL)
-+		return -EINVAL;
++	xencomm_free(desc);
 +
-+	*ret = (struct xencomm_handle*)res;
-+	return 0;
++	return rc;
 +}
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/xen/xenentry.S tmp-linux-2.6-xen.patch/arch/ia64/xen/xenentry.S
---- pristine-linux-2.6.18.2/arch/ia64/xen/xenentry.S	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/xen/xenentry.S	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,931 @@
-+/*
-+ * ia64/xen/entry.S
-+ *
-+ * Alternate kernel routines for Xen.  Heavily leveraged from
-+ *   ia64/kernel/entry.S
-+ *
-+ * Copyright (C) 2005 Hewlett-Packard Co
-+ *	Dan Magenheimer <dan.magenheimer at hp.com>
-+ */
 +
-+#include <asm/asmmacro.h>
-+#include <asm/cache.h>
-+#include <asm/errno.h>
-+#include <asm/kregs.h>
-+#include <asm/asm-offsets.h>
-+#include <asm/pgtable.h>
-+#include <asm/percpu.h>
-+#include <asm/processor.h>
-+#include <asm/thread_info.h>
-+#include <asm/unistd.h>
++static int
++xencomm_privcmd_event_channel_op(privcmd_hypercall_t *hypercall)
++{
++	int cmd = hypercall->arg[0];
++	struct xencomm_handle *desc;
++	unsigned int argsize;
++	int ret;
 +
-+#ifdef CONFIG_XEN
-+#include "xenminstate.h"
-+#else
-+#include "minstate.h"
-+#endif
++	switch (cmd) {
++	case EVTCHNOP_alloc_unbound:
++		argsize = sizeof(evtchn_alloc_unbound_t);
++		break;
 +
-+/*
-+ * prev_task <- ia64_switch_to(struct task_struct *next)
-+ *	With Ingo's new scheduler, interrupts are disabled when this routine gets
-+ *	called.  The code starting at .map relies on this.  The rest of the code
-+ *	doesn't care about the interrupt masking status.
-+ */
-+#ifdef CONFIG_XEN
-+GLOBAL_ENTRY(xen_switch_to)
-+	.prologue
-+	alloc r16=ar.pfs,1,0,0,0
-+	movl r22=running_on_xen;;
-+	ld4 r22=[r22];;
-+	cmp.eq p7,p0=r22,r0
-+(p7)	br.cond.sptk.many __ia64_switch_to;;
-+#else
-+GLOBAL_ENTRY(ia64_switch_to)
-+	.prologue
-+	alloc r16=ar.pfs,1,0,0,0
-+#endif
-+	DO_SAVE_SWITCH_STACK
-+	.body
++	case EVTCHNOP_status:
++		argsize = sizeof(evtchn_status_t);
++		break;
 +
-+	adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
-+	movl r25=init_task
-+	mov r27=IA64_KR(CURRENT_STACK)
-+	adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
-+	dep r20=0,in0,61,3		// physical address of "next"
-+	;;
-+	st8 [r22]=sp			// save kernel stack pointer of old task
-+	shr.u r26=r20,IA64_GRANULE_SHIFT
-+	cmp.eq p7,p6=r25,in0
-+	;;
++	default:
++		printk("%s: unknown EVTCHNOP %d\n", __func__, cmd);
++		return -EINVAL;
++	}
++
++	desc = xencomm_map((void *)hypercall->arg[1], argsize);
++	if ((void *)hypercall->arg[1] != NULL && argsize > 0 && desc == NULL)
++		return -ENOMEM;
++
++	ret = xencomm_arch_hypercall_event_channel_op(cmd, desc);
++
++	xencomm_free(desc);
++	return ret;
++}
++
++static int
++xencomm_privcmd_hvm_op_track_dirty_vram(privcmd_hypercall_t *hypercall)
++{
++#if 1
 +	/*
-+	 * If we've already mapped this task's page, we can skip doing it again.
++	 * At the moment HVMOP_track_dirty_vram isn't implemented
++	 * on xen/ia64, so it would just return -ENOSYS.
++	 * Don't issue the hypercall only to get -ENOSYS back.
++	 * Once the hypercall is implemented, enable the code below.
 +	 */
-+(p6)	cmp.eq p7,p6=r26,r27
-+(p6)	br.cond.dpnt .map
-+	;;
-+.done:
-+	ld8 sp=[r21]			// load kernel stack pointer of new task
-+#ifdef CONFIG_XEN
-+	// update "current" application register
-+	mov r8=IA64_KR_CURRENT
-+	mov r9=in0;;
-+	XEN_HYPER_SET_KR
++	return -ENOSYS;
 +#else
-+	mov IA64_KR(CURRENT)=in0	// update "current" application register
++	int cmd = hypercall->arg[0];
++	struct xen_hvm_track_dirty_vram *user_op = (void*)hypercall->arg[1];
++	struct xen_hvm_track_dirty_vram kern_op;
++	struct xencomm_handle *desc;
++	struct xencomm_handle *bitmap_desc;
++	int ret;
++
++	BUG_ON(cmd != HVMOP_track_dirty_vram);
++	if (copy_from_user(&kern_op, user_op, sizeof(kern_op)))
++		return -EFAULT;
++	desc = xencomm_map_no_alloc(&kern_op, sizeof(kern_op));
++	bitmap_desc = xencomm_map(xen_guest_handle(kern_op.dirty_bitmap),
++				  kern_op.nr * sizeof(uint8_t));
++	if (bitmap_desc == NULL)
++		return -ENOMEM;
++	set_xen_guest_handle(kern_op.dirty_bitmap, (void*)bitmap_desc);
++	ret = xencomm_arch_hypercall_hvm_op(cmd, desc);
++	xencomm_free(bitmap_desc);
++
++	return ret;
++#endif
++}
++
++static int
++xencomm_privcmd_hvm_op(privcmd_hypercall_t *hypercall)
++{
++	int cmd = hypercall->arg[0];
++	struct xencomm_handle *desc;
++	unsigned int argsize;
++	int ret;
++
++	switch (cmd) {
++	case HVMOP_get_param:
++	case HVMOP_set_param:
++		argsize = sizeof(xen_hvm_param_t);
++		break;
++	case HVMOP_set_pci_intx_level:
++		argsize = sizeof(xen_hvm_set_pci_intx_level_t);
++		break;
++	case HVMOP_set_isa_irq_level:
++		argsize = sizeof(xen_hvm_set_isa_irq_level_t);
++		break;
++	case HVMOP_set_pci_link_route:
++		argsize = sizeof(xen_hvm_set_pci_link_route_t);
++		break;
++	case HVMOP_set_mem_type:
++		argsize = sizeof(xen_hvm_set_mem_type_t);
++		break;
++
++	case HVMOP_track_dirty_vram:
++		return xencomm_privcmd_hvm_op_track_dirty_vram(hypercall);
++
++	default:
++		printk("%s: unknown HVMOP %d\n", __func__, cmd);
++		return -EINVAL;
++	}
++
++	desc = xencomm_map((void *)hypercall->arg[1], argsize);
++	if ((void *)hypercall->arg[1] != NULL && argsize > 0 && desc == NULL)
++		return -ENOMEM;
++
++	ret = xencomm_arch_hypercall_hvm_op(cmd, desc);
++
++	xencomm_free(desc);
++	return ret;
++}
++
++static int
++xencomm_privcmd_sched_op(privcmd_hypercall_t *hypercall)
++{
++	int cmd = hypercall->arg[0];
++	struct xencomm_handle *desc;
++	unsigned int argsize;
++	int ret;
++
++	switch (cmd) {
++	case SCHEDOP_remote_shutdown:
++		argsize = sizeof(sched_remote_shutdown_t);
++		break;
++	default:
++		printk("%s: unknown SCHEDOP %d\n", __func__, cmd);
++		return -EINVAL;
++	}
++
++	desc = xencomm_map((void *)hypercall->arg[1], argsize);
++	if ((void *)hypercall->arg[1] != NULL && argsize > 0 && desc == NULL)
++		return -ENOMEM;
++
++	ret = xencomm_arch_hypercall_sched_op(cmd, desc);
++
++	xencomm_free(desc);
++	return ret;
++}
++
++static int
++xencomm_privcmd_ia64_dom0vp_op(privcmd_hypercall_t *hypercall)
++{
++	int cmd = hypercall->arg[0];
++	int ret;
++
++	switch (cmd) {
++	case IA64_DOM0VP_fpswa_revision: {
++		unsigned int revision;
++		unsigned int __user *revision_user =
++			(unsigned int __user *)hypercall->arg[1];
++		struct xencomm_handle *desc;
++		desc = xencomm_map(&revision, sizeof(revision));
++		if (desc == NULL)
++			return -ENOMEM;
++
++		ret = xencomm_arch_hypercall_fpswa_revision(desc);
++		xencomm_free(desc);
++		if (ret)
++			break;
++		if (copy_to_user(revision_user, &revision, sizeof(revision)))
++			ret = -EFAULT;
++		break;
++	}
++#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
++	case IA64_DOM0VP_expose_foreign_p2m:
++		ret = xen_foreign_p2m_expose(hypercall);
++		break;
++#endif
++	default:
++		printk("%s: unknown IA64 DOM0VP op %d\n", __func__, cmd);
++		ret = -EINVAL;
++		break;
++	}
++	return ret;
++}
++
++static int
++xencomm_privcmd_ia64_debug_op(privcmd_hypercall_t *hypercall)
++{
++	int cmd = hypercall->arg[0];
++	unsigned long domain = hypercall->arg[1];
++	struct xencomm_handle *desc;
++	int ret;
++
++	switch (cmd) {
++	case XEN_IA64_DEBUG_OP_SET_FLAGS:
++	case XEN_IA64_DEBUG_OP_GET_FLAGS:
++		break;
++	default:
++		printk("%s: unknown IA64 DEBUGOP %d\n", __func__, cmd);
++		return -EINVAL;
++	}
++
++	desc = xencomm_map((void *)hypercall->arg[2],
++			   sizeof(xen_ia64_debug_op_t));
++	if (desc == NULL)
++		return -ENOMEM;
++
++	ret = xencomm_arch_hypercall_ia64_debug_op(cmd, domain, desc);
++
++	xencomm_free(desc);
++	return ret;
++}
++
++static int
++xencomm_privcmd_ia64_physdev_op(privcmd_hypercall_t *hypercall)
++{
++	int cmd = hypercall->arg[0];
++	struct xencomm_handle *desc;
++	unsigned int argsize;
++	int ret;
++
++	switch (cmd) {
++	case PHYSDEVOP_map_pirq:
++		argsize = sizeof(physdev_map_pirq_t);
++		break;
++	case PHYSDEVOP_unmap_pirq:
++		argsize = sizeof(physdev_unmap_pirq_t);
++		break;
++	default:
++		printk("%s: unknown PHYSDEVOP %d\n", __func__, cmd);
++		return -EINVAL;
++	}
++
++	desc = xencomm_map((void *)hypercall->arg[1], argsize);
++	if ((void *)hypercall->arg[1] != NULL && argsize > 0 && desc == NULL)
++		return -ENOMEM;
++
++	ret = xencomm_arch_hypercall_physdev_op(cmd, desc);
++
++	xencomm_free(desc);
++	return ret;
++}
++
++int
++privcmd_hypercall(privcmd_hypercall_t *hypercall)
++{
++	switch (hypercall->op) {
++	case __HYPERVISOR_platform_op:
++		return xencomm_privcmd_platform_op(hypercall);
++	case __HYPERVISOR_domctl:
++		return xencomm_privcmd_domctl(hypercall);
++	case __HYPERVISOR_sysctl:
++		return xencomm_privcmd_sysctl(hypercall);
++	case __HYPERVISOR_xsm_op:
++		return xencomm_privcmd_xsm_op(hypercall);
++	case __HYPERVISOR_xen_version:
++		return xencomm_privcmd_xen_version(hypercall);
++	case __HYPERVISOR_memory_op:
++		return xencomm_privcmd_memory_op(hypercall);
++	case __HYPERVISOR_event_channel_op:
++		return xencomm_privcmd_event_channel_op(hypercall);
++	case __HYPERVISOR_hvm_op:
++		return xencomm_privcmd_hvm_op(hypercall);
++	case __HYPERVISOR_sched_op:
++		return xencomm_privcmd_sched_op(hypercall);
++	case __HYPERVISOR_ia64_dom0vp_op:
++		return xencomm_privcmd_ia64_dom0vp_op(hypercall);
++	case __HYPERVISOR_ia64_debug_op:
++		return xencomm_privcmd_ia64_debug_op(hypercall);
++	case __HYPERVISOR_physdev_op:
++		return xencomm_privcmd_ia64_physdev_op(hypercall);
++	default:
++		printk("%s: unknown hcall (%ld)\n", __func__, hypercall->op);
++		return -ENOSYS;
++	}
++}
++
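
The privcmd handlers above all follow one marshalling pattern: copy the user
argument into a kernel buffer where needed, wrap it in a xencomm descriptor
via xencomm_map() (or xencomm_map_no_alloc() for small on-stack data), issue
the arch-specific hypercall, and free the descriptor. A minimal sketch of
that pattern for a new op (FOOOP_bar, foo_bar_t and
xencomm_arch_hypercall_foo_op() are hypothetical names; everything else is
the API introduced by this patch):

    static int
    xencomm_privcmd_foo_op(privcmd_hypercall_t *hypercall)
    {
    	int cmd = hypercall->arg[0];
    	void __user *arg = (void __user *)hypercall->arg[1];
    	struct xencomm_handle *desc;
    	int ret;

    	if (cmd != FOOOP_bar)
    		return -EINVAL;

    	/* Describe the user buffer for the hypervisor. */
    	desc = xencomm_map(arg, sizeof(foo_bar_t));
    	if (arg != NULL && desc == NULL)
    		return -ENOMEM;

    	ret = xencomm_arch_hypercall_foo_op(cmd, desc);

    	xencomm_free(desc);
    	return ret;
    }
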
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/xen/xen_dma.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/xen/xen_dma.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,190 @@
++/*
++ * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
++ * 	Alex Williamson <alex.williamson at hp.com>
++ *
++ * Basic DMA mapping services for Xen guests.
++ * Based on arch/i386/kernel/pci-dma-xen.c.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ */
++
++#include <linux/bitops.h>
++#include <linux/dma-mapping.h>
++#include <linux/mm.h>
++#include <asm/scatterlist.h>
++#include <xen/gnttab.h>
++#include <asm/gnttab_dma.h>
++
++#define IOMMU_BUG_ON(test)					\
++do {								\
++	if (unlikely(test)) {					\
++		printk(KERN_ALERT "Fatal DMA error!\n");	\
++		BUG();						\
++	}							\
++} while (0)
++
++static int check_pages_physically_contiguous(unsigned long pfn,
++					     unsigned int offset,
++					     size_t length)
++{
++	unsigned long next_bus;
++	int i;
++	int nr_pages;
++
++	next_bus = pfn_to_mfn_for_dma(pfn);
++	nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;
++
++	for (i = 1; i < nr_pages; i++) {
++		if (pfn_to_mfn_for_dma(++pfn) != ++next_bus)
++			return 0;
++	}
++	return 1;
++}
++
++int range_straddles_page_boundary(paddr_t p, size_t size)
++{
++	extern unsigned long *contiguous_bitmap;
++	unsigned long pfn = p >> PAGE_SHIFT;
++	unsigned int offset = p & ~PAGE_MASK;
++
++	if (!is_running_on_xen())
++		return 0;
++
++	if (offset + size <= PAGE_SIZE)
++		return 0;
++	if (test_bit(pfn, contiguous_bitmap))
++		return 0;
++	if (check_pages_physically_contiguous(pfn, offset, size))
++		return 0;
++	return 1;
++}
++
++/*
++ * This should be broken out of swiotlb and put in a common place
++ * when merged with upstream Linux.
++ */
++static inline int
++address_needs_mapping(struct device *dev, dma_addr_t addr)
++{
++	dma_addr_t mask = 0xffffffff;
++
++	/* If the device has a mask, use it, otherwise default to 32 bits */
++	if (dev && dev->dma_mask)
++		mask = *dev->dma_mask;
++	return (addr & ~mask) != 0;
++}
++
++int
++xen_map_sg(struct device *dev, struct scatterlist *sg, int nents,
++	   int direction)
++{
++	int i;
++
++	for (i = 0 ; i < nents ; i++) {
++		sg[i].dma_address = gnttab_dma_map_page(sg[i].page) + sg[i].offset;
++		sg[i].dma_length  = sg[i].length;
++
++		IOMMU_BUG_ON(address_needs_mapping(dev, sg[i].dma_address));
++		IOMMU_BUG_ON(range_straddles_page_boundary(
++			page_to_pseudophys(sg[i].page) + sg[i].offset,
++			sg[i].length));
++	}
++
++	return nents;
++}
++EXPORT_SYMBOL(xen_map_sg);
++
++void
++xen_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
++	     int direction)
++{
++	int i;
++	for (i = 0; i < nents; i++)
++		__gnttab_dma_unmap_page(sg[i].page);
++}
++EXPORT_SYMBOL(xen_unmap_sg);
++
++int
++xen_dma_mapping_error(dma_addr_t dma_addr)
++{
++	return 0;
++}
++EXPORT_SYMBOL(xen_dma_mapping_error);
++
++int
++xen_dma_supported(struct device *dev, u64 mask)
++{
++	return 1;
++}
++EXPORT_SYMBOL(xen_dma_supported);
++
++void *
++xen_alloc_coherent(struct device *dev, size_t size,
++		   dma_addr_t *dma_handle, gfp_t gfp)
++{
++	unsigned long vaddr;
++	unsigned int order = get_order(size);
++
++	vaddr = __get_free_pages(gfp, order);
++
++	if (!vaddr)
++		return NULL;
++
++	if (xen_create_contiguous_region(vaddr, order,
++					 fls64(dev->coherent_dma_mask))) {
++		free_pages(vaddr, order);
++		return NULL;
++	}
++
++	memset((void *)vaddr, 0, size);
++	*dma_handle = virt_to_bus((void *)vaddr);
++
++	return (void *)vaddr;
++}
++EXPORT_SYMBOL(xen_alloc_coherent);
++
++void
++xen_free_coherent(struct device *dev, size_t size,
++		      void *vaddr, dma_addr_t dma_handle)
++{
++	unsigned int order =  get_order(size);
++
++	xen_destroy_contiguous_region((unsigned long)vaddr, order);
++	free_pages((unsigned long)vaddr, order);
++}
++EXPORT_SYMBOL(xen_free_coherent);
++
++dma_addr_t
++xen_map_single(struct device *dev, void *ptr, size_t size,
++	       int direction)
++{
++	dma_addr_t dma_addr = gnttab_dma_map_virt(ptr);
++
++	IOMMU_BUG_ON(range_straddles_page_boundary(__pa(ptr), size));
++	IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
++
++	return dma_addr;
++}
++EXPORT_SYMBOL(xen_map_single);
++
++void
++xen_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
++		 int direction)
++{
++	gnttab_dma_unmap_page(dma_addr);
++}
++EXPORT_SYMBOL(xen_unmap_single);
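
As a usage illustration (not part of this patch), drivers normally reach
these helpers through the generic DMA API, but a direct call shows the
contract. A minimal sketch of a single-buffer round trip, assuming a valid
struct device *dev plus kmalloc() from <linux/slab.h> and DMA_TO_DEVICE from
<linux/dma-mapping.h>:

    static int example_dma_roundtrip(struct device *dev)
    {
    	void *buf = kmalloc(512, GFP_KERNEL);
    	dma_addr_t handle;

    	if (buf == NULL)
    		return -ENOMEM;

    	/* Maps through the grant-table helpers; IOMMU_BUG_ON() fires
    	   if the buffer straddles a machine-discontiguous boundary. */
    	handle = xen_map_single(dev, buf, 512, DMA_TO_DEVICE);

    	/* ... program the device with 'handle' and wait ... */

    	xen_unmap_single(dev, handle, 512, DMA_TO_DEVICE);
    	kfree(buf);
    	return 0;
    }
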
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/xen/xencomm.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/xen/xencomm.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,109 @@
++/*
++ * Copyright (C) 2006 Hollis Blanchard <hollisb at us.ibm.com>, IBM Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ * 
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
++ */
++
++#include <linux/gfp.h>
++#include <linux/mm.h>
++#include <xen/interface/xen.h>
++#include <asm/page.h>
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++#include <asm/xen/xencomm.h>
++
++static unsigned long kernel_start_pa;
++
++void
++xencomm_initialize (void)
++{
++	kernel_start_pa = KERNEL_START - ia64_tpa(KERNEL_START);
++}
++
++/* Translate virtual address to physical address.  */
++unsigned long
++xencomm_vtop(unsigned long vaddr)
++{
++#ifndef CONFIG_VMX_GUEST
++	struct page *page;
++	struct vm_area_struct *vma;
++#endif
++
++	if (vaddr == 0)
++		return 0;
++
++#ifdef __ia64__
++	if (REGION_NUMBER(vaddr) == 5) {
++		pgd_t *pgd;
++		pud_t *pud;
++		pmd_t *pmd;
++		pte_t *ptep;
++
++		/* On ia64, TASK_SIZE refers to current.  It is not initialized
++		   during boot.
++		   Furthermore the kernel is relocatable and __pa() doesn't
++		   work on such addresses.  */
++		if (vaddr >= KERNEL_START
++		    && vaddr < (KERNEL_START + KERNEL_TR_PAGE_SIZE)) {
++			return vaddr - kernel_start_pa;
++		}
++
++		/* In kernel area -- virtually mapped.  */
++		pgd = pgd_offset_k(vaddr);
++		if (pgd_none(*pgd) || pgd_bad(*pgd))
++			return ~0UL;
++
++		pud = pud_offset(pgd, vaddr);
++		if (pud_none(*pud) || pud_bad(*pud))
++			return ~0UL;
++
++		pmd = pmd_offset(pud, vaddr);
++		if (pmd_none(*pmd) || pmd_bad(*pmd))
++			return ~0UL;
++
++		ptep = pte_offset_kernel(pmd, vaddr);
++		if (!ptep)
++			return ~0UL;
++
++		return (pte_val(*ptep) & _PFN_MASK) | (vaddr & ~PAGE_MASK);
++	}
++#endif
++
++	if (vaddr > TASK_SIZE) {
++		/* kernel address */
++		return __pa(vaddr);
++	}
++
++
++#ifdef CONFIG_VMX_GUEST
++	/* No privcmd within vmx guest.  */
++	return ~0UL;
++#else
++	/* XXX double-check (lack of) locking */
++	vma = find_extend_vma(current->mm, vaddr);
++	if (!vma)
++		return ~0UL;
++
++	/* We assume the page is modified.  */
++	page = follow_page(vma, vaddr, FOLL_WRITE | FOLL_TOUCH);
++	if (!page)
++		return ~0UL;
++
++	return (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
++#endif
++}
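
Note that xencomm_vtop() returns ~0UL as a sentinel for addresses it cannot
translate, so callers must check for it before handing the result to the
hypervisor. A minimal sketch of that check (the helper name paddr_of_buffer
is hypothetical):

    static int paddr_of_buffer(void *buf, unsigned long *paddr)
    {
    	unsigned long res = xencomm_vtop((unsigned long)buf);

    	if (res == ~0UL)	/* unmapped or untranslatable */
    		return -EINVAL;

    	*paddr = res;
    	return 0;
    }
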
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/xen/xenentry.S
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/xen/xenentry.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,931 @@
++/*
++ * ia64/xen/entry.S
++ *
++ * Alternate kernel routines for Xen.  Heavily leveraged from
++ *   ia64/kernel/entry.S
++ *
++ * Copyright (C) 2005 Hewlett-Packard Co
++ *	Dan Magenheimer <dan.magenheimer at hp.com>
++ */
++
++#include <asm/asmmacro.h>
++#include <asm/cache.h>
++#include <asm/errno.h>
++#include <asm/kregs.h>
++#include <asm/asm-offsets.h>
++#include <asm/pgtable.h>
++#include <asm/percpu.h>
++#include <asm/processor.h>
++#include <asm/thread_info.h>
++#include <asm/unistd.h>
++
++#ifdef CONFIG_XEN
++#include "xenminstate.h"
++#else
++#include "minstate.h"
++#endif
++
++/*
++ * prev_task <- ia64_switch_to(struct task_struct *next)
++ *	With Ingo's new scheduler, interrupts are disabled when this routine gets
++ *	called.  The code starting at .map relies on this.  The rest of the code
++ *	doesn't care about the interrupt masking status.
++ */
++#ifdef CONFIG_XEN
++GLOBAL_ENTRY(xen_switch_to)
++	.prologue
++	alloc r16=ar.pfs,1,0,0,0
++	movl r22=running_on_xen;;
++	ld4 r22=[r22];;
++	cmp.eq p7,p0=r22,r0
++(p7)	br.cond.sptk.many __ia64_switch_to;;
++#else
++GLOBAL_ENTRY(ia64_switch_to)
++	.prologue
++	alloc r16=ar.pfs,1,0,0,0
++#endif
++	DO_SAVE_SWITCH_STACK
++	.body
++
++	adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
++	movl r25=init_task
++	mov r27=IA64_KR(CURRENT_STACK)
++	adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
++	dep r20=0,in0,61,3		// physical address of "next"
++	;;
++	st8 [r22]=sp			// save kernel stack pointer of old task
++	shr.u r26=r20,IA64_GRANULE_SHIFT
++	cmp.eq p7,p6=r25,in0
++	;;
++	/*
++	 * If we've already mapped this task's page, we can skip doing it again.
++	 */
++(p6)	cmp.eq p7,p6=r26,r27
++(p6)	br.cond.dpnt .map
++	;;
++.done:
++	ld8 sp=[r21]			// load kernel stack pointer of new task
++#ifdef CONFIG_XEN
++	// update "current" application register
++	mov r8=IA64_KR_CURRENT
++	mov r9=in0;;
++	XEN_HYPER_SET_KR
++#else
++	mov IA64_KR(CURRENT)=in0	// update "current" application register
 +#endif
 +	mov r8=r13			// return pointer to previously running task
 +	mov r13=in0			// set "current" pointer
@@ -28729,33 +32681,10 @@
 +#else
 +END(ia64_leave_kernel)
 +#endif
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/xen/xenhpski.c tmp-linux-2.6-xen.patch/arch/ia64/xen/xenhpski.c
---- pristine-linux-2.6.18.2/arch/ia64/xen/xenhpski.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/xen/xenhpski.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,19 @@
-+
-+extern unsigned long xen_get_cpuid(int);
-+
-+int
-+running_on_sim(void)
-+{
-+	int i;
-+	long cpuid[6];
-+
-+	for (i = 0; i < 5; ++i)
-+		cpuid[i] = xen_get_cpuid(i);
-+	if ((cpuid[0] & 0xff) != 'H') return 0;
-+	if ((cpuid[3] & 0xff) != 0x4) return 0;
-+	if (((cpuid[3] >> 8) & 0xff) != 0x0) return 0;
-+	if (((cpuid[3] >> 16) & 0xff) != 0x0) return 0;
-+	if (((cpuid[3] >> 24) & 0x7) != 0x7) return 0;
-+	return 1;
-+}
-+
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/xen/xenivt.S tmp-linux-2.6-xen.patch/arch/ia64/xen/xenivt.S
---- pristine-linux-2.6.18.2/arch/ia64/xen/xenivt.S	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/xen/xenivt.S	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,2177 @@
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/xen/xenivt.S
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/xen/xenivt.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,2178 @@
 +/*
 + * arch/ia64/xen/ivt.S
 + *
@@ -29454,16 +33383,17 @@
 +	st4 [r3]=r14				// vpsr.ic = 1
 +	adds r3=8,r2				// set up second base pointer
 +	;;
++	sum PSR_DEFAULT_BITS
 +#else
 +	mov out0=cr.ifa
 +	mov out1=cr.isr
 +	adds r3=8,r2				// set up second base pointer
 +	;;
 +	ssm psr.ic | PSR_DEFAULT_BITS
++#endif
 +	;;
 +	srlz.i					// guarantee that interruption collection is on
 +	;;
-+#endif
 +#ifdef CONFIG_XEN
 +    
 +#define MASK_TO_PEND_OFS    (-1)
@@ -29969,7 +33899,7 @@
 +	alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
 +#ifdef CONFIG_XEN
 +	;;
-+	br.call.sptk.many rp=xen_get_ivr
++	XEN_HYPER_GET_IVR
 +	;;
 +	mov out0=r8		// pass cr.ivr as first arg
 +#else
@@ -30933,9 +34863,9 @@
 +
 +   
 +#endif
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/xen/xenminstate.h tmp-linux-2.6-xen.patch/arch/ia64/xen/xenminstate.h
---- pristine-linux-2.6.18.2/arch/ia64/xen/xenminstate.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/xen/xenminstate.h	2007-07-30 16:35:11.000000000 +0200
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/xen/xenminstate.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/xen/xenminstate.h	Wed Sep 10 10:54:08 2008 +0100
 @@ -0,0 +1,358 @@
 +#include <asm/cache.h>
 +
@@ -31295,9 +35225,9 @@
 +#else
 +#define SAVE_MIN		DO_SAVE_MIN(     , mov r30=r0, )
 +#endif
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/xen/xenpal.S tmp-linux-2.6-xen.patch/arch/ia64/xen/xenpal.S
---- pristine-linux-2.6.18.2/arch/ia64/xen/xenpal.S	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/xen/xenpal.S	2007-07-30 16:35:11.000000000 +0200
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/xen/xenpal.S
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/xen/xenpal.S	Wed Sep 10 10:54:08 2008 +0100
 @@ -0,0 +1,85 @@
 +/*
 + * ia64/xen/xenpal.S
@@ -31384,10 +35314,10 @@
 +	srlz.d				// seralize restoration of psr.l
 +	br.ret.sptk.many b0
 +END(xen_pal_call_static)
-diff -Nurp pristine-linux-2.6.18.2/arch/ia64/xen/xensetup.S tmp-linux-2.6-xen.patch/arch/ia64/xen/xensetup.S
---- pristine-linux-2.6.18.2/arch/ia64/xen/xensetup.S	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/ia64/xen/xensetup.S	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,52 @@
+diff -r d894e36cfc30 -r 0aa021803deb arch/ia64/xen/xensetup.S
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/xen/xensetup.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,40 @@
 +/*
 + * Support routines for Xen
 + *
@@ -31397,6 +35327,13 @@
 +#include <asm/processor.h>
 +#include <asm/asmmacro.h>
 +
++	.section .data.read_mostly
++	.align 8
++	.global running_on_xen
++running_on_xen:
++	data4 0
++	.previous
++
 +#define isBP	p3	// are we the Bootstrap Processor?
 +
 +	.text
@@ -31421,19852 +35358,51883 @@
 +	br.ret.sptk.many rp
 +	;;
 +END(early_xen_setup)
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/Kconfig
+--- a/arch/powerpc/Kconfig	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/powerpc/Kconfig	Wed Sep 10 10:54:08 2008 +0100
+@@ -438,6 +438,17 @@
+ config UDBG_RTAS_CONSOLE
+ 	bool
+ 	default n
 +
-+#include <xen/interface/xen.h>
-+
-+/* Stub for suspend.
-+   Just force the stacked registers to be written in memory.  */	
-+GLOBAL_ENTRY(xencomm_arch_hypercall_suspend)
-+	;; 
-+	alloc r20=ar.pfs,0,0,6,0
-+	mov r2=__HYPERVISOR_sched_op
-+	;; 
-+	/* We don't want to deal with RSE.  */
-+	flushrs
-+	mov r33=r32
-+	mov r32=2 // SCHEDOP_shutdown
-+	;;
-+	break 0x1000
-+	;; 
-+	br.ret.sptk.many b0
-+END(xencomm_arch_hypercall_suspend)
-diff -Nurp pristine-linux-2.6.18.2/arch/um/kernel/physmem.c tmp-linux-2.6-xen.patch/arch/um/kernel/physmem.c
---- pristine-linux-2.6.18.2/arch/um/kernel/physmem.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/um/kernel/physmem.c	2007-07-30 16:35:11.000000000 +0200
-@@ -226,7 +226,7 @@ EXPORT_SYMBOL(physmem_forget_descriptor)
- EXPORT_SYMBOL(physmem_remove_mapping);
- EXPORT_SYMBOL(physmem_subst_mapping);
++config PPC_XEN
++       bool "Enable Xen compatible kernel"
++       depends on PPC_MULTIPLATFORM && PPC64 && PPC_MAPLE && PPC_PSERIES && SMP
++       select XEN
++       select XEN_PRIVILEGED_GUEST
++       select XEN_UNPRIVILEGED_GUEST
++       select XEN_XENCOMM
++
++       help
++	  This option will compile a kernel compatible with the Xen hypervisor.
  
--void arch_free_page(struct page *page, int order)
-+int arch_free_page(struct page *page, int order)
- {
- 	void *virt;
- 	int i;
-@@ -235,6 +235,8 @@ void arch_free_page(struct page *page, i
- 		virt = __va(page_to_phys(page + i));
- 		physmem_remove_mapping(virt);
- 	}
-+
-+	return 0;
- }
+ config XICS
+ 	depends on PPC_PSERIES
+@@ -1080,6 +1091,8 @@
  
- int is_remapped(void *virt)
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/ia32/ia32entry-xen.S tmp-linux-2.6-xen.patch/arch/x86_64/ia32/ia32entry-xen.S
---- pristine-linux-2.6.18.2/arch/x86_64/ia32/ia32entry-xen.S	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/ia32/ia32entry-xen.S	2007-10-14 01:46:51.000000000 +0200
-@@ -0,0 +1,755 @@
-+/*
-+ * Compatibility mode system call entry point for x86-64. 
-+ * 		
-+ * Copyright 2000-2002 Andi Kleen, SuSE Labs.
-+ */		 
+ source "security/Kconfig"
+ 
++source "drivers/xen/Kconfig"
 +
-+#include <asm/dwarf2.h>
-+#include <asm/calling.h>
-+#include <asm/asm-offsets.h>
-+#include <asm/current.h>
-+#include <asm/errno.h>
-+#include <asm/ia32_unistd.h>	
-+#include <asm/thread_info.h>	
-+#include <asm/segment.h>
-+#include <asm/vsyscall32.h>
-+#include <asm/irqflags.h>
-+#include <linux/linkage.h>
+ config KEYS_COMPAT
+ 	bool
+ 	depends on COMPAT && KEYS
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/Kconfig.debug
+--- a/arch/powerpc/Kconfig.debug	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/powerpc/Kconfig.debug	Wed Sep 10 10:54:08 2008 +0100
+@@ -160,6 +160,20 @@
+ 	  Select this to enable early debugging for legacy iSeries. You need
+ 	  to hit "Ctrl-x Ctrl-x" to see the messages on the console.
+ 
++config PPC_EARLY_DEBUG_XEN_DOM0
++	bool "Xen Dom0 Console"
++	depends on PPC_XEN
++	help
++	  Select this to enable early debugging for Xen Dom0. Setting
++	  this will result in a kernel that may not work as a DomU.
 +
-+#define __XEN_X86_64 1
++config PPC_EARLY_DEBUG_XEN_DOMU
++	bool "Xen DomU Console"
++	depends on PPC_XEN && XEN_UNPRIVILEGED_GUEST
++	help
++	  Select this to enable early debugging for Xen DomU. Setting
++	  this will result in a kernel that may not work as a Dom0.
 +
-+#define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8)
+ endchoice
+ 
+ endmenu
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/Makefile
+--- a/arch/powerpc/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/powerpc/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -65,6 +65,7 @@
+ AFLAGS-$(CONFIG_PPC32)	:= -Iarch/$(ARCH)
+ CFLAGS-$(CONFIG_PPC64)	:= -mminimal-toc -mtraceback=none  -mcall-aixdesc
+ CFLAGS-$(CONFIG_PPC32)	:= -Iarch/$(ARCH) -ffixed-r2 -mmultiple
++CFLAGS-$(CONFIG_PPC_XEN) += -Iinclude/asm-$(ARCH)/xen
+ CPPFLAGS	+= $(CPPFLAGS-y)
+ AFLAGS		+= $(AFLAGS-y)
+ CFLAGS		+= -msoft-float -pipe $(CFLAGS-y)
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/boot/Makefile
+--- a/arch/powerpc/boot/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/powerpc/boot/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -36,8 +36,11 @@
+ $(addprefix $(obj)/,$(zlib) main.o): $(addprefix $(obj)/,$(zliblinuxheader)) $(addprefix $(obj)/,$(zlibheader))
+ #$(addprefix $(obj)/,main.o): $(addprefix $(obj)/,zlib.h)
+ 
++xen_guest-y = xen_guest.S
++
+ src-boot := crt0.S string.S prom.c stdio.c main.c div64.S
+ src-boot += $(zlib)
++src-boot += $(xen_guest-$(CONFIG_XEN))
+ src-boot := $(addprefix $(obj)/, $(src-boot))
+ obj-boot := $(addsuffix .o, $(basename $(src-boot)))
+ 
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/boot/xen_guest.S
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/powerpc/boot/xen_guest.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,7 @@
++	.section __xen_guest
++	.ascii	"GUEST_OS=linux"
++	.ascii  ",GUEST_VER=xen-3.0"
++	.ascii	",XEN_VER=xen-3.0"
++	.ascii	",VIRT_BASE=0x0"
++	.ascii	",LOADER=generic"
++	.byte	0
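
The __xen_guest section holds a single NUL-terminated, comma-separated
key=value string that the Xen domain builder reads from the kernel image;
the .ascii/.byte sequence above is equivalent to embedding roughly this C
string (the variable name is hypothetical):

    static const char xen_guest_info[]
    	__attribute__((section("__xen_guest"))) =
    	"GUEST_OS=linux,GUEST_VER=xen-3.0,XEN_VER=xen-3.0,"
    	"VIRT_BASE=0x0,LOADER=generic";
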
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/configs/xen_maple_defconfig
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/powerpc/configs/xen_maple_defconfig	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1342 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.17
++# Mon Jan 15 23:48:47 2007
++#
++CONFIG_PPC64=y
++CONFIG_64BIT=y
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_COMPAT=y
++CONFIG_SYSVIPC_COMPAT=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_PPC_UDBG_16550=y
++CONFIG_GENERIC_TBSYNC=y
++# CONFIG_DEFAULT_UIMAGE is not set
 +
-+	.macro IA32_ARG_FIXUP noebp=0
-+	movl	%edi,%r8d
-+	.if \noebp
-+	.else
-+	movl	%ebp,%r9d
-+	.endif
-+	xchg	%ecx,%esi
-+	movl	%ebx,%edi
-+	movl	%edx,%edx	/* zero extension */
-+	.endm 
++#
++# Processor support
++#
++CONFIG_POWER4_ONLY=y
++CONFIG_POWER4=y
++CONFIG_PPC_FPU=y
++CONFIG_ALTIVEC=y
++CONFIG_PPC_STD_MMU=y
++CONFIG_VIRT_CPU_ACCOUNTING=y
++CONFIG_SMP=y
++CONFIG_NR_CPUS=32
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
 +
-+	/* clobbers %eax */	
-+	.macro  CLEAR_RREGS
-+	xorl 	%eax,%eax
-+	movq	%rax,R11(%rsp)
-+	movq	%rax,R10(%rsp)
-+	movq	%rax,R9(%rsp)
-+	movq	%rax,R8(%rsp)
-+	.endm
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
 +
-+	.macro LOAD_ARGS32 offset
-+	movl \offset(%rsp),%r11d
-+	movl \offset+8(%rsp),%r10d
-+	movl \offset+16(%rsp),%r9d
-+	movl \offset+24(%rsp),%r8d
-+	movl \offset+40(%rsp),%ecx
-+	movl \offset+48(%rsp),%edx
-+	movl \offset+56(%rsp),%esi
-+	movl \offset+64(%rsp),%edi
-+	movl \offset+72(%rsp),%eax
-+	.endm
++#
++# General setup
++#
++CONFIG_LOCALVERSION="-Xen"
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++CONFIG_SYSCTL=y
++# CONFIG_AUDIT is not set
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++# CONFIG_CPUSETS is not set
++# CONFIG_RELAY is not set
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++# CONFIG_EMBEDDED is not set
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_RT_MUTEXES=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_SLAB=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_SLOB is not set
 +
-+#if defined (__XEN_X86_64)
-+#include "../kernel/xen_entry.S"
-+		
-+#define	__swapgs
-+#define __cli
-+#define __sti	
-+#else
-+/*
-+ * Use the native instructions
-+ */	
-+#define	__swapgs	swapgs
-+#define __cli		cli
-+#define __sti		sti	
-+#endif			
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++CONFIG_KMOD=y
++CONFIG_STOP_MACHINE=y
 +
-+	.macro CFI_STARTPROC32 simple
-+	CFI_STARTPROC	\simple
-+	CFI_UNDEFINED	r8
-+	CFI_UNDEFINED	r9
-+	CFI_UNDEFINED	r10
-+	CFI_UNDEFINED	r11
-+	CFI_UNDEFINED	r12
-+	CFI_UNDEFINED	r13
-+	CFI_UNDEFINED	r14
-+	CFI_UNDEFINED	r15
-+	.endm
++#
++# Block layer
++#
++# CONFIG_BLK_DEV_IO_TRACE is not set
 +
-+/*
-+ * 32bit SYSENTER instruction entry.
-+ *
-+ * Arguments:
-+ * %eax	System call number.
-+ * %ebx Arg1
-+ * %ecx Arg2
-+ * %edx Arg3
-+ * %esi Arg4
-+ * %edi Arg5
-+ * %ebp user stack
-+ * 0(%ebp) Arg6	
-+ * 	
-+ * Interrupts off.
-+ *	
-+ * This is purely a fast path. For anything complicated we use the int 0x80
-+ * path below.	Set up a complete hardware stack frame to share code
-+ * with the int 0x80 path.
-+ */ 	
-+ENTRY(ia32_sysenter_target)
-+	CFI_STARTPROC32	simple
-+	CFI_DEF_CFA	rsp,0
-+	CFI_REGISTER	rsp,rbp
-+	__swapgs 
-+	movq	%gs:pda_kernelstack, %rsp
-+	addq	$(PDA_STACKOFFSET),%rsp
-+	/*
-+	 * No need to follow this irqs on/off section: the syscall
-+	 * disabled irqs, here we enable it straight after entry:
-+	 */
-+	XEN_UNBLOCK_EVENTS(%r11)	
-+	__sti
-+ 	movl	%ebp,%ebp		/* zero extension */
-+	pushq	$__USER32_DS
-+	CFI_ADJUST_CFA_OFFSET 8
-+	/*CFI_REL_OFFSET ss,0*/
-+	pushq	%rbp
-+	CFI_ADJUST_CFA_OFFSET 8
-+	CFI_REL_OFFSET rsp,0
-+	pushfq
-+	CFI_ADJUST_CFA_OFFSET 8
-+	/*CFI_REL_OFFSET rflags,0*/
-+	movl	$VSYSCALL32_SYSEXIT, %r10d
-+	CFI_REGISTER rip,r10
-+	pushq	$__USER32_CS
-+	CFI_ADJUST_CFA_OFFSET 8
-+	/*CFI_REL_OFFSET cs,0*/
-+	movl	%eax, %eax
-+	pushq	%r10
-+	CFI_ADJUST_CFA_OFFSET 8
-+	CFI_REL_OFFSET rip,0
-+	pushq	%rax
-+	CFI_ADJUST_CFA_OFFSET 8
-+	cld
-+	SAVE_ARGS 0,0,0
-+ 	/* no need to do an access_ok check here because rbp has been
-+ 	   32bit zero extended */ 
-+1:	movl	(%rbp),%r9d
-+ 	.section __ex_table,"a"
-+ 	.quad 1b,ia32_badarg
-+ 	.previous	
-+	GET_THREAD_INFO(%r10)
-+	orl    $TS_COMPAT,threadinfo_status(%r10)
-+	testl  $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
-+	CFI_REMEMBER_STATE
-+	jnz  sysenter_tracesys
-+sysenter_do_call:	
-+	cmpl	$(IA32_NR_syscalls-1),%eax
-+	ja	ia32_badsys
-+	IA32_ARG_FIXUP 1
-+	call	*ia32_sys_call_table(,%rax,8)
-+	movq	%rax,RAX-ARGOFFSET(%rsp)
-+	GET_THREAD_INFO(%r10)
-+	XEN_BLOCK_EVENTS(%r11)	
-+	__cli
-+	TRACE_IRQS_OFF
-+	testl	$_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
-+	jnz	int_ret_from_sys_call
-+	andl    $~TS_COMPAT,threadinfo_status(%r10)
-+	/* clear IF, that popfq doesn't enable interrupts early */
-+	andl  $~0x200,EFLAGS-R11(%rsp) 
-+	RESTORE_ARGS 1,24,1,1,1,1
-+	popfq
-+	CFI_ADJUST_CFA_OFFSET -8
-+	/*CFI_RESTORE rflags*/
-+	popq	%rcx				/* User %esp */
-+	CFI_ADJUST_CFA_OFFSET -8
-+	CFI_REGISTER rsp,rcx
-+	movl	$VSYSCALL32_SYSEXIT,%edx	/* User %eip */
-+	CFI_REGISTER rip,rdx
-+	TRACE_IRQS_ON
-+	__swapgs
-+	XEN_UNBLOCK_EVENTS(%r11)		
-+	__sti		/* sti only takes effect after the next instruction */
-+	/* sysexit */
-+	.byte	0xf, 0x35  /* TBD */
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++# CONFIG_IOSCHED_AS is not set
++CONFIG_IOSCHED_DEADLINE=y
++# CONFIG_IOSCHED_CFQ is not set
++# CONFIG_DEFAULT_AS is not set
++CONFIG_DEFAULT_DEADLINE=y
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="deadline"
 +
-+sysenter_tracesys:
-+	CFI_RESTORE_STATE
-+	SAVE_REST
-+	CLEAR_RREGS
-+	movq	$-ENOSYS,RAX(%rsp)	/* really needed? */
-+	movq	%rsp,%rdi        /* &pt_regs -> arg1 */
-+	call	syscall_trace_enter
-+	LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
-+	RESTORE_REST
-+	movl	%ebp, %ebp
-+	/* no need to do an access_ok check here because rbp has been
-+	   32bit zero extended */ 
-+1:	movl	(%rbp),%r9d
-+	.section __ex_table,"a"
-+	.quad 1b,ia32_badarg
-+	.previous
-+	jmp	sysenter_do_call
-+	CFI_ENDPROC
-+ENDPROC(ia32_sysenter_target)
++#
++# Platform support
++#
++CONFIG_PPC_MULTIPLATFORM=y
++# CONFIG_PPC_ISERIES is not set
++# CONFIG_EMBEDDED6xx is not set
++# CONFIG_APUS is not set
++CONFIG_PPC_PSERIES=y
++# CONFIG_PPC_PMAC is not set
++CONFIG_PPC_MAPLE=y
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++# CONFIG_PPC_IBM_CELL_BLADE is not set
++# CONFIG_UDBG_RTAS_CONSOLE is not set
++CONFIG_PPC_XEN=y
++CONFIG_XICS=y
++CONFIG_U3_DART=y
++CONFIG_MPIC=y
++CONFIG_PPC_RTAS=y
++CONFIG_RTAS_ERROR_LOGGING=y
++CONFIG_RTAS_PROC=y
++CONFIG_RTAS_FLASH=y
++# CONFIG_MMIO_NVRAM is not set
++CONFIG_MPIC_BROKEN_U3=y
++CONFIG_IBMVIO=y
++# CONFIG_IBMEBUS is not set
++# CONFIG_PPC_MPC106 is not set
++CONFIG_PPC_970_NAP=y
++# CONFIG_CPU_FREQ is not set
++# CONFIG_WANT_EARLY_SERIAL is not set
 +
-+/*
-+ * 32bit SYSCALL instruction entry.
-+ *
-+ * Arguments:
-+ * %eax	System call number.
-+ * %ebx Arg1
-+ * %ecx return EIP 
-+ * %edx Arg3
-+ * %esi Arg4
-+ * %edi Arg5
-+ * %ebp Arg2    [note: not saved in the stack frame, should not be touched]
-+ * %esp user stack 
-+ * 0(%esp) Arg6
-+ * 	
-+ * Interrupts off.
-+ *	
-+ * This is purely a fast path. For anything complicated we use the int 0x80
-+ * path below.	Set up a complete hardware stack frame to share code
-+ * with the int 0x80 path.	
-+ */ 	
-+ENTRY(ia32_cstar_target)
-+	CFI_STARTPROC32	simple
-+	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
-+	CFI_REGISTER	rip,rcx
-+	/*CFI_REGISTER	rflags,r11*/
-+	__swapgs
-+	movl	%esp,%r8d
-+	CFI_REGISTER	rsp,r8
-+	movq	%gs:pda_kernelstack,%rsp
-+	/*
-+	 * No need to follow this irqs on/off section: the syscall
-+	 * disabled irqs and here we enable it straight after entry:
-+	 */
-+	XEN_UNBLOCK_EVENTS(%r11)	
-+	__sti
-+	SAVE_ARGS 8,1,1
-+	movl 	%eax,%eax	/* zero extension */
-+	movq	%rax,ORIG_RAX-ARGOFFSET(%rsp)
-+	movq	%rcx,RIP-ARGOFFSET(%rsp)
-+	CFI_REL_OFFSET rip,RIP-ARGOFFSET
-+	movq	%rbp,RCX-ARGOFFSET(%rsp) /* this lies slightly to ptrace */
-+	movl	%ebp,%ecx
-+	movq	$__USER32_CS,CS-ARGOFFSET(%rsp)
-+	movq	$__USER32_DS,SS-ARGOFFSET(%rsp)
-+	movq	%r11,EFLAGS-ARGOFFSET(%rsp)
-+	/*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
-+	movq	%r8,RSP-ARGOFFSET(%rsp)	
-+	CFI_REL_OFFSET rsp,RSP-ARGOFFSET
-+	/* no need to do an access_ok check here because r8 has been
-+	   32bit zero extended */ 
-+	/* hardware stack frame is complete now */	
-+1:	movl	(%r8),%r9d
-+	.section __ex_table,"a"
-+	.quad 1b,ia32_badarg
-+	.previous	
-+	GET_THREAD_INFO(%r10)
-+	orl   $TS_COMPAT,threadinfo_status(%r10)
-+	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
-+	CFI_REMEMBER_STATE
-+	jnz   cstar_tracesys
-+cstar_do_call:	
-+	cmpl $IA32_NR_syscalls-1,%eax
-+	ja  ia32_badsys
-+	IA32_ARG_FIXUP 1
-+	call *ia32_sys_call_table(,%rax,8)
-+	movq %rax,RAX-ARGOFFSET(%rsp)
-+	GET_THREAD_INFO(%r10)
-+	XEN_BLOCK_EVENTS(%r11)		
-+	__cli
-+	TRACE_IRQS_OFF
-+	testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
-+	jnz  int_ret_from_sys_call
-+	andl $~TS_COMPAT,threadinfo_status(%r10)
-+	RESTORE_ARGS 1,-ARG_SKIP,1,1,1
-+	movl RIP-ARGOFFSET(%rsp),%ecx
-+	CFI_REGISTER rip,rcx
-+	movl EFLAGS-ARGOFFSET(%rsp),%r11d	
-+	/*CFI_REGISTER rflags,r11*/
-+	TRACE_IRQS_ON
-+	movl RSP-ARGOFFSET(%rsp),%esp
-+	CFI_RESTORE rsp
-+	__swapgs
-+	sysretl  /* TBD */
-+	
-+cstar_tracesys:	
-+	CFI_RESTORE_STATE
-+	SAVE_REST
-+	CLEAR_RREGS
-+	movq $-ENOSYS,RAX(%rsp)	/* really needed? */
-+	movq %rsp,%rdi        /* &pt_regs -> arg1 */
-+	call syscall_trace_enter
-+	LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
-+	RESTORE_REST
-+	movl RSP-ARGOFFSET(%rsp), %r8d
-+	/* no need to do an access_ok check here because r8 has been
-+	   32bit zero extended */ 
-+1:	movl	(%r8),%r9d
-+	.section __ex_table,"a"
-+	.quad 1b,ia32_badarg
-+	.previous
-+	jmp cstar_do_call
-+END(ia32_cstar_target)
-+				
-+ia32_badarg:
-+	movq $-EFAULT,%rax
-+	jmp ia32_sysret
-+	CFI_ENDPROC
++#
++# Kernel options
++#
++CONFIG_HZ_100=y
++# CONFIG_HZ_250 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=100
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++# CONFIG_PREEMPT_BKL is not set
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++CONFIG_FORCE_MAX_ZONEORDER=13
++CONFIG_IOMMU_VMERGE=y
++# CONFIG_HOTPLUG_CPU is not set
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++# CONFIG_KEXEC is not set
++# CONFIG_CRASH_DUMP is not set
++CONFIG_IRQ_ALL_CPUS=y
++# CONFIG_PPC_SPLPAR is not set
++CONFIG_EEH=y
++CONFIG_SCANLOG=y
++CONFIG_LPARCFG=y
++CONFIG_NUMA=y
++CONFIG_NODES_SHIFT=4
++CONFIG_ARCH_SELECT_MEMORY_MODEL=y
++CONFIG_ARCH_SPARSEMEM_ENABLE=y
++CONFIG_ARCH_SPARSEMEM_DEFAULT=y
++CONFIG_SELECT_MEMORY_MODEL=y
++# CONFIG_FLATMEM_MANUAL is not set
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++CONFIG_SPARSEMEM_MANUAL=y
++CONFIG_SPARSEMEM=y
++CONFIG_NEED_MULTIPLE_NODES=y
++CONFIG_HAVE_MEMORY_PRESENT=y
++# CONFIG_SPARSEMEM_STATIC is not set
++CONFIG_SPARSEMEM_EXTREME=y
++CONFIG_MEMORY_HOTPLUG=y
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_MIGRATION=y
++CONFIG_RESOURCES_64BIT=y
++CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
++CONFIG_ARCH_MEMORY_PROBE=y
++# CONFIG_PPC_64K_PAGES is not set
++# CONFIG_SCHED_SMT is not set
++CONFIG_PROC_DEVICETREE=y
++# CONFIG_CMDLINE_BOOL is not set
++# CONFIG_PM is not set
++CONFIG_SECCOMP=y
++CONFIG_ISA_DMA_API=y
 +
-+/* 
-+ * Emulated IA32 system calls via int 0x80. 
-+ *
-+ * Arguments:	 
-+ * %eax	System call number.
-+ * %ebx Arg1
-+ * %ecx Arg2
-+ * %edx Arg3
-+ * %esi Arg4
-+ * %edi Arg5
-+ * %ebp Arg6    [note: not saved in the stack frame, should not be touched]
-+ *
-+ * Notes:
-+ * Uses the same stack frame as the x86-64 version.	
-+ * All registers except %eax must be saved (but ptrace may violate that)
-+ * Arguments are zero extended. For system calls that want sign extension and
-+ * take long arguments a wrapper is needed. Most calls can just be called
-+ * directly.
-+ * Assumes it is only called from user space and entered with interrupts off.	
-+ */ 				
++#
++# Bus options
++#
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_PPC_I8259=y
++# CONFIG_PPC_INDIRECT_PCI is not set
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++# CONFIG_PCIEPORTBUS is not set
++# CONFIG_PCI_DEBUG is not set
 +
-+ENTRY(ia32_syscall)
-+	CFI_STARTPROC	simple
-+	CFI_DEF_CFA	rsp,SS+8-RIP
-+	/*CFI_REL_OFFSET	ss,SS-RIP*/
-+	CFI_REL_OFFSET	rsp,RSP-RIP
-+	/*CFI_REL_OFFSET	rflags,EFLAGS-RIP*/
-+	/*CFI_REL_OFFSET	cs,CS-RIP*/
-+	CFI_REL_OFFSET	rip,RIP-RIP
-+	__swapgs
-+	/*
-+	 * No need to follow this irqs on/off section: the syscall
-+	 * disabled irqs and here we enable it straight after entry:
-+	 */
-+	XEN_UNBLOCK_EVENTS(%r11)
-+	__sti
-+	movq (%rsp),%rcx
-+	movq 8(%rsp),%r11
-+        addq $0x10,%rsp /* skip rcx and r11 */
-+	movl %eax,%eax
-+	pushq %rax
-+	CFI_ADJUST_CFA_OFFSET 8
-+	cld
-+/* 1:	jmp 1b	 */
-+	/* note the registers are not zero extended to the sf.
-+	   this could be a problem. */
-+	SAVE_ARGS 0,0,1
-+	GET_THREAD_INFO(%r10)
-+	orl   $TS_COMPAT,threadinfo_status(%r10)
-+	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
-+	jnz ia32_tracesys
-+ia32_do_syscall:	
-+	cmpl $(IA32_NR_syscalls-1),%eax
-+	ja  ia32_badsys
-+	IA32_ARG_FIXUP
-+	call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
-+ia32_sysret:
-+	movq %rax,RAX-ARGOFFSET(%rsp)
-+	jmp int_ret_from_sys_call 
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++# CONFIG_PCCARD is not set
 +
-+ia32_tracesys:			 
-+	SAVE_REST
-+	movq $-ENOSYS,RAX(%rsp)	/* really needed? */
-+	movq %rsp,%rdi        /* &pt_regs -> arg1 */
-+	call syscall_trace_enter
-+	LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
-+	RESTORE_REST
-+	jmp ia32_do_syscall
-+END(ia32_syscall)
++#
++# PCI Hotplug Support
++#
++# CONFIG_HOTPLUG_PCI is not set
++CONFIG_KERNEL_START=0xc000000000000000
 +
-+ia32_badsys:
-+	movq $0,ORIG_RAX-ARGOFFSET(%rsp)
-+	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
-+	jmp int_ret_from_sys_call
++#
++# Networking
++#
++CONFIG_NET=y
 +
-+quiet_ni_syscall:
-+	movq $-ENOSYS,%rax
-+	ret
-+	CFI_ENDPROC
-+	
-+	.macro PTREGSCALL label, func, arg
-+	.globl \label
-+\label:
-+	leaq \func(%rip),%rax
-+	leaq -ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
-+	jmp  ia32_ptregs_common	
-+	.endm
++#
++# Networking options
++#
++# CONFIG_NETDEBUG is not set
++CONFIG_PACKET=y
++CONFIG_PACKET_MMAP=y
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++CONFIG_IP_ADVANCED_ROUTER=y
++CONFIG_ASK_IP_FIB_HASH=y
++# CONFIG_IP_FIB_TRIE is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_MULTIPLE_TABLES=y
++CONFIG_IP_ROUTE_FWMARK=y
++CONFIG_IP_ROUTE_MULTIPATH=y
++# CONFIG_IP_ROUTE_MULTIPATH_CACHED is not set
++# CONFIG_IP_ROUTE_VERBOSE is not set
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++CONFIG_IP_PNP_RARP=y
++CONFIG_NET_IPIP=y
++# CONFIG_NET_IPGRE is not set
++CONFIG_IP_MROUTE=y
++CONFIG_IP_PIMSM_V1=y
++CONFIG_IP_PIMSM_V2=y
++# CONFIG_ARPD is not set
++CONFIG_SYN_COOKIES=y
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++CONFIG_INET_TUNNEL=y
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_BIC=y
 +
-+	CFI_STARTPROC32
++#
++# IP: Virtual Server Configuration
++#
++# CONFIG_IP_VS is not set
++CONFIG_IPV6=y
++CONFIG_IPV6_PRIVACY=y
++CONFIG_IPV6_ROUTER_PREF=y
++CONFIG_IPV6_ROUTE_INFO=y
++CONFIG_INET6_AH=y
++CONFIG_INET6_ESP=y
++CONFIG_INET6_IPCOMP=y
++CONFIG_INET6_XFRM_TUNNEL=y
++CONFIG_INET6_TUNNEL=y
++CONFIG_INET6_XFRM_MODE_TRANSPORT=y
++CONFIG_INET6_XFRM_MODE_TUNNEL=y
++CONFIG_IPV6_TUNNEL=y
++# CONFIG_NETWORK_SECMARK is not set
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_BRIDGE_NETFILTER=y
 +
-+	PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn, %rdi
-+	PTREGSCALL stub32_sigreturn, sys32_sigreturn, %rdi
-+	PTREGSCALL stub32_sigaltstack, sys32_sigaltstack, %rdx
-+	PTREGSCALL stub32_sigsuspend, sys32_sigsuspend, %rcx
-+	PTREGSCALL stub32_execve, sys32_execve, %rcx
-+	PTREGSCALL stub32_fork, sys_fork, %rdi
-+	PTREGSCALL stub32_clone, sys32_clone, %rdx
-+	PTREGSCALL stub32_vfork, sys_vfork, %rdi
-+	PTREGSCALL stub32_iopl, sys_iopl, %rsi
-+	PTREGSCALL stub32_rt_sigsuspend, sys_rt_sigsuspend, %rdx
++#
++# Core Netfilter Configuration
++#
++CONFIG_NETFILTER_NETLINK=y
++CONFIG_NETFILTER_NETLINK_QUEUE=y
++CONFIG_NETFILTER_NETLINK_LOG=y
++CONFIG_NETFILTER_XTABLES=y
++CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
++CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
++CONFIG_NETFILTER_XT_TARGET_MARK=y
++CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
++CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
++CONFIG_NETFILTER_XT_MATCH_COMMENT=y
++CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
++CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
++CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
++CONFIG_NETFILTER_XT_MATCH_DCCP=y
++CONFIG_NETFILTER_XT_MATCH_ESP=y
++CONFIG_NETFILTER_XT_MATCH_HELPER=y
++CONFIG_NETFILTER_XT_MATCH_LENGTH=y
++CONFIG_NETFILTER_XT_MATCH_LIMIT=y
++CONFIG_NETFILTER_XT_MATCH_MAC=y
++CONFIG_NETFILTER_XT_MATCH_MARK=y
++CONFIG_NETFILTER_XT_MATCH_POLICY=y
++CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
++CONFIG_NETFILTER_XT_MATCH_PHYSDEV=y
++CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
++CONFIG_NETFILTER_XT_MATCH_QUOTA=y
++CONFIG_NETFILTER_XT_MATCH_REALM=y
++CONFIG_NETFILTER_XT_MATCH_SCTP=y
++CONFIG_NETFILTER_XT_MATCH_STATE=y
++CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
++CONFIG_NETFILTER_XT_MATCH_STRING=y
++CONFIG_NETFILTER_XT_MATCH_TCPMSS=y
 +
-+ENTRY(ia32_ptregs_common)
-+	popq %r11
-+	CFI_ENDPROC
-+	CFI_STARTPROC32	simple
-+	CFI_DEF_CFA	rsp,SS+8-ARGOFFSET
-+	CFI_REL_OFFSET	rax,RAX-ARGOFFSET
-+	CFI_REL_OFFSET	rcx,RCX-ARGOFFSET
-+	CFI_REL_OFFSET	rdx,RDX-ARGOFFSET
-+	CFI_REL_OFFSET	rsi,RSI-ARGOFFSET
-+	CFI_REL_OFFSET	rdi,RDI-ARGOFFSET
-+	CFI_REL_OFFSET	rip,RIP-ARGOFFSET
-+/*	CFI_REL_OFFSET	cs,CS-ARGOFFSET*/
-+/*	CFI_REL_OFFSET	rflags,EFLAGS-ARGOFFSET*/
-+	CFI_REL_OFFSET	rsp,RSP-ARGOFFSET
-+/*	CFI_REL_OFFSET	ss,SS-ARGOFFSET*/
-+	SAVE_REST
-+	call *%rax
-+	RESTORE_REST
-+	jmp  ia32_sysret	/* misbalances the return cache */
-+	CFI_ENDPROC
-+END(ia32_ptregs_common)
++#
++# IP: Netfilter Configuration
++#
++CONFIG_IP_NF_CONNTRACK=y
++CONFIG_IP_NF_CT_ACCT=y
++CONFIG_IP_NF_CONNTRACK_MARK=y
++CONFIG_IP_NF_CONNTRACK_EVENTS=y
++CONFIG_IP_NF_CONNTRACK_NETLINK=y
++CONFIG_IP_NF_CT_PROTO_SCTP=y
++CONFIG_IP_NF_FTP=y
++CONFIG_IP_NF_IRC=y
++# CONFIG_IP_NF_NETBIOS_NS is not set
++CONFIG_IP_NF_TFTP=y
++CONFIG_IP_NF_AMANDA=y
++CONFIG_IP_NF_PPTP=y
++# CONFIG_IP_NF_H323 is not set
++# CONFIG_IP_NF_SIP is not set
++# CONFIG_IP_NF_QUEUE is not set
++CONFIG_IP_NF_IPTABLES=y
++CONFIG_IP_NF_MATCH_IPRANGE=y
++CONFIG_IP_NF_MATCH_TOS=y
++CONFIG_IP_NF_MATCH_RECENT=y
++CONFIG_IP_NF_MATCH_ECN=y
++CONFIG_IP_NF_MATCH_DSCP=y
++CONFIG_IP_NF_MATCH_AH=y
++CONFIG_IP_NF_MATCH_TTL=y
++CONFIG_IP_NF_MATCH_OWNER=y
++CONFIG_IP_NF_MATCH_ADDRTYPE=y
++CONFIG_IP_NF_MATCH_HASHLIMIT=y
++CONFIG_IP_NF_FILTER=y
++CONFIG_IP_NF_TARGET_REJECT=y
++CONFIG_IP_NF_TARGET_LOG=y
++CONFIG_IP_NF_TARGET_ULOG=y
++CONFIG_IP_NF_TARGET_TCPMSS=y
++CONFIG_IP_NF_NAT=y
++CONFIG_IP_NF_NAT_NEEDED=y
++CONFIG_IP_NF_TARGET_MASQUERADE=y
++CONFIG_IP_NF_TARGET_REDIRECT=y
++CONFIG_IP_NF_TARGET_NETMAP=y
++CONFIG_IP_NF_TARGET_SAME=y
++CONFIG_IP_NF_NAT_SNMP_BASIC=y
++CONFIG_IP_NF_NAT_IRC=y
++CONFIG_IP_NF_NAT_FTP=y
++CONFIG_IP_NF_NAT_TFTP=y
++CONFIG_IP_NF_NAT_AMANDA=y
++CONFIG_IP_NF_NAT_PPTP=y
++CONFIG_IP_NF_MANGLE=y
++CONFIG_IP_NF_TARGET_TOS=y
++CONFIG_IP_NF_TARGET_ECN=y
++CONFIG_IP_NF_TARGET_DSCP=y
++CONFIG_IP_NF_TARGET_TTL=y
++CONFIG_IP_NF_TARGET_CLUSTERIP=y
++CONFIG_IP_NF_RAW=y
++CONFIG_IP_NF_ARPTABLES=y
++CONFIG_IP_NF_ARPFILTER=y
++CONFIG_IP_NF_ARP_MANGLE=y
 +
-+	.section .rodata,"a"
-+	.align 8
-+ia32_sys_call_table:
-+	.quad sys_restart_syscall
-+	.quad sys_exit
-+	.quad stub32_fork
-+	.quad sys_read
-+	.quad sys_write
-+	.quad compat_sys_open		/* 5 */
-+	.quad sys_close
-+	.quad sys32_waitpid
-+	.quad sys_creat
-+	.quad sys_link
-+	.quad sys_unlink		/* 10 */
-+	.quad stub32_execve
-+	.quad sys_chdir
-+	.quad compat_sys_time
-+	.quad sys_mknod
-+	.quad sys_chmod		/* 15 */
-+	.quad sys_lchown16
-+	.quad quiet_ni_syscall			/* old break syscall holder */
-+	.quad sys_stat
-+	.quad sys32_lseek
-+	.quad sys_getpid		/* 20 */
-+	.quad compat_sys_mount	/* mount  */
-+	.quad sys_oldumount	/* old_umount  */
-+	.quad sys_setuid16
-+	.quad sys_getuid16
-+	.quad compat_sys_stime	/* stime */		/* 25 */
-+	.quad sys32_ptrace	/* ptrace */
-+	.quad sys_alarm
-+	.quad sys_fstat	/* (old)fstat */
-+	.quad sys_pause
-+	.quad compat_sys_utime	/* 30 */
-+	.quad quiet_ni_syscall	/* old stty syscall holder */
-+	.quad quiet_ni_syscall	/* old gtty syscall holder */
-+	.quad sys_access
-+	.quad sys_nice	
-+	.quad quiet_ni_syscall	/* 35 */	/* old ftime syscall holder */
-+	.quad sys_sync
-+	.quad sys32_kill
-+	.quad sys_rename
-+	.quad sys_mkdir
-+	.quad sys_rmdir		/* 40 */
-+	.quad sys_dup
-+	.quad sys32_pipe
-+	.quad compat_sys_times
-+	.quad quiet_ni_syscall			/* old prof syscall holder */
-+	.quad sys_brk		/* 45 */
-+	.quad sys_setgid16
-+	.quad sys_getgid16
-+	.quad sys_signal
-+	.quad sys_geteuid16
-+	.quad sys_getegid16	/* 50 */
-+	.quad sys_acct
-+	.quad sys_umount			/* new_umount */
-+	.quad quiet_ni_syscall			/* old lock syscall holder */
-+	.quad compat_sys_ioctl
-+	.quad compat_sys_fcntl64		/* 55 */
-+	.quad quiet_ni_syscall			/* old mpx syscall holder */
-+	.quad sys_setpgid
-+	.quad quiet_ni_syscall			/* old ulimit syscall holder */
-+	.quad sys32_olduname
-+	.quad sys_umask		/* 60 */
-+	.quad sys_chroot
-+	.quad sys32_ustat
-+	.quad sys_dup2
-+	.quad sys_getppid
-+	.quad sys_getpgrp		/* 65 */
-+	.quad sys_setsid
-+	.quad sys32_sigaction
-+	.quad sys_sgetmask
-+	.quad sys_ssetmask
-+	.quad sys_setreuid16	/* 70 */
-+	.quad sys_setregid16
-+	.quad stub32_sigsuspend
-+	.quad compat_sys_sigpending
-+	.quad sys_sethostname
-+	.quad compat_sys_setrlimit	/* 75 */
-+	.quad compat_sys_old_getrlimit	/* old_getrlimit */
-+	.quad compat_sys_getrusage
-+	.quad sys32_gettimeofday
-+	.quad sys32_settimeofday
-+	.quad sys_getgroups16	/* 80 */
-+	.quad sys_setgroups16
-+	.quad sys32_old_select
-+	.quad sys_symlink
-+	.quad sys_lstat
-+	.quad sys_readlink		/* 85 */
-+#ifdef CONFIG_IA32_AOUT
-+	.quad sys_uselib
-+#else
-+	.quad quiet_ni_syscall
-+#endif
-+	.quad sys_swapon
-+	.quad sys_reboot
-+	.quad compat_sys_old_readdir
-+	.quad sys32_mmap		/* 90 */
-+	.quad sys_munmap
-+	.quad sys_truncate
-+	.quad sys_ftruncate
-+	.quad sys_fchmod
-+	.quad sys_fchown16		/* 95 */
-+	.quad sys_getpriority
-+	.quad sys_setpriority
-+	.quad quiet_ni_syscall			/* old profil syscall holder */
-+	.quad compat_sys_statfs
-+	.quad compat_sys_fstatfs		/* 100 */
-+	.quad sys_ioperm
-+	.quad compat_sys_socketcall
-+	.quad sys_syslog
-+	.quad compat_sys_setitimer
-+	.quad compat_sys_getitimer	/* 105 */
-+	.quad compat_sys_newstat
-+	.quad compat_sys_newlstat
-+	.quad compat_sys_newfstat
-+	.quad sys32_uname
-+	.quad stub32_iopl		/* 110 */
-+	.quad sys_vhangup
-+	.quad quiet_ni_syscall	/* old "idle" system call */
-+	.quad sys32_vm86_warning	/* vm86old */ 
-+	.quad compat_sys_wait4
-+	.quad sys_swapoff		/* 115 */
-+	.quad sys32_sysinfo
-+	.quad sys32_ipc
-+	.quad sys_fsync
-+	.quad stub32_sigreturn
-+	.quad stub32_clone		/* 120 */
-+	.quad sys_setdomainname
-+	.quad sys_uname
-+	.quad sys_modify_ldt
-+	.quad compat_sys_adjtimex
-+	.quad sys32_mprotect		/* 125 */
-+	.quad compat_sys_sigprocmask
-+	.quad quiet_ni_syscall		/* create_module */
-+	.quad sys_init_module
-+	.quad sys_delete_module
-+	.quad quiet_ni_syscall		/* 130  get_kernel_syms */
-+	.quad sys_quotactl
-+	.quad sys_getpgid
-+	.quad sys_fchdir
-+	.quad quiet_ni_syscall	/* bdflush */
-+	.quad sys_sysfs		/* 135 */
-+	.quad sys_personality
-+	.quad quiet_ni_syscall	/* for afs_syscall */
-+	.quad sys_setfsuid16
-+	.quad sys_setfsgid16
-+	.quad sys_llseek		/* 140 */
-+	.quad compat_sys_getdents
-+	.quad compat_sys_select
-+	.quad sys_flock
-+	.quad sys_msync
-+	.quad compat_sys_readv		/* 145 */
-+	.quad compat_sys_writev
-+	.quad sys_getsid
-+	.quad sys_fdatasync
-+	.quad sys32_sysctl	/* sysctl */
-+	.quad sys_mlock		/* 150 */
-+	.quad sys_munlock
-+	.quad sys_mlockall
-+	.quad sys_munlockall
-+	.quad sys_sched_setparam
-+	.quad sys_sched_getparam   /* 155 */
-+	.quad sys_sched_setscheduler
-+	.quad sys_sched_getscheduler
-+	.quad sys_sched_yield
-+	.quad sys_sched_get_priority_max
-+	.quad sys_sched_get_priority_min  /* 160 */
-+	.quad sys_sched_rr_get_interval
-+	.quad compat_sys_nanosleep
-+	.quad sys_mremap
-+	.quad sys_setresuid16
-+	.quad sys_getresuid16	/* 165 */
-+	.quad sys32_vm86_warning	/* vm86 */ 
-+	.quad quiet_ni_syscall	/* query_module */
-+	.quad sys_poll
-+	.quad compat_sys_nfsservctl
-+	.quad sys_setresgid16	/* 170 */
-+	.quad sys_getresgid16
-+	.quad sys_prctl
-+	.quad stub32_rt_sigreturn
-+	.quad sys32_rt_sigaction
-+	.quad sys32_rt_sigprocmask	/* 175 */
-+	.quad sys32_rt_sigpending
-+	.quad compat_sys_rt_sigtimedwait
-+	.quad sys32_rt_sigqueueinfo
-+	.quad stub32_rt_sigsuspend
-+	.quad sys32_pread		/* 180 */
-+	.quad sys32_pwrite
-+	.quad sys_chown16
-+	.quad sys_getcwd
-+	.quad sys_capget
-+	.quad sys_capset
-+	.quad stub32_sigaltstack
-+	.quad sys32_sendfile
-+	.quad quiet_ni_syscall		/* streams1 */
-+	.quad quiet_ni_syscall		/* streams2 */
-+	.quad stub32_vfork            /* 190 */
-+	.quad compat_sys_getrlimit
-+	.quad sys32_mmap2
-+	.quad sys32_truncate64
-+	.quad sys32_ftruncate64
-+	.quad sys32_stat64		/* 195 */
-+	.quad sys32_lstat64
-+	.quad sys32_fstat64
-+	.quad sys_lchown
-+	.quad sys_getuid
-+	.quad sys_getgid		/* 200 */
-+	.quad sys_geteuid
-+	.quad sys_getegid
-+	.quad sys_setreuid
-+	.quad sys_setregid
-+	.quad sys_getgroups	/* 205 */
-+	.quad sys_setgroups
-+	.quad sys_fchown
-+	.quad sys_setresuid
-+	.quad sys_getresuid
-+	.quad sys_setresgid	/* 210 */
-+	.quad sys_getresgid
-+	.quad sys_chown
-+	.quad sys_setuid
-+	.quad sys_setgid
-+	.quad sys_setfsuid		/* 215 */
-+	.quad sys_setfsgid
-+	.quad sys_pivot_root
-+	.quad sys_mincore
-+	.quad sys_madvise
-+	.quad compat_sys_getdents64	/* 220 getdents64 */
-+	.quad compat_sys_fcntl64	
-+	.quad quiet_ni_syscall		/* tux */
-+	.quad quiet_ni_syscall    	/* security */
-+	.quad sys_gettid	
-+	.quad sys_readahead	/* 225 */
-+	.quad sys_setxattr
-+	.quad sys_lsetxattr
-+	.quad sys_fsetxattr
-+	.quad sys_getxattr
-+	.quad sys_lgetxattr	/* 230 */
-+	.quad sys_fgetxattr
-+	.quad sys_listxattr
-+	.quad sys_llistxattr
-+	.quad sys_flistxattr
-+	.quad sys_removexattr	/* 235 */
-+	.quad sys_lremovexattr
-+	.quad sys_fremovexattr
-+	.quad sys_tkill
-+	.quad sys_sendfile64 
-+	.quad compat_sys_futex		/* 240 */
-+	.quad compat_sys_sched_setaffinity
-+	.quad compat_sys_sched_getaffinity
-+	.quad sys32_set_thread_area
-+	.quad sys32_get_thread_area
-+	.quad compat_sys_io_setup	/* 245 */
-+	.quad sys_io_destroy
-+	.quad compat_sys_io_getevents
-+	.quad compat_sys_io_submit
-+	.quad sys_io_cancel
-+	.quad sys_fadvise64		/* 250 */
-+	.quad quiet_ni_syscall 	/* free_huge_pages */
-+	.quad sys_exit_group
-+	.quad sys32_lookup_dcookie
-+	.quad sys_epoll_create
-+	.quad sys_epoll_ctl		/* 255 */
-+	.quad sys_epoll_wait
-+	.quad sys_remap_file_pages
-+	.quad sys_set_tid_address
-+	.quad compat_sys_timer_create
-+	.quad compat_sys_timer_settime	/* 260 */
-+	.quad compat_sys_timer_gettime
-+	.quad sys_timer_getoverrun
-+	.quad sys_timer_delete
-+	.quad compat_sys_clock_settime
-+	.quad compat_sys_clock_gettime	/* 265 */
-+	.quad compat_sys_clock_getres
-+	.quad compat_sys_clock_nanosleep
-+	.quad compat_sys_statfs64
-+	.quad compat_sys_fstatfs64
-+	.quad sys_tgkill		/* 270 */
-+	.quad compat_sys_utimes
-+	.quad sys32_fadvise64_64
-+	.quad quiet_ni_syscall	/* sys_vserver */
-+	.quad sys_mbind
-+	.quad compat_sys_get_mempolicy	/* 275 */
-+	.quad sys_set_mempolicy
-+	.quad compat_sys_mq_open
-+	.quad sys_mq_unlink
-+	.quad compat_sys_mq_timedsend
-+	.quad compat_sys_mq_timedreceive	/* 280 */
-+	.quad compat_sys_mq_notify
-+	.quad compat_sys_mq_getsetattr
-+	.quad compat_sys_kexec_load	/* reserved for kexec */
-+	.quad compat_sys_waitid
-+	.quad quiet_ni_syscall		/* 285: sys_altroot */
-+	.quad sys_add_key
-+	.quad sys_request_key
-+	.quad sys_keyctl
-+	.quad sys_ioprio_set
-+	.quad sys_ioprio_get		/* 290 */
-+	.quad sys_inotify_init
-+	.quad sys_inotify_add_watch
-+	.quad sys_inotify_rm_watch
-+	.quad sys_migrate_pages
-+	.quad compat_sys_openat		/* 295 */
-+	.quad sys_mkdirat
-+	.quad sys_mknodat
-+	.quad sys_fchownat
-+	.quad compat_sys_futimesat
-+	.quad sys32_fstatat		/* 300 */
-+	.quad sys_unlinkat
-+	.quad sys_renameat
-+	.quad sys_linkat
-+	.quad sys_symlinkat
-+	.quad sys_readlinkat		/* 305 */
-+	.quad sys_fchmodat
-+	.quad sys_faccessat
-+	.quad quiet_ni_syscall		/* pselect6 for now */
-+	.quad quiet_ni_syscall		/* ppoll for now */
-+	.quad sys_unshare		/* 310 */
-+	.quad compat_sys_set_robust_list
-+	.quad compat_sys_get_robust_list
-+	.quad sys_splice
-+	.quad sys_sync_file_range
-+	.quad sys_tee
-+	.quad compat_sys_vmsplice
-+	.quad compat_sys_move_pages
-+ia32_syscall_end:		
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/ia32/Makefile tmp-linux-2.6-xen.patch/arch/x86_64/ia32/Makefile
---- pristine-linux-2.6.18.2/arch/x86_64/ia32/Makefile	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/ia32/Makefile	2007-07-30 16:35:11.000000000 +0200
-@@ -27,9 +27,25 @@ quiet_cmd_syscall = SYSCALL $@
- 			   -Wl,-soname=linux-gate.so.1 -o $@ \
- 			   -Wl,-T,$(filter-out FORCE,$^)
++#
++# IPv6: Netfilter Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP6_NF_QUEUE is not set
++CONFIG_IP6_NF_IPTABLES=y
++CONFIG_IP6_NF_MATCH_RT=y
++CONFIG_IP6_NF_MATCH_OPTS=y
++CONFIG_IP6_NF_MATCH_FRAG=y
++CONFIG_IP6_NF_MATCH_HL=y
++CONFIG_IP6_NF_MATCH_OWNER=y
++CONFIG_IP6_NF_MATCH_IPV6HEADER=y
++CONFIG_IP6_NF_MATCH_AH=y
++CONFIG_IP6_NF_MATCH_EUI64=y
++CONFIG_IP6_NF_FILTER=y
++CONFIG_IP6_NF_TARGET_LOG=y
++CONFIG_IP6_NF_TARGET_REJECT=y
++CONFIG_IP6_NF_MANGLE=y
++CONFIG_IP6_NF_TARGET_HL=y
++CONFIG_IP6_NF_RAW=y
++
++#
++# Bridge: Netfilter Configuration
++#
++CONFIG_BRIDGE_NF_EBTABLES=y
++CONFIG_BRIDGE_EBT_BROUTE=y
++CONFIG_BRIDGE_EBT_T_FILTER=y
++CONFIG_BRIDGE_EBT_T_NAT=y
++CONFIG_BRIDGE_EBT_802_3=y
++CONFIG_BRIDGE_EBT_AMONG=y
++CONFIG_BRIDGE_EBT_ARP=y
++CONFIG_BRIDGE_EBT_IP=y
++CONFIG_BRIDGE_EBT_LIMIT=y
++CONFIG_BRIDGE_EBT_MARK=y
++CONFIG_BRIDGE_EBT_PKTTYPE=y
++CONFIG_BRIDGE_EBT_STP=y
++CONFIG_BRIDGE_EBT_VLAN=y
++CONFIG_BRIDGE_EBT_ARPREPLY=y
++CONFIG_BRIDGE_EBT_DNAT=y
++CONFIG_BRIDGE_EBT_MARK_T=y
++CONFIG_BRIDGE_EBT_REDIRECT=y
++CONFIG_BRIDGE_EBT_SNAT=y
++CONFIG_BRIDGE_EBT_LOG=y
++CONFIG_BRIDGE_EBT_ULOG=y
++
++#
++# DCCP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_DCCP is not set
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_SCTP is not set
++
++#
++# TIPC Configuration (EXPERIMENTAL)
++#
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++CONFIG_BRIDGE=y
++CONFIG_VLAN_8021Q=y
++# CONFIG_DECNET is not set
++CONFIG_LLC=y
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_NET_DIVERT is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++
++#
++# QoS and/or fair queueing
++#
++# CONFIG_NET_SCHED is not set
++CONFIG_NET_CLS_ROUTE=y
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_IEEE80211 is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=y
++# CONFIG_DEBUG_DRIVER is not set
++# CONFIG_SYS_HYPERVISOR is not set
++
++#
++# Connector - unified userspace <-> kernelspace linker
++#
++# CONFIG_CONNECTOR is not set
++
++#
++# Memory Technology Devices (MTD)
++#
++# CONFIG_MTD is not set
++
++#
++# Parallel port support
++#
++# CONFIG_PARPORT is not set
++
++#
++# Plug and Play support
++#
++
++#
++# Block devices
++#
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_CPQ_DA is not set
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=10240
++CONFIG_BLK_DEV_INITRD=y
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++CONFIG_IDE=y
++CONFIG_BLK_DEV_IDE=y
++
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++CONFIG_BLK_DEV_IDEDISK=y
++# CONFIG_IDEDISK_MULTI_MODE is not set
++CONFIG_BLK_DEV_IDECD=y
++# CONFIG_BLK_DEV_IDETAPE is not set
++# CONFIG_BLK_DEV_IDEFLOPPY is not set
++# CONFIG_BLK_DEV_IDESCSI is not set
++CONFIG_IDE_TASK_IOCTL=y
++
++#
++# IDE chipset support/bugfixes
++#
++CONFIG_IDE_GENERIC=y
++CONFIG_BLK_DEV_IDEPCI=y
++CONFIG_IDEPCI_SHARE_IRQ=y
++# CONFIG_BLK_DEV_OFFBOARD is not set
++CONFIG_BLK_DEV_GENERIC=y
++# CONFIG_BLK_DEV_OPTI621 is not set
++# CONFIG_BLK_DEV_SL82C105 is not set
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
++CONFIG_IDEDMA_PCI_AUTO=y
++# CONFIG_IDEDMA_ONLYDISK is not set
++# CONFIG_BLK_DEV_AEC62XX is not set
++# CONFIG_BLK_DEV_ALI15X3 is not set
++CONFIG_BLK_DEV_AMD74XX=y
++# CONFIG_BLK_DEV_CMD64X is not set
++# CONFIG_BLK_DEV_TRIFLEX is not set
++# CONFIG_BLK_DEV_CY82C693 is not set
++# CONFIG_BLK_DEV_CS5520 is not set
++# CONFIG_BLK_DEV_CS5530 is not set
++# CONFIG_BLK_DEV_HPT34X is not set
++# CONFIG_BLK_DEV_HPT366 is not set
++# CONFIG_BLK_DEV_SC1200 is not set
++# CONFIG_BLK_DEV_PIIX is not set
++# CONFIG_BLK_DEV_IT821X is not set
++# CONFIG_BLK_DEV_NS87415 is not set
++# CONFIG_BLK_DEV_PDC202XX_OLD is not set
++# CONFIG_BLK_DEV_PDC202XX_NEW is not set
++# CONFIG_BLK_DEV_SVWKS is not set
++# CONFIG_BLK_DEV_SIIMAGE is not set
++# CONFIG_BLK_DEV_SLC90E66 is not set
++# CONFIG_BLK_DEV_TRM290 is not set
++# CONFIG_BLK_DEV_VIA82CXXX is not set
++# CONFIG_IDE_ARM is not set
++CONFIG_BLK_DEV_IDEDMA=y
++# CONFIG_IDEDMA_IVB is not set
++CONFIG_IDEDMA_AUTO=y
++# CONFIG_BLK_DEV_HD is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++# CONFIG_SCSI_PROC_FS is not set
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++CONFIG_BLK_DEV_SR=y
++# CONFIG_BLK_DEV_SR_VENDOR is not set
++CONFIG_CHR_DEV_SG=y
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++# CONFIG_SCSI_MULTI_LUN is not set
++# CONFIG_SCSI_CONSTANTS is not set
++# CONFIG_SCSI_LOGGING is not set
++
++#
++# SCSI Transport Attributes
++#
++CONFIG_SCSI_SPI_ATTRS=y
++CONFIG_SCSI_FC_ATTRS=y
++CONFIG_SCSI_ISCSI_ATTRS=y
++CONFIG_SCSI_SAS_ATTRS=y
++
++#
++# SCSI low-level drivers
++#
++# CONFIG_ISCSI_TCP is not set
++# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
++# CONFIG_SCSI_3W_9XXX is not set
++# CONFIG_SCSI_ACARD is not set
++# CONFIG_SCSI_AACRAID is not set
++# CONFIG_SCSI_AIC7XXX is not set
++# CONFIG_SCSI_AIC7XXX_OLD is not set
++# CONFIG_SCSI_AIC79XX is not set
++# CONFIG_MEGARAID_NEWGEN is not set
++# CONFIG_MEGARAID_LEGACY is not set
++# CONFIG_MEGARAID_SAS is not set
++# CONFIG_SCSI_SATA is not set
++# CONFIG_SCSI_HPTIOP is not set
++# CONFIG_SCSI_BUSLOGIC is not set
++# CONFIG_SCSI_DMX3191D is not set
++# CONFIG_SCSI_EATA is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++# CONFIG_SCSI_GDTH is not set
++# CONFIG_SCSI_IPS is not set
++# CONFIG_SCSI_IBMVSCSI is not set
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++# CONFIG_SCSI_SYM53C8XX_2 is not set
++CONFIG_SCSI_IPR=y
++# CONFIG_SCSI_IPR_TRACE is not set
++# CONFIG_SCSI_IPR_DUMP is not set
++# CONFIG_SCSI_QLOGIC_1280 is not set
++# CONFIG_SCSI_QLA_FC is not set
++# CONFIG_SCSI_LPFC is not set
++# CONFIG_SCSI_DC395x is not set
++# CONFIG_SCSI_DC390T is not set
++# CONFIG_SCSI_DEBUG is not set
++
++#
++# Multi-device support (RAID and LVM)
++#
++# CONFIG_MD is not set
++
++#
++# Fusion MPT device support
++#
++# CONFIG_FUSION is not set
++# CONFIG_FUSION_SPI is not set
++# CONFIG_FUSION_FC is not set
++# CONFIG_FUSION_SAS is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_IEEE1394 is not set
++
++#
++# I2O device support
++#
++# CONFIG_I2O is not set
++
++#
++# Macintosh device drivers
++#
++# CONFIG_WINDFARM is not set
++
++#
++# Network device support
++#
++CONFIG_NETDEVICES=y
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++
++#
++# ARCnet devices
++#
++# CONFIG_ARCNET is not set
++
++#
++# PHY device support
++#
++# CONFIG_PHYLIB is not set
++
++#
++# Ethernet (10 or 100Mbit)
++#
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
++
++#
++# Tulip family network device support
++#
++# CONFIG_NET_TULIP is not set
++# CONFIG_HP100 is not set
++CONFIG_IBMVETH=y
++CONFIG_NET_PCI=y
++# CONFIG_PCNET32 is not set
++CONFIG_AMD8111_ETH=y
++# CONFIG_AMD8111E_NAPI is not set
++# CONFIG_ADAPTEC_STARFIRE is not set
++# CONFIG_B44 is not set
++# CONFIG_FORCEDETH is not set
++# CONFIG_DGRS is not set
++# CONFIG_EEPRO100 is not set
++# CONFIG_E100 is not set
++# CONFIG_FEALNX is not set
++# CONFIG_NATSEMI is not set
++# CONFIG_NE2K_PCI is not set
++# CONFIG_8139CP is not set
++# CONFIG_8139TOO is not set
++# CONFIG_SIS900 is not set
++# CONFIG_EPIC100 is not set
++# CONFIG_SUNDANCE is not set
++# CONFIG_VIA_RHINE is not set
++
++#
++# Ethernet (1000 Mbit)
++#
++# CONFIG_ACENIC is not set
++# CONFIG_DL2K is not set
++CONFIG_E1000=y
++# CONFIG_E1000_NAPI is not set
++# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++CONFIG_SKY2=y
++CONFIG_SK98LIN=y
++# CONFIG_VIA_VELOCITY is not set
++CONFIG_TIGON3=y
++CONFIG_BNX2=y
++# CONFIG_MV643XX_ETH is not set
++
++#
++# Ethernet (10000 Mbit)
++#
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++
++#
++# Token Ring devices
++#
++# CONFIG_TR is not set
++
++#
++# Wireless LAN (non-hamradio)
++#
++# CONFIG_NET_RADIO is not set
++
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_NET_FC is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++
++#
++# ISDN subsystem
++#
++# CONFIG_ISDN is not set
++
++#
++# Telephony Support
++#
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1600
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=1200
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_TSDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++# CONFIG_INPUT_KEYBOARD is not set
++# CONFIG_INPUT_MOUSE is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++# CONFIG_SERIO is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++# CONFIG_VT_HW_CONSOLE_BINDING is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_ICOM is not set
++# CONFIG_SERIAL_JSM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++CONFIG_HVC_DRIVER=y
++CONFIG_HVC_CONSOLE=y
++# CONFIG_HVC_RTAS is not set
++# CONFIG_HVCS is not set
++
++#
++# IPMI
++#
++# CONFIG_IPMI_HANDLER is not set
++
++#
++# Watchdog Cards
++#
++# CONFIG_WATCHDOG is not set
++CONFIG_HW_RANDOM=y
++CONFIG_GEN_RTC=y
++# CONFIG_GEN_RTC_X is not set
++# CONFIG_DTLK is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++
++#
++# Ftape, the floppy tape device driver
++#
++# CONFIG_AGP is not set
++# CONFIG_DRM is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_HANGCHECK_TIMER is not set
++
++#
++# TPM devices
++#
++# CONFIG_TCG_TPM is not set
++# CONFIG_TELCLOCK is not set
++
++#
++# I2C support
++#
++CONFIG_I2C=y
++CONFIG_I2C_CHARDEV=y
++
++#
++# I2C Algorithms
++#
++CONFIG_I2C_ALGOBIT=y
++# CONFIG_I2C_ALGOPCF is not set
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++CONFIG_I2C_AMD8111=y
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_I810 is not set
++# CONFIG_I2C_PIIX4 is not set
++# CONFIG_I2C_NFORCE2 is not set
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_PROSAVAGE is not set
++# CONFIG_I2C_SAVAGE4 is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++# CONFIG_I2C_STUB is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++# CONFIG_I2C_VOODOO3 is not set
++# CONFIG_I2C_PCA_ISA is not set
++
++#
++# Miscellaneous I2C Chip support
++#
++# CONFIG_SENSORS_DS1337 is not set
++# CONFIG_SENSORS_DS1374 is not set
++# CONFIG_SENSORS_EEPROM is not set
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCA9539 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_MAX6875 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++
++#
++# Dallas's 1-wire bus
++#
++
++#
++# Hardware Monitoring support
++#
++# CONFIG_HWMON is not set
++# CONFIG_HWMON_VID is not set
++
++#
++# Misc devices
++#
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++CONFIG_VIDEO_V4L2=y
++
++#
++# Digital Video Broadcasting Devices
++#
++# CONFIG_DVB is not set
++
++#
++# Graphics support
++#
++CONFIG_FIRMWARE_EDID=y
++# CONFIG_FB is not set
++
++#
++# Console display driver support
++#
++# CONFIG_VGA_CONSOLE is not set
++CONFIG_DUMMY_CONSOLE=y
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++
++#
++# USB support
++#
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++# CONFIG_USB is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++
++#
++# MMC/SD Card support
++#
++# CONFIG_MMC is not set
++
++#
++# LED devices
++#
++# CONFIG_NEW_LEDS is not set
++
++#
++# LED drivers
++#
++
++#
++# LED Triggers
++#
++
++#
++# InfiniBand support
++#
++CONFIG_INFINIBAND=y
++CONFIG_INFINIBAND_USER_MAD=y
++CONFIG_INFINIBAND_USER_ACCESS=y
++CONFIG_INFINIBAND_ADDR_TRANS=y
++CONFIG_INFINIBAND_MTHCA=y
++CONFIG_INFINIBAND_MTHCA_DEBUG=y
++CONFIG_INFINIBAND_IPOIB=y
++CONFIG_INFINIBAND_IPOIB_DEBUG=y
++CONFIG_INFINIBAND_IPOIB_DEBUG_DATA=y
++CONFIG_INFINIBAND_SRP=y
++# CONFIG_INFINIBAND_ISER is not set
++
++#
++# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
++#
++
++#
++# Real Time Clock
++#
++# CONFIG_RTC_CLASS is not set
++
++#
++# DMA Engine support
++#
++# CONFIG_DMA_ENGINE is not set
++
++#
++# DMA Clients
++#
++
++#
++# DMA Devices
++#
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++CONFIG_EXT2_FS_XIP=y
++CONFIG_FS_XIP=y
++CONFIG_EXT3_FS=y
++# CONFIG_EXT3_FS_XATTR is not set
++CONFIG_JBD=y
++# CONFIG_JBD_DEBUG is not set
++CONFIG_REISERFS_FS=y
++# CONFIG_REISERFS_CHECK is not set
++CONFIG_REISERFS_PROC_INFO=y
++CONFIG_REISERFS_FS_XATTR=y
++CONFIG_REISERFS_FS_POSIX_ACL=y
++CONFIG_REISERFS_FS_SECURITY=y
++# CONFIG_JFS_FS is not set
++CONFIG_FS_POSIX_ACL=y
++# CONFIG_XFS_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=y
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++# CONFIG_JOLIET is not set
++# CONFIG_ZISOFS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=y
++CONFIG_MSDOS_FS=y
++CONFIG_VFAT_FS=y
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=y
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_ACL_SUPPORT=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++CONFIG_SUNRPC_GSS=y
++CONFIG_RPCSEC_GSS_KRB5=y
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_CIFS_DEBUG2 is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++# CONFIG_9P_FS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++CONFIG_MAC_PARTITION=y
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_BSD_DISKLABEL is not set
++# CONFIG_MINIX_SUBPARTITION is not set
++# CONFIG_SOLARIS_X86_PARTITION is not set
++# CONFIG_UNIXWARE_DISKLABEL is not set
++# CONFIG_LDM_PARTITION is not set
++# CONFIG_SGI_PARTITION is not set
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++# CONFIG_EFI_PARTITION is not set
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="utf-8"
++# CONFIG_NLS_CODEPAGE_437 is not set
++# CONFIG_NLS_CODEPAGE_737 is not set
++# CONFIG_NLS_CODEPAGE_775 is not set
++# CONFIG_NLS_CODEPAGE_850 is not set
++# CONFIG_NLS_CODEPAGE_852 is not set
++# CONFIG_NLS_CODEPAGE_855 is not set
++# CONFIG_NLS_CODEPAGE_857 is not set
++# CONFIG_NLS_CODEPAGE_860 is not set
++# CONFIG_NLS_CODEPAGE_861 is not set
++# CONFIG_NLS_CODEPAGE_862 is not set
++# CONFIG_NLS_CODEPAGE_863 is not set
++# CONFIG_NLS_CODEPAGE_864 is not set
++# CONFIG_NLS_CODEPAGE_865 is not set
++# CONFIG_NLS_CODEPAGE_866 is not set
++# CONFIG_NLS_CODEPAGE_869 is not set
++# CONFIG_NLS_CODEPAGE_936 is not set
++# CONFIG_NLS_CODEPAGE_950 is not set
++# CONFIG_NLS_CODEPAGE_932 is not set
++# CONFIG_NLS_CODEPAGE_949 is not set
++# CONFIG_NLS_CODEPAGE_874 is not set
++# CONFIG_NLS_ISO8859_8 is not set
++# CONFIG_NLS_CODEPAGE_1250 is not set
++# CONFIG_NLS_CODEPAGE_1251 is not set
++# CONFIG_NLS_ASCII is not set
++# CONFIG_NLS_ISO8859_1 is not set
++# CONFIG_NLS_ISO8859_2 is not set
++# CONFIG_NLS_ISO8859_3 is not set
++# CONFIG_NLS_ISO8859_4 is not set
++# CONFIG_NLS_ISO8859_5 is not set
++# CONFIG_NLS_ISO8859_6 is not set
++# CONFIG_NLS_ISO8859_7 is not set
++# CONFIG_NLS_ISO8859_9 is not set
++# CONFIG_NLS_ISO8859_13 is not set
++# CONFIG_NLS_ISO8859_14 is not set
++# CONFIG_NLS_ISO8859_15 is not set
++# CONFIG_NLS_KOI8_R is not set
++# CONFIG_NLS_KOI8_U is not set
++CONFIG_NLS_UTF8=y
++
++#
++# Library routines
++#
++CONFIG_CRC_CCITT=y
++# CONFIG_CRC16 is not set
++CONFIG_CRC32=y
++# CONFIG_LIBCRC32C is not set
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=y
++CONFIG_TEXTSEARCH=y
++CONFIG_TEXTSEARCH_KMP=y
++CONFIG_TEXTSEARCH_BM=y
++CONFIG_TEXTSEARCH_FSM=y
++CONFIG_PLIST=y
++
++#
++# Instrumentation Support
++#
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_MAGIC_SYSRQ=y
++# CONFIG_UNUSED_SYMBOLS is not set
++CONFIG_DEBUG_KERNEL=y
++CONFIG_LOG_BUF_SHIFT=17
++CONFIG_DETECT_SOFTLOCKUP=y
++# CONFIG_SCHEDSTATS is not set
++CONFIG_DEBUG_SLAB=y
++CONFIG_DEBUG_MUTEXES=y
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++CONFIG_DEBUG_SPINLOCK_SLEEP=y
++# CONFIG_DEBUG_KOBJECT is not set
++CONFIG_DEBUG_INFO=y
++CONFIG_DEBUG_FS=y
++# CONFIG_DEBUG_VM is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_RCU_TORTURE_TEST is not set
++CONFIG_DEBUG_STACKOVERFLOW=y
++CONFIG_DEBUG_STACK_USAGE=y
++CONFIG_DEBUGGER=y
++CONFIG_XMON=y
++CONFIG_XMON_DEFAULT=y
++# CONFIG_IRQSTACKS is not set
++CONFIG_BOOTX_TEXT=y
++# CONFIG_PPC_EARLY_DEBUG is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++CONFIG_XEN=y
++CONFIG_XEN_INTERFACE_VERSION=0x00030202
++
++#
++# XEN
++#
++CONFIG_XEN_PRIVILEGED_GUEST=y
++CONFIG_XEN_UNPRIVILEGED_GUEST=y
++CONFIG_XEN_PRIVCMD=y
++CONFIG_XEN_BACKEND=y
++# CONFIG_XEN_PCIDEV_BACKEND is not set
++CONFIG_XEN_BLKDEV_BACKEND=y
++CONFIG_XEN_XENBUS_DEV=y
++CONFIG_XEN_NETDEV_BACKEND=y
++# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
++CONFIG_XEN_NETDEV_LOOPBACK=y
++# CONFIG_XEN_TPMDEV_BACKEND is not set
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++CONFIG_XEN_SCRUB_PAGES=y
++CONFIG_XEN_DISABLE_SERIAL=y
++CONFIG_XEN_SYSFS=y
++# CONFIG_XEN_COMPAT_030002_AND_LATER is not set
++CONFIG_XEN_COMPAT_LATEST_ONLY=y
++# CONFIG_XEN_COMPAT_030002 is not set
++CONFIG_HAVE_ARCH_ALLOC_SKB=y
++CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
++CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
++CONFIG_NO_IDLE_HZ=y
++CONFIG_XEN_DEVMEM=y
++CONFIG_XEN_SKBUFF=y
++CONFIG_XEN_REBOOT=y
++CONFIG_XEN_XENCOMM=y
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_HMAC=y
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++CONFIG_CRYPTO_SHA1=y
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++CONFIG_CRYPTO_DEFLATE=y
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_CRC32C is not set
++# CONFIG_CRYPTO_TEST is not set
++
++#
++# Hardware crypto devices
++#
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/kernel/cpu_setup_power4.S
+--- a/arch/powerpc/kernel/cpu_setup_power4.S	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/powerpc/kernel/cpu_setup_power4.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -73,6 +73,13 @@
+ 	blr
+ 
+ _GLOBAL(__setup_cpu_ppc970)
++	/*
++	 * Do nothing if not running in HV mode
++	 */
++	mfmsr	r0
++	rldicl.	r0,r0,4,63
++	beqlr
++
+ 	mfspr	r0,SPRN_HID0
+ 	li	r11,5			/* clear DOZE and SLEEP */
+ 	rldimi	r0,r11,52,8		/* set NAP and DPM */
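
For readers who don't speak Power assembly: the three instructions added above read the MSR, rotate the HV bit (IBM bit 3, i.e. bit 60 counting from the LSB) into the low position, and return early when it is clear. A rough C restatement, as a sketch only (cpu_in_hv_mode() is a name invented here; mfmsr() is the kernel's own accessor from asm/reg.h):

#include <asm/reg.h>

/* Sketch: C equivalent of the mfmsr/rldicl./beqlr sequence above.
 * rldicl. r0,r0,4,63 rotates MSR left by 4 and keeps only the low
 * bit, which is exactly MSR[HV]. */
static inline int cpu_in_hv_mode(void)
{
	return (mfmsr() >> 60) & 1;
}

This matches the stated intent: on a PPC970 not running in hypervisor mode (e.g. under Xen), the HID0 power-management setup is skipped entirely.
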
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/kernel/prom_init.c
+--- a/arch/powerpc/kernel/prom_init.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/powerpc/kernel/prom_init.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -188,6 +188,7 @@
+ #define PLATFORM_LPAR		0x0001
+ #define PLATFORM_POWERMAC	0x0400
+ #define PLATFORM_GENERIC	0x0500
++#define PLATFORM_GENERIC_XEN	(PLATFORM_GENERIC | PLATFORM_LPAR)
+ 
+ static int __initdata of_platform;
+ 
+@@ -1529,6 +1530,14 @@
+ #ifdef CONFIG_PPC64
+ 	phandle rtas;
+ 	int x;
++#endif
++#ifdef CONFIG_PPC_XEN
++	phandle xen;
++
++	xen = call_prom("finddevice", 1, 1, ADDR("/xen"));
++	if (PHANDLE_VALID(xen)) {
++		return PLATFORM_GENERIC_XEN;
++	}
+ #endif
  
-+$(obj)/vsyscall-int80.so \
- $(obj)/vsyscall-sysenter.so $(obj)/vsyscall-syscall.so: \
- $(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
- 	$(call if_changed,syscall)
+ 	/* Look for a PowerMac */
+@@ -2263,6 +2272,31 @@
+ 	if (RELOC(of_platform) == PLATFORM_PSERIES)
+ 		prom_initialize_tce_table();
+ #endif
++#ifdef CONFIG_PPC_XEN
++	if (RELOC(of_platform) & PLATFORM_LPAR) {
++		phandle xen;
++
++		prom_debug("XXX:checking for Xen OF package\n");
++
++		xen = call_prom("finddevice", 1, 1, ADDR("/xen"));
++		if (PHANDLE_VALID(xen)) {
++			u64 res[2];
++			int l;
++			ulong base;
++
++			l = prom_getprop(xen, "reserved", res, sizeof (res));
++			if (l != sizeof(res)) {
++				prom_panic("Xen reserved prop does not exist\n");
++			}
++			
++			base = alloc_down(res[1], PAGE_SIZE, 0);
++			if (base != res[0]) {
++				prom_panic("XSI != alloc_down()\n");
++			}
++			reserve_mem(res[0], res[1]);
++		}
++	}	
++#endif
  
--AFLAGS_vsyscall-sysenter.o = -m32 -Wa,-32
--AFLAGS_vsyscall-syscall.o = -m32 -Wa,-32
-+AFLAGS_vsyscall-sysenter.o = -m32 -Wa,-32 -Iarch/i386/kernel
-+AFLAGS_vsyscall-syscall.o = -m32 -Wa,-32 -Iarch/i386/kernel
+ 	/*
+ 	 * On non-powermacs, try to instantiate RTAS and puts all CPUs
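
The prom-time probe above keys everything off the presence of a /xen node in the device tree; find_grant_maps() in gnttab.c further down repeats the same probe with the regular OF API. As a minimal post-boot sketch (is_xen_guest() is a name invented here; of_find_node_by_path() and of_node_put() are the standard helpers):

#include <asm/prom.h>

/* Sketch: post-boot counterpart of the prom_init probe above. The
 * presence of the /xen node is what marks a Xen PPC guest. */
static int is_xen_guest(void)
{
	struct device_node *xen = of_find_node_by_path("/xen");

	if (xen == NULL)
		return 0;
	of_node_put(xen);
	return 1;
}
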
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/kernel/setup-common.c
+--- a/arch/powerpc/kernel/setup-common.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/powerpc/kernel/setup-common.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -387,6 +387,12 @@
+ 		}
+ 	}
+ 
++	if (machine_is(xen)) {
++		/* something more intelligent perhaps? */
++		for (cpu = 0; cpu < NR_CPUS; cpu++)
++			cpu_set(cpu, cpu_possible_map);
++	}
++
+ #ifdef CONFIG_PPC64
+ 	/*
+ 	 * On pSeries LPAR, we need to know how many cpus
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/kernel/udbg.c
+--- a/arch/powerpc/kernel/udbg.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/powerpc/kernel/udbg.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -45,6 +45,9 @@
+ #elif defined(CONFIG_PPC_EARLY_DEBUG_ISERIES)
+ 	/* For iSeries - hit Ctrl-x Ctrl-x to see the output */
+ 	udbg_init_iseries();
++#elif defined(CONFIG_PPC_EARLY_DEBUG_XEN_DOM0) || \
++	defined(CONFIG_PPC_EARLY_DEBUG_XEN_DOMU)
++	udbg_init_xen();
+ #endif
+ }
+ 
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/kernel/vdso.c
+--- a/arch/powerpc/kernel/vdso.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/powerpc/kernel/vdso.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -282,6 +282,13 @@
+ 	 * pages though
+ 	 */
+ 	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC;
++	/*
++	 * Make sure the vDSO gets into every core dump.
++	 * Dumping its contents makes post-mortem fully interpretable later
++	 * without matching up the same kernel and hardware config to see
++	 * what PC values meant.
++	 */
++	vma->vm_flags |= VM_ALWAYSDUMP;
+ 	vma->vm_flags |= mm->def_flags;
+ 	vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
+ 	vma->vm_ops = &vdso_vmops;
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/mm/slb_low.S
+--- a/arch/powerpc/mm/slb_low.S	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/powerpc/mm/slb_low.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -51,6 +51,23 @@
+ 	*/
+ 	bne	cr7,1f
+ 
++#ifdef CONFIG_PPC_XEN
++_GLOBAL(slb_miss_kernel_load_xen_nop)
++	b 3f
++	/* Need to check if it is in the part of our XEN Foreign Map */
++	rldicl	r9,r3,30,63		/* get Xen region */
++	cmpldi	cr7,r9,1		/* cmp this bit set to 1 */
++	bne	cr7,3f
++	/* Xen linear mapping encoding bits: the "li" instruction below
++	 * could be patched (like the rest of the linear map) if we
++	 * ever wish to map anything other than 4K pages in
++	 * this region; right now zero is fine.
++	 */
++_GLOBAL(slb_miss_kernel_load_xen_linear)
++	li	r11,0
++	b	slb_finish_load
++3:
++#endif
+ 	/* Linear mapping encoding bits, the "li" instruction below will
+ 	 * be patched by the kernel at boot
+ 	 */
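
The new SLB fast path asks a single question: is this effective address inside the Xen foreign map? The rldicl above answers it by extracting one address bit. Restated in C (a sketch; ea_in_xen_foreign_map() is a name invented here, and bit 34 is the value find_grant_maps() later insists on via its expect = 34 - PAGE_SHIFT check):

/* Sketch: the region test performed by rldicl r9,r3,30,63 above.
 * Rotating the EA left by 30 and masking the low bit yields EA
 * bit 34, which marks the Xen foreign-map region. */
static inline int ea_in_xen_foreign_map(unsigned long ea)
{
	return (ea >> 34) & 1;
}
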
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/platforms/Makefile
+--- a/arch/powerpc/platforms/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/powerpc/platforms/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -12,6 +12,8 @@
+ obj-$(CONFIG_PPC_86xx)		+= 86xx/
+ obj-$(CONFIG_PPC_PSERIES)	+= pseries/
+ obj-$(CONFIG_PPC_ISERIES)	+= iseries/
++# must occur before xen hosting platforms
++obj-$(CONFIG_PPC_XEN)		+= xen/
+ obj-$(CONFIG_PPC_MAPLE)		+= maple/
+ obj-$(CONFIG_PPC_CELL)		+= cell/
+ obj-$(CONFIG_EMBEDDED6xx)	+= embedded6xx/
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/platforms/pseries/iommu.c
+--- a/arch/powerpc/platforms/pseries/iommu.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/powerpc/platforms/pseries/iommu.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -531,6 +531,17 @@
+ 	 * already allocated.
+ 	 */
+ 	dn = pci_device_to_OF_node(dev);
++	if (dn == NULL) {
++#ifdef CONFIG_PPC_XEN
++		/* this becomes possible for Xen Dom0 */
++		DBG("%s, dev %p (%s) has no OF devtree entree\n", __func__,
++		    dev, pci_name(dev));
++		return;
++#else
++		panic("%s, dev %p (%s) has no OF devtree entree\n", __func__,
++		      dev, pci_name(dev));
++#endif
++	}
+ 
+ 	for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
+ 	     pdn = pdn->parent) {
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/platforms/xen/Makefile
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/powerpc/platforms/xen/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,20 @@
++obj-y	+= gnttab.o
++obj-y	+= hcall.o
++obj-y	+= reboot.o
++obj-y	+= setup.o
++obj-y	+= smp.o
++obj-y	+= time.o
++obj-y	+= udbg_xen.o
++obj-y	+= xen_guest.o
++obj-y	+= xencomm.o
++
++# we need the latest __XEN_INTERFACE_VERSION__ (see xen-compat.h)
++CFLAGS_hcall.o += -D__XEN_TOOLS__
++
++ifndef CONFIG_XEN_BALLOON
++obj-y += balloon.o
++endif
++
++ifndef CONFIG_XEN_UTIL
++obj-y	+= util.o
++endif
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/platforms/xen/balloon.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/powerpc/platforms/xen/balloon.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,82 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Jimi Xenidis <jimix at watson.ibm.com>
++ */
++
++#include <linux/module.h>
++#include <linux/mm.h>
++#include <asm/hypervisor.h>
++#include "setup.h"
++
++/*
++ * FIXME: Port balloon driver, if ever
++ */
++
++struct page **alloc_empty_pages_and_pagevec(int nr_pages)
++{
++	struct page *page, **pagevec;
++	int i;
++
++	pagevec = kmalloc(sizeof(*pagevec) * nr_pages, GFP_KERNEL);
++	if (pagevec == NULL)
++		return  NULL;
++
++	for (i = 0; i < nr_pages; i++) {
++		page = alloc_foreign_page();
++		BUG_ON(page == NULL);
++		pagevec[i] = page;
++		/* There is no real page backing us yet so it cannot
++		 * be scrubbed */
++	}
++
++	return pagevec;
++}
++
++void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
++{
++	int i;
++
++	if (pagevec == NULL)
++		return;
++
++	for (i = 0; i < nr_pages; i++) {
++		free_foreign_page(pagevec[i]);
++	}
++	
++	kfree(pagevec);
++}
++
++void balloon_dealloc_empty_page_range(
++	struct page *page, unsigned long nr_pages)
++{
++	__free_pages(page, get_order(nr_pages * PAGE_SIZE));
++}
++
++void balloon_update_driver_allowance(long delta)
++{
++}
++
++void balloon_release_driver_page(struct page *page)
++{
++	BUG();
++}
++
++EXPORT_SYMBOL_GPL(balloon_update_driver_allowance);
++EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec);
++EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec);
++EXPORT_SYMBOL_GPL(balloon_release_driver_page);
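
Because ballooning proper is stubbed out, alloc_empty_pages_and_pagevec() and its partner are the whole interface other code sees. A usage sketch (hypothetical caller; on other architectures the usual consumers are the network and block backend drivers):

/* Sketch: borrow 16 foreign page slots for grant mappings, then
 * return them. The pages come from the foreign-map region that
 * gnttab.c (below) registers with the memory hotplug code. */
struct page **pv = alloc_empty_pages_and_pagevec(16);

if (pv != NULL) {
	/* ... map grant references onto these pages ... */
	free_empty_pages_and_pagevec(pv, 16);
}
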
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/platforms/xen/gnttab.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/powerpc/platforms/xen/gnttab.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,468 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Jimi Xenidis <jimix at watson.ibm.com>
++ */
++
++#include <linux/config.h>
++#include <linux/vmalloc.h>
++#include <linux/memory_hotplug.h>
++#include <xen/gnttab.h>
++#include <asm/hypervisor.h>
++#include <xen/interface/grant_table.h>
++#include <asm/pgtable.h>
++#include <asm/sections.h>
++#include <asm/io.h>
++#include <asm/machdep.h>
++#include <asm/prom.h>
++#include <asm/cacheflush.h>
++#include "setup.h"
++#include "../pseries/plpar_wrappers.h"
++
++#undef DEBUG
++
++#ifdef DEBUG
++#define DBG(fmt...) printk(KERN_EMERG fmt)
++#else
++#define DBG(fmt...)
++#endif
++
++#define NR_GRANT_FRAMES 4
++
++struct address_space xen_foreign_dummy_mapping;
++
++static ulong foreign_map_pfn;
++static ulong foreign_map_pgs;
++static unsigned long *foreign_map_bitmap;
++
++
++/* hijack _mapcount */
++static inline int gnt_mapcount(struct page *page)
++{
++	return atomic_read(&(page)->_mapcount) + 1;
++}
++
++static inline int gnt_map(struct page *page)
++{
++	/* return true if this is the transition from -1 to 0 */
++	return atomic_inc_and_test(&page->_mapcount);
++}
++
++static inline int gnt_unmap(struct page *page)
++{
++	int val;
++
++	val = atomic_dec_return(&page->_mapcount);
++	if (val < -1) {
++		atomic_inc(&page->_mapcount);
++		printk(KERN_EMERG "%s: %d\n", __func__, val);
++	}
++
++	return (val == -1);
++}
++
++
++static long map_to_linear(ulong paddr)
++{
++	unsigned long vaddr;
++	int psize;
++	unsigned long mode;
++	int slot;
++	uint shift;
++	unsigned long tmp_mode;
++
++	psize = MMU_PAGE_4K;
++	shift = mmu_psize_defs[psize].shift;
++	mode = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
++	vaddr = (ulong)__va(paddr);
++
++	{
++		unsigned long vpn, hash, hpteg;
++		unsigned long vsid = get_kernel_vsid(vaddr);
++		unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
++
++		vpn = va >> shift;
++		tmp_mode = mode;
++		
++		/* Make non-kernel text non-executable */
++		if (!in_kernel_text(vaddr))
++			tmp_mode = mode | HPTE_R_N;
++
++		hash = hpt_hash(va, shift);
++		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
++
++		BUG_ON(!ppc_md.hpte_insert);
++		slot = ppc_md.hpte_insert(hpteg, va, paddr,
++					  tmp_mode, HPTE_V_BOLTED, psize);
++		if (slot < 0)
++			printk(KERN_EMERG
++			       "%s: no more bolted entries "
++			       "HTAB[0x%lx]: 0x%lx\n",
++			       __func__, hpteg, paddr);
++	}
++	return slot;
++}
++
++static unsigned long get_hpte_vsid(ulong slot)
++{
++ 	unsigned long dword0;
++	unsigned long lpar_rc;
++	unsigned long dummy_word1;
++	unsigned long flags;
++
++	/* Read 1 pte at a time                        */
++	/* Do not need RPN to logical page translation */
++	/* No cross CEC PFT access                     */
++	flags = 0;
++
++	lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1);
++
++	BUG_ON(lpar_rc != H_SUCCESS);
++
++	return dword0;
++}
++
++static long find_hpte_slot(unsigned long va, int psize)
++{
++	unsigned long hash;
++	unsigned long i, j;
++	long slot;
++	unsigned long want_v, hpte_v;
++
++	hash = hpt_hash(va, mmu_psize_defs[psize].shift);
++	want_v = hpte_encode_v(va, psize);
++
++	for (j = 0; j < 2; j++) {
++		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
++		for (i = 0; i < HPTES_PER_GROUP; i++) {
++			hpte_v = get_hpte_vsid(slot);
++
++			if (HPTE_V_COMPARE(hpte_v, want_v)
++			    && (hpte_v & HPTE_V_VALID)
++			    && (!!(hpte_v & HPTE_V_SECONDARY) == j)) {
++				/* HPTE matches */
++				if (j)
++					slot = -slot;
++				return slot;
++			}
++			++slot;
++		}
++		hash = ~hash;
++	}
++
++	return -1;
++} 
++
++static long find_map_slot(ulong ea)
++{
++	int psize = MMU_PAGE_4K;
++	ulong vsid;
++	ulong va;
++
++	vsid = get_kernel_vsid(ea);
++	va = (vsid << 28) | (ea & 0x0fffffff);
++	
++	return find_hpte_slot(va, psize);
++}
++
++
++static void gnttab_pre_unmap_grant_ref(
++	struct gnttab_unmap_grant_ref *unmap, int count)
++{
++	long slot;
++	int i;
++	ulong ea;
++	unsigned long dummy1, dummy2;
++	ulong flags;
++
++	/* paranoia */
++	local_irq_save(flags);
++
++	for (i = 0 ; i < count; i++) {
++		struct page *page;
++
++		ea = (ulong)__va(unmap[i].host_addr);
++		page = virt_to_page(ea);
++		
++		if (!gnt_unmap(page)) {
++			DBG("%s[0x%x]: skip: 0x%lx, mapcount 0x%x\n",
++			    __func__, i, ea, gnt_mapcount(page));
++			continue;
++		}
++		slot = find_map_slot(ea);
++		if (slot < 0) {
++			printk(KERN_EMERG "%s: PTE not found: 0x%lx\n",
++			       __func__, ea);
++			continue;
++		}
++
++		DBG("%s[0x%x]: 0x%lx: mapcount: 0x%x\n",
++		    __func__, i, ea, gnt_mapcount(page));
++		plpar_pte_remove(0, slot, 0, &dummy1, &dummy2);
++	}
++	local_irq_restore(flags);
++}
++
++static void gnttab_post_map_grant_ref(
++	struct gnttab_map_grant_ref *map, int count)
++{
++	int i;
++	long slot;
++	ulong flags;
++
++	/* paranoia */
++	local_irq_save(flags);
++
++	for (i = 0 ; i < count; i++) {
++		ulong pa = map[i].host_addr;
++		struct page *page;
++
++		if (map[i].status != GNTST_okay) {
++			printk(KERN_EMERG "%s: status, skip\n", __func__);
++			continue;
++		}
++
++		BUG_ON(pa < (foreign_map_pfn << PAGE_SHIFT));
++		BUG_ON(pa >= (foreign_map_pfn << PAGE_SHIFT) + 
++		       (foreign_map_pgs << PAGE_SHIFT));
++
++		page = virt_to_page(__va(pa));
++
++		if (gnt_map(page)) {
++#ifdef DEBUG			
++			/* we need to get smarter than this */
++			slot = find_map_slot((ulong)__va(pa));
++			if (slot >= 0) {
++				DBG("%s: redundant 0x%lx\n", __func__, pa);
++				continue;
++			}
++#endif
++			slot = map_to_linear(pa);
++			DBG("%s[0x%x]: 0x%lx, mapcount:0x%x\n",
++			    __func__, i, pa, gnt_mapcount(page));
++
++		} else {
++			DBG("%s[0x%x] skip 0x%lx, mapcount:0x%x\n",
++			    __func__, i, pa, gnt_mapcount(page));
++		}
++	}
++	local_irq_restore(flags);
++}
++
++int HYPERVISOR_grant_table_op(unsigned int cmd, void *op, unsigned int count)
++{
++	void *desc;
++	void *frame_list = NULL;
++	int argsize;
++	int ret = -ENOMEM;
++
++	switch (cmd) {
++	case GNTTABOP_map_grant_ref:
++		argsize = sizeof(struct gnttab_map_grant_ref);
++		break;
++	case GNTTABOP_unmap_grant_ref:
++		gnttab_pre_unmap_grant_ref(op, count);
++		argsize = sizeof(struct gnttab_unmap_grant_ref);
++		break;
++	case GNTTABOP_setup_table: {
++		struct gnttab_setup_table setup;
++
++		memcpy(&setup, op, sizeof(setup));
++		argsize = sizeof(setup);
++
++		frame_list = xencomm_map(
++			xen_guest_handle(setup.frame_list),
++			(sizeof(*xen_guest_handle(setup.frame_list)) 
++			* setup.nr_frames));
++
++		if (frame_list == NULL)
++			return -ENOMEM;
++
++		set_xen_guest_handle(setup.frame_list, frame_list);
++		memcpy(op, &setup, sizeof(setup));
++		}
++		break;
++	case GNTTABOP_dump_table:
++		argsize = sizeof(struct gnttab_dump_table);
++		break;
++	case GNTTABOP_transfer:
++		BUG();
++		argsize = sizeof(struct gnttab_transfer);
++		break;
++	case GNTTABOP_copy:
++		argsize = sizeof(struct gnttab_copy);
++		break;
++	case GNTTABOP_query_size:
++		argsize = sizeof(struct gnttab_query_size);
++		break;
++	default:
++		printk(KERN_EMERG "%s: unknown grant table op %d\n",
++		       __func__, cmd);
++		return -ENOSYS;
++	}
++
++	desc = xencomm_map_no_alloc(op, argsize);
++	if (desc) {
++		ret = plpar_hcall_norets(XEN_MARK(__HYPERVISOR_grant_table_op),
++					 cmd, desc, count);
++		if (!ret && cmd == GNTTABOP_map_grant_ref)
++			gnttab_post_map_grant_ref(op, count);
++		xencomm_free(desc);
++	}
++	xencomm_free(frame_list);
++
++	return ret;
++}
++EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
++
++static ulong find_grant_maps(void)
++{
++	struct device_node *xen;
++	u64 *gm;
++	u64 _gm[2];
++	u64 expect;
++
++	/* This value is currently hardcoded into the SLB logic, which
++	 * is written in assembler; see slb_miss_kernel_load_xen_linear
++	 * for more information.
++	 * Anything else and we cannot run. */
++	expect = 34 - PAGE_SHIFT;
++
++	xen = of_find_node_by_path("/xen");
++
++	/*
++	 * The foreign map is 2x2 cells.
++	 * The first entry is log2 of the base page frame.
++	 * The second is the number of pages.
++	 */
++	gm = (u64 *)get_property(xen, "foreign-map", NULL);
++	if (gm == NULL) {
++		if (!is_initial_xendomain()) {
++			printk("OF: /xen/foreign-map not present\n");
++			_gm[0] = expect;
++			_gm[1] = 2048;
++			gm = _gm;
++		} else
++			panic("OF: /xen/foreign-map must be present\n");
++	}
++
++	if (gm[0] != expect)
++		panic("foreign-map is 0x%lx, expect 0x%lx\n",
++		      gm[0], expect);
++
++	foreign_map_pfn = 1UL << gm[0];
++	return gm[1];
++}
++
++static void setup_foreign_segment(void)
++{
++	extern int *slb_miss_kernel_load_xen_nop;
++	ulong iaddr = (ulong)slb_miss_kernel_load_xen_nop;
++
++	/* By default Linux branches around this logic; we replace
++	 * the branch with a NOP to turn the logic on. */
++	*slb_miss_kernel_load_xen_nop = 0x60000000;
++	flush_icache_range(iaddr, iaddr + 4);
++}
++
++struct page *alloc_foreign_page(void)
++{
++	ulong bit;
++	do {
++		bit = find_first_zero_bit(foreign_map_bitmap,
++					  foreign_map_pgs);
++		if (bit >= foreign_map_pgs)
++			return NULL;
++	} while (test_and_set_bit(bit, foreign_map_bitmap) == 1);
++
++	return pfn_to_page(foreign_map_pfn + bit);
++}
++
++void free_foreign_page(struct page *page)
++{
++	ulong bit = page_to_pfn(page) - foreign_map_pfn;
++
++	BUG_ON(bit >= foreign_map_pgs);
++	BUG_ON(!test_bit(bit, foreign_map_bitmap));
++
++	clear_bit(bit, foreign_map_bitmap);
++}
++
++static void setup_grant_area(void)
++{
++	ulong pgs;
++	int err;
++	struct zone *zone;
++	struct pglist_data *pgdata;
++	int nid;
++
++	pgs = find_grant_maps();
++	setup_foreign_segment();
++
++	printk("%s: Xen VIO will use a foreign address space of 0x%lx pages\n",
++	       __func__, pgs);
++
++	/* add pages to the zone */
++	nid = 0;
++	pgdata = NODE_DATA(nid);
++	zone = pgdata->node_zones;
++
++	err = __add_pages(zone, foreign_map_pfn, pgs);
++
++	if (err < 0) {
++		printk(KERN_EMERG "%s: add_pages(0x%lx, 0x%lx) = %d\n",
++		       __func__, foreign_map_pfn, pgs, err);
++		BUG();
++	}
++
++	/* create a bitmap to manage these pages */
++	foreign_map_bitmap = kmalloc(BITS_TO_LONGS(pgs) * sizeof(long),
++				     GFP_KERNEL);
++	if (foreign_map_bitmap == NULL) {
++		printk(KERN_EMERG 
++		       "%s: could not allocate foreign_map_bitmap to "
++		       "manage 0x%lx foreign pages\n", __func__, pgs);
++		BUG();
++	}
++	/* I'm paranoid, so set the unused top bits up front so we
++	 * never give them away */
++	bitmap_fill(&foreign_map_bitmap[BITS_TO_LONGS(pgs) - 1],
++		    BITS_PER_LONG);
++	/* now clear all the real bits */
++	bitmap_zero(foreign_map_bitmap, pgs);
++
++	foreign_map_pgs = pgs;
++}
++
++void *arch_gnttab_alloc_shared(unsigned long *frames)
++{
++	void *shared;
++	ulong pa = frames[0] << PAGE_SHIFT;
++	static int resume;
++
++	shared = ioremap(pa, PAGE_SIZE * NR_GRANT_FRAMES);
++	BUG_ON(shared == NULL);
++	printk("%s: grant table at %p\n", __func__, shared);
++
++	/* no need to do the rest of this if we are resuming */
++	if (!resume)
++		setup_grant_area();
++
++	resume = 1;
++
++	return shared;
++}
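
Two patterns in this file are worth distilling. First, alloc_foreign_page()/free_foreign_page() form a tiny lock-free bitmap allocator: find a clear bit, claim it atomically, retry if another CPU won the race. In generic form (a sketch; bitmap_alloc_slot() is a name invented here):

/* Sketch: the allocation idiom used by alloc_foreign_page() above. */
static long bitmap_alloc_slot(unsigned long *bitmap, unsigned long nbits)
{
	unsigned long bit;

	do {
		bit = find_first_zero_bit(bitmap, nbits);
		if (bit >= nbits)
			return -1;	/* all slots taken */
		/* test_and_set_bit() returns the old bit value; 1 means
		 * another CPU claimed the slot first, so retry. */
	} while (test_and_set_bit(bit, bitmap) == 1);

	return bit;
}

Second, HYPERVISOR_grant_table_op() previews the shape of every wrapper in hcall.c below: wrap the argument struct in a xencomm descriptor, issue the hcall, free the descriptor. Stripped to a skeleton (a sketch; xen_hcall_one_arg() is a name invented here, the rest are calls this patch itself defines):

/* Sketch: the recurring xencomm wrapping pattern. */
static int xen_hcall_one_arg(unsigned long nr, int cmd, void *arg, size_t len)
{
	struct xencomm_handle *desc = xencomm_map_no_alloc(arg, len);
	int rc;

	if (desc == NULL)
		return -EINVAL;
	rc = plpar_hcall_norets(XEN_MARK(nr), cmd, desc);
	xencomm_free(desc);
	return rc;
}
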
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/platforms/xen/hcall.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/powerpc/platforms/xen/hcall.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,891 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006, 2007
++ *
++ * Authors: Hollis Blanchard <hollisb at us.ibm.com>
++ */
++
++#include <linux/types.h>
++#include <linux/errno.h>
++#include <linux/kernel.h>
++#include <linux/gfp.h>
++#include <linux/module.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/domctl.h>
++#include <xen/interface/sysctl.h>
++#include <xen/interface/platform.h>
++#include <xen/interface/memory.h>
++#include <xen/interface/xencomm.h>
++#include <xen/interface/version.h>
++#include <xen/interface/sched.h>
++#include <xen/interface/event_channel.h>
++#include <xen/interface/physdev.h>
++#include <xen/interface/vcpu.h>
++#include <xen/interface/xsm/acm_ops.h>
++#include <xen/interface/kexec.h>
++#include <xen/public/privcmd.h>
++#include <asm/hypercall.h>
++#include <asm/page.h>
++#include <asm/uaccess.h>
++#include <asm/hvcall.h>
++#include "setup.h"
++
++/* Xencomm notes:
++ *
++ * For kernel memory, we assume that virtually contiguous pages are also
++ * physically contiguous. This allows us to avoid creating descriptors for
++ * kernel hypercalls, such as console and event channel operations.
++ *
++ * In general, we need a xencomm descriptor to cover the top-level data
++ * structure (e.g. the domctl op), plus another for every embedded pointer to
++ * another data structure (i.e. for every GUEST_HANDLE).
++ */
++
++int HYPERVISOR_console_io(int cmd, int count, char *str)
++{
++	struct xencomm_handle *desc;
++	int rc;
++
++	desc = xencomm_map_no_alloc(str, count);
++	if (desc == NULL)
++		return -EINVAL;
++
++	rc = plpar_hcall_norets(XEN_MARK(__HYPERVISOR_console_io),
++				  cmd, count, desc);
++
++	xencomm_free(desc);
++
++	return rc;
++}
++EXPORT_SYMBOL(HYPERVISOR_console_io);
++
++int HYPERVISOR_event_channel_op(int cmd, void *op)
++{
++	int rc;
++
++	struct xencomm_handle *desc =
++		xencomm_map_no_alloc(op, sizeof(evtchn_op_t));
++	if (desc == NULL)
++		return -EINVAL;
++
++	rc = plpar_hcall_norets(XEN_MARK(__HYPERVISOR_event_channel_op),
++				cmd, desc);
++
++	xencomm_free(desc);
++
++	return rc;
++
++}
++EXPORT_SYMBOL(HYPERVISOR_event_channel_op);
++
++int HYPERVISOR_xen_version(int cmd, void *arg)
++{
++	struct xencomm_handle *desc;
++	const unsigned long hcall = __HYPERVISOR_xen_version;
++	int argsize;
++	int rc;
++
++	switch (cmd) {
++	case XENVER_version:
++		/* do not actually pass an argument */
++		return plpar_hcall_norets(XEN_MARK(hcall), cmd, 0);
++	case XENVER_extraversion:
++		argsize = sizeof(xen_extraversion_t);
++		break;
++	case XENVER_compile_info:
++		argsize = sizeof(xen_compile_info_t);
++		break;
++	case XENVER_capabilities:
++		argsize = sizeof(xen_capabilities_info_t);
++		break;
++	case XENVER_changeset:
++		argsize = sizeof(xen_changeset_info_t);
++		break;
++	case XENVER_platform_parameters:
++		argsize = sizeof(xen_platform_parameters_t);
++		break;
++	case XENVER_pagesize:
++		if (arg == NULL)
++			argsize = 0;
++		else
++			argsize = sizeof(void *);
++		break;
++	case XENVER_get_features:
++		argsize = sizeof(xen_feature_info_t);
++		break;
++	default:
++		printk(KERN_ERR "%s: unknown version cmd %d\n", __func__, cmd);
++		return -ENOSYS;
++	}
++
++	/* desc could be NULL in the case of XENVER_pagesize with NULL arg */
++	desc = xencomm_map(arg, argsize);
++
++	rc = plpar_hcall_norets(XEN_MARK(hcall), cmd, desc);
++
++	xencomm_free(desc);
++
++	return rc;
++}
++EXPORT_SYMBOL(HYPERVISOR_xen_version);
++
++
++int HYPERVISOR_physdev_op(int cmd, void *op)
++{
++	struct xencomm_handle *desc =
++		xencomm_map_no_alloc(op, sizeof(physdev_op_t));
++	int rc;
++
++	if (desc == NULL)
++		return -EINVAL;
++
++	rc = plpar_hcall_norets(XEN_MARK(__HYPERVISOR_physdev_op),
++				cmd, desc);
++
++	xencomm_free(desc);
++
++	return rc;
++}
++EXPORT_SYMBOL(HYPERVISOR_physdev_op);
++
++int HYPERVISOR_sched_op(int cmd, void *arg)
++{
++	int argsize = 0;
++	int rc = -EINVAL;
++	struct xencomm_handle *desc;
++	struct xencomm_handle *ports = NULL;
++
++	switch (cmd) {
++	case SCHEDOP_yield:
++	case SCHEDOP_block:
++		return plpar_hcall_norets(XEN_MARK(__HYPERVISOR_sched_op),
++					  cmd, 0);
++		break;
++
++	case SCHEDOP_poll: {
++		struct sched_poll sched_poll;
++
++		argsize = sizeof(struct sched_poll);
++
++		memcpy(&sched_poll, arg, sizeof(sched_poll));
++
++		ports = xencomm_map(
++				xen_guest_handle(sched_poll.ports),
++				(sizeof(evtchn_port_t) * sched_poll.nr_ports));
++
++		if (ports == NULL)
++			return -ENOMEM;
++
++		set_xen_guest_handle(sched_poll.ports, (evtchn_port_t *)ports);
++		memcpy(arg, &sched_poll, sizeof(sched_poll));
++
++		}
++		break;
++	case SCHEDOP_shutdown:
++		argsize = sizeof(struct sched_shutdown);
++		break;
++	case SCHEDOP_remote_shutdown:
++		argsize = sizeof(struct sched_remote_shutdown);
++		break;
++	default:
++		printk(KERN_ERR "%s: unknown sched op %d\n", __func__, cmd);
++		return -ENOSYS;
++	}
++
++	desc = xencomm_map_no_alloc(arg, argsize);
++	if (desc) {
++		rc = plpar_hcall_norets(XEN_MARK(__HYPERVISOR_sched_op),
++					cmd, desc);
++		xencomm_free(desc);
++	}
++
++	xencomm_free(ports);
++
++	return rc;
++}
++EXPORT_SYMBOL(HYPERVISOR_sched_op);
++
++int HYPERVISOR_suspend(unsigned long srec)
++{
++	int cmd = SCHEDOP_shutdown;
++	struct sched_shutdown sched_shutdown = {
++		.reason = SHUTDOWN_suspend,
++	};
++	struct xencomm_handle *desc;
++
++	desc = xencomm_map_no_alloc(&sched_shutdown, sizeof(struct sched_shutdown));
++
++	return plpar_hcall_norets(XEN_MARK(__HYPERVISOR_sched_op),
++				  cmd, desc, srec);
++}
++EXPORT_SYMBOL(HYPERVISOR_suspend);
++
++int HYPERVISOR_kexec_op(unsigned long op, void *args)
++{
++	unsigned long argsize;
++	struct xencomm_handle *desc;
++
++	switch (op) {
++		case KEXEC_CMD_kexec_get_range:
++			argsize = sizeof(struct xen_kexec_range);
++			break;
++		case KEXEC_CMD_kexec_load:
++			argsize = sizeof(struct xen_kexec_load);
++			break;
++		case KEXEC_CMD_kexec_unload:
++			argsize = sizeof(struct xen_kexec_load);
++			break;
++		case KEXEC_CMD_kexec:
++			argsize = sizeof(struct xen_kexec_exec);
++			break;
++		default:
++			return -ENOSYS;
++	}
++	desc = xencomm_map_no_alloc(args, argsize);
++
++	return plpar_hcall_norets(XEN_MARK(__HYPERVISOR_kexec_op),
++				  op, desc);
++}
++EXPORT_SYMBOL(HYPERVISOR_kexec_op);
++
++int HYPERVISOR_poll(
++	evtchn_port_t *ports, unsigned int nr_ports, u64 timeout)
++{
++	struct sched_poll sched_poll = {
++		.nr_ports = nr_ports,
++		.timeout = jiffies_to_ns(timeout)
++	};
++	set_xen_guest_handle(sched_poll.ports, ports);
++
++	return HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll);
++}
++EXPORT_SYMBOL(HYPERVISOR_poll);
++
++typedef ulong (mf_t)(ulong arg0, ...);
++
++static mf_t *multicall_funcs[] = {
++	[__HYPERVISOR_grant_table_op] = (mf_t *)HYPERVISOR_grant_table_op,
++};
++
++int HYPERVISOR_multicall(void *call_list, int nr_calls)
++{
++	/* we blow out the multicall because the xencomm stuff is just
++	 * too tricky */
++	multicall_entry_t *mcl = (multicall_entry_t *)call_list;
++	multicall_entry_t *c;
++	int i;
++	mf_t *mf;
++	int res;
++	ulong flags;
++
++	/* let's make sure all the calls are supported */
++	for (i = 0; i < nr_calls; i++) {
++		mf = multicall_funcs[mcl[i].op];
++		BUG_ON(mf == NULL);
++	}
++	/* disable interrupts until we are done with all the calls */
++	local_irq_save(flags);
++	for (i = 0; i < nr_calls; i++) {
++		/* lookup supported multicalls */
++		c = &mcl[i];
++		mf = multicall_funcs[c->op];
++		res = mf(c->args[0], c->args[1], c->args[2],
++			 c->args[3], c->args[4], c->args[5]);
++		c->result = res;
++	}
++	local_irq_restore(flags);
++	return 0;
++}
++EXPORT_SYMBOL(HYPERVISOR_multicall);
++
++
++/* privcmd operations: */
++
++static int xenppc_privcmd_domctl(privcmd_hypercall_t *hypercall)
++{
++	xen_domctl_t kern_op;
++	xen_domctl_t __user *user_op = (xen_domctl_t __user *)hypercall->arg[0];
++	struct xencomm_handle *op_desc;
++	struct xencomm_handle *desc = NULL;
++	int ret = 0;
++
++	if (copy_from_user(&kern_op, user_op, sizeof(xen_domctl_t)))
++		return -EFAULT;
++
++	if (kern_op.interface_version != XEN_DOMCTL_INTERFACE_VERSION) {
++		printk(KERN_WARNING "%s: %s %x != %x\n", __func__, current->comm,
++				kern_op.interface_version, XEN_DOMCTL_INTERFACE_VERSION);
++		return -EACCES;
++	}
++
++	op_desc = xencomm_map(&kern_op, sizeof(xen_domctl_t));
++	if (op_desc == NULL)
++		return -ENOMEM;
++
++	switch (kern_op.cmd) {
++	case XEN_DOMCTL_createdomain:
++	case XEN_DOMCTL_destroydomain:
++	case XEN_DOMCTL_pausedomain:
++	case XEN_DOMCTL_unpausedomain:
++	case XEN_DOMCTL_getdomaininfo:
++		break;
++	case XEN_DOMCTL_getmemlist:
++		desc = xencomm_map(
++			xen_guest_handle(kern_op.u.getmemlist.buffer),
++			kern_op.u.getmemlist.max_pfns * sizeof(unsigned long));
++
++		if (desc == NULL)
++			ret = -ENOMEM;
++
++		set_xen_guest_handle(kern_op.u.getmemlist.buffer,
++				     (void *)desc);
++		break;
++	case XEN_DOMCTL_getpageframeinfo:
++		break;
++	case XEN_DOMCTL_getpageframeinfo2:
++		desc = xencomm_map(
++			xen_guest_handle(kern_op.u.getpageframeinfo2.array),
++			kern_op.u.getpageframeinfo2.num);
++
++		if (desc == NULL)
++			ret = -ENOMEM;
++
++		set_xen_guest_handle(kern_op.u.getpageframeinfo2.array,
++				     (void *)desc);
++		break;
++	case XEN_DOMCTL_shadow_op:
++
++		if (xen_guest_handle(kern_op.u.shadow_op.dirty_bitmap))
++		{
++			desc = xencomm_map(
++				xen_guest_handle(kern_op.u.shadow_op.dirty_bitmap),
++				kern_op.u.shadow_op.pages * sizeof(unsigned long));
++
++			if (desc == NULL)
++				ret = -ENOMEM;
++
++			set_xen_guest_handle(kern_op.u.shadow_op.dirty_bitmap,
++				    	 (void *)desc);
++		}
++		break;
++	case XEN_DOMCTL_max_mem:
++		break;
++	case XEN_DOMCTL_setvcpucontext:
++	case XEN_DOMCTL_getvcpucontext:
++		desc = xencomm_map(
++			xen_guest_handle(kern_op.u.vcpucontext.ctxt),
++			sizeof(vcpu_guest_context_t));
++
++		if (desc == NULL)
++			ret = -ENOMEM;
++
++		set_xen_guest_handle(kern_op.u.vcpucontext.ctxt,
++				     (void *)desc);
++		break;
++	case XEN_DOMCTL_getvcpuinfo:
++		break;
++	case XEN_DOMCTL_setvcpuaffinity:
++	case XEN_DOMCTL_getvcpuaffinity:
++		desc = xencomm_map(
++			xen_guest_handle(kern_op.u.vcpuaffinity.cpumap.bitmap),
++			(kern_op.u.vcpuaffinity.cpumap.nr_cpus + 7) / 8);
++
++		if (desc == NULL)
++			ret = -ENOMEM;
++
++		set_xen_guest_handle(kern_op.u.vcpuaffinity.cpumap.bitmap,
++				     (void *)desc);
++		break;
++	case XEN_DOMCTL_max_vcpus:
++	case XEN_DOMCTL_scheduler_op:
++	case XEN_DOMCTL_setdomainhandle:
++	case XEN_DOMCTL_setdebugging:
++	case XEN_DOMCTL_irq_permission:
++	case XEN_DOMCTL_iomem_permission:
++	case XEN_DOMCTL_ioport_permission:
++	case XEN_DOMCTL_hypercall_init:
++	case XEN_DOMCTL_arch_setup:
++	case XEN_DOMCTL_settimeoffset:
++	case XEN_DOMCTL_real_mode_area:
++		break;
++	default:
++		printk(KERN_ERR "%s: unknown domctl cmd %d\n", __func__, kern_op.cmd);
++		ret = -ENOSYS;	/* common exit path frees op_desc */
++	}
++
++	if (ret)
++		goto out; /* error mapping the nested pointer */
++
++	ret = plpar_hcall_norets(XEN_MARK(hypercall->op), op_desc);
++
++	if (copy_to_user(user_op, &kern_op, sizeof(xen_domctl_t)))
++		ret = -EFAULT;
++
++out:
++	xencomm_free(desc);
++	xencomm_free(op_desc);
++	return ret;
++}
++
++static int xenppc_privcmd_sysctl(privcmd_hypercall_t *hypercall)
++{
++	xen_sysctl_t kern_op;
++	xen_sysctl_t __user *user_op = (xen_sysctl_t __user *)hypercall->arg[0];
++	struct xencomm_handle *op_desc;
++	struct xencomm_handle *desc = NULL;
++	int ret = 0;
++
++	if (copy_from_user(&kern_op, user_op, sizeof(xen_sysctl_t)))
++		return -EFAULT;
++
++	if (kern_op.interface_version != XEN_SYSCTL_INTERFACE_VERSION) {
++		printk(KERN_WARNING "%s: %s %x != %x\n", __func__, current->comm,
++				kern_op.interface_version, XEN_SYSCTL_INTERFACE_VERSION);
++		return -EACCES;
++	}
++
++	op_desc = xencomm_map(&kern_op, sizeof(xen_sysctl_t));
++
++	if (op_desc == NULL)
++		return -ENOMEM;
++
++	switch (kern_op.cmd) {
++	case XEN_SYSCTL_readconsole:
++		desc = xencomm_map(
++			xen_guest_handle(kern_op.u.readconsole.buffer),
++			kern_op.u.readconsole.count);
++
++		if (desc == NULL)
++			ret = -ENOMEM;
++
++		set_xen_guest_handle(kern_op.u.readconsole.buffer,
++				     (void *)desc);
++		break;
++	case XEN_SYSCTL_tbuf_op:
++	case XEN_SYSCTL_physinfo:
++	case XEN_SYSCTL_sched_id:
++		break;
++	case XEN_SYSCTL_perfc_op:
++		/* XXX this requires *two* embedded xencomm mappings (desc and val),
++		 * which is not implemented yet. */
++		printk(KERN_ERR "%s: unsupported sysctl cmd %d\n", __func__, kern_op.cmd);
++		ret = -ENOSYS; break;	/* common exit path frees op_desc */
++	case XEN_SYSCTL_getdomaininfolist:
++		desc = xencomm_map(
++			xen_guest_handle(kern_op.u.getdomaininfolist.buffer),
++			kern_op.u.getdomaininfolist.max_domains *
++					sizeof(xen_domctl_getdomaininfo_t));
++
++		if (desc == NULL)
++			ret = -ENOMEM;
++
++		set_xen_guest_handle(kern_op.u.getdomaininfolist.buffer,
++				     (void *)desc);
++		break;
++	default:
++		printk(KERN_ERR "%s: unknown sysctl cmd %d\n", __func__, kern_op.cmd);
++		ret = -ENOSYS;	/* common exit path frees op_desc */
++	}
++
++	if (ret)
++		goto out; /* error mapping the nested pointer */
++
++	ret = plpar_hcall_norets(XEN_MARK(hypercall->op), op_desc);
++
++	if (copy_to_user(user_op, &kern_op, sizeof(xen_sysctl_t)))
++		ret = -EFAULT;
++
++out:
++	xencomm_free(desc);
++	xencomm_free(op_desc);
++	return ret;
++}
++
++static int xenppc_privcmd_platform_op(privcmd_hypercall_t *hypercall)
++{
++	xen_platform_op_t kern_op;
++	xen_platform_op_t __user *user_op =
++			(xen_platform_op_t __user *)hypercall->arg[0];
++	struct xencomm_handle *op_desc;
++	struct xencomm_handle *desc = NULL;
++	int ret = 0;
++
++	if (copy_from_user(&kern_op, user_op, sizeof(xen_platform_op_t)))
++		return -EFAULT;
++
++	if (kern_op.interface_version != XENPF_INTERFACE_VERSION) {
++		printk(KERN_WARNING "%s: %s %x != %x\n", __func__, current->comm,
++				kern_op.interface_version, XENPF_INTERFACE_VERSION);
++		return -EACCES;
++	}
++
++	op_desc = xencomm_map(&kern_op, sizeof(xen_platform_op_t));
++
++	if (op_desc == NULL)
++		return -ENOMEM;
++
++	switch (kern_op.cmd) {
++	case XENPF_settime:
++	case XENPF_add_memtype:
++	case XENPF_del_memtype:
++	case XENPF_read_memtype:
++	case XENPF_microcode_update:
++	case XENPF_platform_quirk:
++		break;
++	default:
++		printk(KERN_ERR "%s: unknown platform_op cmd %d\n", __func__,
++				kern_op.cmd);
++		ret = -ENOSYS;	/* common exit path frees op_desc */
++	}
++
++	if (ret)
++		goto out; /* error mapping the nested pointer */
++
++	ret = plpar_hcall_norets(XEN_MARK(hypercall->op), op_desc);
++
++	if (copy_to_user(user_op, &kern_op, sizeof(xen_platform_op_t)))
++		ret = -EFAULT;
++
++out:
++	xencomm_free(desc);
++	xencomm_free(op_desc);
++	return ret;
++}
++
++int HYPERVISOR_memory_op(unsigned int cmd, void *arg)
++{
++	int ret;
++	struct xencomm_handle *op_desc;
++	xen_memory_reservation_t *mop;
++
++
++	mop = (xen_memory_reservation_t *)arg;
++
++	op_desc = xencomm_map(mop, sizeof(xen_memory_reservation_t));
++
++	if (op_desc == NULL)
++		return -ENOMEM;
++
++	switch (cmd) {
++	case XENMEM_increase_reservation:
++	case XENMEM_decrease_reservation:
++	case XENMEM_populate_physmap: {
++		struct xencomm_handle *desc = NULL;
++
++		if (xen_guest_handle(mop->extent_start)) {
++			desc = xencomm_map(
++				xen_guest_handle(mop->extent_start),
++				mop->nr_extents *
++				sizeof(*xen_guest_handle(mop->extent_start)));
++
++			if (desc == NULL) {
++				ret = -ENOMEM;
++				goto out;
++			}
++
++			set_xen_guest_handle(mop->extent_start,
++					     (void *)desc);
++		}
++
++		ret = plpar_hcall_norets(XEN_MARK(__HYPERVISOR_memory_op),
++					cmd, op_desc);
++
++		xencomm_free(desc);
++		}
++		break;
++
++	case XENMEM_maximum_ram_page:
++		/* arg is NULL so we can call through here */
++		ret = plpar_hcall_norets(XEN_MARK(__HYPERVISOR_memory_op),
++					cmd, NULL);
++		break;
++	default:
++		printk(KERN_ERR "%s: unknown memory op %d\n", __func__, cmd);
++		ret = -ENOSYS;
++	}
++
++out:
++	xencomm_free(op_desc);
++	return ret;
++}
++EXPORT_SYMBOL(HYPERVISOR_memory_op);
++
++static int xenppc_privcmd_memory_op(privcmd_hypercall_t *hypercall)
++{
++	xen_memory_reservation_t kern_op;
++	xen_memory_reservation_t __user *user_op;
++	const unsigned long cmd = hypercall->arg[0];
++	int ret = 0;
++
++	user_op = (xen_memory_reservation_t __user *)hypercall->arg[1];
++	if (copy_from_user(&kern_op, user_op,
++			   sizeof(xen_memory_reservation_t)))
++		return -EFAULT;
++
++	ret = HYPERVISOR_memory_op(cmd, &kern_op);
++	if (ret >= 0) {
++		if (copy_to_user(user_op, &kern_op,
++				 sizeof(xen_memory_reservation_t)))
++			return -EFAULT;
++	}
++	return ret;
++}
++
++static int xenppc_privcmd_version(privcmd_hypercall_t *hypercall)
++{
++	return HYPERVISOR_xen_version(hypercall->arg[0],
++			(void *)hypercall->arg[1]);
++}
++
++static int xenppc_privcmd_event_channel_op(privcmd_hypercall_t *hypercall)
++{
++	struct xencomm_handle *desc;
++	unsigned int argsize;
++	int ret;
++
++	switch (hypercall->arg[0]) {
++	case EVTCHNOP_alloc_unbound:
++		argsize = sizeof(evtchn_alloc_unbound_t);
++		break;
++
++	case EVTCHNOP_status:
++		argsize = sizeof(evtchn_status_t);
++		break;
++
++	default:
++		printk(KERN_ERR "%s: unknown EVTCHNOP (%ld)\n",
++		       __func__, hypercall->arg[0]);
++		return -EINVAL;
++	}
++
++	desc = xencomm_map((void *)hypercall->arg[1], argsize);
++
++	if (desc == NULL)
++		return -ENOMEM;
++
++	ret = plpar_hcall_norets(XEN_MARK(hypercall->op), hypercall->arg[0],
++				desc);
++
++	xencomm_free(desc);
++	return ret;
++}
++
++static int xenppc_acmcmd_op(privcmd_hypercall_t *hypercall)
++{
++	xen_acmctl_t kern_op;
++	xen_acmctl_t __user *user_op = (xen_acmctl_t __user *)hypercall->arg[0];
++	void *op_desc;
++	void *desc = NULL, *desc2 = NULL, *desc3 = NULL, *desc4 = NULL;
++	int ret = 0;
++
++	if (copy_from_user(&kern_op, user_op, sizeof(xen_acmctl_t)))
++		return -EFAULT;
++
++	if (kern_op.interface_version != ACM_INTERFACE_VERSION) {
++		printk(KERN_WARNING "%s: %s %x != %x\n", __func__, current->comm,
++				kern_op.interface_version, ACM_INTERFACE_VERSION);
++		return -EACCES;
++	}
++
++	op_desc = xencomm_map(&kern_op, sizeof(xen_acmctl_t));
++	if (op_desc == NULL)
++		return -ENOMEM;
++
++	switch (kern_op.cmd) {
++	case ACMOP_setpolicy:
++		desc = xencomm_map(
++			xen_guest_handle(kern_op.u.setpolicy.pushcache),
++			kern_op.u.setpolicy.pushcache_size);
++
++		if (desc == NULL)
++			ret = -ENOMEM;
++
++		set_xen_guest_handle(kern_op.u.setpolicy.pushcache,
++		                     desc);
++		break;
++	case ACMOP_getpolicy:
++		desc = xencomm_map(
++			xen_guest_handle(kern_op.u.getpolicy.pullcache),
++			kern_op.u.getpolicy.pullcache_size);
++
++		if (desc == NULL)
++			ret = -ENOMEM;
++
++		set_xen_guest_handle(kern_op.u.getpolicy.pullcache,
++		                     desc);
++		break;
++	case ACMOP_dumpstats:
++		desc = xencomm_map(
++			xen_guest_handle(kern_op.u.dumpstats.pullcache),
++			kern_op.u.dumpstats.pullcache_size);
++
++		if (desc == NULL)
++			ret = -ENOMEM;
++
++		set_xen_guest_handle(kern_op.u.dumpstats.pullcache,
++		                     desc);
++		break;
++	case ACMOP_getssid:
++		desc = xencomm_map(
++			xen_guest_handle(kern_op.u.getssid.ssidbuf),
++			kern_op.u.getssid.ssidbuf_size);
++
++		if (desc == NULL)
++			ret = -ENOMEM;
++
++		set_xen_guest_handle(kern_op.u.getssid.ssidbuf,
++		                     desc);
++		break;
++	case ACMOP_getdecision:
++		break;
++	case ACMOP_chgpolicy:
++		desc = xencomm_map(
++			xen_guest_handle(kern_op.u.change_policy.policy_pushcache),
++			kern_op.u.change_policy.policy_pushcache_size);
++		desc2 = xencomm_map(
++		 	 xen_guest_handle(kern_op.u.change_policy.del_array),
++		 	 kern_op.u.change_policy.delarray_size);
++		desc3 = xencomm_map(
++		 	 xen_guest_handle(kern_op.u.change_policy.chg_array),
++		 	 kern_op.u.change_policy.chgarray_size);
++		desc4 = xencomm_map(
++		 	 xen_guest_handle(kern_op.u.change_policy.err_array),
++		 	 kern_op.u.change_policy.errarray_size);
++
++		if (desc  == NULL || desc2 == NULL ||
++			desc3 == NULL || desc4 == NULL) {
++			ret = -ENOMEM;
++			goto out;
++		}
++
++		set_xen_guest_handle(kern_op.u.change_policy.policy_pushcache,
++		                     desc);
++		set_xen_guest_handle(kern_op.u.change_policy.del_array,
++		                     desc2);
++		set_xen_guest_handle(kern_op.u.change_policy.chg_array,
++		                     desc3);
++		set_xen_guest_handle(kern_op.u.change_policy.err_array,
++		                     desc4);
++		break;
++	case ACMOP_relabeldoms:
++		desc = xencomm_map(
++			xen_guest_handle(kern_op.u.relabel_doms.relabel_map),
++			kern_op.u.relabel_doms.relabel_map_size);
++		desc2 = xencomm_map(
++			xen_guest_handle(kern_op.u.relabel_doms.err_array),
++			kern_op.u.relabel_doms.errarray_size);
++
++		if (desc  == NULL || desc2 == NULL) {
++			ret = -ENOMEM;
++			goto out;
++		}
++
++		set_xen_guest_handle(kern_op.u.relabel_doms.relabel_map,
++		                     desc);
++		set_xen_guest_handle(kern_op.u.relabel_doms.err_array,
++		                     desc2);
++		break;
++	default:
++		printk(KERN_ERR "%s: unknown/unsupported acmctl cmd %d\n",
++		       __func__, kern_op.cmd);
++		ret = -ENOSYS;	/* common exit path frees the descriptors */
++	}
++
++	if (ret)
++		goto out; /* error mapping the nested pointer */
++
++	ret = plpar_hcall_norets(XEN_MARK(hypercall->op), op_desc);
++
++	if (copy_to_user(user_op, &kern_op, sizeof(xen_acmctl_t)))
++		ret = -EFAULT;
++
++out:
++	xencomm_free(desc);
++	xencomm_free(desc2);
++	xencomm_free(desc3);
++	xencomm_free(desc4);
++	xencomm_free(op_desc);
++	return ret;
++}
++
++
++/* The PowerPC hypervisor runs in a separate address space from Linux
++ * kernel/userspace, i.e. real mode. We must therefore translate userspace
++ * pointers to something the hypervisor can make sense of. */
++int privcmd_hypercall(privcmd_hypercall_t *hypercall)
++{
++	switch (hypercall->op) {
++	case __HYPERVISOR_domctl:
++		return xenppc_privcmd_domctl(hypercall);
++	case __HYPERVISOR_sysctl:
++		return xenppc_privcmd_sysctl(hypercall);
++	case __HYPERVISOR_platform_op:
++		return xenppc_privcmd_platform_op(hypercall);
++	case __HYPERVISOR_memory_op:
++		return xenppc_privcmd_memory_op(hypercall);
++	case __HYPERVISOR_xen_version:
++		return xenppc_privcmd_version(hypercall);
++	case __HYPERVISOR_event_channel_op:
++		return xenppc_privcmd_event_channel_op(hypercall);
++	case __HYPERVISOR_acm_op:
++		return xenppc_acmcmd_op(hypercall);
++	default:
++		printk(KERN_ERR "%s: unknown hcall (%ld)\n", __func__, hypercall->op);
++		/* maybe we'll get lucky and the hcall needs no translation. */
++		return plpar_hcall_norets(XEN_MARK(hypercall->op),
++				hypercall->arg[0],
++				hypercall->arg[1],
++				hypercall->arg[2],
++				hypercall->arg[3],
++				hypercall->arg[4]);
++	}
++}
++
++int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
++{
++	int argsize;
++	const unsigned long hcall = __HYPERVISOR_vcpu_op;
++	struct xencomm_handle *desc;
++	int rc;
++
++	switch (cmd) {
++	case  VCPUOP_initialise:
++		argsize = sizeof(vcpu_guest_context_t);
++		break;
++	case VCPUOP_up:
++	case VCPUOP_down:
++	case VCPUOP_is_up:
++		return plpar_hcall_norets(XEN_MARK(hcall), cmd, vcpuid, 0);
++
++	case VCPUOP_get_runstate_info:
++		argsize = sizeof (vcpu_runstate_info_t);
++		break;
++	default:
++		printk(KERN_ERR "%s: unknown vcpu_op cmd %d\n", __func__, cmd);
++		return -ENOSYS;
++	}
++
++	desc = xencomm_map_no_alloc(extra_args, argsize);
++
++	if (desc == NULL)
++		return -EINVAL;
++
++	rc = plpar_hcall_norets(XEN_MARK(hcall), cmd, vcpuid, desc);
++
++	xencomm_free(desc);
++
++	return rc;
++}
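
The privcmd wrappers in this file all share one translation shape: copy the
operation in from userspace, map the operation struct through xencomm, map any
guest handles embedded in it, issue the hcall, copy the (possibly updated)
struct back out, and free the descriptors. A minimal sketch of that shape,
using only the helpers visible in this hunk; xen_foo_t and its
u.buffer/u.count fields are hypothetical placeholders, not a real Xen
interface:

static int xenppc_privcmd_foo(privcmd_hypercall_t *hypercall)
{
	xen_foo_t kern_op;
	xen_foo_t __user *user_op = (xen_foo_t __user *)hypercall->arg[0];
	struct xencomm_handle *op_desc, *desc = NULL;
	int ret = 0;

	if (copy_from_user(&kern_op, user_op, sizeof(kern_op)))
		return -EFAULT;

	op_desc = xencomm_map(&kern_op, sizeof(kern_op));	/* the op itself */
	if (op_desc == NULL)
		return -ENOMEM;

	/* any pointer nested inside the op needs its own descriptor */
	desc = xencomm_map(xen_guest_handle(kern_op.u.buffer), kern_op.u.count);
	if (desc == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	set_xen_guest_handle(kern_op.u.buffer, (void *)desc);

	ret = plpar_hcall_norets(XEN_MARK(hypercall->op), op_desc);

	if (copy_to_user(user_op, &kern_op, sizeof(kern_op)))
		ret = -EFAULT;
out:
	xencomm_free(desc);
	xencomm_free(op_desc);
	return ret;
}

The real wrappers differ only in which nested pointers need mapping, which is
why each switch arm above either maps a handle or simply breaks.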
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/platforms/xen/reboot.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/powerpc/platforms/xen/reboot.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,53 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Jimi Xenidis <jimix at watson.ibm.com>
++ */
++
++#include <linux/module.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/io/console.h>
++#include <xen/xencons.h>
++#include <asm/hypervisor.h>
++#include <asm/machdep.h>
++
++static void domain_machine_restart(char * __unused)
++{
++	/* We really want to get pending console data out before we die. */
++	xencons_force_flush();
++	HYPERVISOR_shutdown(SHUTDOWN_reboot);
++}
++
++static void domain_machine_power_off(void)
++{
++	/* We really want to get pending console data out before we die. */
++	xencons_force_flush();
++	HYPERVISOR_shutdown(SHUTDOWN_poweroff);
++}
++
++void xen_reboot_init(struct machdep_calls *md)
++{
++	if (md != NULL) {
++		ppc_md.restart	 = md->restart;
++		ppc_md.power_off = md->power_off;
++		ppc_md.halt	 = md->halt;
++	} else {
++		ppc_md.restart	 = domain_machine_restart;
++		ppc_md.power_off = domain_machine_power_off;
++		ppc_md.halt	 = domain_machine_power_off;
++	}
++}
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/platforms/xen/setup.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/powerpc/platforms/xen/setup.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,336 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Jimi Xenidis <jimix at watson.ibm.com>
++ */
++
++#define DEBUG
++#define CONFIG_SHARE_MPIC
++
++#include <linux/module.h>
++#include <linux/rwsem.h>
++#include <linux/delay.h>
++#include <linux/console.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/sched.h>
++#include <xen/evtchn.h>
++#include <xen/features.h>
++#include <xen/xencons.h>
++#include <asm/udbg.h>
++#include <asm/pgtable.h>
++#include <asm/prom.h>
++#include <asm/iommu.h>
++#include <asm/mmu.h>
++#include <asm/abs_addr.h>
++#include <asm/machdep.h>
++#include <asm/hypervisor.h>
++#include <asm/time.h>
++#include <asm/pmc.h>
++#include "setup.h"
++
++#ifdef DEBUG
++#define DBG(fmt...) udbg_printf(fmt)
++#else
++#define DBG(fmt...)
++#endif
++
++/* Apparently on other arches this could be used before it's defined,
++ * but this should not be the case on PPC */
++shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)NULL;
++EXPORT_SYMBOL(HYPERVISOR_shared_info);
++
++/* Raw start-of-day parameters from the hypervisor. */
++static start_info_t xsi;
++start_info_t *xen_start_info;
++EXPORT_SYMBOL(xen_start_info);
++
++extern struct machdep_calls mach_maple_md;
++extern void maple_pci_init(void);
++
++static unsigned long foreign_mfn_flag;
++
++/* Must be called with &vma->vm_mm->mmap_sem locked for write */
++int direct_remap_pfn_range(struct vm_area_struct *vma,
++		unsigned long address, 
++		unsigned long mfn,
++		unsigned long size, 
++		pgprot_t prot,
++		domid_t  domid)
++{
++	int rc;
++
++	/* Set the MFN flag to tell Xen that this is not a PFN. */
++	printk("%s: mapping mfn 0x%lx (size 0x%lx) -> 0x%lx\n", __func__,
++			mfn, size, mfn | foreign_mfn_flag);
++	mfn = mfn | foreign_mfn_flag;
++
++	WARN_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
++	rc = remap_pfn_range(vma, address, mfn, size, prot);
++
++	return rc;
++}
++
++static void __init xen_fw_feature_init(void)
++{
++	DBG(" -> %s\n", __func__);
++
++	powerpc_firmware_features = 0;
++
++	powerpc_firmware_features |= FW_FEATURE_LPAR;
++	powerpc_firmware_features |= FW_FEATURE_TCE | FW_FEATURE_DABR;
++		
++	printk(KERN_INFO "firmware_features = 0x%lx\n", 
++			powerpc_firmware_features);
++
++	DBG(" <- %s\n", __func__);
++}
++
++/* if these were global then we could get them from pseries/setup.c */
++static int pseries_set_dabr(unsigned long dabr)
++{
++	return plpar_hcall_norets(H_SET_DABR, dabr);
++}
++
++static int pseries_set_xdabr(unsigned long dabr)
++{
++	/* We want to catch accesses from kernel and userspace */
++	return plpar_hcall_norets(H_SET_XDABR, dabr,
++			H_DABRX_KERNEL | H_DABRX_USER);
++}
++
++/* 
++ * Early initialization.
++ */
++static void __init xenppc_init_early(void)
++{
++	struct device_node *xen;
++
++	DBG(" -> %s\n", __func__);
++
++	xen = of_find_node_by_path("/xen");
++
++	xen_start_info = &xsi;
++
++	/* fill out start_info_t from devtree */
++	if ((char *)get_property(xen, "privileged", NULL))
++		xen_start_info->flags |= SIF_PRIVILEGED;
++	if ((char *)get_property(xen, "initdomain", NULL))
++		xen_start_info->flags |= SIF_INITDOMAIN;
++	xen_start_info->shared_info = *((u64 *)get_property(xen, 
++	   "shared-info", NULL));
++
++	/* only look for store and console for guest domains */
++	if (xen_start_info->flags == 0) {
++		struct device_node *console = of_find_node_by_path("/xen/console");
++		struct device_node *store = of_find_node_by_path("/xen/store");
++
++		xen_start_info->store_mfn = (*((u64 *)get_property(store,
++		   "reg", NULL))) >> PAGE_SHIFT;
++		xen_start_info->store_evtchn = *((u32 *)get_property(store,
++		   "interrupts", NULL));
++		xen_start_info->console.domU.mfn = (*((u64 *)get_property(console,
++		   "reg", NULL))) >> PAGE_SHIFT;
++		xen_start_info->console.domU.evtchn = *((u32 *)get_property(console,
++		   "interrupts", NULL));
++	}
++
++	HYPERVISOR_shared_info = __va(xen_start_info->shared_info);
++
++	udbg_init_xen();
++
++	DBG("xen_start_info at %p\n", xen_start_info);
++	DBG("    magic          %s\n", xen_start_info->magic);
++	DBG("    flags          %x\n", xen_start_info->flags);
++	DBG("    shared_info    %lx, %p\n",
++	    xen_start_info->shared_info, HYPERVISOR_shared_info);
++	DBG("    store_mfn      %llx\n", xen_start_info->store_mfn);
++	DBG("    store_evtchn   %x\n", xen_start_info->store_evtchn);
++	DBG("    console_mfn    %llx\n", xen_start_info->console.domU.mfn);
++	DBG("    console_evtchn %x\n", xen_start_info->console.domU.evtchn);
++
++	xen_setup_time(&mach_maple_md);
++
++	add_preferred_console("xvc", 0, NULL);
++
++	if (get_property(xen, "power-control", NULL))
++		xen_reboot_init(&mach_maple_md);
++	else
++		xen_reboot_init(NULL);
++
++	if (is_initial_xendomain()) {
++		u64 *mfnflag = (u64 *)get_property(xen, "mfn-flag", NULL);
++		if (mfnflag) {
++			foreign_mfn_flag = (1UL << mfnflag[0]);
++			printk("OF: using 0x%lx as foreign mfn flag\n", foreign_mfn_flag);
++		} else
++			printk("OF: /xen/mfn-base must be present to build guests\n");
++	}
++
++	/* get the domain features */
++	setup_xen_features();
++
++	DBG("Hello World I'm Maple Xen-LPAR!\n");
++
++	if (firmware_has_feature(FW_FEATURE_DABR))
++		ppc_md.set_dabr = pseries_set_dabr;
++	else if (firmware_has_feature(FW_FEATURE_XDABR))
++		ppc_md.set_dabr = pseries_set_xdabr;
++
++	iommu_init_early_pSeries();
++
++	DBG(" <- %s\n", __func__);
++}
++
++/*
++ * this interface is limiting
++ */
++static int running_on_xen;
++int is_running_on_xen(void)
++{
++	return running_on_xen;
++}
++EXPORT_SYMBOL(is_running_on_xen);
++
++static void xenppc_power_save(void)
++{
++	/* SCHEDOP_yield could immediately return. Instead, we
++	 * want to idle in the Xen idle domain, so use
++	 * SCHEDOP_block with a one-shot timer. */
++	/* XXX do tickless stuff here. See
++	 * linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c */
++	u64 now_ns = tb_to_ns(get_tb());
++	u64 offset_ns = jiffies_to_ns(1);
++	int rc;
++
++	rc = HYPERVISOR_set_timer_op(now_ns + offset_ns);
++	BUG_ON(rc != 0);
++
++	HYPERVISOR_sched_op(SCHEDOP_block, NULL);
++}
++
++void __init xenppc_setup_arch(void)
++{
++	/* init to some ~sane value until calibrate_delay() runs */
++	loops_per_jiffy = 50000000;
++
++	/* Lookup PCI hosts */
++	if (is_initial_xendomain())
++		maple_pci_init();
++
++#ifdef CONFIG_DUMMY_CONSOLE
++	conswitchp = &dummy_con;
++#endif
++#ifdef CONFIG_SMP
++	/* let them fly */
++	xen_setup_smp();
++#endif
++
++	printk(KERN_INFO "Using Xen idle loop\n");
++}
++
++static int __init xen_probe_flat_dt(unsigned long node,
++				    const char *uname, int depth,
++				    void *data)
++{
++	if (depth != 1)
++		return 0;
++	if (strcmp(uname, "xen") != 0)
++ 		return 0;
++
++	running_on_xen = 1;
++
++	return 1;
++}
++
++/*
++ * Called very early: the MMU is off and the device-tree is not yet unflattened
++ */
++/* forward ref */
++struct machdep_calls __initdata xen_md;
++static int __init xenppc_probe(void)
++{
++	of_scan_flat_dt(xen_probe_flat_dt, NULL);
++
++	if (!running_on_xen)
++		return 0;
++
++	xen_fw_feature_init();
++
++	hpte_init_lpar();
++
++	return 1;
++}
++
++static void __init xenppc_progress(char *s, unsigned short hex)
++{
++	printk("*** %04x : %s\n", hex, s ? s : "");
++}
++
++unsigned int xenppc_get_irq(struct pt_regs *regs)
++{
++	evtchn_do_upcall(regs);
++	/* evtchn_do_upcall() handles all pending event channels directly, so there
++	 * is nothing for do_IRQ() to do.
++	 * XXX This means we aren't using IRQ stacks. */
++	return NO_IRQ;
++}
++
++static void xenppc_enable_pmcs(void)
++{
++	unsigned long set, reset;
++
++	power4_enable_pmcs();
++
++	set = 1UL << 63;
++	reset = 0;
++	plpar_hcall_norets(H_PERFMON, set, reset);
++}
++
++#ifdef CONFIG_KEXEC
++void xen_machine_kexec(struct kimage *image)
++{
++	panic("%s(%p): called\n", __func__, image);
++}
++
++int xen_machine_kexec_prepare(struct kimage *image)
++{
++	panic("%s(%p): called\n", __func__, image);
++}
++
++void xen_machine_crash_shutdown(struct pt_regs *regs)
++{
++	panic("%s(%p): called\n", __func__, regs);
++}       
++#endif
++
++define_machine(xen) {
++	.name			= "Xen-Maple",
++	.probe			= xenppc_probe,
++	.setup_arch		= xenppc_setup_arch,
++	.init_early		= xenppc_init_early,
++	.init_IRQ		= xen_init_IRQ,
++	.get_irq		= xenppc_get_irq,
++	.calibrate_decr		= generic_calibrate_decr,
++	.progress		= xenppc_progress,
++	.power_save		= xenppc_power_save,
++	.enable_pmcs	= xenppc_enable_pmcs,
++#ifdef CONFIG_KEXEC
++	.machine_kexec		= xen_machine_kexec,
++	.machine_kexec_prepare	= xen_machine_kexec_prepare,
++	.machine_crash_shutdown	= xen_machine_crash_shutdown,
++#endif
++};
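
The platform selection above hangs off a single flattened-device-tree node; a
sketch of the boot-time order, assuming the generic machine probe loop of this
kernel:

/* boot-time selection order (sketch):
 *   machine probe loop -> xenppc_probe()
 *     -> of_scan_flat_dt(xen_probe_flat_dt, NULL)  walks the flat blob;
 *        a depth-1 node named "xen" sets running_on_xen = 1
 *     -> xen_fw_feature_init() + hpte_init_lpar()  LPAR-style MMU setup
 *   afterwards .init_early and .setup_arch from define_machine(xen)
 *   run as for any other PowerPC platform. */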
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/platforms/xen/setup.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/powerpc/platforms/xen/setup.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,47 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Jimi Xenidis <jimix at watson.ibm.com>
++ */
++
++#include <asm/machdep.h>
++#include <asm/time.h>
++
++extern void evtchn_init_IRQ(void);
++extern void xen_init_IRQ(void);
++extern void xen_reboot_init(struct machdep_calls *);
++extern void xen_maple_init_IRQ(void);
++extern unsigned int xen_get_irq(struct pt_regs *regs);
++
++static inline u64 tb_to_ns(u64 tb)
++{
++	if (likely(tb_ticks_per_sec)) {
++		return tb * (1000000000UL / tb_ticks_per_sec);
++	}
++	return 0;
++}
++
++static inline u64 jiffies_to_ns(unsigned long j) 
++{
++	return j * (1000000000UL / HZ);
++}
++
++extern struct page *alloc_foreign_page(void);
++extern void free_foreign_page(struct page *page);
++
++extern void __init xen_setup_time(struct machdep_calls *host_md);
++extern void xen_setup_smp(void);
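
Both conversion helpers above are plain integer arithmetic; a worked example
with assumed values (HZ and tb_ticks_per_sec are illustrative, not taken from
this commit):

/* assuming HZ == 250: jiffies_to_ns(1) == 1000000000UL / 250
 *                                       == 4000000 ns (4 ms),
 * so jiffies_to_ns(10) == 40000000 ns.
 * assuming tb_ticks_per_sec == 512000000: tb_to_ns() scales by
 * 1000000000UL / 512000000 == 1 (integer division truncates the real
 * factor of ~1.95), so it under-reports elapsed time whenever
 * tb_ticks_per_sec does not divide 10^9 evenly. */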
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/platforms/xen/smp.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/powerpc/platforms/xen/smp.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,444 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Jimi Xenidis <jimix at watson.ibm.com>
++ */
++
++#include <linux/kernel.h>
++#include <linux/config.h>
++#include <linux/bootmem.h>
++#include <linux/irq.h>
++#include <linux/smp.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/vcpu.h>
++#include <xen/evtchn.h>
++#include <asm/prom.h>
++#include <asm/udbg.h>
++#include <asm/hypervisor.h>
++#include "setup.h"
++
++#undef DEBUG
++
++#ifdef DEBUG
++#define DBG(fmt...) printk(KERN_EMERG fmt)
++#else
++#define DBG(fmt...)
++#endif
++
++static inline void *xen_of_alloc(ulong size)
++{
++	if (mem_init_done)
++		return kmalloc(size, GFP_KERNEL);
++	return alloc_bootmem(size);
++}
++static inline void xen_of_free(void *ptr)
++{
++	/* if this happens with the boot allocator then we are screwed */
++	BUG_ON(!mem_init_done);
++	kfree(ptr);
++}
++
++static struct property *dup_prop(struct property *op)
++{
++	struct property *np;
++	void *p;
++	ulong sz;
++
++
++	/* allocate everything in one go in case it fails */
++	sz = sizeof (*np); /* prop node */
++	sz += strlen(op->name) + 1; /* prop name */
++	sz += op->length; /* prop value */
++		
++	p = xen_of_alloc(sz);
++	if (!p)
++		return NULL;
++	memset(p, 0, sz);
++
++	/* prop node first */
++	np = p;
++	p += sizeof (*np);
++
++	/* value next because we want it aligned */
++	np->value = p;
++	p += op->length;
++
++	/* name */
++	np->name = p;
++
++	/* copy it all */
++	strcpy(np->name, op->name);
++	np->length = op->length;
++	memcpy(np->value, op->value, np->length);
++
++	return np;
++}
++
++static int dup_properties(struct device_node *dst, struct device_node *src)
++{
++	struct property *op;
++	struct property *np;
++	struct property *lp;
++	int rc = 0;
++
++	DBG("%s: duping to new cpu node: %s\n", __func__, dst->full_name);
++
++	np = lp = NULL;
++	for (op = src->properties; op != 0; op = op->next) {
++		lp = np;
++		np = dup_prop(op);
++		if (!np)
++			break;
++
++		prom_add_property(dst, np);
++	}
++
++	if (!np) {
++		DBG("%s: FAILED duping: %s\n", __func__, dst->full_name);
++		/* we could not allocate enough, so free what we have
++		 * allocated */
++		rc = -ENOMEM;
++		for (op = dst->properties; lp && op != lp; op = op->next)
++			xen_of_free(op);
++	}
++
++	return rc;
++}
++
++/* returns the added device node so it can be added to procfs in the
++ * case of hotplugging */
++static struct device_node *xen_add_vcpu_node(struct device_node *boot_cpu,
++					     uint cpu)
++{
++	struct device_node *new_cpu;
++	struct property *pp;
++	void *p;
++	int sz;
++	int type_sz;
++	int name_sz;
++
++	DBG("%s: boot cpu: %s\n", __func__, boot_cpu->full_name);
++
++	/* allocate in one shot in case we fail */
++	name_sz = strlen(boot_cpu->name) + 1;
++	type_sz = strlen(boot_cpu->type) + 1;
++
++	sz = sizeof (*new_cpu);	/* the node */
++	sz += strlen(boot_cpu->full_name) + 3; /* full_name */
++	sz += name_sz; /* name */
++	sz += type_sz; /* type */
++
++	p = xen_of_alloc(sz);
++	if (!p)
++		return NULL;
++	memset(p, 0, sz);
++
++	/* the node */
++	new_cpu = p;
++	p += sizeof (*new_cpu);
++	
++	/* name */
++	new_cpu->name = p;
++	strcpy(new_cpu->name, boot_cpu->name);
++	p += name_sz;
++	
++	/* type */
++	new_cpu->type = p;
++	strcpy(new_cpu->type, boot_cpu->type);
++	p += type_sz;
++
++	/* full_name */
++	new_cpu->full_name = p;
++
++	/* assemble new full_name */
++	pp = of_find_property(boot_cpu, "name", NULL);
++	if (!pp)
++		panic("%s: no name prop\n", __func__);
++
++	DBG("%s: name is: %s = %s\n", __func__, pp->name, pp->value);
++	sprintf(new_cpu->full_name, "/cpus/%s@%u", pp->value, cpu);
++
++	if (dup_properties(new_cpu, boot_cpu)) {
++		xen_of_free(new_cpu);
++		return NULL;
++	}
++
++	/* fixup reg property */
++	DBG("%s: updating reg: %d\n", __func__, cpu);
++	pp = of_find_property(new_cpu, "reg", NULL);
++	if (!pp)
++		panic("%s: no reg prop\n", __func__);
++	*(int *)pp->value = cpu;
++
++	if (mem_init_done)
++		OF_MARK_DYNAMIC(new_cpu);
++
++	kref_init(&new_cpu->kref);
++
++	/* insert the node */
++	new_cpu->parent = of_get_parent(boot_cpu);
++	of_attach_node(new_cpu);
++	of_node_put(new_cpu->parent);
++
++	return new_cpu;
++}
++
++static void cpu_initialize_context(unsigned int vcpu, ulong entry)
++{
++	vcpu_guest_context_t ctxt;
++
++	memset(&ctxt.user_regs, 0x55, sizeof(ctxt.user_regs));
++
++	ctxt.user_regs.pc = entry;
++	ctxt.user_regs.msr = 0;
++	ctxt.user_regs.gprs[1] = 0; /* Linux uses its own stack */
++	ctxt.user_regs.gprs[3] = vcpu;
++
++	/* XXX verify this */
++	/* There is a buggy kernel that does not zero the "local_paca", so
++	 * we must make sure this register is 0 */
++	ctxt.user_regs.gprs[13] = 0;
++
++	DBG("%s: initializing vcpu: %d\n", __func__, vcpu);
++
++	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpu, &ctxt))
++		panic("%s: VCPUOP_initialise failed, vcpu: %d\n",
++		       __func__, vcpu);
++
++}
++
++static int xen_start_vcpu(uint vcpu, ulong entry)
++{
++	DBG("%s: starting vcpu: %d\n", __func__, vcpu);
++
++	cpu_initialize_context(vcpu, entry);
++
++	DBG("%s: Spinning up vcpu: %d\n", __func__, vcpu);
++	return HYPERVISOR_vcpu_op(VCPUOP_up, vcpu, NULL);
++}
++
++extern void __secondary_hold(void);
++extern unsigned long __secondary_hold_spinloop;
++extern unsigned long __secondary_hold_acknowledge;
++
++static void xen_boot_secondary_vcpus(void)
++{
++	int vcpu;
++	int rc;
++	const unsigned long mark = (unsigned long)-1;
++	unsigned long *spinloop = &__secondary_hold_spinloop;
++	unsigned long *acknowledge = &__secondary_hold_acknowledge;
++#ifdef CONFIG_PPC64
++	/* __secondary_hold is actually a descriptor, not the text address */
++	unsigned long secondary_hold = __pa(*(unsigned long *)__secondary_hold);
++#else
++	unsigned long secondary_hold = __pa(__secondary_hold);
++#endif
++	struct device_node *boot_cpu;
++
++	DBG("%s: finding CPU node\n", __func__);
++	boot_cpu = of_find_node_by_type(NULL, "cpu");
++	if (!boot_cpu)
++		panic("%s: Cannot find Booting CPU node\n", __func__);
++
++	/* Set the common spinloop variable, so all of the secondary cpus
++	 * will block when they are awakened from their OF spinloop.
++	 * This must occur for both SMP and non SMP kernels, since OF will
++	 * be trashed when we move the kernel.
++	 */
++	*spinloop = 0;
++
++	DBG("%s: Searching for all vcpu numbers > 0\n", __func__);
++	/* try and start as many as we can */
++	for (vcpu = 1; vcpu < NR_CPUS; vcpu++) {
++		int i;
++
++		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, vcpu, NULL);
++		if (rc < 0)
++			continue;
++
++		DBG("%s: Found vcpu: %d\n", __func__, vcpu);
++		/* Init the acknowledge var which will be reset by
++		 * the secondary cpu when it awakens from its OF
++		 * spinloop.
++		 */
++		*acknowledge = mark;
++
++		DBG("%s: Starting vcpu: %d at pc: 0x%lx\n", __func__,
++		    vcpu, secondary_hold);
++		rc = xen_start_vcpu(vcpu, secondary_hold);
++		if (rc)
++			panic("%s: xen_start_vcpu() failed\n", __func__);
++
++
++		DBG("%s: Waiting for ACK on vcpu: %d\n", __func__, vcpu);
++		for (i = 0; (i < 100000000) && (*acknowledge == mark); i++)
++			mb();
++
++		if (*acknowledge == vcpu)
++			DBG("%s: Received ACK on vcpu: %d\n",
++			    __func__, vcpu);
++
++		xen_add_vcpu_node(boot_cpu, vcpu);
++
++		cpu_set(vcpu, cpu_present_map);
++		set_hard_smp_processor_id(vcpu, vcpu);
++	}
++	of_node_put(boot_cpu);
++	DBG("%s: end...\n", __func__);
++}
++
++static int __init smp_xen_probe(void)
++{
++	return cpus_weight(cpu_present_map);
++}
++
++static irqreturn_t xen_ppc_msg_reschedule(int irq, void *dev_id,
++					  struct pt_regs *regs)
++{
++	smp_message_recv(PPC_MSG_RESCHEDULE, regs);
++	return IRQ_HANDLED;
++}
++
++static irqreturn_t xen_ppc_msg_call_function(int irq, void *dev_id,
++					     struct pt_regs *regs)
++{
++	smp_message_recv(PPC_MSG_CALL_FUNCTION, regs);
++	return IRQ_HANDLED;
++}
++
++static irqreturn_t xen_ppc_msg_debugger_break(int irq, void *dev_id,
++					  struct pt_regs *regs)
++{
++	smp_message_recv(PPC_MSG_DEBUGGER_BREAK, regs);
++	return IRQ_HANDLED;
++}
++
++struct message {
++	irqreturn_t (*f)(int, void *, struct pt_regs *);
++	int num;
++	char *name;
++};
++static struct message ipi_msgs[] = {
++	{
++		.num = PPC_MSG_RESCHEDULE,
++		.f = xen_ppc_msg_reschedule,
++		.name = "IPI-resched"
++	},
++	{
++		.num = PPC_MSG_CALL_FUNCTION,
++		.f = xen_ppc_msg_call_function,
++		.name = "IPI-function"
++	},
++	{
++		.num = PPC_MSG_DEBUGGER_BREAK,
++		.f = xen_ppc_msg_debugger_break,
++		.name = "IPI-debug"
++	}
++};
++
++DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
++
++static void __devinit smp_xen_setup_cpu(int cpu)
++{
++	int irq;
++	int i;
++	const int nr_ipis = ARRAY_SIZE(__get_cpu_var(ipi_to_irq));
++
++	/* big scary include web could mess with our values, so we
++	 * make sure they are sane */
++	BUG_ON(ARRAY_SIZE(ipi_msgs) > nr_ipis);
++
++	for (i = 0; i < ARRAY_SIZE(ipi_msgs); i++) {
++		BUG_ON(ipi_msgs[i].num >= nr_ipis);
++
++		irq = bind_ipi_to_irqhandler(ipi_msgs[i].num,
++					     cpu,
++					     ipi_msgs[i].f,
++					     SA_INTERRUPT,
++					     ipi_msgs[i].name,
++					     NULL);
++		BUG_ON(irq < 0);
++		per_cpu(ipi_to_irq, cpu)[ipi_msgs[i].num] = irq;
++		DBG("%s: cpu: %d vector :%d irq: %d\n",
++		       __func__, cpu, ipi_msgs[i].num, irq);
++	}
++}
++
++static inline void send_IPI_one(unsigned int cpu, int vector)
++{
++	int irq;
++
++	irq = per_cpu(ipi_to_irq, cpu)[vector];
++	BUG_ON(irq < 0);
++
++	DBG("%s: cpu: %d vector :%d irq: %d!\n",
++	       __func__, cpu, vector, irq);
++	DBG("%s: per_cpu[%p]: %d %d %d %d\n",
++	       __func__, per_cpu(ipi_to_irq, cpu),
++	       per_cpu(ipi_to_irq, cpu)[0],
++	       per_cpu(ipi_to_irq, cpu)[1],
++	       per_cpu(ipi_to_irq, cpu)[2],
++	       per_cpu(ipi_to_irq, cpu)[3]);
++
++	notify_remote_via_irq(irq);
++}
++
++static void smp_xen_message_pass(int target, int msg)
++{
++	int cpu;
++
++	switch (msg) {
++	case PPC_MSG_RESCHEDULE:
++	case PPC_MSG_CALL_FUNCTION:
++	case PPC_MSG_DEBUGGER_BREAK:
++		break;
++	default:
++		panic("SMP %d: smp_message_pass: unknown msg %d\n",
++		       smp_processor_id(), msg);
++		return;
++	}
++	switch (target) {
++	case MSG_ALL:
++	case MSG_ALL_BUT_SELF:
++		for_each_online_cpu(cpu) {
++			if (target == MSG_ALL_BUT_SELF &&
++			    cpu == smp_processor_id())
++				continue;
++			send_IPI_one(cpu, msg);
++		}
++		break;
++	default:
++		send_IPI_one(target, msg);
++		break;
++	}
++}
++
++static struct smp_ops_t xen_smp_ops = {
++	.probe		= smp_xen_probe,
++	.message_pass	= smp_xen_message_pass,
++	.kick_cpu	= smp_generic_kick_cpu,
++	.setup_cpu	= smp_xen_setup_cpu,
++};
++
++void xen_setup_smp(void)
++{
++	smp_ops = &xen_smp_ops;
++
++	xen_boot_secondary_vcpus();
++	smp_release_cpus();
++}
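
On the send side, the code above reduces to a per-CPU table lookup followed by
an event-channel notification; a minimal sketch, assuming smp_xen_setup_cpu()
has already bound the IPIs for the target cpu:

/* sketch only: what smp_xen_message_pass(cpu, PPC_MSG_RESCHEDULE)
 * boils down to for a single remote cpu */
static void example_send_resched(unsigned int cpu)
{
	int irq = per_cpu(ipi_to_irq, cpu)[PPC_MSG_RESCHEDULE];

	BUG_ON(irq < 0);		/* must have been bound in setup_cpu */
	notify_remote_via_irq(irq);	/* the target cpu then runs
					 * xen_ppc_msg_reschedule() */
}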
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/platforms/xen/time.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/powerpc/platforms/xen/time.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,114 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Jimi Xenidis <jimix at watson.ibm.com>
++ */
++
++#include <linux/module.h>
++#include <linux/time.h>
++#include <linux/rtc.h>
++#include <asm/hypervisor.h>
++#include <asm/machdep.h>
++#include <asm/time.h>
++#include <asm/udbg.h>
++
++#ifdef DEBUG
++#define DBG(fmt...) printk(fmt)
++#else
++#define DBG(fmt...)
++#endif
++
++void time_resume(void)
++{
++	snapshot_timebase();
++}
++
++static inline ulong time_from_shared(void)
++{
++	ulong t;
++
++	DBG("tb_freq: %ld\n", ppc_tb_freq);
++
++	t = mftb() - HYPERVISOR_shared_info->arch.boot_timebase;
++	t /= ppc_tb_freq;
++	t += HYPERVISOR_shared_info->wc_sec;
++
++	return t;
++}
++
++static void (*host_md_get_rtc_time)(struct rtc_time *tm);
++static void xen_get_rtc_time(struct rtc_time *tm)
++{
++	if (is_initial_xendomain()) {
++		host_md_get_rtc_time(tm);
++		return;
++	} else {
++		ulong t;
++
++		t = time_from_shared();
++		to_tm(t, tm);
++	}
++}
++
++static int (*host_md_set_rtc_time)(struct rtc_time *tm);
++static int xen_set_rtc_time(struct rtc_time *tm)
++{
++	ulong sec;
++
++	if (is_initial_xendomain()) {
++		host_md_set_rtc_time(tm);
++		return 0;
++	}
++
++	sec = mktime(tm->tm_year, tm->tm_mon, tm->tm_mday,
++		     tm->tm_hour, tm->tm_min, tm->tm_sec);
++
++	HYPERVISOR_shared_info->wc_sec = sec;
++	HYPERVISOR_shared_info->arch.boot_timebase = mftb();
++
++	return 0;
++}
++
++static unsigned long (*host_md_get_boot_time)(void);
++static unsigned long __init xen_get_boot_time(void)
++{
++	ulong t;
++
++	if (is_initial_xendomain()) {
++		t = host_md_get_boot_time();
++
++		HYPERVISOR_shared_info->wc_sec = t;
++		HYPERVISOR_shared_info->arch.boot_timebase = mftb();
++		DBG("%s: time: %ld\n", __func__, t);
++	} else {
++		t = time_from_shared();
++		DBG("%s: %ld\n", __func__, t);
++	}
++	return t;
++}
++
++void __init xen_setup_time(struct machdep_calls *host_md)
++{
++	ppc_md.get_boot_time = xen_get_boot_time;
++	host_md_get_boot_time = host_md->get_boot_time;
++
++	ppc_md.set_rtc_time = xen_set_rtc_time;
++	host_md_set_rtc_time = host_md->set_rtc_time;
++
++	ppc_md.get_rtc_time = xen_get_rtc_time;
++	host_md_get_rtc_time = host_md->get_rtc_time;
++}
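
The domU wallclock above is derived from two shared-info fields; a worked
example with assumed values (all numbers illustrative):

/* time_from_shared() computes
 *   t = (mftb() - arch.boot_timebase) / ppc_tb_freq + wc_sec
 * e.g. with wc_sec == 1224324000, ppc_tb_freq == 512000000 and a
 * timebase delta of 1024000000 ticks:
 *   t = 1024000000 / 512000000 + 1224324000 == 1224324002 seconds.
 * xen_set_rtc_time() is the inverse direction: it stores the new wall
 * time in wc_sec and rebases arch.boot_timebase to the current mftb(). */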
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/platforms/xen/udbg_xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/powerpc/platforms/xen/udbg_xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,164 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Jimi Xenidis <jimix at watson.ibm.com>
++ */
++
++#include <linux/module.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/io/console.h>
++#include <xen/evtchn.h>
++#include <asm/udbg.h>
++#include <asm/hypervisor.h>
++#include "setup.h"
++
++static void udbg_xen_wait(void)
++{
++	evtchn_port_t port = 0;
++
++	if (xen_start_info) {
++		port = xen_start_info->console.domU.evtchn;
++		clear_evtchn(port);
++	}
++	HYPERVISOR_poll(&port, 1, 10);
++}
++
++static int udbg_getc_xen(void)
++{
++	int ch;
++	for (;;) {
++		ch = udbg_getc_poll();
++		if (ch == -1) {
++			udbg_xen_wait();
++		} else {
++			return ch;
++		}
++	}
++}
++
++static void udbg_putc_dom0_xen(char c)
++{
++	int rc;	/* signed: the hcall returns a negative value on error */
++
++	if (c == '\n')
++		udbg_putc_dom0_xen('\r');
++
++	do {
++		rc = HYPERVISOR_console_io(CONSOLEIO_write, 1, &c);
++	} while (rc < 0);
++}
++
++/* Buffered chars getc */
++static long inbuflen;
++static char inbuf[128];	/* Xen serial ring buffer */
++
++static int udbg_getc_poll_dom0_xen(void)
++{
++	/* The interface is tricky because it may return many chars.
++	 * We save them statically for future calls to udbg_getc().
++	 */
++	char ch, *buf = (char *)inbuf;
++	int i;
++
++	if (inbuflen == 0) {
++		/* get some more chars. */
++		inbuflen = HYPERVISOR_console_io(CONSOLEIO_read,
++						 sizeof(inbuf), buf);
++	}
++
++	if (inbuflen == 0)
++		return -1;
++
++	ch = buf[0];
++	for (i = 1; i < inbuflen; i++)	/* shuffle them down. */
++		buf[i-1] = buf[i];
++	inbuflen--;
++
++	return ch;
++}
++
++static struct xencons_interface *intf;
++
++static void udbg_putc_domu_xen(char c)
++{
++	XENCONS_RING_IDX cons, prod;
++
++	if (c == '\n')
++		udbg_putc_domu_xen('\r');
++
++	cons = intf->out_cons;
++	prod = intf->out_prod;
++	mb();
++
++	if ((prod - cons) < sizeof(intf->out))
++		intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = c;
++
++	wmb();
++	intf->out_prod = prod;
++
++	if (xen_start_info)
++		notify_remote_via_evtchn(xen_start_info->console.domU.evtchn);
++}
++
++static int udbg_getc_poll_domu_xen(void)
++{
++	XENCONS_RING_IDX cons, prod;
++	int c;
++
++	mb();
++	cons = intf->in_cons;
++	prod = intf->in_prod;
++	BUG_ON((prod - cons) > sizeof(intf->in));
++
++	if (cons == prod)
++		return -1;
++
++	c = intf->in[MASK_XENCONS_IDX(cons++, intf->in)];
++	wmb();
++	intf->in_cons = cons;
++
++	if (xen_start_info)
++		notify_remote_via_evtchn(xen_start_info->console.domU.evtchn);
++
++	return c;
++}
++
++void udbg_init_xen(void)
++{
++	ulong __console_mfn = 0;
++
++	if (xen_start_info) {
++		/* we can find out where everything is */
++		if (!(xen_start_info->flags & SIF_INITDOMAIN))
++			__console_mfn = xen_start_info->console.domU.mfn;
++	} else {
++		/* VERY early printf */
++#ifdef CONFIG_PPC_EARLY_DEBUG_XEN_DOMU
++		__console_mfn = 0x3ffdUL;
++#endif
++	}
++
++	udbg_getc = udbg_getc_xen;
++	if (__console_mfn == 0) {
++		udbg_putc = udbg_putc_dom0_xen;
++		udbg_getc_poll = udbg_getc_poll_dom0_xen;
++	} else {
++		udbg_putc = udbg_putc_domu_xen;
++		udbg_getc_poll = udbg_getc_poll_domu_xen;
++		intf = (struct xencons_interface *)mfn_to_virt(__console_mfn);
++	}
++}
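
The domU console above is a single-producer/single-consumer ring: the guest
only advances out_prod, the backend only advances out_cons, and
MASK_XENCONS_IDX() wraps the free-running indices into the buffer. A minimal
sketch of the producer side under those assumptions (unlike
udbg_putc_domu_xen() above, it reports a full ring instead of dropping the
character):

/* sketch only: non-blocking single-character enqueue on the out ring */
static int example_ring_put(struct xencons_interface *ring, char c)
{
	XENCONS_RING_IDX cons = ring->out_cons;
	XENCONS_RING_IDX prod = ring->out_prod;

	mb();				/* read both indices before testing */
	if ((prod - cons) >= sizeof(ring->out))
		return -1;		/* ring full; caller may retry */
	ring->out[MASK_XENCONS_IDX(prod++, ring->out)] = c;
	wmb();				/* data visible before index update */
	ring->out_prod = prod;
	return 0;
}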
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/platforms/xen/util.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/powerpc/platforms/xen/util.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,70 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Jimi Xenidis <jimix at watson.ibm.com>
++ */
++
++#include <linux/config.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <asm/uaccess.h>
++#include <xen/driver_util.h>
++#include "setup.h"
++
++struct vm_struct *alloc_vm_area(unsigned long size)
++{
++	struct vm_struct *area;
++	struct page *page;
++
++	page = alloc_foreign_page();
++	if (page == NULL) {
++		BUG();
++		return NULL;
++	}
++
++	area = kmalloc(sizeof(*area), GFP_KERNEL);
++	if (area != NULL) {
++		area->flags = VM_MAP;	/* XXX */
++		area->addr = pfn_to_kaddr(page_to_pfn(page));
++		area->size = size;
++		area->pages = NULL;	/* XXX */
++		area->nr_pages = size >> PAGE_SHIFT;
++		area->phys_addr = 0;
++	}
++	return area;
++}
++EXPORT_SYMBOL_GPL(alloc_vm_area);
++
++void free_vm_area(struct vm_struct *area)
++{
++	free_foreign_page(virt_to_page(area->addr));
++	kfree(area);
++}
++EXPORT_SYMBOL_GPL(free_vm_area);
++
++void lock_vm_area(struct vm_struct *area)
++{
++	preempt_disable();
++}
++
++void unlock_vm_area(struct vm_struct *area)
++{
++	preempt_enable();
++}
++EXPORT_SYMBOL_GPL(unlock_vm_area);
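
Since the area above is backed by one pre-allocated foreign page rather than
by vmalloc space, callers treat it like any other vm_struct; a minimal usage
sketch, assuming a one-page mapping:

/* sketch only: reserve an area, use its kernel address, release it */
struct vm_struct *area = alloc_vm_area(PAGE_SIZE);

if (area != NULL) {
	void *vaddr = area->addr;	/* hand this to the foreign
					 * mapping / grant-table code */
	/* ... */
	free_vm_area(area);		/* returns the page to the pool */
}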
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/platforms/xen/xen_guest.S
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/powerpc/platforms/xen/xen_guest.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,27 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Jimi Xenidis <jimix at watson.ibm.com>
++ */
++
++	.section __xen_guest
++	.ascii	"GUEST_OS=linux"
++	.ascii  ",GUEST_VER=xen-3.0"
++	.ascii	",XEN_VER=xen-3.0"
++	.ascii	",VIRT_BASE=0xC000000000000000"
++	.ascii	",LOADER=generic"
++	.byte	0
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/platforms/xen/xencomm.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/powerpc/platforms/xen/xencomm.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,54 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ * 
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Hollis Blanchard <hollisb at us.ibm.com>
++ */
++
++#include <linux/types.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <asm/page.h>
++#include <asm/current.h>
++#include <xen/interface/arch-powerpc.h>
++#include <xen/xencomm.h>
++
++/* translate virtual address to physical address */
++unsigned long xencomm_vtop(unsigned long vaddr)
++{
++	struct page *page;
++	struct vm_area_struct *vma;
++
++	/* NULL is NULL */
++	if (vaddr == 0)
++		return 0;
++
++	if (is_kernel_addr(vaddr))
++		return __pa(vaddr);
++
++	/* XXX double-check (lack of) locking */
++	vma = find_extend_vma(current->mm, vaddr);
++	BUG_ON(!vma);
++	if (!vma)
++		return ~0UL;
++
++	page = follow_page(vma, vaddr, 0);
++	BUG_ON(!page);
++	if (!page)
++		return ~0UL;
++
++	return (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
++}
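
xencomm_vtop() is the primitive the xencomm descriptors rest on: the
hypervisor runs in real mode, so every address it sees must be physical. A
worked note on the return value, matching the formula above:

/* translation preserves the offset within the page:
 *   paddr = (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK)
 * so a buffer that does not cross a page boundary stays contiguous in
 * the physical view, while a buffer that does cross pages needs one
 * xencomm descriptor entry per page. */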
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/sysdev/mpic.c
+--- a/arch/powerpc/sysdev/mpic.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/powerpc/sysdev/mpic.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -764,6 +764,9 @@
+ #ifdef CONFIG_SMP
+ 	else if (hw >= MPIC_VEC_IPI_0) {
+ 		WARN_ON(!(mpic->flags & MPIC_PRIMARY));
++
++		if (mpic->flags & MPIC_SKIP_IPI_INIT)
++			return 0;
+ 
+ 		DBG("mpic: mapping as IPI\n");
+ 		set_irq_chip_data(virq, mpic);
+@@ -1019,6 +1022,9 @@
+ 			   (MPIC_VEC_TIMER_0 + i));
+ 	}
+ 
++	if (mpic->flags & MPIC_SKIP_IPI_INIT)
++		goto ipi_bailout;
++
+ 	/* Initialize IPIs to our reserved vectors and mark them disabled for now */
+ 	mpic_test_broken_ipi(mpic);
+ 	for (i = 0; i < 4; i++) {
+@@ -1028,6 +1034,7 @@
+ 			       (MPIC_VEC_IPI_0 + i));
+ 	}
+ 
++ipi_bailout:
+ 	/* Initialize interrupt sources */
+ 	if (mpic->irq_count == 0)
+ 		mpic->irq_count = mpic->num_sources;
+diff -r d894e36cfc30 -r 0aa021803deb arch/powerpc/xmon/xmon.c
+--- a/arch/powerpc/xmon/xmon.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/powerpc/xmon/xmon.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -752,6 +752,9 @@
+ 			cmd = inchar();
+ 		}
+ 		switch (cmd) {
++		case 'A':
++			asm volatile(".long 0x200;nop");
++			break;
+ 		case 'm':
+ 			cmd = inchar();
+ 			switch (cmd) {
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/Kconfig
+--- a/arch/x86_64/Kconfig	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/x86_64/Kconfig	Wed Sep 10 10:54:08 2008 +0100
+@@ -135,6 +135,23 @@
+ 
+ endchoice
+ 
++config X86_64_XEN
++	bool "Enable Xen compatible kernel"
++	select XEN
++	select SWIOTLB
++	help
++	  This option will compile a kernel compatible with the Xen hypervisor.
++
++config X86_NO_TSS
++	bool
++	depends on X86_64_XEN
++	default y
++
++config X86_NO_IDT
++	bool
++	depends on X86_64_XEN
++	default y
++
+ #
+ # Define implied options from the CPU selection here
+ #
+@@ -155,6 +172,7 @@
+ 
+ config X86_TSC
+ 	bool
++	depends on !X86_64_XEN
+ 	default y
+ 
+ config X86_GOOD_APIC
+@@ -197,7 +215,7 @@
+ 
+ config X86_HT
+ 	bool
+-	depends on SMP && !MK8
++	depends on SMP && !MK8 && !X86_64_XEN
+ 	default y
+ 
+ config MATH_EMULATION
+@@ -211,14 +229,22 @@
+ 
+ config X86_IO_APIC
+ 	bool
++	depends on !XEN_UNPRIVILEGED_GUEST
++	default y
++
++config X86_XEN_GENAPIC
++	bool
++	depends on X86_64_XEN
+ 	default y
+ 
+ config X86_LOCAL_APIC
+ 	bool
++	depends on !XEN_UNPRIVILEGED_GUEST
+ 	default y
+ 
+ config MTRR
+ 	bool "MTRR (Memory Type Range Register) support"
++	depends on !XEN_UNPRIVILEGED_GUEST
+ 	---help---
+ 	  On Intel P6 family processors (Pentium Pro, Pentium II and later)
+ 	  the Memory Type Range Registers (MTRRs) may be used to control
+@@ -259,7 +285,7 @@
+ 
+ config SCHED_SMT
+ 	bool "SMT (Hyperthreading) scheduler support"
+-	depends on SMP
++	depends on SMP && !X86_64_XEN
+ 	default n
+ 	help
+ 	  SMT scheduler support improves the CPU scheduler's decision making
+@@ -269,7 +295,7 @@
+ 
+ config SCHED_MC
+ 	bool "Multi-core scheduler support"
+-	depends on SMP
++	depends on SMP && !X86_64_XEN
+ 	default y
+ 	help
+ 	  Multi-core scheduler support improves the CPU scheduler's decision
+@@ -280,7 +306,7 @@
+ 
+ config NUMA
+        bool "Non Uniform Memory Access (NUMA) Support"
+-       depends on SMP
++       depends on SMP && !X86_64_XEN
+        help
+ 	 Enable NUMA (Non Uniform Memory Access) support. The kernel 
+ 	 will try to allocate memory used by a CPU on the local memory 
+@@ -341,7 +367,7 @@
+ 
+ config ARCH_SPARSEMEM_ENABLE
+ 	def_bool y
+-	depends on (NUMA || EXPERIMENTAL)
++	depends on (NUMA || EXPERIMENTAL) && !X86_64_XEN
+ 
+ config ARCH_MEMORY_PROBE
+ 	def_bool y
+@@ -365,6 +391,7 @@
+ 	int "Maximum number of CPUs (2-256)"
+ 	range 2 255
+ 	depends on SMP
++	default "16" if X86_64_XEN
+ 	default "8"
+ 	help
+ 	  This allows you to specify the maximum number of CPUs which this
+@@ -387,6 +414,7 @@
+ 
+ config HPET_TIMER
+ 	bool
++	depends on !X86_64_XEN
+ 	default y
+ 	help
+ 	  Use the IA-PC HPET (High Precision Event Timer) to manage
+@@ -407,7 +435,7 @@
+ 	default y
+ 	select SWIOTLB
+ 	select AGP
+-	depends on PCI
++	depends on PCI && !X86_64_XEN
+ 	help
+ 	  Support for full DMA access of devices with 32bit memory access only
+ 	  on systems with more than 3GB. This is usually needed for USB,
+@@ -423,7 +451,7 @@
+ 	bool "IBM Calgary IOMMU support"
+ 	default y
+ 	select SWIOTLB
+-	depends on PCI && EXPERIMENTAL
++	depends on PCI && !X86_64_XEN && EXPERIMENTAL
+ 	help
+ 	  Support for hardware IOMMUs in IBM's xSeries x366 and x460
+ 	  systems. Needed to run systems with more than 3GB of memory
+@@ -444,6 +472,7 @@
+ 
+ config X86_MCE
+ 	bool "Machine check support" if EMBEDDED
++	depends on !X86_64_XEN
+ 	default y
+ 	help
+ 	   Include a machine check error handler to report hardware errors.
+@@ -469,7 +498,7 @@
+ 
+ config KEXEC
+ 	bool "kexec system call (EXPERIMENTAL)"
+-	depends on EXPERIMENTAL
++	depends on EXPERIMENTAL && !XEN_UNPRIVILEGED_GUEST
+ 	help
+ 	  kexec is a system call that implements the ability to shutdown your
+ 	  current kernel, and to start another kernel.  It is like a reboot
+@@ -564,8 +593,9 @@
+ 	default y
+ 
+ menu "Power management options"
++	depends on !XEN_UNPRIVILEGED_GUEST
+ 
+-source kernel/power/Kconfig
++source "kernel/power/Kconfig"
+ 
+ source "drivers/acpi/Kconfig"
+ 
+@@ -587,6 +617,22 @@
+ config PCI_MMCONFIG
+ 	bool "Support mmconfig PCI config space access"
+ 	depends on PCI && ACPI
++
++config XEN_PCIDEV_FRONTEND
++	bool "Xen PCI Frontend"
++	depends on PCI && X86_64_XEN
++	select HOTPLUG
++	default y
++	help
++	  The PCI device frontend driver allows the kernel to import arbitrary
++	  PCI devices from a PCI backend to support PCI driver domains.
++
++config XEN_PCIDEV_FE_DEBUG
++	bool "Xen PCI Frontend Debugging"
++	depends on XEN_PCIDEV_FRONTEND
++	default n
++	help
++	  Enables some debug statements within the PCI Frontend.
+ 
+ source "drivers/pci/pcie/Kconfig"
+ 
+@@ -658,4 +704,6 @@
+ 
+ source "crypto/Kconfig"
+ 
++source "drivers/xen/Kconfig"
++
+ source "lib/Kconfig"
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/Makefile
+--- a/arch/x86_64/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/x86_64/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -71,9 +71,22 @@
+ 
+ boot := arch/x86_64/boot
+ 
+-PHONY += bzImage bzlilo install archmrproper \
++PHONY += bzImage bzlilo vmlinuz install archmrproper \
+ 	 fdimage fdimage144 fdimage288 isoimage archclean
+ 
++ifdef CONFIG_XEN
++CPPFLAGS := -D__XEN_INTERFACE_VERSION__=$(CONFIG_XEN_INTERFACE_VERSION) \
++	-Iinclude$(if $(KBUILD_SRC),2)/asm/mach-xen $(CPPFLAGS)
++LDFLAGS_vmlinux := -e startup_64
++#Default target when executing "make"
++all: vmlinuz
++
++BOOTIMAGE                     := $(boot)/vmlinuz
++KBUILD_IMAGE                  := $(BOOTIMAGE)
++
++vmlinuz: vmlinux
++	$(Q)$(MAKE) $(build)=$(boot) $(BOOTIMAGE)
++else
+ #Default target when executing "make"
+ all: bzImage
+ 
+@@ -91,6 +104,7 @@
+ 
+ fdimage fdimage144 fdimage288 isoimage: vmlinux
+ 	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@
++endif
+ 
+ install:
+ 	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@ 
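Two things happen in the arch Makefile hunk above: every translation unit now
sees __XEN_INTERFACE_VERSION__ plus the mach-xen include path, and the default
build target switches from bzImage to vmlinuz, since a Xen domain builder
loads a gzip-compressed ELF image instead of a real-mode boot sector. The
public Xen headers key their declarations off that version macro, roughly
like this (simplified sketch with an example version number, not a verbatim
header excerpt):

    #if __XEN_INTERFACE_VERSION__ >= 0x00030202
    /* declarations matching the current hypercall ABI */
    #else
    /* compatibility declarations for older interface versions */
    #endif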
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/boot/Makefile
+--- a/arch/x86_64/boot/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/x86_64/boot/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -26,7 +26,7 @@
+ #RAMDISK := -DRAMDISK=512
+ 
+ targets		:= vmlinux.bin bootsect bootsect.o \
+-		   setup setup.o bzImage mtools.conf
++		   setup setup.o bzImage mtools.conf vmlinuz vmlinux-stripped
+ 
+ EXTRA_CFLAGS := -m32
+ 
+@@ -131,5 +131,13 @@
+ 	cp System.map $(INSTALL_PATH)/
+ 	if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi
+ 
++$(obj)/vmlinuz: $(obj)/vmlinux-stripped FORCE
++	$(call if_changed,gzip)
++	@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
++
++$(obj)/vmlinux-stripped: OBJCOPYFLAGS := -g --strip-unneeded
++$(obj)/vmlinux-stripped: vmlinux FORCE
++	$(call if_changed,objcopy)
++
+ install:
+ 	sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)"
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/ia32/Makefile
+--- a/arch/x86_64/ia32/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/x86_64/ia32/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -14,11 +14,14 @@
+ audit-class-$(CONFIG_AUDIT) := audit.o
+ obj-$(CONFIG_IA32_EMULATION) += $(audit-class-y)
+ 
++syscall32-types-y := sysenter syscall
++syscall32-types-$(subst 1,$(CONFIG_XEN),$(shell expr $(CONFIG_XEN_COMPAT)0 '<' 0x0302000)) += int80
++
+ $(obj)/syscall32_syscall.o: \
+-	$(foreach F,sysenter syscall,$(obj)/vsyscall-$F.so)
++	$(foreach F,$(syscall32-types-y),$(obj)/vsyscall-$F.so)
+ 
+ # Teach kbuild about targets
+-targets := $(foreach F,sysenter syscall,vsyscall-$F.o vsyscall-$F.so)
++targets := $(foreach F,$(syscall32-types-y),vsyscall-$F.o vsyscall-$F.so)
+ 
+ # The DSO images are built using a special linker script
+ quiet_cmd_syscall = SYSCALL $@
+@@ -27,9 +30,10 @@
+ 			   -Wl,-soname=linux-gate.so.1 -o $@ \
+ 			   -Wl,-T,$(filter-out FORCE,$^)
+ 
+-$(obj)/vsyscall-sysenter.so $(obj)/vsyscall-syscall.so: \
++$(foreach F,$(syscall32-types-y),$(obj)/vsyscall-$F.so): \
+ $(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
+ 	$(call if_changed,syscall)
+ 
+-AFLAGS_vsyscall-sysenter.o = -m32 -Wa,-32
+-AFLAGS_vsyscall-syscall.o = -m32 -Wa,-32
++AFLAGS_vsyscall-sysenter.o = -m32 -Wa,-32 -Iarch/i386/kernel
++AFLAGS_vsyscall-syscall.o = -m32 -Wa,-32 -Iarch/i386/kernel
++AFLAGS_vsyscall-int80.o = -m32 -Wa,-32 -Iarch/i386/kernel
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/ia32/ia32_binfmt.c
+--- a/arch/x86_64/ia32/ia32_binfmt.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/x86_64/ia32/ia32_binfmt.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -64,55 +64,6 @@
+ 
+ #define ELF_NGREG (sizeof (struct user_regs_struct32) / sizeof(elf_greg_t))
+ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+-
+-/*
+- * These macros parameterize elf_core_dump in fs/binfmt_elf.c to write out
+- * extra segments containing the vsyscall DSO contents.  Dumping its
+- * contents makes post-mortem fully interpretable later without matching up
+- * the same kernel and hardware config to see what PC values meant.
+- * Dumping its extra ELF program headers includes all the other information
+- * a debugger needs to easily find how the vsyscall DSO was being used.
+- */
+-#define ELF_CORE_EXTRA_PHDRS	(find_vma(current->mm, VSYSCALL32_BASE) ?     \
+-    (VSYSCALL32_EHDR->e_phnum) : 0)
+-#define ELF_CORE_WRITE_EXTRA_PHDRS					      \
+-do {									      \
+-	if (find_vma(current->mm, VSYSCALL32_BASE)) { 			      \
+-		const struct elf32_phdr *const vsyscall_phdrs =		      \
+-			(const struct elf32_phdr *) (VSYSCALL32_BASE	      \
+-						   + VSYSCALL32_EHDR->e_phoff);\
+-		int i;							      \
+-		Elf32_Off ofs = 0;					      \
+-		for (i = 0; i < VSYSCALL32_EHDR->e_phnum; ++i) {	      \
+-			struct elf32_phdr phdr = vsyscall_phdrs[i];	      \
+-			if (phdr.p_type == PT_LOAD) {			      \
+-				BUG_ON(ofs != 0);			      \
+-				ofs = phdr.p_offset = offset;		      \
+-				phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz);      \
+-				phdr.p_filesz = phdr.p_memsz;		      \
+-				offset += phdr.p_filesz;		      \
+-			}						      \
+-			else						      \
+-				phdr.p_offset += ofs;			      \
+-			phdr.p_paddr = 0; /* match other core phdrs */	      \
+-			DUMP_WRITE(&phdr, sizeof(phdr));		      \
+-		}							      \
+-	}								      \
+-} while (0)
+-#define ELF_CORE_WRITE_EXTRA_DATA					      \
+-do {									      \
+-	if (find_vma(current->mm, VSYSCALL32_BASE)) { 			      \
+-		const struct elf32_phdr *const vsyscall_phdrs =		      \
+-			(const struct elf32_phdr *) (VSYSCALL32_BASE	      \
+-						   + VSYSCALL32_EHDR->e_phoff);      \
+-		int i;							      \
+-		for (i = 0; i < VSYSCALL32_EHDR->e_phnum; ++i) {	      \
+-			if (vsyscall_phdrs[i].p_type == PT_LOAD)	      \
+-				DUMP_WRITE((void *) (u64) vsyscall_phdrs[i].p_vaddr,\
+-				    PAGE_ALIGN(vsyscall_phdrs[i].p_memsz));   \
+-		}							      \
+-	}								      \
+-} while (0)
+ 
+ struct elf_siginfo
+ {
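The deletion above drops the hand-rolled ELF_CORE_EXTRA_PHDRS /
ELF_CORE_WRITE_EXTRA_* machinery that spliced the 32-bit vsyscall DSO into
core dumps. It is compensated further down in this patch: the syscall32.c
hunk tags the vsyscall VMA with VM_ALWAYSDUMP, so the generic elf_core_dump()
now includes the page like any other mapping.

    /* From the syscall32.c hunk later in this patch: one flag replaces
     * roughly fifty lines of per-arch core-dump macros. */
    vma->vm_flags |= VM_ALWAYSDUMP;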
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/ia32/ia32_signal.c
+--- a/arch/x86_64/ia32/ia32_signal.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/x86_64/ia32/ia32_signal.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -113,25 +113,19 @@
+ }
+ 
+ asmlinkage long
+-sys32_sigsuspend(int history0, int history1, old_sigset_t mask,
+-		 struct pt_regs *regs)
++sys32_sigsuspend(int history0, int history1, old_sigset_t mask)
+ {
+-	sigset_t saveset;
+-
+ 	mask &= _BLOCKABLE;
+ 	spin_lock_irq(&current->sighand->siglock);
+-	saveset = current->blocked;
++	current->saved_sigmask = current->blocked;
+ 	siginitset(&current->blocked, mask);
+ 	recalc_sigpending();
+ 	spin_unlock_irq(&current->sighand->siglock);
+ 
+-	regs->rax = -EINTR;
+-	while (1) {
+-		current->state = TASK_INTERRUPTIBLE;
+-		schedule();
+-		if (do_signal(regs, &saveset))
+-			return -EINTR;
+-	}
++	current->state = TASK_INTERRUPTIBLE;
++	schedule();
++	set_thread_flag(TIF_RESTORE_SIGMASK);
++	return -ERESTARTNOHAND;
+ }
+ 
+ asmlinkage long
+@@ -508,11 +502,11 @@
+ 		current->comm, current->pid, frame, regs->rip, frame->pretcode);
+ #endif
+ 
+-	return 1;
++	return 0;
+ 
+ give_sigsegv:
+ 	force_sigsegv(sig, current);
+-	return 0;
++	return -EFAULT;
+ }
+ 
+ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+@@ -604,9 +598,9 @@
+ 		current->comm, current->pid, frame, regs->rip, frame->pretcode);
+ #endif
+ 
+-	return 1;
++	return 0;
+ 
+ give_sigsegv:
+ 	force_sigsegv(sig, current);
+-	return 0;
++	return -EFAULT;
+ }
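Two calling conventions change in ia32_signal.c above: sys32_sigsuspend()
replaces its private do_signal() loop with the saved_sigmask /
TIF_RESTORE_SIGMASK idiom, and the frame-setup functions now return 0 on
success and -EFAULT on failure instead of a 1/0 flag. A hedged sketch of a
caller under the new convention (the matching handle_signal() update is not
part of this hunk, so the caller shape is assumed):

    /* Sketch, assuming a caller shaped like handle_signal(): a negative
     * return propagates the error; force_sigsegv() has already been
     * raised inside the setup function on the failure path. */
    ret = ia32_setup_rt_frame(sig, ka, info, oldset, regs);
    if (ret < 0)
            return ret;     /* frame could not be written to the user stack */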
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/ia32/ia32entry-xen.S
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/ia32/ia32entry-xen.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,666 @@
++/*
++ * Compatibility mode system call entry point for x86-64. 
++ * 		
++ * Copyright 2000-2002 Andi Kleen, SuSE Labs.
++ */		 
++
++#include <asm/dwarf2.h>
++#include <asm/calling.h>
++#include <asm/asm-offsets.h>
++#include <asm/current.h>
++#include <asm/errno.h>
++#include <asm/ia32_unistd.h>	
++#include <asm/thread_info.h>	
++#include <asm/segment.h>
++#include <asm/vsyscall32.h>
++#include <asm/irqflags.h>
++#include <linux/linkage.h>
++
++#define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8)
++
++	.macro IA32_ARG_FIXUP noebp=0
++	movl	%edi,%r8d
++	.if \noebp
++	.else
++	movl	%ebp,%r9d
++	.endif
++	xchg	%ecx,%esi
++	movl	%ebx,%edi
++	movl	%edx,%edx	/* zero extension */
++	.endm 
++
++	/* clobbers %eax */	
++	.macro  CLEAR_RREGS
++	xorl 	%eax,%eax
++	movq	%rax,R11(%rsp)
++	movq	%rax,R10(%rsp)
++	movq	%rax,R9(%rsp)
++	movq	%rax,R8(%rsp)
++	.endm
++
++	.macro LOAD_ARGS32 offset
++	movl \offset(%rsp),%r11d
++	movl \offset+8(%rsp),%r10d
++	movl \offset+16(%rsp),%r9d
++	movl \offset+24(%rsp),%r8d
++	movl \offset+40(%rsp),%ecx
++	movl \offset+48(%rsp),%edx
++	movl \offset+56(%rsp),%esi
++	movl \offset+64(%rsp),%edi
++	movl \offset+72(%rsp),%eax
++	.endm
++
++	.macro CFI_STARTPROC32 simple
++	CFI_STARTPROC	\simple
++	CFI_UNDEFINED	r8
++	CFI_UNDEFINED	r9
++	CFI_UNDEFINED	r10
++	CFI_UNDEFINED	r11
++	CFI_UNDEFINED	r12
++	CFI_UNDEFINED	r13
++	CFI_UNDEFINED	r14
++	CFI_UNDEFINED	r15
++	.endm
++
++/*
++ * 32bit SYSENTER instruction entry.
++ *
++ * Arguments:
++ * %eax	System call number.
++ * %ebx Arg1
++ * %ecx Arg2
++ * %edx Arg3
++ * %esi Arg4
++ * %edi Arg5
++ * %ebp user stack
++ * 0(%ebp) Arg6	
++ * 	
++ * Interrupts on.
++ *	
++ * This is purely a fast path. For anything complicated we use the int 0x80
++ * path below.	Set up a complete hardware stack frame to share code
++ * with the int 0x80 path.
++ */ 	
++ENTRY(ia32_sysenter_target)
++	CFI_STARTPROC32	simple
++	CFI_DEF_CFA	rsp,SS+8-RIP+16
++	/*CFI_REL_OFFSET	ss,SS-RIP+16*/
++	CFI_REL_OFFSET	rsp,RSP-RIP+16
++	/*CFI_REL_OFFSET	rflags,EFLAGS-RIP+16*/
++	/*CFI_REL_OFFSET	cs,CS-RIP+16*/
++	CFI_REL_OFFSET	rip,RIP-RIP+16
++	CFI_REL_OFFSET	r11,8
++	CFI_REL_OFFSET	rcx,0
++	movq	8(%rsp),%r11
++	CFI_RESTORE	r11
++	popq	%rcx
++	CFI_ADJUST_CFA_OFFSET -8
++	CFI_RESTORE	rcx
++ 	movl	%ebp,%ebp		/* zero extension */
++	movl	%eax,%eax
++	movl	$__USER32_DS,40(%rsp)
++	movq	%rbp,32(%rsp)
++	movl	$__USER32_CS,16(%rsp)
++	movl	$VSYSCALL32_SYSEXIT,8(%rsp)
++	movq	%rax,(%rsp)
++	cld
++	SAVE_ARGS 0,0,0
++ 	/* no need to do an access_ok check here because rbp has been
++ 	   32bit zero extended */ 
++1:	movl	(%rbp),%r9d
++ 	.section __ex_table,"a"
++ 	.quad 1b,ia32_badarg
++ 	.previous	
++	GET_THREAD_INFO(%r10)
++	orl    $TS_COMPAT,threadinfo_status(%r10)
++	testl  $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
++	jnz  sysenter_tracesys
++sysenter_do_call:	
++	cmpl	$(IA32_NR_syscalls-1),%eax
++	ja	ia32_badsys
++	IA32_ARG_FIXUP 1
++	call	*ia32_sys_call_table(,%rax,8)
++	movq	%rax,RAX-ARGOFFSET(%rsp)
++	jmp int_ret_from_sys_call
++
++sysenter_tracesys:
++	SAVE_REST
++	CLEAR_RREGS
++	movq	$-ENOSYS,RAX(%rsp)	/* really needed? */
++	movq	%rsp,%rdi        /* &pt_regs -> arg1 */
++	call	syscall_trace_enter
++	LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
++	RESTORE_REST
++	movl	%ebp, %ebp
++	/* no need to do an access_ok check here because rbp has been
++	   32bit zero extended */ 
++1:	movl	(%rbp),%r9d
++	.section __ex_table,"a"
++	.quad 1b,ia32_badarg
++	.previous
++	jmp	sysenter_do_call
++	CFI_ENDPROC
++ENDPROC(ia32_sysenter_target)
++
++/*
++ * 32bit SYSCALL instruction entry.
++ *
++ * Arguments:
++ * %eax	System call number.
++ * %ebx Arg1
++ * %ecx return EIP 
++ * %edx Arg3
++ * %esi Arg4
++ * %edi Arg5
++ * %ebp Arg2    [note: not saved in the stack frame, should not be touched]
++ * %esp user stack 
++ * 0(%esp) Arg6
++ * 	
++ * Interrupts on.
++ *	
++ * This is purely a fast path. For anything complicated we use the int 0x80
++ * path below.	Set up a complete hardware stack frame to share code
++ * with the int 0x80 path.	
++ */ 	
++ENTRY(ia32_cstar_target)
++	CFI_STARTPROC32	simple
++	CFI_DEF_CFA	rsp,SS+8-RIP+16
++	/*CFI_REL_OFFSET	ss,SS-RIP+16*/
++	CFI_REL_OFFSET	rsp,RSP-RIP+16
++	/*CFI_REL_OFFSET	rflags,EFLAGS-RIP+16*/
++	/*CFI_REL_OFFSET	cs,CS-RIP+16*/
++	CFI_REL_OFFSET	rip,RIP-RIP+16
++	movl 	%eax,%eax	/* zero extension */
++	movl	RSP-RIP+16(%rsp),%r8d
++	SAVE_ARGS -8,1,1
++	movq	%rax,ORIG_RAX-ARGOFFSET(%rsp)
++	movq	%rbp,RCX-ARGOFFSET(%rsp) /* this lies slightly to ptrace */
++	movl	%ebp,%ecx
++	movl	$__USER32_CS,CS-ARGOFFSET(%rsp)
++	movl	$__USER32_DS,SS-ARGOFFSET(%rsp)
++	/* no need to do an access_ok check here because r8 has been
++	   32bit zero extended */ 
++	/* hardware stack frame is complete now */	
++1:	movl	(%r8),%r9d
++	.section __ex_table,"a"
++	.quad 1b,ia32_badarg
++	.previous	
++	GET_THREAD_INFO(%r10)
++	orl   $TS_COMPAT,threadinfo_status(%r10)
++	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
++	jnz   cstar_tracesys
++cstar_do_call:	
++	cmpl $IA32_NR_syscalls-1,%eax
++	ja  ia32_badsys
++	IA32_ARG_FIXUP 1
++	call *ia32_sys_call_table(,%rax,8)
++	movq %rax,RAX-ARGOFFSET(%rsp)
++	jmp int_ret_from_sys_call
++	
++cstar_tracesys:	
++	SAVE_REST
++	CLEAR_RREGS
++	movq $-ENOSYS,RAX(%rsp)	/* really needed? */
++	movq %rsp,%rdi        /* &pt_regs -> arg1 */
++	call syscall_trace_enter
++	LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
++	RESTORE_REST
++	movl RSP-ARGOFFSET(%rsp), %r8d
++	/* no need to do an access_ok check here because r8 has been
++	   32bit zero extended */ 
++1:	movl	(%r8),%r9d
++	.section __ex_table,"a"
++	.quad 1b,ia32_badarg
++	.previous
++	jmp cstar_do_call
++END(ia32_cstar_target)
++				
++ia32_badarg:
++	movq $-EFAULT,%rax
++	jmp ia32_sysret
++	CFI_ENDPROC
++
++/* 
++ * Emulated IA32 system calls via int 0x80. 
++ *
++ * Arguments:	 
++ * %eax	System call number.
++ * %ebx Arg1
++ * %ecx Arg2
++ * %edx Arg3
++ * %esi Arg4
++ * %edi Arg5
++ * %ebp Arg6    [note: not saved in the stack frame, should not be touched]
++ *
++ * Notes:
++ * Uses the same stack frame as the x86-64 version.	
++ * All registers except %eax must be saved (but ptrace may violate that)
++ * Arguments are zero extended. For system calls that want sign extension and
++ * take long arguments a wrapper is needed. Most calls can just be called
++ * directly.
++ * Assumes it is only called from user space and entered with interrupts on.
++ */ 				
++
++ENTRY(ia32_syscall)
++	CFI_STARTPROC	simple
++	CFI_DEF_CFA	rsp,SS+8-RIP+16
++	/*CFI_REL_OFFSET	ss,SS-RIP+16*/
++	CFI_REL_OFFSET	rsp,RSP-RIP+16
++	/*CFI_REL_OFFSET	rflags,EFLAGS-RIP+16*/
++	/*CFI_REL_OFFSET	cs,CS-RIP+16*/
++	CFI_REL_OFFSET	rip,RIP-RIP+16
++	CFI_REL_OFFSET	r11,8
++	CFI_REL_OFFSET	rcx,0
++	movq 8(%rsp),%r11
++	CFI_RESTORE	r11
++	popq %rcx
++	CFI_ADJUST_CFA_OFFSET -8
++	CFI_RESTORE	rcx
++	movl %eax,%eax
++	movq %rax,(%rsp)
++	cld
++	/* note the registers are not zero extended to the stack frame.
++	   this could be a problem. */
++	SAVE_ARGS 0,0,1
++	GET_THREAD_INFO(%r10)
++	orl   $TS_COMPAT,threadinfo_status(%r10)
++	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
++	jnz ia32_tracesys
++ia32_do_syscall:	
++	cmpl $(IA32_NR_syscalls-1),%eax
++	ja  ia32_badsys
++	IA32_ARG_FIXUP
++	call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
++ia32_sysret:
++	movq %rax,RAX-ARGOFFSET(%rsp)
++	jmp int_ret_from_sys_call 
++
++ia32_tracesys:			 
++	SAVE_REST
++	movq $-ENOSYS,RAX(%rsp)	/* really needed? */
++	movq %rsp,%rdi        /* &pt_regs -> arg1 */
++	call syscall_trace_enter
++	LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
++	RESTORE_REST
++	jmp ia32_do_syscall
++END(ia32_syscall)
++
++ia32_badsys:
++	movq $0,ORIG_RAX-ARGOFFSET(%rsp)
++	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
++	jmp int_ret_from_sys_call
++
++quiet_ni_syscall:
++	movq $-ENOSYS,%rax
++	ret
++	CFI_ENDPROC
++	
++	.macro PTREGSCALL label, func, arg
++	.globl \label
++\label:
++	leaq \func(%rip),%rax
++	leaq -ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
++	jmp  ia32_ptregs_common	
++	.endm
++
++	CFI_STARTPROC32
++
++	PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn, %rdi
++	PTREGSCALL stub32_sigreturn, sys32_sigreturn, %rdi
++	PTREGSCALL stub32_sigaltstack, sys32_sigaltstack, %rdx
++	PTREGSCALL stub32_sigsuspend, sys32_sigsuspend, %rcx
++	PTREGSCALL stub32_execve, sys32_execve, %rcx
++	PTREGSCALL stub32_fork, sys_fork, %rdi
++	PTREGSCALL stub32_clone, sys32_clone, %rdx
++	PTREGSCALL stub32_vfork, sys_vfork, %rdi
++	PTREGSCALL stub32_iopl, sys_iopl, %rsi
++	PTREGSCALL stub32_rt_sigsuspend, sys_rt_sigsuspend, %rdx
++
++ENTRY(ia32_ptregs_common)
++	popq %r11
++	CFI_ENDPROC
++	CFI_STARTPROC32	simple
++	CFI_DEF_CFA	rsp,SS+8-ARGOFFSET
++	CFI_REL_OFFSET	rax,RAX-ARGOFFSET
++	CFI_REL_OFFSET	rcx,RCX-ARGOFFSET
++	CFI_REL_OFFSET	rdx,RDX-ARGOFFSET
++	CFI_REL_OFFSET	rsi,RSI-ARGOFFSET
++	CFI_REL_OFFSET	rdi,RDI-ARGOFFSET
++	CFI_REL_OFFSET	rip,RIP-ARGOFFSET
++/*	CFI_REL_OFFSET	cs,CS-ARGOFFSET*/
++/*	CFI_REL_OFFSET	rflags,EFLAGS-ARGOFFSET*/
++	CFI_REL_OFFSET	rsp,RSP-ARGOFFSET
++/*	CFI_REL_OFFSET	ss,SS-ARGOFFSET*/
++	SAVE_REST
++	call *%rax
++	RESTORE_REST
++	jmp  ia32_sysret	/* misbalances the return cache */
++	CFI_ENDPROC
++END(ia32_ptregs_common)
++
++	.section .rodata,"a"
++	.align 8
++ia32_sys_call_table:
++	.quad sys_restart_syscall
++	.quad sys_exit
++	.quad stub32_fork
++	.quad sys_read
++	.quad sys_write
++	.quad compat_sys_open		/* 5 */
++	.quad sys_close
++	.quad sys32_waitpid
++	.quad sys_creat
++	.quad sys_link
++	.quad sys_unlink		/* 10 */
++	.quad stub32_execve
++	.quad sys_chdir
++	.quad compat_sys_time
++	.quad sys_mknod
++	.quad sys_chmod		/* 15 */
++	.quad sys_lchown16
++	.quad quiet_ni_syscall			/* old break syscall holder */
++	.quad sys_stat
++	.quad sys32_lseek
++	.quad sys_getpid		/* 20 */
++	.quad compat_sys_mount	/* mount  */
++	.quad sys_oldumount	/* old_umount  */
++	.quad sys_setuid16
++	.quad sys_getuid16
++	.quad compat_sys_stime	/* stime */		/* 25 */
++	.quad sys32_ptrace	/* ptrace */
++	.quad sys_alarm
++	.quad sys_fstat	/* (old)fstat */
++	.quad sys_pause
++	.quad compat_sys_utime	/* 30 */
++	.quad quiet_ni_syscall	/* old stty syscall holder */
++	.quad quiet_ni_syscall	/* old gtty syscall holder */
++	.quad sys_access
++	.quad sys_nice	
++	.quad quiet_ni_syscall	/* 35 */	/* old ftime syscall holder */
++	.quad sys_sync
++	.quad sys32_kill
++	.quad sys_rename
++	.quad sys_mkdir
++	.quad sys_rmdir		/* 40 */
++	.quad sys_dup
++	.quad sys32_pipe
++	.quad compat_sys_times
++	.quad quiet_ni_syscall			/* old prof syscall holder */
++	.quad sys_brk		/* 45 */
++	.quad sys_setgid16
++	.quad sys_getgid16
++	.quad sys_signal
++	.quad sys_geteuid16
++	.quad sys_getegid16	/* 50 */
++	.quad sys_acct
++	.quad sys_umount			/* new_umount */
++	.quad quiet_ni_syscall			/* old lock syscall holder */
++	.quad compat_sys_ioctl
++	.quad compat_sys_fcntl64		/* 55 */
++	.quad quiet_ni_syscall			/* old mpx syscall holder */
++	.quad sys_setpgid
++	.quad quiet_ni_syscall			/* old ulimit syscall holder */
++	.quad sys32_olduname
++	.quad sys_umask		/* 60 */
++	.quad sys_chroot
++	.quad sys32_ustat
++	.quad sys_dup2
++	.quad sys_getppid
++	.quad sys_getpgrp		/* 65 */
++	.quad sys_setsid
++	.quad sys32_sigaction
++	.quad sys_sgetmask
++	.quad sys_ssetmask
++	.quad sys_setreuid16	/* 70 */
++	.quad sys_setregid16
++	.quad stub32_sigsuspend
++	.quad compat_sys_sigpending
++	.quad sys_sethostname
++	.quad compat_sys_setrlimit	/* 75 */
++	.quad compat_sys_old_getrlimit	/* old_getrlimit */
++	.quad compat_sys_getrusage
++	.quad sys32_gettimeofday
++	.quad sys32_settimeofday
++	.quad sys_getgroups16	/* 80 */
++	.quad sys_setgroups16
++	.quad sys32_old_select
++	.quad sys_symlink
++	.quad sys_lstat
++	.quad sys_readlink		/* 85 */
++#ifdef CONFIG_IA32_AOUT
++	.quad sys_uselib
++#else
++	.quad quiet_ni_syscall
++#endif
++	.quad sys_swapon
++	.quad sys_reboot
++	.quad compat_sys_old_readdir
++	.quad sys32_mmap		/* 90 */
++	.quad sys_munmap
++	.quad sys_truncate
++	.quad sys_ftruncate
++	.quad sys_fchmod
++	.quad sys_fchown16		/* 95 */
++	.quad sys_getpriority
++	.quad sys_setpriority
++	.quad quiet_ni_syscall			/* old profil syscall holder */
++	.quad compat_sys_statfs
++	.quad compat_sys_fstatfs		/* 100 */
++	.quad sys_ioperm
++	.quad compat_sys_socketcall
++	.quad sys_syslog
++	.quad compat_sys_setitimer
++	.quad compat_sys_getitimer	/* 105 */
++	.quad compat_sys_newstat
++	.quad compat_sys_newlstat
++	.quad compat_sys_newfstat
++	.quad sys32_uname
++	.quad stub32_iopl		/* 110 */
++	.quad sys_vhangup
++	.quad quiet_ni_syscall	/* old "idle" system call */
++	.quad sys32_vm86_warning	/* vm86old */ 
++	.quad compat_sys_wait4
++	.quad sys_swapoff		/* 115 */
++	.quad sys32_sysinfo
++	.quad sys32_ipc
++	.quad sys_fsync
++	.quad stub32_sigreturn
++	.quad stub32_clone		/* 120 */
++	.quad sys_setdomainname
++	.quad sys_uname
++	.quad sys_modify_ldt
++	.quad compat_sys_adjtimex
++	.quad sys32_mprotect		/* 125 */
++	.quad compat_sys_sigprocmask
++	.quad quiet_ni_syscall		/* create_module */
++	.quad sys_init_module
++	.quad sys_delete_module
++	.quad quiet_ni_syscall		/* 130  get_kernel_syms */
++	.quad sys_quotactl
++	.quad sys_getpgid
++	.quad sys_fchdir
++	.quad quiet_ni_syscall	/* bdflush */
++	.quad sys_sysfs		/* 135 */
++	.quad sys_personality
++	.quad quiet_ni_syscall	/* for afs_syscall */
++	.quad sys_setfsuid16
++	.quad sys_setfsgid16
++	.quad sys_llseek		/* 140 */
++	.quad compat_sys_getdents
++	.quad compat_sys_select
++	.quad sys_flock
++	.quad sys_msync
++	.quad compat_sys_readv		/* 145 */
++	.quad compat_sys_writev
++	.quad sys_getsid
++	.quad sys_fdatasync
++	.quad sys32_sysctl	/* sysctl */
++	.quad sys_mlock		/* 150 */
++	.quad sys_munlock
++	.quad sys_mlockall
++	.quad sys_munlockall
++	.quad sys_sched_setparam
++	.quad sys_sched_getparam   /* 155 */
++	.quad sys_sched_setscheduler
++	.quad sys_sched_getscheduler
++	.quad sys_sched_yield
++	.quad sys_sched_get_priority_max
++	.quad sys_sched_get_priority_min  /* 160 */
++	.quad sys_sched_rr_get_interval
++	.quad compat_sys_nanosleep
++	.quad sys_mremap
++	.quad sys_setresuid16
++	.quad sys_getresuid16	/* 165 */
++	.quad sys32_vm86_warning	/* vm86 */ 
++	.quad quiet_ni_syscall	/* query_module */
++	.quad sys_poll
++	.quad compat_sys_nfsservctl
++	.quad sys_setresgid16	/* 170 */
++	.quad sys_getresgid16
++	.quad sys_prctl
++	.quad stub32_rt_sigreturn
++	.quad sys32_rt_sigaction
++	.quad sys32_rt_sigprocmask	/* 175 */
++	.quad sys32_rt_sigpending
++	.quad compat_sys_rt_sigtimedwait
++	.quad sys32_rt_sigqueueinfo
++	.quad stub32_rt_sigsuspend
++	.quad sys32_pread		/* 180 */
++	.quad sys32_pwrite
++	.quad sys_chown16
++	.quad sys_getcwd
++	.quad sys_capget
++	.quad sys_capset
++	.quad stub32_sigaltstack
++	.quad sys32_sendfile
++	.quad quiet_ni_syscall		/* streams1 */
++	.quad quiet_ni_syscall		/* streams2 */
++	.quad stub32_vfork            /* 190 */
++	.quad compat_sys_getrlimit
++	.quad sys32_mmap2
++	.quad sys32_truncate64
++	.quad sys32_ftruncate64
++	.quad sys32_stat64		/* 195 */
++	.quad sys32_lstat64
++	.quad sys32_fstat64
++	.quad sys_lchown
++	.quad sys_getuid
++	.quad sys_getgid		/* 200 */
++	.quad sys_geteuid
++	.quad sys_getegid
++	.quad sys_setreuid
++	.quad sys_setregid
++	.quad sys_getgroups	/* 205 */
++	.quad sys_setgroups
++	.quad sys_fchown
++	.quad sys_setresuid
++	.quad sys_getresuid
++	.quad sys_setresgid	/* 210 */
++	.quad sys_getresgid
++	.quad sys_chown
++	.quad sys_setuid
++	.quad sys_setgid
++	.quad sys_setfsuid		/* 215 */
++	.quad sys_setfsgid
++	.quad sys_pivot_root
++	.quad sys_mincore
++	.quad sys_madvise
++	.quad compat_sys_getdents64	/* 220 getdents64 */
++	.quad compat_sys_fcntl64	
++	.quad quiet_ni_syscall		/* tux */
++	.quad quiet_ni_syscall    	/* security */
++	.quad sys_gettid	
++	.quad sys_readahead	/* 225 */
++	.quad sys_setxattr
++	.quad sys_lsetxattr
++	.quad sys_fsetxattr
++	.quad sys_getxattr
++	.quad sys_lgetxattr	/* 230 */
++	.quad sys_fgetxattr
++	.quad sys_listxattr
++	.quad sys_llistxattr
++	.quad sys_flistxattr
++	.quad sys_removexattr	/* 235 */
++	.quad sys_lremovexattr
++	.quad sys_fremovexattr
++	.quad sys_tkill
++	.quad sys_sendfile64 
++	.quad compat_sys_futex		/* 240 */
++	.quad compat_sys_sched_setaffinity
++	.quad compat_sys_sched_getaffinity
++	.quad sys32_set_thread_area
++	.quad sys32_get_thread_area
++	.quad compat_sys_io_setup	/* 245 */
++	.quad sys_io_destroy
++	.quad compat_sys_io_getevents
++	.quad compat_sys_io_submit
++	.quad sys_io_cancel
++	.quad sys_fadvise64		/* 250 */
++	.quad quiet_ni_syscall 	/* free_huge_pages */
++	.quad sys_exit_group
++	.quad sys32_lookup_dcookie
++	.quad sys_epoll_create
++	.quad sys_epoll_ctl		/* 255 */
++	.quad sys_epoll_wait
++	.quad sys_remap_file_pages
++	.quad sys_set_tid_address
++	.quad compat_sys_timer_create
++	.quad compat_sys_timer_settime	/* 260 */
++	.quad compat_sys_timer_gettime
++	.quad sys_timer_getoverrun
++	.quad sys_timer_delete
++	.quad compat_sys_clock_settime
++	.quad compat_sys_clock_gettime	/* 265 */
++	.quad compat_sys_clock_getres
++	.quad compat_sys_clock_nanosleep
++	.quad compat_sys_statfs64
++	.quad compat_sys_fstatfs64
++	.quad sys_tgkill		/* 270 */
++	.quad compat_sys_utimes
++	.quad sys32_fadvise64_64
++	.quad quiet_ni_syscall	/* sys_vserver */
++	.quad sys_mbind
++	.quad compat_sys_get_mempolicy	/* 275 */
++	.quad sys_set_mempolicy
++	.quad compat_sys_mq_open
++	.quad sys_mq_unlink
++	.quad compat_sys_mq_timedsend
++	.quad compat_sys_mq_timedreceive	/* 280 */
++	.quad compat_sys_mq_notify
++	.quad compat_sys_mq_getsetattr
++	.quad compat_sys_kexec_load	/* reserved for kexec */
++	.quad compat_sys_waitid
++	.quad quiet_ni_syscall		/* 285: sys_altroot */
++	.quad sys_add_key
++	.quad sys_request_key
++	.quad sys_keyctl
++	.quad sys_ioprio_set
++	.quad sys_ioprio_get		/* 290 */
++	.quad sys_inotify_init
++	.quad sys_inotify_add_watch
++	.quad sys_inotify_rm_watch
++	.quad sys_migrate_pages
++	.quad compat_sys_openat		/* 295 */
++	.quad sys_mkdirat
++	.quad sys_mknodat
++	.quad sys_fchownat
++	.quad compat_sys_futimesat
++	.quad sys32_fstatat		/* 300 */
++	.quad sys_unlinkat
++	.quad sys_renameat
++	.quad sys_linkat
++	.quad sys_symlinkat
++	.quad sys_readlinkat		/* 305 */
++	.quad sys_fchmodat
++	.quad sys_faccessat
++	.quad compat_sys_pselect6
++	.quad compat_sys_ppoll
++	.quad sys_unshare		/* 310 */
++	.quad compat_sys_set_robust_list
++	.quad compat_sys_get_robust_list
++	.quad sys_splice
++	.quad sys_sync_file_range
++	.quad sys_tee
++	.quad compat_sys_vmsplice
++	.quad compat_sys_move_pages
++ia32_syscall_end:		
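The heart of ia32entry-xen.S is the indirect dispatch through
ia32_sys_call_table: the 32-bit syscall number in %eax is range-checked
against IA32_NR_syscalls (computed from the table size at the top of the
file) and used as an 8-byte-stride index. Expressed as C, the fast path does
roughly the following (illustrative sketch only; the real code operates on
the saved pt_regs after IA32_ARG_FIXUP shuffles the 32-bit argument registers
into the 64-bit calling convention):

    typedef long (*ia32_syscall_t)(long, long, long, long, long, long);
    extern ia32_syscall_t ia32_sys_call_table[];

    /* cmpl $(IA32_NR_syscalls-1),%eax; ja ia32_badsys;
     * call *ia32_sys_call_table(,%rax,8) -- as C: */
    long ia32_dispatch(unsigned int nr, long a1, long a2, long a3,
                       long a4, long a5, long a6)
    {
            if (nr >= IA32_NR_syscalls)     /* table size, see above */
                    return -ENOSYS;         /* ia32_badsys */
            return ia32_sys_call_table[nr](a1, a2, a3, a4, a5, a6);
    }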
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/ia32/ia32entry.S
+--- a/arch/x86_64/ia32/ia32entry.S	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/x86_64/ia32/ia32entry.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -715,8 +715,8 @@
+ 	.quad sys_readlinkat		/* 305 */
+ 	.quad sys_fchmodat
+ 	.quad sys_faccessat
+-	.quad quiet_ni_syscall		/* pselect6 for now */
+-	.quad quiet_ni_syscall		/* ppoll for now */
++	.quad compat_sys_pselect6
++	.quad compat_sys_ppoll
+ 	.quad sys_unshare		/* 310 */
+ 	.quad compat_sys_set_robust_list
+ 	.quad compat_sys_get_robust_list
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/ia32/syscall32-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/ia32/syscall32-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,137 @@
++/* Copyright 2002,2003 Andi Kleen, SuSE Labs */
++
++/* vsyscall handling for 32-bit processes. Map a stub page into each
++   process on demand because 32-bit code cannot reach the kernel's fixmaps. */
++
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/kernel.h>
++#include <linux/gfp.h>
++#include <linux/init.h>
++#include <linux/stringify.h>
++#include <linux/security.h>
++#include <asm/proto.h>
++#include <asm/tlbflush.h>
++#include <asm/ia32_unistd.h>
++#include <xen/interface/callback.h>
++
++extern unsigned char syscall32_syscall[], syscall32_syscall_end[];
++extern unsigned char syscall32_sysenter[], syscall32_sysenter_end[];
++extern int sysctl_vsyscall32;
++
++char *syscall32_page; 
++static int use_sysenter = -1;
++
++#if CONFIG_XEN_COMPAT < 0x030200
++extern unsigned char syscall32_int80[], syscall32_int80_end[];
++static int use_int80 = 1;
++#endif
++
++static struct page *
++syscall32_nopage(struct vm_area_struct *vma, unsigned long adr, int *type)
++{
++	struct page *p = virt_to_page(adr - vma->vm_start + syscall32_page);
++	get_page(p);
++	return p;
++}
++
++/* Prevent VMA merging */
++static void syscall32_vma_close(struct vm_area_struct *vma)
++{
++}
++
++static struct vm_operations_struct syscall32_vm_ops = {
++	.close = syscall32_vma_close,
++	.nopage = syscall32_nopage,
++};
++
++struct linux_binprm;
++
++/* Setup a VMA at program startup for the vsyscall page */
++int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
++{
++	int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
++	struct vm_area_struct *vma;
++	struct mm_struct *mm = current->mm;
++	int ret;
++
++	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
++	if (!vma)
++		return -ENOMEM;
++
++	memset(vma, 0, sizeof(struct vm_area_struct));
++	/* Could randomize here */
++	vma->vm_start = VSYSCALL32_BASE;
++	vma->vm_end = VSYSCALL32_END;
++	/* MAYWRITE to allow gdb to COW and set breakpoints */
++	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
++	vma->vm_flags |= mm->def_flags;
++	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
++	vma->vm_ops = &syscall32_vm_ops;
++	vma->vm_mm = mm;
++
++	down_write(&mm->mmap_sem);
++	if ((ret = insert_vm_struct(mm, vma))) {
++		up_write(&mm->mmap_sem);
++		kmem_cache_free(vm_area_cachep, vma);
++		return ret;
++	}
++	mm->total_vm += npages;
++	up_write(&mm->mmap_sem);
++	return 0;
++}
++
++static int __init init_syscall32(void)
++{ 
++	syscall32_page = (void *)get_zeroed_page(GFP_KERNEL); 
++	if (!syscall32_page) 
++		panic("Cannot allocate syscall32 page"); 
++
++#if CONFIG_XEN_COMPAT < 0x030200
++	if (use_int80) {
++		memcpy(syscall32_page, syscall32_int80,
++		       syscall32_int80_end - syscall32_int80);
++	} else
++#endif
++ 	if (use_sysenter > 0) {
++ 		memcpy(syscall32_page, syscall32_sysenter,
++ 		       syscall32_sysenter_end - syscall32_sysenter);
++ 	} else {
++  		memcpy(syscall32_page, syscall32_syscall,
++  		       syscall32_syscall_end - syscall32_syscall);
++  	}	
++	return 0;
++} 
++
++/*
++ * This must be done early in case we have an initrd containing 32-bit
++ * binaries (e.g., hotplug). This could be pushed upstream to arch/x86_64.
++ */	
++core_initcall(init_syscall32); 
++
++/* May not be __init: called during resume */
++void syscall32_cpu_init(void)
++{
++	static const struct callback_register cstar = {
++		.type = CALLBACKTYPE_syscall32,
++		.address = (unsigned long)ia32_cstar_target
++	};
++	static const struct callback_register sysenter = {
++		.type = CALLBACKTYPE_sysenter,
++		.address = (unsigned long)ia32_sysenter_target
++	};
++
++	/* Load these always in case some future AMD CPU supports
++	   SYSENTER from compat mode too. */
++	if ((HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) < 0) ||
++	    (HYPERVISOR_callback_op(CALLBACKOP_register, &cstar) < 0))
++#if CONFIG_XEN_COMPAT < 0x030200
++		return;
++	use_int80 = 0;
++#else
++		BUG();
++#endif
++
++	if (use_sysenter < 0)
++		use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);
++}
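syscall32-xen.c above selects one of up to three vsyscall stubs and serves
the chosen page to every 32-bit process through the nopage handler: int $0x80
only in CONFIG_XEN_COMPAT < 0x030200 builds (for hypervisors whose
syscall32/sysenter callback registration fails), otherwise sysenter on Intel
CPUs and syscall everywhere else. Flattened into one expression — a sketch of
init_syscall32() combined with syscall32_cpu_init(); note use_int80 only
exists in old-compat builds:

    const unsigned char *stub =
            use_int80         ? syscall32_int80 :
            use_sysenter > 0  ? syscall32_sysenter :
                                syscall32_syscall;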
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/ia32/syscall32.c
+--- a/arch/x86_64/ia32/syscall32.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/x86_64/ia32/syscall32.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -59,6 +59,13 @@
+ 	vma->vm_end = VSYSCALL32_END;
+ 	/* MAYWRITE to allow gdb to COW and set breakpoints */
+ 	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
++	/*
++	 * Make sure the vDSO gets into every core dump.
++	 * Dumping its contents makes post-mortem fully interpretable later
++	 * without matching up the same kernel and hardware config to see
++	 * what PC values meant.
++	 */
++	vma->vm_flags |= VM_ALWAYSDUMP;
+ 	vma->vm_flags |= mm->def_flags;
+ 	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
+ 	vma->vm_ops = &syscall32_vm_ops;
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/ia32/syscall32_syscall-xen.S
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/ia32/syscall32_syscall-xen.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,28 @@
++/* 32bit VDSOs mapped into user space. */
++
++	.section ".init.data","aw"
++
++#if CONFIG_XEN_COMPAT < 0x030200
++
++	.globl syscall32_int80
++	.globl syscall32_int80_end
++
++syscall32_int80:
++	.incbin "arch/x86_64/ia32/vsyscall-int80.so"
++syscall32_int80_end:
++
++#endif
++
++	.globl syscall32_syscall
++	.globl syscall32_syscall_end
++
++syscall32_syscall:
++	.incbin "arch/x86_64/ia32/vsyscall-syscall.so"
++syscall32_syscall_end:
++
++	.globl syscall32_sysenter
++	.globl syscall32_sysenter_end
++
++syscall32_sysenter:
++	.incbin "arch/x86_64/ia32/vsyscall-sysenter.so"
++syscall32_sysenter_end:
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/ia32/vsyscall-int80.S
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/ia32/vsyscall-int80.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,58 @@
++/*
++ * Code for the vsyscall page.  This version uses the old int $0x80 method.
++ *
++ * NOTE:
++ * 1) __kernel_vsyscall _must_ be first in this page.
++ * 2) there are alignment constraints on this stub, see vsyscall-sigreturn.S
++ *    for details.
++ */
++#include <asm/ia32_unistd.h>
++#include <asm/asm-offsets.h>
++
++	.code32
++	.text
++	.section .text.vsyscall,"ax"
++	.globl __kernel_vsyscall
++	.type __kernel_vsyscall,@function
++__kernel_vsyscall:
++.LSTART_vsyscall:
++	int $0x80
++	ret
++.LEND_vsyscall:
++	.size __kernel_vsyscall,.-.LSTART_vsyscall
++	.previous
++
++	.section .eh_frame,"a",@progbits
++.LSTARTFRAME:
++	.long .LENDCIE-.LSTARTCIE
++.LSTARTCIE:
++	.long 0			/* CIE ID */
++	.byte 1			/* Version number */
++	.string "zR"		/* NUL-terminated augmentation string */
++	.uleb128 1		/* Code alignment factor */
++	.sleb128 -4		/* Data alignment factor */
++	.byte 8			/* Return address register column */
++	.uleb128 1		/* Augmentation value length */
++	.byte 0x1b		/* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
++	.byte 0x0c		/* DW_CFA_def_cfa */
++	.uleb128 4
++	.uleb128 4
++	.byte 0x88		/* DW_CFA_offset, column 0x8 */
++	.uleb128 1
++	.align 4
++.LENDCIE:
++
++	.long .LENDFDE1-.LSTARTFDE1	/* Length FDE */
++.LSTARTFDE1:
++	.long .LSTARTFDE1-.LSTARTFRAME	/* CIE pointer */
++	.long .LSTART_vsyscall-.	/* PC-relative start address */
++	.long .LEND_vsyscall-.LSTART_vsyscall
++	.uleb128 0			/* Augmentation length */
++	.align 4
++.LENDFDE1:
++		
++/*
++ * Get the common code for the sigreturn entry points.
++ */
++#define SYSCALL_ENTER_KERNEL    int $0x80
++#include "vsyscall-sigreturn.S"
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/ia32/vsyscall-sigreturn.S
+--- a/arch/x86_64/ia32/vsyscall-sigreturn.S	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/x86_64/ia32/vsyscall-sigreturn.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -139,5 +139,5 @@
+ 	.align 4
+ .LENDFDE3:
+ 
+-#include "../../i386/kernel/vsyscall-note.S"
++#include <vsyscall-note.S>
+ 
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/Makefile
+--- a/arch/x86_64/kernel/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/x86_64/kernel/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -21,11 +21,13 @@
+ obj-$(CONFIG_X86_CPUID)		+= cpuid.o
+ obj-$(CONFIG_SMP)		+= smp.o smpboot.o trampoline.o
+ obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o  nmi.o
++obj-$(CONFIG_X86_XEN_GENAPIC)	+= genapic.o genapic_xen.o
+ obj-$(CONFIG_X86_IO_APIC)	+= io_apic.o mpparse.o \
+ 		genapic.o genapic_cluster.o genapic_flat.o
+ obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o crash.o
+ obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
+-obj-$(CONFIG_PM)		+= suspend.o
++obj-$(CONFIG_SOFTWARE_SUSPEND)	+= suspend.o
++obj-$(CONFIG_ACPI_SLEEP)	+= suspend.o
+ obj-$(CONFIG_SOFTWARE_SUSPEND)	+= suspend_asm.o
+ obj-$(CONFIG_CPU_FREQ)		+= cpufreq/
+ obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
+@@ -55,3 +57,8 @@
+ msr-$(subst m,y,$(CONFIG_X86_MSR))  += ../../i386/kernel/msr.o
+ alternative-y			+= ../../i386/kernel/alternative.o
+ 
++time-$(CONFIG_XEN)		+= ../../i386/kernel/time.o
++pci-dma-$(CONFIG_XEN)		+= ../../i386/kernel/pci-dma.o
++
++disabled-obj-$(CONFIG_XEN)	:= i8259.o reboot.o smpboot.o trampoline.o
++%/head.o %/head.s: $(if $(CONFIG_XEN),EXTRA_AFLAGS,dummy) :=
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/acpi/Makefile
+--- a/arch/x86_64/kernel/acpi/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/x86_64/kernel/acpi/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -5,5 +5,10 @@
+ ifneq ($(CONFIG_ACPI_PROCESSOR),)
+ obj-y			+= processor.o
+ processor-y		:= ../../../i386/kernel/acpi/processor.o ../../../i386/kernel/acpi/cstate.o
++processor-$(CONFIG_XEN)	:= ../../../i386/kernel/acpi/processor.o
++ifneq ($(CONFIG_PROCESSOR_EXTERNAL_CONTROL),)
++processor-$(CONFIG_XEN)	+= ../../../i386/kernel/acpi/processor_extcntl_xen.o
++endif
+ endif
+ 
++disabled-obj-$(CONFIG_XEN) := wakeup.o
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/acpi/sleep-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/kernel/acpi/sleep-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,146 @@
++/*
++ *  acpi.c - Architecture-Specific Low-Level ACPI Support
++ *
++ *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh at intel.com>
++ *  Copyright (C) 2001 Jun Nakajima <jun.nakajima at intel.com>
++ *  Copyright (C) 2001 Patrick Mochel <mochel at osdl.org>
++ *  Copyright (C) 2002 Andi Kleen, SuSE Labs (x86-64 port)
++ *  Copyright (C) 2003 Pavel Machek, SuSE Labs
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ *  This program is free software; you can redistribute it and/or modify
++ *  it under the terms of the GNU General Public License as published by
++ *  the Free Software Foundation; either version 2 of the License, or
++ *  (at your option) any later version.
++ *
++ *  This program is distributed in the hope that it will be useful,
++ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ *  GNU General Public License for more details.
++ *
++ *  You should have received a copy of the GNU General Public License
++ *  along with this program; if not, write to the Free Software
++ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/types.h>
++#include <linux/stddef.h>
++#include <linux/slab.h>
++#include <linux/pci.h>
++#include <linux/bootmem.h>
++#include <linux/acpi.h>
++#include <linux/cpumask.h>
++
++#include <asm/mpspec.h>
++#include <asm/io.h>
++#include <asm/apic.h>
++#include <asm/apicdef.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
++#include <asm/io_apic.h>
++#include <asm/proto.h>
++#include <asm/tlbflush.h>
++
++/* --------------------------------------------------------------------------
++                              Low-Level Sleep Support
++   -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI_SLEEP
++
++#ifndef CONFIG_ACPI_PV_SLEEP
++/* address in low memory of the wakeup routine. */
++unsigned long acpi_wakeup_address = 0;
++unsigned long acpi_video_flags;
++extern char wakeup_start, wakeup_end;
++
++extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));
++
++static pgd_t low_ptr;
++
++static void init_low_mapping(void)
++{
++	pgd_t *slot0 = pgd_offset(current->mm, 0UL);
++	low_ptr = *slot0;
++	set_pgd(slot0, *pgd_offset(current->mm, PAGE_OFFSET));
++	WARN_ON(num_online_cpus() != 1);
++	local_flush_tlb();
++}
++#endif
++
++/**
++ * acpi_save_state_mem - save kernel state
++ *
++ * Create an identity mapped page table and copy the wakeup routine to
++ * low memory.
++ */
++int acpi_save_state_mem(void)
++{
++#ifndef CONFIG_ACPI_PV_SLEEP
++	init_low_mapping();
++
++	memcpy((void *)acpi_wakeup_address, &wakeup_start,
++	       &wakeup_end - &wakeup_start);
++	acpi_copy_wakeup_routine(acpi_wakeup_address);
++#endif
++	return 0;
++}
++
++/*
++ * acpi_restore_state
++ */
++void acpi_restore_state_mem(void)
++{
++#ifndef CONFIG_ACPI_PV_SLEEP
++	set_pgd(pgd_offset(current->mm, 0UL), low_ptr);
++	local_flush_tlb();
++#endif
++}
++
++/**
++ * acpi_reserve_bootmem - do _very_ early ACPI initialisation
++ *
++ * We allocate a page in low memory for the wakeup
++ * routine for when we come back from a sleep state. The
++ * runtime allocator allows specification of <16M pages, but not
++ * <1M pages.
++ */
++void __init acpi_reserve_bootmem(void)
++{
++#ifndef CONFIG_ACPI_PV_SLEEP
++	acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE);
++	if ((&wakeup_end - &wakeup_start) > PAGE_SIZE)
++		printk(KERN_CRIT
++		       "ACPI: Wakeup code way too big, will crash on attempt to suspend\n");
++#endif
++}
++
++#ifndef CONFIG_ACPI_PV_SLEEP
++static int __init acpi_sleep_setup(char *str)
++{
++	while ((str != NULL) && (*str != '\0')) {
++		if (strncmp(str, "s3_bios", 7) == 0)
++			acpi_video_flags = 1;
++		if (strncmp(str, "s3_mode", 7) == 0)
++			acpi_video_flags |= 2;
++		str = strchr(str, ',');
++		if (str != NULL)
++			str += strspn(str, ", \t");
++	}
++
++	return 1;
++}
++
++__setup("acpi_sleep=", acpi_sleep_setup);
++#endif				/* CONFIG_ACPI_PV_SLEEP */
++
++#endif				/*CONFIG_ACPI_SLEEP */
++
++void acpi_pci_link_exit(void)
++{
++}
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/apic-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/kernel/apic-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,197 @@
++/*
++ *	Local APIC handling, local APIC timers
++ *
++ *	(c) 1999, 2000 Ingo Molnar <mingo at redhat.com>
++ *
++ *	Fixes
++ *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
++ *					thanks to Eric Gilmore
++ *					and Rolf G. Tews
++ *					for testing these extensively.
++ *	Maciej W. Rozycki	:	Various updates and fixes.
++ *	Mikael Pettersson	:	Power Management for UP-APIC.
++ *	Pavel Machek and
++ *	Mikael Pettersson	:	PM converted to driver model.
++ */
++
++#include <linux/init.h>
++
++#include <linux/mm.h>
++#include <linux/delay.h>
++#include <linux/bootmem.h>
++#include <linux/smp_lock.h>
++#include <linux/interrupt.h>
++#include <linux/mc146818rtc.h>
++#include <linux/kernel_stat.h>
++#include <linux/sysdev.h>
++#include <linux/module.h>
++
++#include <asm/atomic.h>
++#include <asm/smp.h>
++#include <asm/mtrr.h>
++#include <asm/mpspec.h>
++#include <asm/desc.h>
++#include <asm/arch_hooks.h>
++#include <asm/hpet.h>
++#include <asm/idle.h>
++
++int apic_verbosity;
++
++/*
++ * 'what should we do if we get a hw irq event on an illegal vector'.
++ * each architecture has to answer this themselves.
++ */
++void ack_bad_irq(unsigned int irq)
++{
++	printk("unexpected IRQ trap at vector %02x\n", irq);
++	/*
++	 * Currently unexpected vectors happen only on SMP and APIC.
++	 * We _must_ ack these because every local APIC has only N
++	 * irq slots per priority level, and a 'hanging, unacked' IRQ
++	 * holds up an irq slot - in excessive cases (when multiple
++	 * unexpected vectors occur) that might lock up the APIC
++	 * completely.
++  	 * But don't ack when the APIC is disabled. -AK
++	 */
++	if (!disable_apic)
++		ack_APIC_irq();
++}
++
++int setup_profiling_timer(unsigned int multiplier)
++{
++	return -EINVAL;
++}
++
++void smp_local_timer_interrupt(struct pt_regs *regs)
++{
++	profile_tick(CPU_PROFILING, regs);
++#ifndef CONFIG_XEN
++#ifdef CONFIG_SMP
++		update_process_times(user_mode(regs));
++#endif
++#endif
++	/*
++	 * We take the 'long' return path, and there every subsystem
++	 * grabs the appropriate locks (kernel lock/ irq lock).
++	 *
++	 * we might want to decouple profiling from the 'long path',
++	 * and do the profiling totally in assembly.
++	 *
++	 * Currently this isn't too much of an issue (performance wise),
++	 * we can take more than 100K local irqs per second on a 100 MHz P5.
++	 */
++}
++
++/*
++ * Local APIC timer interrupt. This is the most natural way for doing
++ * local interrupts, but local timer interrupts can be emulated by
++ * broadcast interrupts too. [in case the hw doesn't support APIC timers]
++ *
++ * [ if a single-CPU system runs an SMP kernel then we call the local
++ *   interrupt as well. Thus we cannot inline the local irq ... ]
++ */
++void smp_apic_timer_interrupt(struct pt_regs *regs)
++{
++	/*
++	 * the NMI deadlock-detector uses this.
++	 */
++	add_pda(apic_timer_irqs, 1);
++
++	/*
++	 * NOTE! We'd better ACK the irq immediately,
++	 * because timer handling can be slow.
++	 */
++	ack_APIC_irq();
++	/*
++	 * update_process_times() expects us to have done irq_enter().
++	 * Besides, if we don't, timer interrupts ignore the global
++	 * interrupt lock, which is the WrongThing (tm) to do.
++	 */
++	exit_idle();
++	irq_enter();
++	smp_local_timer_interrupt(regs);
++	irq_exit();
++}
++
++/*
++ * This interrupt should _never_ happen with our APIC/SMP architecture
++ */
++asmlinkage void smp_spurious_interrupt(void)
++{
++	unsigned int v;
++	exit_idle();
++	irq_enter();
++	/*
++	 * Check if this really is a spurious interrupt and ACK it
++	 * if it is a vectored one.  Just in case...
++	 * Spurious interrupts should not be ACKed.
++	 */
++	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
++	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
++		ack_APIC_irq();
++
++#if 0
++	static unsigned long last_warning; 
++	static unsigned long skipped; 
++
++	/* see sw-dev-man vol 3, chapter 7.4.13.5 */
++	if (time_before(last_warning+30*HZ,jiffies)) { 
++		printk(KERN_INFO "spurious APIC interrupt on CPU#%d, %ld skipped.\n",
++		       smp_processor_id(), skipped);
++		last_warning = jiffies; 
++		skipped = 0;
++	} else { 
++		skipped++; 
++	} 
++#endif 
++	irq_exit();
++}
++
++/*
++ * This interrupt should never happen with our APIC/SMP architecture
++ */
++
++asmlinkage void smp_error_interrupt(void)
++{
++	unsigned int v, v1;
++
++	exit_idle();
++	irq_enter();
++	/* First tickle the hardware, only then report what went on. -- REW */
++	v = apic_read(APIC_ESR);
++	apic_write(APIC_ESR, 0);
++	v1 = apic_read(APIC_ESR);
++	ack_APIC_irq();
++	atomic_inc(&irq_err_count);
++
++	/* Here is what the APIC error bits mean:
++	   0: Send CS error
++	   1: Receive CS error
++	   2: Send accept error
++	   3: Receive accept error
++	   4: Reserved
++	   5: Send illegal vector
++	   6: Received illegal vector
++	   7: Illegal register address
++	*/
++	printk (KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
++	        smp_processor_id(), v, v1);
++	irq_exit();
++}
++
++int disable_apic;
++
++/*
++ * This initializes the IO-APIC and APIC hardware if this is
++ * a UP kernel.
++ */
++int __init APIC_init_uniprocessor (void)
++{
++#ifdef CONFIG_X86_IO_APIC
++	if (smp_found_config)
++		if (!skip_ioapic_setup && nr_ioapics)
++			setup_IO_APIC();
++#endif
++
++	return 1;
++}
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/asm-offsets.c
+--- a/arch/x86_64/kernel/asm-offsets.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/x86_64/kernel/asm-offsets.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -67,8 +67,10 @@
+ 	DEFINE(pbe_address, offsetof(struct pbe, address));
+ 	DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
+ 	DEFINE(pbe_next, offsetof(struct pbe, next));
++#ifndef CONFIG_X86_NO_TSS
+ 	BLANK();
+ 	DEFINE(TSS_ist, offsetof(struct tss_struct, ist));
++#endif
+ 	BLANK();
+ 	DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
+ 	return 0;
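The CONFIG_X86_NO_TSS guard around TSS_ist above matters because
asm-offsets.c exists only to generate constants for assembly: each DEFINE()
evaluates offsetof() at compile time, so an unguarded entry would still
require struct tss_struct in a Xen build that otherwise never touches it.
The underlying kbuild idiom looks roughly like this (the standard mechanism,
not a line from this patch):

    /* kbuild extracts the "->" marker lines from the compiler's assembly
     * output and turns them into #defines in asm-offsets.h. */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))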
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/crash.c
+--- a/arch/x86_64/kernel/crash.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/x86_64/kernel/crash.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -92,6 +92,7 @@
+ 	crash_save_this_cpu(regs, cpu);
+ }
+ 
++#ifndef CONFIG_XEN
+ #ifdef CONFIG_SMP
+ static atomic_t waiting_for_crash_ipi;
+ 
+@@ -156,6 +157,7 @@
+ 	/* There are no cpus to shootdown */
+ }
+ #endif
++#endif /* CONFIG_XEN */
+ 
+ void machine_crash_shutdown(struct pt_regs *regs)
+ {
+@@ -173,6 +175,8 @@
+ 
+ 	/* Make a note of crashing cpu. Will be used in NMI callback.*/
+ 	crashing_cpu = smp_processor_id();
++
++#ifndef CONFIG_XEN
+ 	nmi_shootdown_cpus();
+ 
+ 	if(cpu_has_apic)
+@@ -181,6 +185,6 @@
+ #if defined(CONFIG_X86_IO_APIC)
+ 	disable_IO_APIC();
+ #endif
+-
++#endif /* CONFIG_XEN */
+ 	crash_save_self(regs);
+ }
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/e820-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/kernel/e820-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,798 @@
++/* 
++ * Handle the memory map.
++ * The functions here do the job until bootmem takes over.
++ *
++ *  Getting sanitize_e820_map() in sync with i386 version by applying change:
++ *  -  Provisions for empty E820 memory regions (reported by certain BIOSes).
++ *     Alex Achenbach <xela at slit.de>, December 2002.
++ *  Venkatesh Pallipadi <venkatesh.pallipadi at intel.com>
++ *
++ */
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/bootmem.h>
++#include <linux/ioport.h>
++#include <linux/string.h>
++#include <linux/kexec.h>
++#include <linux/module.h>
++
++#include <asm/pgtable.h>
++#include <asm/page.h>
++#include <asm/e820.h>
++#include <asm/proto.h>
++#include <asm/bootsetup.h>
++#include <asm/sections.h>
++#include <xen/interface/memory.h>
++
++/* 
++ * PFN of last memory page.
++ */
++unsigned long end_pfn; 
++EXPORT_SYMBOL(end_pfn);
++
++/* 
++ * end_pfn only includes RAM, while end_pfn_map includes all e820 entries.
++ * The direct mapping extends to end_pfn_map, so that we can directly access
++ * apertures, ACPI and other tables without having to play with fixmaps.
++ */ 
++unsigned long end_pfn_map; 
++
++/* 
++ * Last pfn which the user wants to use.
++ */
++unsigned long end_user_pfn = MAXMEM>>PAGE_SHIFT;  
++
++extern struct resource code_resource, data_resource;
++
++#ifdef CONFIG_XEN
++extern struct e820map machine_e820;
++#endif
++
++/* Check for some hardcoded bad areas that early boot is not allowed to touch */ 
++static inline int bad_addr(unsigned long *addrp, unsigned long size)
++{ 
++	unsigned long addr = *addrp, last = addr + size; 
++
++#ifndef CONFIG_XEN
++	/* various gunk below that needed for SMP startup */
++	if (addr < 0x8000) { 
++		*addrp = 0x8000;
++		return 1; 
++	}
++
++	/* direct mapping tables of the kernel */
++	if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) { 
++		*addrp = table_end << PAGE_SHIFT; 
++		return 1;
++	} 
++
++	/* initrd */ 
++#ifdef CONFIG_BLK_DEV_INITRD
++	if (LOADER_TYPE && INITRD_START && last >= INITRD_START && 
++	    addr < INITRD_START+INITRD_SIZE) { 
++		*addrp = INITRD_START + INITRD_SIZE; 
++		return 1;
++	} 
++#endif
++	/* kernel code + 640k memory hole (later should not be needed, but 
++	   be paranoid for now) */
++	if (last >= 640*1024 && addr < 1024*1024) {
++		*addrp = 1024*1024;
++		return 1;
++	}
++	if (last >= __pa_symbol(&_text) && last < __pa_symbol(&_end)) {
++		*addrp = __pa_symbol(&_end);
++		return 1;
++	}
++
++	if (last >= ebda_addr && addr < ebda_addr + ebda_size) {
++		*addrp = ebda_addr + ebda_size;
++		return 1;
++	}
++
++	/* XXX ramdisk image here? */ 
++#else
++	if (last < (table_end<<PAGE_SHIFT)) {
++		*addrp = table_end << PAGE_SHIFT;
++		return 1;
++	}
++#endif
++	return 0;
++} 
++
++/*
++ * This function checks if any part of the range <start,end> is mapped
++ * with type.
++ */
++int e820_any_mapped(unsigned long start, unsigned long end, unsigned type)
++{ 
++	int i;
++
++#ifndef CONFIG_XEN
++	for (i = 0; i < e820.nr_map; i++) { 
++		struct e820entry *ei = &e820.map[i]; 
++#else
++	if (!is_initial_xendomain())
++		return 0;
++	for (i = 0; i < machine_e820.nr_map; i++) {
++		const struct e820entry *ei = &machine_e820.map[i];
++#endif
++
++		if (type && ei->type != type) 
++			continue;
++		if (ei->addr >= end || ei->addr + ei->size <= start)
++			continue; 
++		return 1; 
++	} 
++	return 0;
++}
++EXPORT_SYMBOL_GPL(e820_any_mapped);
++
++/*
++ * This function checks if the entire range <start,end> is mapped with type.
++ *
++ * Note: this function only works correctly if the e820 table is sorted and
++ * non-overlapping, which is the case.
++ */
++int __init e820_all_mapped(unsigned long start, unsigned long end, unsigned type)
++{
++	int i;
++
++#ifndef CONFIG_XEN
++	for (i = 0; i < e820.nr_map; i++) {
++		struct e820entry *ei = &e820.map[i];
++#else
++	if (!is_initial_xendomain())
++		return 0;
++	for (i = 0; i < machine_e820.nr_map; i++) {
++		const struct e820entry *ei = &machine_e820.map[i];
++#endif
++
++		if (type && ei->type != type)
++			continue;
++		/* is the region (part) in overlap with the current region ?*/
++		if (ei->addr >= end || ei->addr + ei->size <= start)
++			continue;
++
++		/* if the region is at the beginning of <start,end> we move
++		 * start to the end of the region since it's ok until there
++		 */
++		if (ei->addr <= start)
++			start = ei->addr + ei->size;
++		/* if start is now at or beyond end, we're done, full coverage */
++		if (start >= end)
++			return 1; /* we're done */
++	}
++	return 0;
++}
++
++/* 
++ * Find a free area in a specific range. 
++ */ 
++unsigned long __init find_e820_area(unsigned long start, unsigned long end, unsigned size) 
++{ 
++	int i; 
++	for (i = 0; i < e820.nr_map; i++) { 
++		struct e820entry *ei = &e820.map[i]; 
++		unsigned long addr = ei->addr, last; 
++		if (ei->type != E820_RAM) 
++			continue; 
++		if (addr < start) 
++			addr = start;
++		if (addr > ei->addr + ei->size) 
++			continue; 
++		while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size)
++			;
++		last = addr + size;
++		if (last > ei->addr + ei->size)
++			continue;
++		if (last > end) 
++			continue;
++		return addr; 
++	} 
++	return -1UL;		
++} 
++
++/* 
++ * Free bootmem based on the e820 table for a node.
++ */
++void __init e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end)
++{
++	int i;
++	for (i = 0; i < e820.nr_map; i++) {
++		struct e820entry *ei = &e820.map[i]; 
++		unsigned long last, addr;
++
++		if (ei->type != E820_RAM || 
++		    ei->addr+ei->size <= start || 
++		    ei->addr >= end)
++			continue;
++
++		addr = round_up(ei->addr, PAGE_SIZE);
++		if (addr < start) 
++			addr = start;
++
++		last = round_down(ei->addr + ei->size, PAGE_SIZE); 
++		if (last >= end)
++			last = end; 
++
++		if (last > addr && last-addr >= PAGE_SIZE)
++			free_bootmem_node(pgdat, addr, last-addr);
++	}
++}
++
++/*
++ * Find the highest page frame number we have available
++ */
++unsigned long __init e820_end_of_ram(void)
++{
++	int i;
++	unsigned long end_pfn = 0;
++	
++	for (i = 0; i < e820.nr_map; i++) {
++		struct e820entry *ei = &e820.map[i]; 
++		unsigned long start, end;
++
++		start = round_up(ei->addr, PAGE_SIZE); 
++		end = round_down(ei->addr + ei->size, PAGE_SIZE); 
++		if (start >= end)
++			continue;
++		if (ei->type == E820_RAM) {
++			if (end > end_pfn<<PAGE_SHIFT)
++				end_pfn = end>>PAGE_SHIFT;
++		} else { 
++			if (end > end_pfn_map<<PAGE_SHIFT) 
++				end_pfn_map = end>>PAGE_SHIFT;
++		} 
++	}
++
++	if (end_pfn > end_pfn_map) 
++		end_pfn_map = end_pfn;
++	if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
++		end_pfn_map = MAXMEM>>PAGE_SHIFT;
++	if (end_pfn > end_user_pfn)
++		end_pfn = end_user_pfn;
++	if (end_pfn > end_pfn_map) 
++		end_pfn = end_pfn_map; 
++
++	return end_pfn;	
++}
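
A hypothetical map makes the end_pfn/end_pfn_map distinction concrete; this
sketch replays the loop above on made-up entries and omits the
end_user_pfn/MAXMEM clamping:

#include <stdio.h>

#define PAGE_SHIFT 12
#define E820_RAM 1
#define E820_RESERVED 2

struct entry { unsigned long addr, size; unsigned type; };

int main(void)
{
	/* hypothetical: 1GB of RAM, then a reserved MMIO window */
	struct entry map[] = {
		{ 0x0,          0x40000000UL, E820_RAM },
		{ 0xfec00000UL, 0x100000UL,   E820_RESERVED },
	};
	unsigned long end_pfn = 0, end_pfn_map = 0;
	int i;

	for (i = 0; i < 2; i++) {
		unsigned long end = map[i].addr + map[i].size;

		if (map[i].type == E820_RAM) {
			if (end > end_pfn << PAGE_SHIFT)
				end_pfn = end >> PAGE_SHIFT;
		} else if (end > end_pfn_map << PAGE_SHIFT)
			end_pfn_map = end >> PAGE_SHIFT;
	}
	if (end_pfn > end_pfn_map)
		end_pfn_map = end_pfn;
	/* prints end_pfn=0x40000 (top of RAM), end_pfn_map=0xfed00 */
	printf("end_pfn=%#lx end_pfn_map=%#lx\n", end_pfn, end_pfn_map);
	return 0;
}
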
++
++/* 
++ * Compute how much memory is missing in a range.
++ * Unlike the other functions in this file the arguments are in page numbers.
++ */
++unsigned long __init
++e820_hole_size(unsigned long start_pfn, unsigned long end_pfn)
++{
++	unsigned long ram = 0;
++	unsigned long start = start_pfn << PAGE_SHIFT;
++	unsigned long end = end_pfn << PAGE_SHIFT;
++	int i;
++	for (i = 0; i < e820.nr_map; i++) {
++		struct e820entry *ei = &e820.map[i];
++		unsigned long last, addr;
++
++		if (ei->type != E820_RAM ||
++		    ei->addr+ei->size <= start ||
++		    ei->addr >= end)
++			continue;
++
++		addr = round_up(ei->addr, PAGE_SIZE);
++		if (addr < start)
++			addr = start;
++
++		last = round_down(ei->addr + ei->size, PAGE_SIZE);
++		if (last >= end)
++			last = end;
++
++		if (last > addr)
++			ram += last - addr;
++	}
++	return ((end - start) - ram) >> PAGE_SHIFT;
++}
++
++/*
++ * Mark e820 reserved areas as busy for the resource manager.
++ */
++void __init e820_reserve_resources(struct e820entry *e820, int nr_map)
++{
++	int i;
++	for (i = 0; i < nr_map; i++) {
++		struct resource *res;
++		res = alloc_bootmem_low(sizeof(struct resource));
++		switch (e820[i].type) {
++		case E820_RAM:	res->name = "System RAM"; break;
++		case E820_ACPI:	res->name = "ACPI Tables"; break;
++		case E820_NVS:	res->name = "ACPI Non-volatile Storage"; break;
++		default:	res->name = "reserved";
++		}
++		res->start = e820[i].addr;
++		res->end = res->start + e820[i].size - 1;
++		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
++		request_resource(&iomem_resource, res);
++		if (e820[i].type == E820_RAM) {
++			/*
++			 *  We don't know which RAM region contains kernel data,
++			 *  so we try it repeatedly and let the resource manager
++			 *  test it.
++			 */
++#ifndef CONFIG_XEN
++			request_resource(res, &code_resource);
++			request_resource(res, &data_resource);
++#endif
++#ifdef CONFIG_KEXEC
++			if (crashk_res.start != crashk_res.end)
++				request_resource(res, &crashk_res);
++#ifdef CONFIG_XEN
++			xen_machine_kexec_register_resources(res);
++#endif
++#endif
++		}
++	}
++}
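
The resources registered here are what later appears in /proc/iomem. On a
typical machine the top of that file looks roughly like this (illustrative
only; addresses, nesting and the "Kernel code"/"Kernel data" children vary
by machine and configuration, and are absent on Xen per the #ifndef above):

    00000000-0009fbff : System RAM
    0009fc00-0009ffff : reserved
    000f0000-000fffff : reserved
    00100000-3fffffff : System RAM
      00100000-002f3c1f : Kernel code
      002f3c20-003d2b3f : Kernel data
    fec00000-fec00fff : reserved
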
++
++/* 
++ * Add a memory region to the kernel e820 map.
++ */ 
++void __init add_memory_region(unsigned long start, unsigned long size, int type)
++{
++	int x = e820.nr_map;
++
++	if (x == E820MAX) {
++		printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
++		return;
++	}
++
++	e820.map[x].addr = start;
++	e820.map[x].size = size;
++	e820.map[x].type = type;
++	e820.nr_map++;
++}
++
++void __init e820_print_map(char *who)
++{
++	int i;
++
++	for (i = 0; i < e820.nr_map; i++) {
++		printk(" %s: %016Lx - %016Lx ", who,
++			(unsigned long long) e820.map[i].addr,
++			(unsigned long long) (e820.map[i].addr + e820.map[i].size));
++		switch (e820.map[i].type) {
++		case E820_RAM:	printk("(usable)\n");
++				break;
++		case E820_RESERVED:
++				printk("(reserved)\n");
++				break;
++		case E820_ACPI:
++				printk("(ACPI data)\n");
++				break;
++		case E820_NVS:
++				printk("(ACPI NVS)\n");
++				break;
++		default:	printk("type %u\n", e820.map[i].type);
++				break;
++		}
++	}
++}
++
++/*
++ * Sanitize the BIOS e820 map.
++ *
++ * Some e820 responses include overlapping entries.  The following 
++ * replaces the original e820 map with a new one, removing overlaps.
++ *
++ */
++static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
++{
++	struct change_member {
++		struct e820entry *pbios; /* pointer to original bios entry */
++		unsigned long long addr; /* address for this change point */
++	};
++	static struct change_member change_point_list[2*E820MAX] __initdata;
++	static struct change_member *change_point[2*E820MAX] __initdata;
++	static struct e820entry *overlap_list[E820MAX] __initdata;
++	static struct e820entry new_bios[E820MAX] __initdata;
++	struct change_member *change_tmp;
++	unsigned long current_type, last_type;
++	unsigned long long last_addr;
++	int chgidx, still_changing;
++	int overlap_entries;
++	int new_bios_entry;
++	int old_nr, new_nr, chg_nr;
++	int i;
++
++	/*
++		Visually we're performing the following (1,2,3,4 = memory types)...
++
++		Sample memory map (w/overlaps):
++		   ____22__________________
++		   ______________________4_
++		   ____1111________________
++		   _44_____________________
++		   11111111________________
++		   ____________________33__
++		   ___________44___________
++		   __________33333_________
++		   ______________22________
++		   ___________________2222_
++		   _________111111111______
++		   _____________________11_
++		   _________________4______
++
++		Sanitized equivalent (no overlap):
++		   1_______________________
++		   _44_____________________
++		   ___1____________________
++		   ____22__________________
++		   ______11________________
++		   _________1______________
++		   __________3_____________
++		   ___________44___________
++		   _____________33_________
++		   _______________2________
++		   ________________1_______
++		   _________________4______
++		   ___________________2____
++		   ____________________33__
++		   ______________________4_
++	*/
++
++	/* if there's only one memory region, don't bother */
++	if (*pnr_map < 2)
++		return -1;
++
++	old_nr = *pnr_map;
++
++	/* bail out if we find any unreasonable addresses in bios map */
++	for (i=0; i<old_nr; i++)
++		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
++			return -1;
++
++	/* create pointers for initial change-point information (for sorting) */
++	for (i=0; i < 2*old_nr; i++)
++		change_point[i] = &change_point_list[i];
++
++	/* record all known change-points (starting and ending addresses),
++	   omitting those that are for empty memory regions */
++	chgidx = 0;
++	for (i=0; i < old_nr; i++)	{
++		if (biosmap[i].size != 0) {
++			change_point[chgidx]->addr = biosmap[i].addr;
++			change_point[chgidx++]->pbios = &biosmap[i];
++			change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
++			change_point[chgidx++]->pbios = &biosmap[i];
++		}
++	}
++	chg_nr = chgidx;
++
++	/* sort change-point list by memory addresses (low -> high) */
++	still_changing = 1;
++	while (still_changing)	{
++		still_changing = 0;
++		for (i=1; i < chg_nr; i++)  {
++			/* if <current_addr> > <last_addr>, swap */
++			/* or, if current=<start_addr> & last=<end_addr>, swap */
++			if ((change_point[i]->addr < change_point[i-1]->addr) ||
++				((change_point[i]->addr == change_point[i-1]->addr) &&
++				 (change_point[i]->addr == change_point[i]->pbios->addr) &&
++				 (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
++			   )
++			{
++				change_tmp = change_point[i];
++				change_point[i] = change_point[i-1];
++				change_point[i-1] = change_tmp;
++				still_changing=1;
++			}
++		}
++	}
++
++	/* create a new bios memory map, removing overlaps */
++	overlap_entries=0;	 /* number of entries in the overlap table */
++	new_bios_entry=0;	 /* index for creating new bios map entries */
++	last_type = 0;		 /* start with undefined memory type */
++	last_addr = 0;		 /* start with 0 as last starting address */
++	/* loop through change-points, determining effect on the new bios map */
++	for (chgidx=0; chgidx < chg_nr; chgidx++)
++	{
++		/* keep track of all overlapping bios entries */
++		if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
++		{
++			/* add map entry to overlap list (> 1 entry implies an overlap) */
++			overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
++		}
++		else
++		{
++			/* remove entry from list (order independent, so swap with last) */
++			for (i=0; i<overlap_entries; i++)
++			{
++				if (overlap_list[i] == change_point[chgidx]->pbios)
++					overlap_list[i] = overlap_list[overlap_entries-1];
++			}
++			overlap_entries--;
++		}
++		/* if there are overlapping entries, decide which "type" to use */
++		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
++		current_type = 0;
++		for (i=0; i<overlap_entries; i++)
++			if (overlap_list[i]->type > current_type)
++				current_type = overlap_list[i]->type;
++		/* continue building up new bios map based on this information */
++		if (current_type != last_type)	{
++			if (last_type != 0)	 {
++				new_bios[new_bios_entry].size =
++					change_point[chgidx]->addr - last_addr;
++				/* move forward only if the new size was non-zero */
++				if (new_bios[new_bios_entry].size != 0)
++					if (++new_bios_entry >= E820MAX)
++						break; 	/* no more space left for new bios entries */
++			}
++			if (current_type != 0)	{
++				new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
++				new_bios[new_bios_entry].type = current_type;
++				last_addr=change_point[chgidx]->addr;
++			}
++			last_type = current_type;
++		}
++	}
++	new_nr = new_bios_entry;   /* retain count for new bios entries */
++
++	/* copy new bios mapping into original location */
++	memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
++	*pnr_map = new_nr;
++
++	return 0;
++}
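
The diagram above compresses a change-point sweep; the following standalone
sketch replays it on a hypothetical two-entry map (simplified types, qsort
in place of the bubble sort above, larger type value wins):

#include <stdio.h>
#include <stdlib.h>

struct entry { unsigned long long addr, size; unsigned type; };
struct cp { unsigned long long addr; const struct entry *e; };

/* starts sort before ends at equal addresses, as in the sort above */
static int cmp(const void *a, const void *b)
{
	const struct cp *x = a, *y = b;
	int xs = x->addr == x->e->addr, ys = y->addr == y->e->addr;

	if (x->addr != y->addr)
		return x->addr < y->addr ? -1 : 1;
	return ys - xs;
}

int main(void)
{
	struct entry map[] = {
		{ 0x0000, 0x2000, 1 },	/* RAM      [0x0000,0x2000) */
		{ 0x1000, 0x0800, 2 },	/* reserved [0x1000,0x1800) inside it */
	};
	struct cp cps[4];
	const struct entry *overlap[2];
	unsigned long long last_addr = 0;
	unsigned last_type = 0;
	int i, j, n = 0;

	for (i = 0; i < 2; i++) {
		cps[2*i].addr = map[i].addr;
		cps[2*i].e = &map[i];
		cps[2*i+1].addr = map[i].addr + map[i].size;
		cps[2*i+1].e = &map[i];
	}
	qsort(cps, 4, sizeof(cps[0]), cmp);

	for (i = 0; i < 4; i++) {
		unsigned cur = 0;

		if (cps[i].addr == cps[i].e->addr) {
			overlap[n++] = cps[i].e;	/* a region opens */
		} else {
			for (j = 0; j < n; j++)		/* a region closes */
				if (overlap[j] == cps[i].e)
					overlap[j] = overlap[--n];
		}
		for (j = 0; j < n; j++)			/* larger type wins */
			if (overlap[j]->type > cur)
				cur = overlap[j]->type;
		if (cur != last_type) {
			if (last_type)	/* emits the three split runs */
				printf("[%#llx-%#llx) type %u\n",
				       last_addr, cps[i].addr, last_type);
			last_addr = cps[i].addr;
			last_type = cur;
		}
	}
	return 0;
}
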
++
++/*
++ * Copy the BIOS e820 map into a safe place.
++ *
++ * Sanity-check it while we're at it.
++ *
++ * If we're lucky and live on a modern system, the setup code
++ * will have given us a memory map that we can use to properly
++ * set up memory.  If we aren't, we'll fake a memory map.
++ *
++ * We check to see that the memory map contains at least 2 elements
++ * before we'll use it, because the detection code in setup.S may
++ * not be perfect and most every PC known to man has two memory
++ * regions: one from 0 to 640k, and one from 1mb up.  (The IBM
++ * thinkpad 560x, for example, does not cooperate with the memory
++ * detection code.)
++ */
++static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
++{
++#ifndef CONFIG_XEN
++	/* Only one memory region (or negative)? Ignore it */
++	if (nr_map < 2)
++		return -1;
++#else
++	BUG_ON(nr_map < 1);
++#endif
++
++	do {
++		unsigned long start = biosmap->addr;
++		unsigned long size = biosmap->size;
++		unsigned long end = start + size;
++		unsigned long type = biosmap->type;
++
++		/* Overflow in 64 bits? Ignore the memory map. */
++		if (start > end)
++			return -1;
++
++#ifndef CONFIG_XEN
++		/*
++		 * Some BIOSes claim RAM in the 640k - 1M region.
++		 * Not right. Fix it up.
++		 * 
++		 * This should be removed on Hammer which is supposed to not
++		 * have non e820 covered ISA mappings there, but I had some strange
++		 * problems so it stays for now.  -AK
++		 */
++		if (type == E820_RAM) {
++			if (start < 0x100000ULL && end > 0xA0000ULL) {
++				if (start < 0xA0000ULL)
++					add_memory_region(start, 0xA0000ULL-start, type);
++				if (end <= 0x100000ULL)
++					continue;
++				start = 0x100000ULL;
++				size = end - start;
++			}
++		}
++#endif
++
++		add_memory_region(start, size, type);
++	} while (biosmap++,--nr_map);
++
++#ifdef CONFIG_XEN
++	if (is_initial_xendomain()) {
++		struct xen_memory_map memmap;
++
++		memmap.nr_entries = E820MAX;
++		set_xen_guest_handle(memmap.buffer, machine_e820.map);
++
++		if (HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
++			BUG();
++		machine_e820.nr_map = memmap.nr_entries;
++	} else
++		machine_e820 = e820;
++#endif
++
++	return 0;
++}
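
A worked instance of the 640k-1M fixup above, on a hypothetical BIOS entry
spanning the hole (printing stands in for add_memory_region()):

#include <stdio.h>

int main(void)
{
	/* hypothetical BIOS entry claiming RAM across the 640k-1M hole */
	unsigned long long start = 0x90000ULL, end = 0x110000ULL;

	if (start < 0x100000ULL && end > 0xA0000ULL) {
		if (start < 0xA0000ULL)		/* keep the low fragment */
			printf("RAM [%#llx-%#llx)\n", start, 0xA0000ULL);
		if (end > 0x100000ULL)		/* keep the high fragment */
			printf("RAM [%#llx-%#llx)\n", 0x100000ULL, end);
	}
	return 0;
}
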
++
++#ifndef CONFIG_XEN
++void __init setup_memory_region(void)
++{
++	char *who = "BIOS-e820";
++
++	/*
++	 * Try to copy the BIOS-supplied E820-map.
++	 *
++	 * Otherwise fake a memory map; one section from 0k->640k,
++	 * the next section from 1mb->appropriate_mem_k
++	 */
++	sanitize_e820_map(E820_MAP, &E820_MAP_NR);
++	if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
++		unsigned long mem_size;
++
++		/* compare results from other methods and take the greater */
++		if (ALT_MEM_K < EXT_MEM_K) {
++			mem_size = EXT_MEM_K;
++			who = "BIOS-88";
++		} else {
++			mem_size = ALT_MEM_K;
++			who = "BIOS-e801";
++		}
++
++		e820.nr_map = 0;
++		add_memory_region(0, LOWMEMSIZE(), E820_RAM);
++		add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
++  	}
++	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
++	e820_print_map(who);
++}
++
++#else  /* CONFIG_XEN */
++
++void __init setup_memory_region(void)
++{
++	int rc;
++	struct xen_memory_map memmap;
++	/*
++	 * This is rather large for a stack variable but this early in
++	 * the boot process we know we have plenty of slack space.
++	 */
++	struct e820entry map[E820MAX];
++
++	memmap.nr_entries = E820MAX;
++	set_xen_guest_handle(memmap.buffer, map);
++
++	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
++	if ( rc == -ENOSYS ) {
++		memmap.nr_entries = 1;
++		map[0].addr = 0ULL;
++		map[0].size = xen_start_info->nr_pages << PAGE_SHIFT;
++		/* 8MB slack (to balance backend allocations). */
++		map[0].size += 8 << 20;
++		map[0].type = E820_RAM;
++		rc = 0;
++	}
++	BUG_ON(rc);
++
++	sanitize_e820_map(map, (char *)&memmap.nr_entries);
++
++	BUG_ON(copy_e820_map(map, memmap.nr_entries) < 0);
++
++	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
++	e820_print_map("Xen");
++}
++#endif
++
++void __init parse_memopt(char *p, char **from) 
++{ 
++	int i;
++	unsigned long current_end;
++	unsigned long end;
++
++	end_user_pfn = memparse(p, from);
++	end_user_pfn >>= PAGE_SHIFT;	
++
++	end = end_user_pfn<<PAGE_SHIFT;
++	i = e820.nr_map-1;
++	current_end = e820.map[i].addr + e820.map[i].size;
++
++	if (current_end < end) {
++		/*
++                 * The e820 map ends before our requested size so
++                 * extend the final entry to the requested address.
++                 */
++		if (e820.map[i].type == E820_RAM)
++			e820.map[i].size = end - e820.map[i].addr;
++		else
++			add_memory_region(current_end, end - current_end, E820_RAM);
++	}
++} 
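
Illustrative mem= options as this parser handles them (the exact effect
depends on the map Xen or the BIOS supplies; mem= primarily sets
end_user_pfn, which e820_end_of_ram() above uses as a clamp):

    mem=512M    # clamp usable memory to 512MB
    mem=8G      # if the map ends below 8GB, the final RAM entry is extended
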
++
++void __init parse_memmapopt(char *p, char **from)
++{
++	unsigned long long start_at, mem_size;
++
++	mem_size = memparse(p, from);
++	p = *from;
++	if (*p == '@') {
++		start_at = memparse(p+1, from);
++		add_memory_region(start_at, mem_size, E820_RAM);
++	} else if (*p == '#') {
++		start_at = memparse(p+1, from);
++		add_memory_region(start_at, mem_size, E820_ACPI);
++	} else if (*p == '$') {
++		start_at = memparse(p+1, from);
++		add_memory_region(start_at, mem_size, E820_RESERVED);
++	} else {
++		end_user_pfn = (mem_size >> PAGE_SHIFT);
++	}
++	p = *from;
++}
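
Illustrative memmap= forms, matching the '@', '#' and '$' cases above:

    memmap=512M@64M     # force a RAM region starting at 64MB
    memmap=64K#0xe0000  # mark a range as ACPI data
    memmap=16K$0xa0000  # mark a range as reserved
    memmap=2G           # bare size: behaves like mem= (sets end_user_pfn)
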
++
++unsigned long pci_mem_start = 0xaeedbabe;
++EXPORT_SYMBOL(pci_mem_start);
++
++/*
++ * Search for the biggest gap in the low 32 bits of the e820
++ * memory space, and pass that space to PCI so it can assign MMIO
++ * resources there for hotplug or unconfigured devices.
++ * Hopefully the BIOS left enough space.
++ */
++__init void e820_setup_gap(struct e820entry *e820, int nr_map)
++{
++	unsigned long gapstart, gapsize, round;
++	unsigned long last;
++	int i;
++	int found = 0;
++
++	last = 0x100000000ull;
++	gapstart = 0x10000000;
++	gapsize = 0x400000;
++	i = nr_map;
++	while (--i >= 0) {
++		unsigned long long start = e820[i].addr;
++		unsigned long long end = start + e820[i].size;
++
++		/*
++		 * Since "last" is at most 4GB, we know we'll
++		 * fit in 32 bits if this condition is true
++		 */
++		if (last > end) {
++			unsigned long gap = last - end;
++
++			if (gap > gapsize) {
++				gapsize = gap;
++				gapstart = end;
++				found = 1;
++			}
++		}
++		if (start < last)
++			last = start;
++	}
++
++	if (!found) {
++		gapstart = (end_pfn << PAGE_SHIFT) + 1024*1024;
++		printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit address range\n"
++		       KERN_ERR "PCI: Unassigned devices with 32bit resource registers may break!\n");
++	}
++
++	/*
++	 * See how much we want to round up: start off with
++	 * rounding to the next 1MB area.
++	 */
++	round = 0x100000;
++	while ((gapsize >> 4) > round)
++		round += round;
++	/* Fun with two's complement */
++	pci_mem_start = (gapstart + round) & -round;
++
++	printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
++		pci_mem_start, gapstart, gapsize);
++}
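
A worked instance of the rounding at the end of e820_setup_gap(), with
hypothetical gap numbers:

#include <stdio.h>

int main(void)
{
	/* hypothetical gap found below 4GB: [0xc0000000, 0xf8000000) */
	unsigned long gapstart = 0xc0000000UL, gapsize = 0x38000000UL;
	unsigned long round = 0x100000;		/* start at 1MB */

	while ((gapsize >> 4) > round)		/* ~1/16th of the gap */
		round += round;			/* 1MB, 2MB, ..., 64MB */

	/* -round is the two's-complement alignment mask, i.e. ~(round - 1);
	 * prints pci_mem_start = 0xc4000000 */
	printf("pci_mem_start = %#lx\n", (gapstart + round) & -round);
	return 0;
}
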
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/e820.c
+--- a/arch/x86_64/kernel/e820.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/x86_64/kernel/e820.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -93,7 +93,7 @@
+  * This function checks if any part of the range <start,end> is mapped
+  * with type.
+  */
+-int __meminit
++int
+ e820_any_mapped(unsigned long start, unsigned long end, unsigned type)
+ { 
+ 	int i;
+@@ -107,6 +107,7 @@
+ 	} 
+ 	return 0;
+ }
++EXPORT_SYMBOL_GPL(e820_any_mapped);
+ 
+ /*
+  * This function checks if the entire range <start,end> is mapped with type.
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/early_printk-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/kernel/early_printk-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,302 @@
++#include <linux/console.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/string.h>
++#include <linux/screen_info.h>
++#include <asm/io.h>
++#include <asm/processor.h>
++#include <asm/fcntl.h>
++
++/* Simple VGA output */
++
++#ifdef __i386__
++#include <asm/setup.h>
++#define VGABASE		(__ISA_IO_base + 0xb8000)
++#else
++#include <asm/bootsetup.h>
++#define VGABASE		((void __iomem *)0xffffffff800b8000UL)
++#endif
++
++#ifndef CONFIG_XEN
++static int max_ypos = 25, max_xpos = 80;
++static int current_ypos = 25, current_xpos = 0; 
++
++static void early_vga_write(struct console *con, const char *str, unsigned n)
++{
++	char c;
++	int  i, k, j;
++
++	while ((c = *str++) != '\0' && n-- > 0) {
++		if (current_ypos >= max_ypos) {
++			/* scroll 1 line up */
++			for (k = 1, j = 0; k < max_ypos; k++, j++) {
++				for (i = 0; i < max_xpos; i++) {
++					writew(readw(VGABASE+2*(max_xpos*k+i)),
++					       VGABASE + 2*(max_xpos*j + i));
++				}
++			}
++			for (i = 0; i < max_xpos; i++)
++				writew(0x720, VGABASE + 2*(max_xpos*j + i));
++			current_ypos = max_ypos-1;
++		}
++		if (c == '\n') {
++			current_xpos = 0;
++			current_ypos++;
++		} else if (c != '\r')  {
++			writew(((0x7 << 8) | (unsigned short) c),
++			       VGABASE + 2*(max_xpos*current_ypos +
++						current_xpos++));
++			if (current_xpos >= max_xpos) {
++				current_xpos = 0;
++				current_ypos++;
++			}
++		}
++	}
++}
++
++static struct console early_vga_console = {
++	.name =		"earlyvga",
++	.write =	early_vga_write,
++	.flags =	CON_PRINTBUFFER,
++	.index =	-1,
++};
++
++/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */
++
++static int early_serial_base = 0x3f8;  /* ttyS0 */
++
++#define XMTRDY          0x20
++
++#define DLAB		0x80
++
++#define TXR             0       /*  Transmit register (WRITE) */
++#define RXR             0       /*  Receive register  (READ)  */
++#define IER             1       /*  Interrupt Enable          */
++#define IIR             2       /*  Interrupt ID              */
++#define FCR             2       /*  FIFO control              */
++#define LCR             3       /*  Line control              */
++#define MCR             4       /*  Modem control             */
++#define LSR             5       /*  Line Status               */
++#define MSR             6       /*  Modem Status              */
++#define DLL             0       /*  Divisor Latch Low         */
++#define DLH             1       /*  Divisor latch High        */
++
++static int early_serial_putc(unsigned char ch)
++{
++	unsigned timeout = 0xffff;
++	while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
++		cpu_relax();
++	outb(ch, early_serial_base + TXR);
++	return timeout ? 0 : -1;
++}
++
++static void early_serial_write(struct console *con, const char *s, unsigned n)
++{
++	while (*s && n-- > 0) {
++		early_serial_putc(*s);
++		if (*s == '\n')
++			early_serial_putc('\r');
++		s++;
++	}
++}
++
++#define DEFAULT_BAUD 9600
++
++static __init void early_serial_init(char *s)
++{
++	unsigned char c;
++	unsigned divisor;
++	unsigned baud = DEFAULT_BAUD;
++	char *e;
++
++	if (*s == ',')
++		++s;
++
++	if (*s) {
++		unsigned port;
++		if (!strncmp(s,"0x",2)) {
++			early_serial_base = simple_strtoul(s, &e, 16);
++		} else {
++			static int bases[] = { 0x3f8, 0x2f8 };
++
++			if (!strncmp(s,"ttyS",4))
++				s += 4;
++			port = simple_strtoul(s, &e, 10);
++			if (port > 1 || s == e)
++				port = 0;
++			early_serial_base = bases[port];
++		}
++		s += strcspn(s, ",");
++		if (*s == ',')
++			s++;
++	}
++
++	outb(0x3, early_serial_base + LCR);	/* 8n1 */
++	outb(0, early_serial_base + IER);	/* no interrupt */
++	outb(0, early_serial_base + FCR);	/* no fifo */
++	outb(0x3, early_serial_base + MCR);	/* DTR + RTS */
++
++	if (*s) {
++		baud = simple_strtoul(s, &e, 0);
++		if (baud == 0 || s == e)
++			baud = DEFAULT_BAUD;
++	}
++
++	divisor = 115200 / baud;
++	c = inb(early_serial_base + LCR);
++	outb(c | DLAB, early_serial_base + LCR);
++	outb(divisor & 0xff, early_serial_base + DLL);
++	outb((divisor >> 8) & 0xff, early_serial_base + DLH);
++	outb(c & ~DLAB, early_serial_base + LCR);
++}
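
A small standalone check of the divisor-latch arithmetic used above
(16550 convention: divisor = 115200 / baud, split into DLL/DLH):

#include <stdio.h>

int main(void)
{
	unsigned bauds[] = { 9600, 38400, 115200 };
	unsigned i;

	for (i = 0; i < 3; i++) {
		unsigned divisor = 115200 / bauds[i];

		/* e.g. 9600 baud -> divisor 12 -> DLL=0x0c, DLH=0x00 */
		printf("baud %6u -> DLL=0x%02x DLH=0x%02x\n",
		       bauds[i], divisor & 0xff, (divisor >> 8) & 0xff);
	}
	return 0;
}
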
++
++#else /* CONFIG_XEN */
++
++static void
++early_serial_write(struct console *con, const char *s, unsigned count)
++{
++	int n;
++
++	while (count > 0) {
++		n = HYPERVISOR_console_io(CONSOLEIO_write, count, (char *)s);
++		if (n <= 0)
++			break;
++		count -= n;
++		s += n;
++	}
++} 
++
++static __init void early_serial_init(char *s)
++{
++}
++
++/*
++ * No early VGA console on Xen, as we do not have convenient ISA-space
++ * mappings. Someone should fix this for domain 0. For now, use fake serial.
++ */
++#define early_vga_console early_serial_console
++
++#endif
++
++static struct console early_serial_console = {
++	.name =		"earlyser",
++	.write =	early_serial_write,
++	.flags =	CON_PRINTBUFFER,
++	.index =	-1,
++};
++
++/* Console interface to a host file on AMD's SimNow! */
++
++static int simnow_fd;
++
++enum {
++	MAGIC1 = 0xBACCD00A,
++	MAGIC2 = 0xCA110000,
++	XOPEN = 5,
++	XWRITE = 4,
++};
++
++static noinline long simnow(long cmd, long a, long b, long c)
++{
++	long ret;
++	asm volatile("cpuid" :
++		     "=a" (ret) :
++		     "b" (a), "c" (b), "d" (c), "0" (MAGIC1), "D" (cmd + MAGIC2));
++	return ret;
++}
++
++void __init simnow_init(char *str)
++{
++	char *fn = "klog";
++	if (*str == '=')
++		fn = ++str;
++	/* error ignored */
++	simnow_fd = simnow(XOPEN, (unsigned long)fn, O_WRONLY|O_APPEND|O_CREAT, 0644);
++}
++
++static void simnow_write(struct console *con, const char *s, unsigned n)
++{
++	simnow(XWRITE, simnow_fd, (unsigned long)s, n);
++}
++
++static struct console simnow_console = {
++	.name =		"simnow",
++	.write =	simnow_write,
++	.flags =	CON_PRINTBUFFER,
++	.index =	-1,
++};
++
++/* Direct interface for emergencies */
++struct console *early_console = &early_vga_console;
++static int early_console_initialized = 0;
++
++void early_printk(const char *fmt, ...)
++{
++	char buf[512];
++	int n;
++	va_list ap;
++
++	va_start(ap,fmt);
++	n = vscnprintf(buf,512,fmt,ap);
++	early_console->write(early_console,buf,n);
++	va_end(ap);
++}
++
++static int __initdata keep_early;
++
++int __init setup_early_printk(char *opt)
++{
++	char *space;
++	char buf[256];
++
++	if (early_console_initialized)
++		return 1;
++
++	strlcpy(buf,opt,sizeof(buf));
++	space = strchr(buf, ' ');
++	if (space)
++		*space = 0;
++
++	if (strstr(buf,"keep"))
++		keep_early = 1;
++
++	if (!strncmp(buf, "serial", 6)) {
++		early_serial_init(buf + 6);
++		early_console = &early_serial_console;
++	} else if (!strncmp(buf, "ttyS", 4)) {
++		early_serial_init(buf);
++		early_console = &early_serial_console;
++	} else if (!strncmp(buf, "vga", 3)
++#ifndef CONFIG_XEN
++	           && SCREEN_INFO.orig_video_isVGA == 1) {
++		max_xpos = SCREEN_INFO.orig_video_cols;
++		max_ypos = SCREEN_INFO.orig_video_lines;
++		current_ypos = SCREEN_INFO.orig_y;
++#else
++	           || !strncmp(buf, "xen", 3)) {
++#endif
++		early_console = &early_vga_console;
++ 	} else if (!strncmp(buf, "simnow", 6)) {
++ 		simnow_init(buf + 6);
++ 		early_console = &simnow_console;
++ 		keep_early = 1;
++	}
++	early_console_initialized = 1;
++	register_console(early_console);
++	return 0;
++}
++
++void __init disable_early_printk(void)
++{
++	if (!early_console_initialized || !early_console)
++		return;
++	if (!keep_early) {
++		printk("disabling early console\n");
++		unregister_console(early_console);
++		early_console_initialized = 0;
++	} else {
++		printk("keeping early console\n");
++	}
++}
++
++__setup("earlyprintk=", setup_early_printk);
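
Illustrative boot options accepted by this parser (the strings come from
the strncmp/strstr checks above; ",keep" prevents disable_early_printk()
from unregistering the console later):

    earlyprintk=serial,ttyS0,115200,keep  # 16550 at 0x3f8, keep past boot
    earlyprintk=ttyS1,9600                # shorthand for the second port
    earlyprintk=vga                       # VGA text console (non-Xen build)
    earlyprintk=xen                       # hypervisor console (CONFIG_XEN)
    earlyprintk=simnow=klog               # AMD SimNow! host-file console
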
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/entry-xen.S
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/kernel/entry-xen.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1322 @@
++/*
++ *  linux/arch/x86_64/entry.S
++ *
++ *  Copyright (C) 1991, 1992  Linus Torvalds
++ *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
++ *  Copyright (C) 2000  Pavel Machek <pavel at suse.cz>
++ * 
++ *  $Id$
++ *
++ *  Jun Nakajima <jun.nakajima at intel.com>
++ *  Asit Mallick <asit.k.mallick at intel.com>
++ *      Modified for Xen
++ */
++
++/*
++ * entry.S contains the system-call and fault low-level handling routines.
++ *
++ * NOTE: This code handles signal-recognition, which happens every time
++ * after an interrupt and after each system call.
++ * 
++ * Normal syscalls and interrupts don't save a full stack frame; this is
++ * only done for syscall tracing, signals or fork/exec et al.
++ * 
++ * A note on terminology:	 
++ * - top of stack: Architecture defined interrupt frame from SS to RIP 
++ * at the top of the kernel process stack.	
++ * - partial stack frame: partially saved registers up to R11.
++ * - full stack frame: Like partial stack frame, but all registers saved.
++ *	
++ * TODO:	 
++ * - schedule it carefully for the final hardware.
++ */
++
++#define ASSEMBLY 1
++#include <linux/linkage.h>
++#include <asm/segment.h>
++#include <asm/smp.h>
++#include <asm/cache.h>
++#include <asm/errno.h>
++#include <asm/dwarf2.h>
++#include <asm/calling.h>
++#include <asm/asm-offsets.h>
++#include <asm/msr.h>
++#include <asm/unistd.h>
++#include <asm/thread_info.h>
++#include <asm/hw_irq.h>
++#include <asm/page.h>
++#include <asm/irqflags.h>
++#include <asm/errno.h>
++#include <xen/interface/arch-x86_64.h>
++#include <xen/interface/features.h>
++
++#include "xen_entry.S"
++	
++	.code64
++
++#ifndef CONFIG_PREEMPT
++#define retint_kernel retint_restore_args
++#endif	
++
++
++.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
++#ifdef CONFIG_TRACE_IRQFLAGS
++	bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
++	jnc  1f
++	TRACE_IRQS_ON
++1:
++#endif
++.endm
++
++NMI_MASK = 0x80000000
++	
++/*
++ * C code is not supposed to know about undefined top of stack. Every time 
++ * a C function with a pt_regs argument is called from the SYSCALL-based
++ * fast path, FIXUP_TOP_OF_STACK is needed.
++ * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
++ * manipulation.
++ */        	
++		
++	/* %rsp:at FRAMEEND */ 
++	.macro FIXUP_TOP_OF_STACK tmp
++	movq    $__USER_CS,CS(%rsp)
++	movq 	$-1,RCX(%rsp)
++	.endm
++
++	.macro RESTORE_TOP_OF_STACK tmp,offset=0
++	.endm
++
++	.macro FAKE_STACK_FRAME child_rip
++	/* push in order ss, rsp, eflags, cs, rip */
++	xorl %eax, %eax
++	pushq %rax /* ss */
++	CFI_ADJUST_CFA_OFFSET	8
++	/*CFI_REL_OFFSET	ss,0*/
++	pushq %rax /* rsp */
++	CFI_ADJUST_CFA_OFFSET	8
++	CFI_REL_OFFSET	rsp,0
++	pushq $(1<<9) /* eflags - interrupts on */
++	CFI_ADJUST_CFA_OFFSET	8
++	/*CFI_REL_OFFSET	rflags,0*/
++	pushq $__KERNEL_CS /* cs */
++	CFI_ADJUST_CFA_OFFSET	8
++	/*CFI_REL_OFFSET	cs,0*/
++	pushq \child_rip /* rip */
++	CFI_ADJUST_CFA_OFFSET	8
++	CFI_REL_OFFSET	rip,0
++	pushq	%rax /* orig rax */
++	CFI_ADJUST_CFA_OFFSET	8
++	.endm
++
++	.macro UNFAKE_STACK_FRAME
++	addq $8*6, %rsp
++	CFI_ADJUST_CFA_OFFSET	-(6*8)
++	.endm
++
++	.macro	CFI_DEFAULT_STACK start=1,adj=0
++	.if \start
++	CFI_STARTPROC	simple
++	CFI_DEF_CFA	rsp,SS+8-(\adj*ARGOFFSET)
++	.else
++	CFI_DEF_CFA_OFFSET SS+8-(\adj*ARGOFFSET)
++	.endif
++	.if \adj == 0
++	CFI_REL_OFFSET	r15,R15
++	CFI_REL_OFFSET	r14,R14
++	CFI_REL_OFFSET	r13,R13
++	CFI_REL_OFFSET	r12,R12
++	CFI_REL_OFFSET	rbp,RBP
++	CFI_REL_OFFSET	rbx,RBX
++	.endif
++	CFI_REL_OFFSET	r11,R11
++	CFI_REL_OFFSET	r10,R10
++	CFI_REL_OFFSET	r9,R9
++	CFI_REL_OFFSET	r8,R8
++	CFI_REL_OFFSET	rax,RAX
++	CFI_REL_OFFSET	rcx,RCX
++	CFI_REL_OFFSET	rdx,RDX
++	CFI_REL_OFFSET	rsi,RSI
++	CFI_REL_OFFSET	rdi,RDI
++	CFI_REL_OFFSET	rip,RIP
++	/*CFI_REL_OFFSET	cs,CS*/
++	/*CFI_REL_OFFSET	rflags,EFLAGS*/
++	CFI_REL_OFFSET	rsp,RSP
++	/*CFI_REL_OFFSET	ss,SS*/
++	.endm
++
++        /*
++         * Must be consistent with the definition in arch-x86/xen-x86_64.h:
++         *     struct iret_context {
++         *        u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
++         *     };
++         * with rax, r11, and rcx being taken care of in the hypercall stub.
++         */
++	.macro HYPERVISOR_IRET flag
++	testb $3,1*8(%rsp)
++	jnz   2f
++	testl $NMI_MASK,2*8(%rsp)
++	jnz   2f
++
++	cmpb  $0,(xen_features+XENFEAT_supervisor_mode_kernel)(%rip)
++	jne   1f
++
++	/* Direct iret to kernel space. Correct CS and SS. */
++	orl   $3,1*8(%rsp)
++	orl   $3,4*8(%rsp)
++1:	iretq
++
++2:	/* Slow iret via hypervisor. */
++	andl  $~NMI_MASK, 2*8(%rsp)
++	pushq $\flag
++	jmp  hypercall_page + (__HYPERVISOR_iret * 32)
++	.endm
++
++/*
++ * A newly forked process directly context switches into this.
++ */ 	
++/* rdi:	prev */	
++ENTRY(ret_from_fork)
++	CFI_DEFAULT_STACK
++	push kernel_eflags(%rip)
++	CFI_ADJUST_CFA_OFFSET 4
++	popf				# reset kernel eflags
++	CFI_ADJUST_CFA_OFFSET -4
++	call schedule_tail
++	GET_THREAD_INFO(%rcx)
++	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
++	jnz rff_trace
++rff_action:	
++	RESTORE_REST
++	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
++	je   int_ret_from_sys_call
++	testl $_TIF_IA32,threadinfo_flags(%rcx)
++	jnz  int_ret_from_sys_call
++	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
++	jmp ret_from_sys_call
++rff_trace:
++	movq %rsp,%rdi
++	call syscall_trace_leave
++	GET_THREAD_INFO(%rcx)	
++	jmp rff_action
++	CFI_ENDPROC
++END(ret_from_fork)
++
++/*
++ * initial frame state for interrupts and exceptions
++ */
++	.macro _frame ref
++	CFI_STARTPROC simple
++	CFI_DEF_CFA rsp,SS+8-\ref
++	/*CFI_REL_OFFSET ss,SS-\ref*/
++	CFI_REL_OFFSET rsp,RSP-\ref
++	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
++	/*CFI_REL_OFFSET cs,CS-\ref*/
++	CFI_REL_OFFSET rip,RIP-\ref
++	.endm
++
++/*
++ * System call entry. Up to 6 arguments in registers are supported.
++ *
++ * SYSCALL does not save anything on the stack and does not change the
++ * stack pointer.
++ */
++		
++/*
++ * Register setup:	
++ * rax  system call number
++ * rdi  arg0
++ * rcx  return address for syscall/sysret, C arg3 
++ * rsi  arg1
++ * rdx  arg2	
++ * r10  arg3 	(--> moved to rcx for C)
++ * r8   arg4
++ * r9   arg5
++ * r11  eflags for syscall/sysret, temporary for C
++ * r12-r15,rbp,rbx saved by C code, not touched. 		
++ * 
++ * Interrupts are enabled on entry.
++ * Only called from user space.
++ *
++ * XXX	if we had a free scratch register we could save the RSP into the stack frame
++ *      and report it properly in ps. Unfortunately we don't have one.
++ *
++ * When the user can change the frames, always force IRET. That is because
++ * it deals with non-canonical addresses better. SYSRET has trouble
++ * with them due to bugs in both AMD and Intel CPUs.
++ */ 			 		
++
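
Seen from user space, the convention above is the ordinary x86-64 Linux
syscall ABI. A minimal sketch with gcc inline assembly (raw_write() is a
hypothetical helper, not kernel code; note rcx/r11 in the clobber list,
matching the register comments above):

/* rax = number, args in rdi/rsi/rdx; SYSCALL clobbers rcx and r11 */
static long raw_write(int fd, const void *buf, unsigned long len)
{
	long ret;

	asm volatile("syscall"
		     : "=a" (ret)
		     : "0" (1L /* __NR_write on x86-64 */),
		       "D" ((long)fd), "S" (buf), "d" (len)
		     : "rcx", "r11", "memory");
	return ret;
}

int main(void)
{
	raw_write(1, "hello\n", 6);
	return 0;
}
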
++ENTRY(system_call)
++	_frame (RIP-0x10)
++	SAVE_ARGS -8,0
++	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp) 
++	GET_THREAD_INFO(%rcx)
++	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
++	CFI_REMEMBER_STATE
++	jnz tracesys
++	cmpq $__NR_syscall_max,%rax
++	ja badsys
++	movq %r10,%rcx
++	call *sys_call_table(,%rax,8)  # XXX:	 rip relative
++	movq %rax,RAX-ARGOFFSET(%rsp)
++/*
++ * Syscall return path ending with SYSRET (fast path)
++ * Has incomplete stack frame and undefined top of stack. 
++ */		
++	.globl ret_from_sys_call
++ret_from_sys_call:
++	movl $_TIF_ALLWORK_MASK,%edi
++	/* edi:	flagmask */
++sysret_check:		
++	GET_THREAD_INFO(%rcx)
++        XEN_BLOCK_EVENTS(%rsi)        
++	TRACE_IRQS_OFF
++	movl threadinfo_flags(%rcx),%edx
++	andl %edi,%edx
++	CFI_REMEMBER_STATE
++	jnz  sysret_careful 
++	/*
++	 * sysretq will re-enable interrupts:
++	 */
++	TRACE_IRQS_ON
++        XEN_UNBLOCK_EVENTS(%rsi)                
++	RESTORE_ARGS 0,8,0
++        HYPERVISOR_IRET VGCF_IN_SYSCALL
++
++	/* Handle reschedules */
++	/* edx:	work, edi: workmask */	
++sysret_careful:
++	CFI_RESTORE_STATE
++	bt $TIF_NEED_RESCHED,%edx
++	jnc sysret_signal
++	TRACE_IRQS_ON
++	XEN_UNBLOCK_EVENTS(%rsi)
++	pushq %rdi
++	CFI_ADJUST_CFA_OFFSET 8
++	call schedule
++	popq  %rdi
++	CFI_ADJUST_CFA_OFFSET -8
++	jmp sysret_check
++
++	/* Handle a signal */ 
++sysret_signal:
++	TRACE_IRQS_ON
++/*	sti */
++        XEN_UNBLOCK_EVENTS(%rsi)        
++	testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
++	jz    1f
++
++	/* Really a signal */
++	/* edx:	work flags (arg3) */
++	leaq do_notify_resume(%rip),%rax
++	leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
++	xorl %esi,%esi # oldset -> arg2
++	call ptregscall_common
++1:	movl $_TIF_NEED_RESCHED,%edi
++	/* Use IRET because user could have changed frame. This
++	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
++	XEN_BLOCK_EVENTS(%rsi)
++	TRACE_IRQS_OFF
++	jmp int_with_check
++	
++badsys:
++	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
++	jmp ret_from_sys_call
++
++	/* Do syscall tracing */
++tracesys:			 
++	CFI_RESTORE_STATE
++	SAVE_REST
++	movq $-ENOSYS,RAX(%rsp)
++	FIXUP_TOP_OF_STACK %rdi
++	movq %rsp,%rdi
++	call syscall_trace_enter
++	LOAD_ARGS ARGOFFSET  /* reload args from stack in case ptrace changed it */
++	RESTORE_REST
++	cmpq $__NR_syscall_max,%rax
++	ja  int_ret_from_sys_call
++	movq %r10,%rcx	/* fixup for C */
++	call *sys_call_table(,%rax,8)
++	movq %rax,RAX-ARGOFFSET(%rsp)
++	/* Use IRET because user could have changed frame */
++	jmp int_ret_from_sys_call
++	CFI_ENDPROC
++END(system_call)
++		
++/* 
++ * Syscall return path ending with IRET.
++ * Has correct top of stack, but partial stack frame.
++ */ 	
++ENTRY(int_ret_from_sys_call)
++	CFI_STARTPROC	simple
++	CFI_DEF_CFA	rsp,SS+8-ARGOFFSET
++	/*CFI_REL_OFFSET	ss,SS-ARGOFFSET*/
++	CFI_REL_OFFSET	rsp,RSP-ARGOFFSET
++	/*CFI_REL_OFFSET	rflags,EFLAGS-ARGOFFSET*/
++	/*CFI_REL_OFFSET	cs,CS-ARGOFFSET*/
++	CFI_REL_OFFSET	rip,RIP-ARGOFFSET
++	CFI_REL_OFFSET	rdx,RDX-ARGOFFSET
++	CFI_REL_OFFSET	rcx,RCX-ARGOFFSET
++	CFI_REL_OFFSET	rax,RAX-ARGOFFSET
++	CFI_REL_OFFSET	rdi,RDI-ARGOFFSET
++	CFI_REL_OFFSET	rsi,RSI-ARGOFFSET
++	CFI_REL_OFFSET	r8,R8-ARGOFFSET
++	CFI_REL_OFFSET	r9,R9-ARGOFFSET
++	CFI_REL_OFFSET	r10,R10-ARGOFFSET
++	CFI_REL_OFFSET	r11,R11-ARGOFFSET
++        XEN_BLOCK_EVENTS(%rsi)
++	TRACE_IRQS_OFF
++	testb $3,CS-ARGOFFSET(%rsp)
++        jnz 1f
++        /* Need to set the proper %ss (not NULL) for ring 3 iretq */
++        movl $__KERNEL_DS,SS-ARGOFFSET(%rsp)
++        jmp retint_restore_args   # return from ring-3 kernel
++1:              
++	movl $_TIF_ALLWORK_MASK,%edi
++	/* edi:	mask to check */
++int_with_check:
++	GET_THREAD_INFO(%rcx)
++	movl threadinfo_flags(%rcx),%edx
++	andl %edi,%edx
++	jnz   int_careful
++	andl    $~TS_COMPAT,threadinfo_status(%rcx)
++	jmp   retint_restore_args
++
++	/* Either reschedule or signal or syscall exit tracking needed. */
++	/* First do a reschedule test. */
++	/* edx:	work, edi: workmask */
++int_careful:
++	bt $TIF_NEED_RESCHED,%edx
++	jnc  int_very_careful
++	TRACE_IRQS_ON
++/*	sti */
++        XEN_UNBLOCK_EVENTS(%rsi)
++	pushq %rdi
++	CFI_ADJUST_CFA_OFFSET 8
++	call schedule
++	popq %rdi
++	CFI_ADJUST_CFA_OFFSET -8
++	XEN_BLOCK_EVENTS(%rsi)
++	TRACE_IRQS_OFF
++	jmp int_with_check
++
++	/* handle signals and tracing -- both require a full stack frame */
++int_very_careful:
++	TRACE_IRQS_ON
++/*	sti */
++        XEN_UNBLOCK_EVENTS(%rsi)
++	SAVE_REST
++	/* Check for syscall exit trace */	
++	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
++	jz int_signal
++	pushq %rdi
++	CFI_ADJUST_CFA_OFFSET 8
++	leaq 8(%rsp),%rdi	# &ptregs -> arg1	
++	call syscall_trace_leave
++	popq %rdi
++	CFI_ADJUST_CFA_OFFSET -8
++	andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
++	XEN_BLOCK_EVENTS(%rsi)
++	TRACE_IRQS_OFF
++	jmp int_restore_rest
++	
++int_signal:
++	testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
++	jz 1f
++	movq %rsp,%rdi		# &ptregs -> arg1
++	xorl %esi,%esi		# oldset -> arg2
++	call do_notify_resume
++1:	movl $_TIF_NEED_RESCHED,%edi	
++int_restore_rest:
++	RESTORE_REST
++	XEN_BLOCK_EVENTS(%rsi)
++	TRACE_IRQS_OFF
++	jmp int_with_check
++	CFI_ENDPROC
++END(int_ret_from_sys_call)
++		
++/* 
++ * Certain special system calls that need to save a complete full stack frame.
++ */ 								
++	
++	.macro PTREGSCALL label,func,arg
++	.globl \label
++\label:
++	leaq	\func(%rip),%rax
++	leaq    -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
++	jmp	ptregscall_common
++END(\label)
++	.endm
++
++	CFI_STARTPROC
++
++	PTREGSCALL stub_clone, sys_clone, %r8
++	PTREGSCALL stub_fork, sys_fork, %rdi
++	PTREGSCALL stub_vfork, sys_vfork, %rdi
++	PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
++	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
++	PTREGSCALL stub_iopl, sys_iopl, %rsi
++
++ENTRY(ptregscall_common)
++	popq %r11
++	CFI_ADJUST_CFA_OFFSET -8
++	CFI_REGISTER rip, r11
++	SAVE_REST
++	movq %r11, %r15
++	CFI_REGISTER rip, r15
++	FIXUP_TOP_OF_STACK %r11
++	call *%rax
++	RESTORE_TOP_OF_STACK %r11
++	movq %r15, %r11
++	CFI_REGISTER rip, r11
++	RESTORE_REST
++	pushq %r11
++	CFI_ADJUST_CFA_OFFSET 8
++	CFI_REL_OFFSET rip, 0
++	ret
++	CFI_ENDPROC
++END(ptregscall_common)
++	
++ENTRY(stub_execve)
++	CFI_STARTPROC
++	popq %r11
++	CFI_ADJUST_CFA_OFFSET -8
++	CFI_REGISTER rip, r11
++	SAVE_REST
++	FIXUP_TOP_OF_STACK %r11
++	call sys_execve
++	RESTORE_TOP_OF_STACK %r11
++	movq %rax,RAX(%rsp)
++	RESTORE_REST
++	jmp int_ret_from_sys_call
++	CFI_ENDPROC
++END(stub_execve)
++	
++/*
++ * sigreturn is special because it needs to restore all registers on return.
++ * This cannot be done with SYSRET, so use the IRET return path instead.
++ */                
++ENTRY(stub_rt_sigreturn)
++	CFI_STARTPROC
++	addq $8, %rsp
++	CFI_ADJUST_CFA_OFFSET	-8
++	SAVE_REST
++	movq %rsp,%rdi
++	FIXUP_TOP_OF_STACK %r11
++	call sys_rt_sigreturn
++	movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
++	RESTORE_REST
++	jmp int_ret_from_sys_call
++	CFI_ENDPROC
++END(stub_rt_sigreturn)
++
++/* initial frame state for interrupts (and exceptions without error code) */
++#define INTR_FRAME _frame (RIP-0x10); \
++	CFI_REL_OFFSET rcx,0; \
++	CFI_REL_OFFSET r11,8
++
++/* initial frame state for exceptions with error code (and interrupts with
++   vector already pushed) */
++#define XCPT_FRAME _frame (RIP-0x18); \
++	CFI_REL_OFFSET rcx,0; \
++	CFI_REL_OFFSET r11,8
++
++/* 
++ * Interrupt exit.
++ *
++ */ 
++
++retint_check:
++	CFI_DEFAULT_STACK adj=1
++	movl threadinfo_flags(%rcx),%edx
++	andl %edi,%edx
++	CFI_REMEMBER_STATE
++	jnz  retint_careful
++retint_restore_args:
++	movl EFLAGS-REST_SKIP(%rsp), %eax
++	shr $9, %eax			# EAX[0] == IRET_EFLAGS.IF
++	XEN_GET_VCPU_INFO(%rsi)
++	andb evtchn_upcall_mask(%rsi),%al
++	andb $1,%al			# EAX[0] == IRET_EFLAGS.IF & event_mask
++	jnz restore_all_enable_events	#        != 0 => enable event delivery
++	XEN_PUT_VCPU_INFO(%rsi)
++		
++	RESTORE_ARGS 0,8,0
++	HYPERVISOR_IRET 0
++	
++	/* edi: workmask, edx: work */
++retint_careful:
++	CFI_RESTORE_STATE
++	bt    $TIF_NEED_RESCHED,%edx
++	jnc   retint_signal
++	TRACE_IRQS_ON
++	XEN_UNBLOCK_EVENTS(%rsi)
++/*	sti */        
++	pushq %rdi
++	CFI_ADJUST_CFA_OFFSET	8
++	call  schedule
++	popq %rdi		
++	CFI_ADJUST_CFA_OFFSET	-8
++	GET_THREAD_INFO(%rcx)
++	XEN_BLOCK_EVENTS(%rsi)		
++/*	cli */
++	TRACE_IRQS_OFF
++	jmp retint_check
++	
++retint_signal:
++	testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
++	jz    retint_restore_args
++	TRACE_IRQS_ON
++        XEN_UNBLOCK_EVENTS(%rsi)
++	SAVE_REST
++	movq $-1,ORIG_RAX(%rsp) 			
++	xorl %esi,%esi		# oldset
++	movq %rsp,%rdi		# &pt_regs
++	call do_notify_resume
++	RESTORE_REST
++        XEN_BLOCK_EVENTS(%rsi)		
++	TRACE_IRQS_OFF
++	movl $_TIF_NEED_RESCHED,%edi
++	GET_THREAD_INFO(%rcx)
++	jmp retint_check
++
++#ifdef CONFIG_PREEMPT
++	/* Returning to kernel space. Check if we need preemption */
++	/* rcx:	 threadinfo. interrupts off. */
++	.p2align
++retint_kernel:	
++	cmpl $0,threadinfo_preempt_count(%rcx)
++	jnz  retint_restore_args
++	bt  $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
++	jnc  retint_restore_args
++	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
++	jnc  retint_restore_args
++	call preempt_schedule_irq
++	jmp retint_kernel       /* check again */
++#endif	
++
++	CFI_ENDPROC
++END(retint_check)
++	
++#ifndef CONFIG_XEN
++/*
++ * APIC interrupts.
++ */		
++	.macro apicinterrupt num,func
++	INTR_FRAME
++	pushq $~(\num)
++	CFI_ADJUST_CFA_OFFSET 8
++	interrupt \func
++	jmp error_entry
++	CFI_ENDPROC
++	.endm
++
++ENTRY(thermal_interrupt)
++	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
++END(thermal_interrupt)
++
++ENTRY(threshold_interrupt)
++	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
++END(threshold_interrupt)
++
++#ifdef CONFIG_SMP	
++ENTRY(reschedule_interrupt)
++	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
++END(reschedule_interrupt)
++
++	.macro INVALIDATE_ENTRY num
++ENTRY(invalidate_interrupt\num)
++	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt	
++END(invalidate_interrupt\num)
++	.endm
++
++	INVALIDATE_ENTRY 0
++	INVALIDATE_ENTRY 1
++	INVALIDATE_ENTRY 2
++	INVALIDATE_ENTRY 3
++	INVALIDATE_ENTRY 4
++	INVALIDATE_ENTRY 5
++	INVALIDATE_ENTRY 6
++	INVALIDATE_ENTRY 7
++
++ENTRY(call_function_interrupt)
++	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
++END(call_function_interrupt)
++#endif
++
++#ifdef CONFIG_X86_LOCAL_APIC	
++ENTRY(apic_timer_interrupt)
++	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
++END(apic_timer_interrupt)
++
++ENTRY(error_interrupt)
++	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
++END(error_interrupt)
++
++ENTRY(spurious_interrupt)
++	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
++END(spurious_interrupt)
++#endif
++#endif /* !CONFIG_XEN */
++				
++/*
++ * Exception entry points.
++ */ 		
++	.macro zeroentry sym
++	INTR_FRAME
++        movq (%rsp),%rcx
++	CFI_RESTORE rcx
++        movq 8(%rsp),%r11
++	CFI_RESTORE r11
++        addq $0x10,%rsp /* skip rcx and r11 */
++	CFI_ADJUST_CFA_OFFSET -0x10
++	pushq $0	/* push error code/oldrax */ 
++	CFI_ADJUST_CFA_OFFSET 8
++	pushq %rax	/* push real oldrax to the rdi slot */ 
++	CFI_ADJUST_CFA_OFFSET 8
++	CFI_REL_OFFSET rax,0
++	leaq  \sym(%rip),%rax
++	jmp error_entry
++	CFI_ENDPROC
++	.endm	
++
++	.macro errorentry sym
++	XCPT_FRAME
++        movq (%rsp),%rcx
++	CFI_RESTORE rcx
++        movq 8(%rsp),%r11
++	CFI_RESTORE r11
++        addq $0x10,%rsp /* rsp points to the error code */
++	CFI_ADJUST_CFA_OFFSET -0x10
++	pushq %rax
++	CFI_ADJUST_CFA_OFFSET 8
++	CFI_REL_OFFSET rax,0
++	leaq  \sym(%rip),%rax
++	jmp error_entry
++	CFI_ENDPROC
++	.endm
++
++#if 0 /* not XEN */
++	/* error code is on the stack already */
++	/* handle NMI like exceptions that can happen everywhere */
++	.macro paranoidentry sym, ist=0, irqtrace=1
++        movq (%rsp),%rcx
++        movq 8(%rsp),%r11
++        addq $0x10,%rsp /* skip rcx and r11 */        
++	SAVE_ALL
++	cld
++#if 0 /* not XEN */
++	movl $1,%ebx
++	movl  $MSR_GS_BASE,%ecx
++	rdmsr
++	testl %edx,%edx
++	js    1f
++	swapgs
++	xorl  %ebx,%ebx
++1:
++#endif
++	.if \ist
++	movq	%gs:pda_data_offset, %rbp
++	.endif
++	movq %rsp,%rdi
++	movq ORIG_RAX(%rsp),%rsi
++	movq $-1,ORIG_RAX(%rsp)
++	.if \ist
++	subq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
++	.endif
++	call \sym
++	.if \ist
++	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
++	.endif
++/*	cli */
++	XEN_BLOCK_EVENTS(%rsi)		
++	.if \irqtrace
++	TRACE_IRQS_OFF
++	.endif
++	.endm
++
++	/*
++ 	 * "Paranoid" exit path from exception stack.
++  	 * Paranoid because this is used by NMIs and cannot take
++	 * any kernel state for granted.
++	 * We don't do kernel preemption checks here, because only
++	 * NMI should be common and it does not enable IRQs and
++	 * cannot get reschedule ticks.
++	 *
++	 * "trace" is 0 for the NMI handler only, because irq-tracing
++	 * is fundamentally NMI-unsafe. (we cannot change the soft and
++	 * hard flags at once, atomically)
++	 */
++	.macro paranoidexit trace=1
++	/* ebx:	no swapgs flag */
++paranoid_exit\trace:
++	testl %ebx,%ebx				/* swapgs needed? */
++	jnz paranoid_restore\trace
++	testl $3,CS(%rsp)
++	jnz   paranoid_userspace\trace
++paranoid_swapgs\trace:
++	TRACE_IRQS_IRETQ 0
++	swapgs
++paranoid_restore\trace:
++	RESTORE_ALL 8
++	iretq
++paranoid_userspace\trace:
++	GET_THREAD_INFO(%rcx)
++	movl threadinfo_flags(%rcx),%ebx
++	andl $_TIF_WORK_MASK,%ebx
++	jz paranoid_swapgs\trace
++	movq %rsp,%rdi			/* &pt_regs */
++	call sync_regs
++	movq %rax,%rsp			/* switch stack for scheduling */
++	testl $_TIF_NEED_RESCHED,%ebx
++	jnz paranoid_schedule\trace
++	movl %ebx,%edx			/* arg3: thread flags */
++	.if \trace
++	TRACE_IRQS_ON
++	.endif
++	sti
++	xorl %esi,%esi 			/* arg2: oldset */
++	movq %rsp,%rdi 			/* arg1: &pt_regs */
++	call do_notify_resume
++	cli
++	.if \trace
++	TRACE_IRQS_OFF
++	.endif
++	jmp paranoid_userspace\trace
++paranoid_schedule\trace:
++	.if \trace
++	TRACE_IRQS_ON
++	.endif
++	sti
++	call schedule
++	cli
++	.if \trace
++	TRACE_IRQS_OFF
++	.endif
++	jmp paranoid_userspace\trace
++	CFI_ENDPROC
++	.endm
++#endif
++
++/*
++ * Exception entry point. This expects an error code/orig_rax on the stack
++ * and the exception handler in %rax.	
++ */ 		  				
++ENTRY(error_entry)
++	_frame RDI
++	CFI_REL_OFFSET rax,0
++	/* rdi slot contains rax, oldrax contains error code */
++	cld	
++	subq  $14*8,%rsp
++	CFI_ADJUST_CFA_OFFSET	(14*8)
++	movq %rsi,13*8(%rsp)
++	CFI_REL_OFFSET	rsi,RSI
++	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
++	CFI_REGISTER	rax,rsi
++	movq %rdx,12*8(%rsp)
++	CFI_REL_OFFSET	rdx,RDX
++	movq %rcx,11*8(%rsp)
++	CFI_REL_OFFSET	rcx,RCX
++	movq %rsi,10*8(%rsp)	/* store rax */ 
++	CFI_REL_OFFSET	rax,RAX
++	movq %r8, 9*8(%rsp)
++	CFI_REL_OFFSET	r8,R8
++	movq %r9, 8*8(%rsp)
++	CFI_REL_OFFSET	r9,R9
++	movq %r10,7*8(%rsp)
++	CFI_REL_OFFSET	r10,R10
++	movq %r11,6*8(%rsp)
++	CFI_REL_OFFSET	r11,R11
++	movq %rbx,5*8(%rsp) 
++	CFI_REL_OFFSET	rbx,RBX
++	movq %rbp,4*8(%rsp) 
++	CFI_REL_OFFSET	rbp,RBP
++	movq %r12,3*8(%rsp) 
++	CFI_REL_OFFSET	r12,R12
++	movq %r13,2*8(%rsp) 
++	CFI_REL_OFFSET	r13,R13
++	movq %r14,1*8(%rsp) 
++	CFI_REL_OFFSET	r14,R14
++	movq %r15,(%rsp) 
++	CFI_REL_OFFSET	r15,R15
++#if 0        
++	cmpl $__KERNEL_CS,CS(%rsp)
++	CFI_REMEMBER_STATE
++	je  error_kernelspace
++#endif        
++error_call_handler:
++	movq %rdi, RDI(%rsp)            
++	CFI_REL_OFFSET	rdi,RDI
++	movq %rsp,%rdi
++	movq ORIG_RAX(%rsp),%rsi	# get error code 
++	movq $-1,ORIG_RAX(%rsp)
++	call *%rax
++error_exit:		
++	RESTORE_REST
++/*	cli */
++	XEN_BLOCK_EVENTS(%rsi)		
++	TRACE_IRQS_OFF
++	GET_THREAD_INFO(%rcx)	
++	testb $3,CS-ARGOFFSET(%rsp)
++	jz retint_kernel
++	movl  threadinfo_flags(%rcx),%edx
++	movl  $_TIF_WORK_MASK,%edi	
++	andl  %edi,%edx
++	jnz   retint_careful
++	/*
++	 * The iret might restore flags:
++	 */
++	TRACE_IRQS_IRETQ
++	jmp   retint_restore_args
++
++#if 0
++         /*
++         * We need to re-write the logic here because we don't do iretq
++         * to return to user mode. It's still possible that we get a trap/fault
++         * in the kernel (when accessing buffers pointed to by system calls, 
++         * for example).
++         *
++         */           
++	CFI_RESTORE_STATE
++error_kernelspace:
++	incl %ebx
++       /* There are two places in the kernel that can potentially fault with
++          usergs. Handle them here. The exception handlers after
++	   iret run with kernel gs again, so don't set the user space flag.
++	   B stepping K8s sometimes report a truncated RIP for IRET
++	   exceptions returning to compat mode. Check for these here too. */
++	leaq iret_label(%rip),%rbp
++	cmpq %rbp,RIP(%rsp) 
++	je   error_swapgs
++	movl %ebp,%ebp	/* zero extend */
++	cmpq %rbp,RIP(%rsp) 
++	je   error_swapgs
++	cmpq $gs_change,RIP(%rsp)
++        je   error_swapgs
++	jmp  error_sti
++#endif
++	CFI_ENDPROC
++END(error_entry)
++	
++ENTRY(hypervisor_callback)
++	zeroentry do_hypervisor_callback
++END(hypervisor_callback)
++        
++/*
++ * Copied from arch/xen/i386/kernel/entry.S
++ */               
++# A note on the "critical region" in our callback handler.
++# We want to avoid stacking callback handlers due to events occurring
++# during handling of the last event. To do this, we keep events disabled
++# until we've done all processing. HOWEVER, we must enable events before
++# popping the stack frame (can't be done atomically) and so it would still
++# be possible to get enough handler activations to overflow the stack.
++# Although unlikely, bugs of that kind are hard to track down, so we'd
++# like to avoid the possibility.
++# So, on entry to the handler we detect whether we interrupted an
++# existing activation in its critical region -- if so, we pop the current
++# activation and restart the handler using the previous one.
++ENTRY(do_hypervisor_callback)   # do_hypervisor_callback(struct pt_regs *)
++	CFI_STARTPROC
++# Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will
++# see the correct pointer to the pt_regs.
++	movq %rdi, %rsp            # we don't return, adjust the stack frame
++	CFI_ENDPROC
++	CFI_DEFAULT_STACK
++11:	incl %gs:pda_irqcount
++	movq %rsp,%rbp
++	CFI_DEF_CFA_REGISTER rbp
++	cmovzq %gs:pda_irqstackptr,%rsp
++	pushq %rbp			# backlink for old unwinder
++	call evtchn_do_upcall
++	popq %rsp
++	CFI_DEF_CFA_REGISTER rsp
++	decl %gs:pda_irqcount
++	jmp  error_exit
++	CFI_ENDPROC
++END(do_hypervisor_callback)
++
++#ifdef CONFIG_X86_LOCAL_APIC
++KPROBE_ENTRY(nmi)
++	zeroentry do_nmi_callback
++ENTRY(do_nmi_callback)
++	CFI_STARTPROC
++        addq $8, %rsp
++	CFI_ENDPROC
++	CFI_DEFAULT_STACK
++        call do_nmi
++        orl  $NMI_MASK,EFLAGS(%rsp)
++        RESTORE_REST
++        XEN_BLOCK_EVENTS(%rsi)
++	TRACE_IRQS_OFF
++        GET_THREAD_INFO(%rcx)
++        jmp  retint_restore_args
++	CFI_ENDPROC
++	.previous .text
++END(nmi)
++#endif
++
++        ALIGN
++restore_all_enable_events:  
++	CFI_DEFAULT_STACK adj=1
++	TRACE_IRQS_ON
++	XEN_UNBLOCK_EVENTS(%rsi)        # %rsi is already set up...
++
++scrit:	/**** START OF CRITICAL REGION ****/
++	XEN_TEST_PENDING(%rsi)
++	CFI_REMEMBER_STATE
++	jnz  14f			# process more events if necessary...
++	XEN_PUT_VCPU_INFO(%rsi)
++        RESTORE_ARGS 0,8,0
++        HYPERVISOR_IRET 0
++        
++	CFI_RESTORE_STATE
++14:	XEN_LOCKED_BLOCK_EVENTS(%rsi)
++	XEN_PUT_VCPU_INFO(%rsi)
++	SAVE_REST
++        movq %rsp,%rdi                  # set the argument again
++	jmp  11b
++	CFI_ENDPROC
++ecrit:  /**** END OF CRITICAL REGION ****/
++# At this point, unlike on x86-32, we don't do the fixup, to keep the
++# code simple: the stack frame is more complex on x86-64.
++# When the kernel is interrupted in the critical section, it simply
++# does an IRET; everything is restored at that point, i.e. execution
++# resumes at the interrupted instruction with the same context.
++
++# Hypervisor uses this for application faults while it executes.
++# We get here for two reasons:
++#  1. Fault while reloading DS, ES, FS or GS
++#  2. Fault while executing IRET
++# Category 1 we do not need to fix up as Xen has already reloaded all segment
++# registers that could be reloaded and zeroed the others.
++# Category 2 we fix up by killing the current process. We cannot use the
++# normal Linux return path in this case because if we use the IRET hypercall
++# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
++# We distinguish between categories by comparing each saved segment register
++# with its current contents: any discrepancy means we are in category 1.
++ENTRY(failsafe_callback)
++	_frame (RIP-0x30)
++	CFI_REL_OFFSET rcx, 0
++	CFI_REL_OFFSET r11, 8
++	movw %ds,%cx
++	cmpw %cx,0x10(%rsp)
++	CFI_REMEMBER_STATE
++	jne 1f
++	movw %es,%cx
++	cmpw %cx,0x18(%rsp)
++	jne 1f
++	movw %fs,%cx
++	cmpw %cx,0x20(%rsp)
++	jne 1f
++	movw %gs,%cx
++	cmpw %cx,0x28(%rsp)
++	jne 1f
++	/* All segments match their saved values => Category 2 (Bad IRET). */
++	movq (%rsp),%rcx
++	CFI_RESTORE rcx
++	movq 8(%rsp),%r11
++	CFI_RESTORE r11
++	addq $0x30,%rsp
++	CFI_ADJUST_CFA_OFFSET -0x30
++	movq $11,%rdi	/* SIGSEGV */
++	jmp do_exit			
++	CFI_RESTORE_STATE
++1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
++	movq (%rsp),%rcx
++	CFI_RESTORE rcx
++	movq 8(%rsp),%r11
++	CFI_RESTORE r11
++	addq $0x30,%rsp
++	CFI_ADJUST_CFA_OFFSET -0x30
++	pushq $0
++	CFI_ADJUST_CFA_OFFSET 8
++	SAVE_ALL
++	jmp error_exit
++	CFI_ENDPROC
++#if 0	      
++        .section __ex_table,"a"
++        .align 8
++        .quad gs_change,bad_gs
++        .previous
++        .section .fixup,"ax"
++	/* running with kernelgs */
++bad_gs: 
++/*	swapgs		*/	/* switch back to user gs */
++	xorl %eax,%eax
++        movl %eax,%gs
++        jmp  2b
++        .previous       
++#endif
++	
++/*
++ * Create a kernel thread.
++ *
++ * C extern interface:
++ *	extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
++ *
++ * asm input arguments:
++ *	rdi: fn, rsi: arg, rdx: flags
++ */
++ENTRY(kernel_thread)
++	CFI_STARTPROC
++	FAKE_STACK_FRAME $child_rip
++	SAVE_ALL
++
++	# rdi: flags, rsi: usp, rdx: will be &pt_regs
++	movq %rdx,%rdi
++	orq  kernel_thread_flags(%rip),%rdi
++	movq $-1, %rsi
++	movq %rsp, %rdx
++
++	xorl %r8d,%r8d
++	xorl %r9d,%r9d
++	
++	# clone now
++	call do_fork
++	movq %rax,RAX(%rsp)
++	xorl %edi,%edi
++
++	/*
++	 * It isn't worth checking for a reschedule here,
++	 * so internally to the x86_64 port you can rely on kernel_thread()
++	 * not rescheduling the child before returning; this avoids the need
++	 * for hacks, for example to fork off the per-CPU idle tasks.
++         * [Hopefully no generic code relies on the reschedule -AK]	
++	 */
++	RESTORE_ALL
++	UNFAKE_STACK_FRAME
++	ret
++	CFI_ENDPROC
++ENDPROC(kernel_thread)
++	
++child_rip:
++	pushq $0		# fake return address
++	CFI_STARTPROC
++	/*
++	 * Here we are in the child and the registers are set as they were
++	 * at kernel_thread() invocation in the parent.
++	 */
++	movq %rdi, %rax
++	movq %rsi, %rdi
++	call *%rax
++	# exit
++	xorl %edi, %edi
++	call do_exit
++	CFI_ENDPROC
++ENDPROC(child_rip)
++
++/*
++ * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
++ *
++ * C extern interface:
++ *	 extern long execve(char *name, char **argv, char **envp)
++ *
++ * asm input arguments:
++ *	rdi: name, rsi: argv, rdx: envp
++ *
++ * We want to fall back into:
++ *	extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs)
++ *
++ * do_sys_execve asm fallback arguments:
++ *	rdi: name, rsi: argv, rdx: envp, fake frame on the stack
++ */
++ENTRY(execve)
++	CFI_STARTPROC
++	FAKE_STACK_FRAME $0
++	SAVE_ALL	
++	call sys_execve
++	movq %rax, RAX(%rsp)	
++	RESTORE_REST
++	testq %rax,%rax
++	jne 1f
++        jmp int_ret_from_sys_call
++1:      RESTORE_ARGS
++	UNFAKE_STACK_FRAME
++	ret
++	CFI_ENDPROC
++ENDPROC(execve)
++
++KPROBE_ENTRY(page_fault)
++	errorentry do_page_fault
++END(page_fault)
++	.previous .text
++
++ENTRY(coprocessor_error)
++	zeroentry do_coprocessor_error
++END(coprocessor_error)
++
++ENTRY(simd_coprocessor_error)
++	zeroentry do_simd_coprocessor_error	
++END(simd_coprocessor_error)
++
++ENTRY(device_not_available)
++	zeroentry math_state_restore
++END(device_not_available)
++
++	/* runs on exception stack */
++KPROBE_ENTRY(debug)
++/* 	INTR_FRAME
++	pushq $0
++	CFI_ADJUST_CFA_OFFSET 8	*/
++	zeroentry do_debug
++/*	paranoidexit
++	CFI_ENDPROC */
++END(debug)
++	.previous .text
++
++#if 0
++	/* runs on exception stack */	
++KPROBE_ENTRY(nmi)
++	INTR_FRAME
++	pushq $-1
++	CFI_ADJUST_CFA_OFFSET 8
++	paranoidentry do_nmi, 0, 0
++#ifdef CONFIG_TRACE_IRQFLAGS
++	paranoidexit 0
++#else
++	jmp paranoid_exit1
++ 	CFI_ENDPROC
++#endif
++END(nmi)
++	.previous .text
++#endif        
++
++KPROBE_ENTRY(int3)
++/* 	INTR_FRAME
++ 	pushq $0
++ 	CFI_ADJUST_CFA_OFFSET 8 */
++ 	zeroentry do_int3
++/* 	jmp paranoid_exit1
++ 	CFI_ENDPROC */
++END(int3)
++	.previous .text
++
++ENTRY(overflow)
++	zeroentry do_overflow
++END(overflow)
++
++ENTRY(bounds)
++	zeroentry do_bounds
++END(bounds)
++
++ENTRY(invalid_op)
++	zeroentry do_invalid_op	
++END(invalid_op)
++
++ENTRY(coprocessor_segment_overrun)
++	zeroentry do_coprocessor_segment_overrun
++END(coprocessor_segment_overrun)
++
++ENTRY(reserved)
++	zeroentry do_reserved
++END(reserved)
++
++#if 0
++	/* runs on exception stack */
++ENTRY(double_fault)
++	XCPT_FRAME
++	paranoidentry do_double_fault
++	jmp paranoid_exit1
++	CFI_ENDPROC
++END(double_fault)
++#endif
++
++ENTRY(invalid_TSS)
++	errorentry do_invalid_TSS
++END(invalid_TSS)
++
++ENTRY(segment_not_present)
++	errorentry do_segment_not_present
++END(segment_not_present)
++
++	/* runs on exception stack */
++ENTRY(stack_segment)
++/*	XCPT_FRAME
++	paranoidentry do_stack_segment */
++	errorentry do_stack_segment
++/*	jmp paranoid_exit1
++	CFI_ENDPROC */
++END(stack_segment)
++
++KPROBE_ENTRY(general_protection)
++	errorentry do_general_protection
++END(general_protection)
++	.previous .text
++
++ENTRY(alignment_check)
++	errorentry do_alignment_check
++END(alignment_check)
++
++ENTRY(divide_error)
++	zeroentry do_divide_error
++END(divide_error)
++
++ENTRY(spurious_interrupt_bug)
++	zeroentry do_spurious_interrupt_bug
++END(spurious_interrupt_bug)
++
++#ifdef CONFIG_X86_MCE
++	/* runs on exception stack */
++ENTRY(machine_check)
++	INTR_FRAME
++	pushq $0
++	CFI_ADJUST_CFA_OFFSET 8	
++	paranoidentry do_machine_check
++	jmp paranoid_exit1
++	CFI_ENDPROC
++END(machine_check)
++#endif
++
++/* Call softirq on interrupt stack. Interrupts are off. */
++ENTRY(call_softirq)
++	CFI_STARTPROC
++	push %rbp
++	CFI_ADJUST_CFA_OFFSET	8
++	CFI_REL_OFFSET rbp,0
++	mov  %rsp,%rbp
++	CFI_DEF_CFA_REGISTER rbp
++	incl %gs:pda_irqcount
++	cmove %gs:pda_irqstackptr,%rsp
++	push  %rbp			# backlink for old unwinder
++	call __do_softirq
++	leaveq
++	CFI_DEF_CFA_REGISTER	rsp
++	CFI_ADJUST_CFA_OFFSET   -8
++	decl %gs:pda_irqcount
++	ret
++	CFI_ENDPROC
++ENDPROC(call_softirq)
++
++#ifdef CONFIG_STACK_UNWIND
++ENTRY(arch_unwind_init_running)
++	CFI_STARTPROC
++	movq	%r15, R15(%rdi)
++	movq	%r14, R14(%rdi)
++	xchgq	%rsi, %rdx
++	movq	%r13, R13(%rdi)
++	movq	%r12, R12(%rdi)
++	xorl	%eax, %eax
++	movq	%rbp, RBP(%rdi)
++	movq	%rbx, RBX(%rdi)
++	movq	(%rsp), %rcx
++	movq	%rax, R11(%rdi)
++	movq	%rax, R10(%rdi)
++	movq	%rax, R9(%rdi)
++	movq	%rax, R8(%rdi)
++	movq	%rax, RAX(%rdi)
++	movq	%rax, RCX(%rdi)
++	movq	%rax, RDX(%rdi)
++	movq	%rax, RSI(%rdi)
++	movq	%rax, RDI(%rdi)
++	movq	%rax, ORIG_RAX(%rdi)
++	movq	%rcx, RIP(%rdi)
++	leaq	8(%rsp), %rcx
++	movq	$__KERNEL_CS, CS(%rdi)
++	movq	%rax, EFLAGS(%rdi)
++	movq	%rcx, RSP(%rdi)
++	movq	$__KERNEL_DS, SS(%rdi)
++	jmpq	*%rdx
++	CFI_ENDPROC
++ENDPROC(arch_unwind_init_running)
++#endif
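
The kernel_thread()/child_rip pair above builds a fake pt_regs frame, forwards to do_fork(), and has the child call fn(arg) and then do_exit() with fn's return value. Below is a minimal userspace sketch of the same shape, assuming plain fork() in place of do_fork(); the names (spawn_demo, worker) are hypothetical, not kernel API:

/*
 * Userspace analogue of kernel_thread()/child_rip: the child calls
 * fn(arg) and exits with its return value, as child_rip does via
 * do_exit(). Purely illustrative.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static int worker(void *arg)
{
	printf("child running, arg=%s\n", (const char *)arg);
	return 42;
}

static pid_t spawn_demo(int (*fn)(void *), void *arg)
{
	pid_t pid = fork();		/* stands in for do_fork() */
	if (pid == 0)
		_exit(fn(arg));		/* child_rip: call *%rax, then exit */
	return pid;			/* parent gets do_fork()'s result */
}

int main(void)
{
	int status;
	pid_t pid = spawn_demo(worker, "hello");

	waitpid(pid, &status, 0);
	printf("child exited with %d\n", WEXITSTATUS(status));
	return 0;
}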
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/genapic-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/kernel/genapic-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,143 @@
++/*
++ * Copyright 2004 James Cleverdon, IBM.
++ * Subject to the GNU Public License, v.2
++ *
++ * Generic APIC sub-arch probe layer.
++ *
++ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
++ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
++ * James Cleverdon.
++ */
++#include <linux/threads.h>
++#include <linux/cpumask.h>
++#include <linux/string.h>
++#include <linux/kernel.h>
++#include <linux/ctype.h>
++#include <linux/init.h>
++#include <linux/module.h>
++
++#include <asm/smp.h>
++#include <asm/ipi.h>
++
++#if defined(CONFIG_ACPI)
++#include <acpi/acpi_bus.h>
++#endif
++
++/* which logical CPU number maps to which CPU (physical APIC ID) */
++u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
++EXPORT_SYMBOL(x86_cpu_to_apicid);
++u8 x86_cpu_to_log_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++
++extern struct genapic apic_cluster;
++extern struct genapic apic_flat;
++extern struct genapic apic_physflat;
++
++#ifndef CONFIG_XEN
++struct genapic *genapic = &apic_flat;
++#else
++extern struct genapic apic_xen;
++struct genapic *genapic = &apic_xen;
++#endif
++
++
++/*
++ * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
++ */
++void __init clustered_apic_check(void)
++{
++#ifndef CONFIG_XEN
++	long i;
++	u8 clusters, max_cluster;
++	u8 id;
++	u8 cluster_cnt[NUM_APIC_CLUSTERS];
++	int max_apic = 0;
++
++#if defined(CONFIG_ACPI)
++	/*
++	 * Some x86_64 machines use physical APIC mode regardless of how many
++	 * procs/clusters are present (x86_64 ES7000 is an example).
++	 */
++	if (acpi_fadt.revision > FADT2_REVISION_ID)
++		if (acpi_fadt.force_apic_physical_destination_mode) {
++			genapic = &apic_cluster;
++			goto print;
++		}
++#endif
++
++	memset(cluster_cnt, 0, sizeof(cluster_cnt));
++	for (i = 0; i < NR_CPUS; i++) {
++		id = bios_cpu_apicid[i];
++		if (id == BAD_APICID)
++			continue;
++		if (id > max_apic)
++			max_apic = id;
++		cluster_cnt[APIC_CLUSTERID(id)]++;
++	}
++
++	/* Don't use clustered mode on AMD platforms. */
++ 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
++		genapic = &apic_physflat;
++#ifndef CONFIG_HOTPLUG_CPU
++		/* In the CPU hotplug case we cannot use broadcast mode
++		   because that opens a race when a CPU is removed.
++		   Stay at physflat mode in this case.
++		   It is bad to do this unconditionally though. Once
++		   we have ACPI platform support for CPU hotplug
++		   we should detect hotplug capability from ACPI tables and
++		   only do this when really needed. -AK */
++		if (max_apic <= 8)
++			genapic = &apic_flat;
++#endif
++ 		goto print;
++ 	}
++
++	clusters = 0;
++	max_cluster = 0;
++
++	for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
++		if (cluster_cnt[i] > 0) {
++			++clusters;
++			if (cluster_cnt[i] > max_cluster)
++				max_cluster = cluster_cnt[i];
++		}
++	}
++
++	/*
++	 * If we have clusters <= 1 and CPUs <= 8 in cluster 0, then flat mode,
++	 * else if max_cluster <= 4 and cluster_cnt[15] == 0, clustered logical
++	 * else physical mode.
++	 * (We don't use lowest priority delivery + HW APIC IRQ steering, so
++	 * can ignore the clustered logical case and go straight to physical.)
++	 */
++	if (clusters <= 1 && max_cluster <= 8 && cluster_cnt[0] == max_cluster) {
++#ifdef CONFIG_HOTPLUG_CPU
++		/* Don't use APIC shortcuts in CPU hotplug to avoid races */
++		genapic = &apic_physflat;
++#else
++		genapic = &apic_flat;
++#endif
++	} else
++		genapic = &apic_cluster;
++
++print:
++#else
++	/* hardcode to xen apic functions */
++	genapic = &apic_xen;
++#endif
++	printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name);
++}
++
++/* Same for both flat and clustered. */
++
++#ifdef CONFIG_XEN
++extern void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest);
++#endif
++
++void send_IPI_self(int vector)
++{
++#ifndef CONFIG_XEN
++	__send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
++#else
++	xen_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
++#endif
++}
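
clustered_apic_check() above chooses between flat and clustered routing by counting how many APIC clusters are occupied. A standalone sketch of that counting heuristic, assuming APIC_CLUSTERID() is the upper nibble of the 8-bit APIC ID (the real kernel macro may differ):

/*
 * Sketch of the cluster-counting heuristic in clustered_apic_check().
 * BAD_APICID marks absent CPUs; the 4-bit cluster field is assumed.
 */
#include <stdio.h>

#define NR_CPUS			8
#define NUM_APIC_CLUSTERS	16
#define BAD_APICID		0xFF
#define APIC_CLUSTERID(id)	((id) >> 4)	/* assumed cluster field */

static const char *pick_mode(const unsigned char *apicid)
{
	unsigned cluster_cnt[NUM_APIC_CLUSTERS] = { 0 };
	unsigned clusters = 0, max_cluster = 0;
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (apicid[i] == BAD_APICID)
			continue;
		cluster_cnt[APIC_CLUSTERID(apicid[i])]++;
	}
	for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
		if (cluster_cnt[i] > 0) {
			clusters++;
			if (cluster_cnt[i] > max_cluster)
				max_cluster = cluster_cnt[i];
		}
	}
	/* same condition as above: everything lives in cluster 0 */
	if (clusters <= 1 && max_cluster <= 8 && cluster_cnt[0] == max_cluster)
		return "flat";
	return "cluster";
}

int main(void)
{
	unsigned char small_box[NR_CPUS] =
		{ 0, 1, 2, 3, 0xFF, 0xFF, 0xFF, 0xFF };
	unsigned char multi_cluster[NR_CPUS] =
		{ 0x00, 0x01, 0x10, 0x11, 0xFF, 0xFF, 0xFF, 0xFF };

	printf("small box     -> %s\n", pick_mode(small_box));
	printf("multi cluster -> %s\n", pick_mode(multi_cluster));
	return 0;
}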
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/genapic_xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/kernel/genapic_xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,161 @@
++/*
++ * Copyright 2004 James Cleverdon, IBM.
++ * Subject to the GNU Public License, v.2
++ *
++ * Xen APIC subarch code.  Maximum 8 CPUs, logical delivery.
++ *
++ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
++ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
++ * James Cleverdon.
++ *
++ * Hacked to pieces for Xen by Chris Wright.
++ */
++#include <linux/threads.h>
++#include <linux/cpumask.h>
++#include <linux/string.h>
++#include <linux/kernel.h>
++#include <linux/ctype.h>
++#include <linux/init.h>
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++#include <asm/smp.h>
++#include <asm/ipi.h>
++#else
++#include <asm/apic.h>
++#include <asm/apicdef.h>
++#include <asm/genapic.h>
++#endif
++#include <xen/evtchn.h>
++
++DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
++
++static inline void __send_IPI_one(unsigned int cpu, int vector)
++{
++	int irq = per_cpu(ipi_to_irq, cpu)[vector];
++	BUG_ON(irq < 0);
++	notify_remote_via_irq(irq);
++}
++
++void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
++{
++	int cpu;
++
++	switch (shortcut) {
++	case APIC_DEST_SELF:
++		__send_IPI_one(smp_processor_id(), vector);
++		break;
++	case APIC_DEST_ALLBUT:
++		for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++			if (cpu == smp_processor_id())
++				continue;
++			if (cpu_isset(cpu, cpu_online_map)) {
++				__send_IPI_one(cpu, vector);
++			}
++		}
++		break;
++	case APIC_DEST_ALLINC:
++		for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++			if (cpu_isset(cpu, cpu_online_map)) {
++				__send_IPI_one(cpu, vector);
++			}
++		}
++		break;
++	default:
++		printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
++		       vector);
++		break;
++	}
++}
++
++static cpumask_t xen_target_cpus(void)
++{
++	return cpu_online_map;
++}
++
++/*
++ * Set up the logical destination ID.
++ * Do nothing, not called now.
++ */
++static void xen_init_apic_ldr(void)
++{
++	Dprintk("%s\n", __FUNCTION__);
++	return;
++}
++
++static void xen_send_IPI_allbutself(int vector)
++{
++	/*
++	 * If there are no other CPUs in the system then we get an APIC
++	 * send error if we try to broadcast; thus we have to avoid
++	 * sending IPIs in this case.
++	 */
++	Dprintk("%s\n", __FUNCTION__);
++	if (num_online_cpus() > 1)
++		xen_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, APIC_DEST_LOGICAL);
++}
++
++static void xen_send_IPI_all(int vector)
++{
++	Dprintk("%s\n", __FUNCTION__);
++	xen_send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
++}
++
++static void xen_send_IPI_mask(cpumask_t cpumask, int vector)
++{
++	unsigned long mask = cpus_addr(cpumask)[0];
++	unsigned int cpu;
++	unsigned long flags;
++
++	Dprintk("%s\n", __FUNCTION__);
++	local_irq_save(flags);
++	WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
++
++	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++		if (cpu_isset(cpu, cpumask)) {
++			__send_IPI_one(cpu, vector);
++		}
++	}
++	local_irq_restore(flags);
++}
++
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++static int xen_apic_id_registered(void)
++{
++	/* better be set */
++	Dprintk("%s\n", __FUNCTION__);
++	return physid_isset(smp_processor_id(), phys_cpu_present_map);
++}
++#endif
++
++static unsigned int xen_cpu_mask_to_apicid(cpumask_t cpumask)
++{
++	Dprintk("%s\n", __FUNCTION__);
++	return cpus_addr(cpumask)[0] & APIC_ALL_CPUS;
++}
++
++static unsigned int phys_pkg_id(int index_msb)
++{
++	u32 ebx;
++
++	Dprintk("%s\n", __FUNCTION__);
++	ebx = cpuid_ebx(1);
++	return ((ebx >> 24) & 0xFF) >> index_msb;
++}
++
++struct genapic apic_xen =  {
++	.name = "xen",
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++	.int_delivery_mode = dest_LowestPrio,
++#endif
++	.int_dest_mode = (APIC_DEST_LOGICAL != 0),
++	.int_delivery_dest = APIC_DEST_LOGICAL | APIC_DM_LOWEST,
++	.target_cpus = xen_target_cpus,
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++	.apic_id_registered = xen_apic_id_registered,
++#endif
++	.init_apic_ldr = xen_init_apic_ldr,
++	.send_IPI_all = xen_send_IPI_all,
++	.send_IPI_allbutself = xen_send_IPI_allbutself,
++	.send_IPI_mask = xen_send_IPI_mask,
++	.cpu_mask_to_apicid = xen_cpu_mask_to_apicid,
++	.phys_pkg_id = phys_pkg_id,
++};
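
Under Xen the IPI "shortcuts" above become per-CPU event-channel notifications rather than APIC writes. Here is a sketch of that dispatch over an online-CPU bitmask, with the notification stubbed out; the DEST_* names and the mask representation are local stand-ins, not the kernel definitions:

/*
 * Sketch of xen_send_IPI_shortcut(): decompose a shortcut into one
 * notification per target CPU. send_ipi_one() stands in for
 * __send_IPI_one()/notify_remote_via_irq().
 */
#include <stdio.h>

enum shortcut { DEST_SELF, DEST_ALLBUT, DEST_ALLINC };

#define NR_CPUS 4

static unsigned online_mask = 0x7;	/* CPUs 0-2 online, CPU 3 offline */
static unsigned self = 0;		/* pretend we run on CPU 0 */

static void send_ipi_one(unsigned cpu, int vector)
{
	printf("notify cpu %u, vector %d\n", cpu, vector);
}

static void send_ipi_shortcut(enum shortcut sc, int vector)
{
	unsigned cpu;

	switch (sc) {
	case DEST_SELF:
		send_ipi_one(self, vector);
		break;
	case DEST_ALLBUT:
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (cpu != self && (online_mask & (1u << cpu)))
				send_ipi_one(cpu, vector);
		break;
	case DEST_ALLINC:
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (online_mask & (1u << cpu))
				send_ipi_one(cpu, vector);
		break;
	}
}

int main(void)
{
	send_ipi_shortcut(DEST_ALLBUT, 0xfb);	/* e.g. a reschedule vector */
	return 0;
}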
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/head-xen.S
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/kernel/head-xen.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,214 @@
++/*
++ *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
++ *
++ *  Copyright (C) 2000 Andrea Arcangeli <andrea at suse.de> SuSE
++ *  Copyright (C) 2000 Pavel Machek <pavel at suse.cz>
++ *  Copyright (C) 2000 Karsten Keil <kkeil at suse.de>
++ *  Copyright (C) 2001,2002 Andi Kleen <ak at suse.de>
++ *
++ *  $Id: head.S,v 1.49 2002/03/19 17:39:25 ak Exp $
++ *
++ *  Jun Nakajima <jun.nakajima at intel.com>
++ *    Modified for Xen                                
++ */
++
++
++#include <linux/linkage.h>
++#include <linux/threads.h>
++#include <linux/init.h>
++#include <linux/elfnote.h>
++#include <asm/desc.h>
++#include <asm/segment.h>
++#include <asm/page.h>
++#include <asm/msr.h>
++#include <asm/cache.h>
++#include <asm/dwarf2.h>
++#include <xen/interface/elfnote.h>
++
++	.section .bootstrap.text, "ax", @progbits
++	.code64
++	.globl startup_64
++startup_64:
++	movq $(init_thread_union+THREAD_SIZE-8),%rsp
++
++	/* rsi is a pointer to the startup info structure;
++	   pass it to C */
++	movq %rsi,%rdi
++	pushq $0		# fake return address
++	jmp x86_64_start_kernel
++
++#ifdef CONFIG_ACPI_SLEEP
++.org 0xf00
++	.globl pGDT32
++pGDT32:
++	.word	gdt_end-cpu_gdt_table-1
++	.long	cpu_gdt_table-__START_KERNEL_map
++#endif
++ENTRY(stext)
++ENTRY(_stext)
++
++	$page = 0
++#define NEXT_PAGE(name) \
++	$page = $page + 1; \
++	.org $page * 0x1000; \
++	phys_##name = $page * 0x1000 + __PHYSICAL_START; \
++ENTRY(name)
++
++NEXT_PAGE(init_level4_pgt)
++	/* This gets initialized in x86_64_start_kernel */
++	.fill	512,8,0
++NEXT_PAGE(init_level4_user_pgt)
++        /*
++         * We update two pgd entries to make kernel and user pgd consistent
++         * at pgd_populate(). It can be used for kernel modules. So we place 
++         * this page here for those cases to avoid memory corruption.
++         * We also use this page to establish the initial mapping for the
++         * vsyscall area.
++         */
++	.fill	512,8,0
++
++NEXT_PAGE(level3_kernel_pgt)
++	.fill	512,8,0
++
++        /*
++         * This is used for vsyscall area mapping as we have a different
++         * level4 page table for user.
++         */
++NEXT_PAGE(level3_user_pgt)
++        .fill	512,8,0
++
++NEXT_PAGE(level2_kernel_pgt)
++	.fill	512,8,0
++
++NEXT_PAGE(hypercall_page)
++	CFI_STARTPROC
++	.rept 0x1000 / 0x20
++	.skip 1 /* push %rcx */
++	CFI_ADJUST_CFA_OFFSET	8
++	CFI_REL_OFFSET	rcx,0
++	.skip 2 /* push %r11 */
++	CFI_ADJUST_CFA_OFFSET	8
++	CFI_REL_OFFSET	rcx,0
++	.skip 5 /* mov $#,%eax */
++	.skip 2 /* syscall */
++	.skip 2 /* pop %r11 */
++	CFI_ADJUST_CFA_OFFSET -8
++	CFI_RESTORE r11
++	.skip 1 /* pop %rcx */
++	CFI_ADJUST_CFA_OFFSET -8
++	CFI_RESTORE rcx
++	.align 0x20,0 /* ret */
++	.endr
++	CFI_ENDPROC
++
++#undef NEXT_PAGE
++
++	.data
++/* Just a dummy symbol to allow compilation; not used in the sleep path */
++#ifdef CONFIG_ACPI_SLEEP
++	.align PAGE_SIZE
++ENTRY(wakeup_level4_pgt)
++	.fill	512,8,0
++#endif
++
++	.data
++
++	.align 16
++	.globl cpu_gdt_descr
++cpu_gdt_descr:
++	.word	gdt_end-cpu_gdt_table-1
++gdt:
++	.quad	cpu_gdt_table
++#ifdef CONFIG_SMP
++	.rept	NR_CPUS-1
++	.word	0
++	.quad	0
++	.endr
++#endif
++
++/* We need valid kernel segments for data and code in long mode too;
++ * IRET will check the segment types.  kkeil 2000/10/28
++ * Also, sysret mandates a special GDT layout.
++ */
++		 		
++	.section .data.page_aligned, "aw"
++	.align PAGE_SIZE
++
++/* The TLS descriptors are currently at a different place compared to i386.
++   Hopefully nobody expects them at a fixed place (Wine?) */
++
++ENTRY(cpu_gdt_table)
++	.quad	0x0000000000000000	/* NULL descriptor */
++	.quad	0x0			/* unused */
++	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
++	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
++	.quad	0x00cffa000000ffff	/* __USER32_CS */
++	.quad	0x00cff2000000ffff	/* __USER_DS, __USER32_DS  */
++	.quad	0x00affa000000ffff	/* __USER_CS */
++	.quad	0x00cf9a000000ffff	/* __KERNEL32_CS */
++	.quad	0,0			/* TSS */
++	.quad	0,0			/* LDT */
++	.quad   0,0,0			/* three TLS descriptors */
++	.quad	0			/* unused */
++gdt_end:
++	/* asm/segment.h:GDT_ENTRIES must match this */
++	/* This should be a multiple of the cache line size */
++	/* GDTs of other CPUs are now dynamically allocated */
++
++	/* zero the remaining page */
++	.fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
++
++	.section .bss.page_aligned, "aw", @nobits
++	.align PAGE_SIZE
++ENTRY(empty_zero_page)
++	.skip PAGE_SIZE
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++/*
++ * __xen_guest information
++ */
++.macro utoh value
++ .if (\value) < 0 || (\value) >= 0x10
++	utoh (((\value)>>4)&0x0fffffffffffffff)
++ .endif
++ .if ((\value) & 0xf) < 10
++  .byte '0' + ((\value) & 0xf)
++ .else
++  .byte 'A' + ((\value) & 0xf) - 10
++ .endif
++.endm
++
++.section __xen_guest
++	.ascii	"GUEST_OS=linux,GUEST_VER=2.6"
++	.ascii	",XEN_VER=xen-3.0"
++	.ascii	",VIRT_BASE=0x"
++		utoh __START_KERNEL_map
++	.ascii	",ELF_PADDR_OFFSET=0x"
++		utoh __START_KERNEL_map
++	.ascii	",VIRT_ENTRY=0x"
++		utoh (__START_KERNEL_map + __PHYSICAL_START)
++	.ascii	",HYPERCALL_PAGE=0x"
++		utoh (phys_hypercall_page >> PAGE_SHIFT)
++	.ascii  ",FEATURES=writable_page_tables"
++	.ascii		 "|writable_descriptor_tables"
++	.ascii		 "|auto_translated_physmap"
++	.ascii	         "|supervisor_mode_kernel"
++	.ascii	",LOADER=generic"
++	.byte	0
++#endif /* CONFIG_XEN_COMPAT <= 0x030002 */
++	
++	ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS,       .asciz, "linux")
++	ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION,  .asciz, "2.6")
++	ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION,    .asciz, "xen-3.0")
++	ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE,      .quad,  __START_KERNEL_map)
++#if CONFIG_XEN_COMPAT <= 0x030002
++	ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET,   .quad,  __START_KERNEL_map)
++#else
++	ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET,   .quad,  0)
++#endif
++	ELFNOTE(Xen, XEN_ELFNOTE_ENTRY,          .quad,  startup_64)
++	ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .quad,  hypercall_page)
++	ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID,   .quad,  _PAGE_PRESENT,_PAGE_PRESENT)
++	ELFNOTE(Xen, XEN_ELFNOTE_FEATURES,       .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel")
++	ELFNOTE(Xen, XEN_ELFNOTE_LOADER,         .asciz, "generic")
++	ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long,  1)
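
The utoh macro above renders a constant as uppercase hex at assembly time by recursing on the more significant digits first, then emitting the current digit. The same digit recursion in C, as a quick sanity check (the sample value merely illustrates a __START_KERNEL_map-style address):

/* C analogue of the recursive utoh assembler macro. */
#include <stdio.h>

static void utoh(unsigned long value)
{
	if (value >= 0x10)
		utoh(value >> 4);	/* higher-order digits first */
	putchar("0123456789ABCDEF"[value & 0xf]);
}

int main(void)
{
	printf("VIRT_BASE=0x");
	utoh(0xffffffff80000000UL);	/* illustrative address */
	putchar('\n');
	return 0;
}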
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/head64-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/kernel/head64-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,162 @@
++/*
++ *  linux/arch/x86_64/kernel/head64.c -- prepare to run common code
++ *
++ *  Copyright (C) 2000 Andrea Arcangeli <andrea at suse.de> SuSE
++ *
++ *  Jun Nakajima <jun.nakajima at intel.com>
++ *	Modified for Xen.
++ */
++
++#include <linux/init.h>
++#include <linux/linkage.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/percpu.h>
++#include <linux/module.h>
++
++#include <asm/processor.h>
++#include <asm/proto.h>
++#include <asm/smp.h>
++#include <asm/bootsetup.h>
++#include <asm/setup.h>
++#include <asm/desc.h>
++#include <asm/pgtable.h>
++#include <asm/sections.h>
++
++unsigned long start_pfn;
++
++/* Don't add a printk in here: printk relies on the PDA, which is not
++   initialized yet. */
++#if 0
++static void __init clear_bss(void)
++{
++	memset(__bss_start, 0,
++	       (unsigned long) __bss_stop - (unsigned long) __bss_start);
++}
++#endif
++
++#define NEW_CL_POINTER		0x228	/* Relative to real mode data */
++#define OLD_CL_MAGIC_ADDR	0x90020
++#define OLD_CL_MAGIC            0xA33F
++#define OLD_CL_BASE_ADDR        0x90000
++#define OLD_CL_OFFSET           0x90022
++
++extern char saved_command_line[];
++
++static void __init copy_bootdata(char *real_mode_data)
++{
++#ifndef CONFIG_XEN
++	int new_data;
++	char * command_line;
++
++	memcpy(x86_boot_params, real_mode_data, BOOT_PARAM_SIZE);
++	new_data = *(int *) (x86_boot_params + NEW_CL_POINTER);
++	if (!new_data) {
++		if (OLD_CL_MAGIC != * (u16 *) OLD_CL_MAGIC_ADDR) {
++			printk("so old bootloader that it does not support commandline?!\n");
++			return;
++		}
++		new_data = OLD_CL_BASE_ADDR + * (u16 *) OLD_CL_OFFSET;
++		printk("old bootloader convention, maybe loadlin?\n");
++	}
++	command_line = (char *) ((u64)(new_data));
++	memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
++#else
++	int max_cmdline;
++	
++	if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
++		max_cmdline = COMMAND_LINE_SIZE;
++	memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
++	saved_command_line[max_cmdline-1] = '\0';
++#endif
++	printk("Bootdata ok (command line is %s)\n", saved_command_line);
++}
++
++static void __init setup_boot_cpu_data(void)
++{
++	unsigned int dummy, eax;
++
++	/* get vendor info */
++	cpuid(0, (unsigned int *)&boot_cpu_data.cpuid_level,
++	      (unsigned int *)&boot_cpu_data.x86_vendor_id[0],
++	      (unsigned int *)&boot_cpu_data.x86_vendor_id[8],
++	      (unsigned int *)&boot_cpu_data.x86_vendor_id[4]);
++
++	/* get cpu type */
++	cpuid(1, &eax, &dummy, &dummy,
++		(unsigned int *) &boot_cpu_data.x86_capability);
++	boot_cpu_data.x86 = (eax >> 8) & 0xf;
++	boot_cpu_data.x86_model = (eax >> 4) & 0xf;
++	boot_cpu_data.x86_mask = eax & 0xf;
++}
++
++#include <xen/interface/memory.h>
++unsigned long *machine_to_phys_mapping;
++EXPORT_SYMBOL(machine_to_phys_mapping);
++unsigned int machine_to_phys_order;
++EXPORT_SYMBOL(machine_to_phys_order);
++
++void __init x86_64_start_kernel(char * real_mode_data)
++{
++	struct xen_machphys_mapping mapping;
++	unsigned long machine_to_phys_nr_ents;
++	char *s;
++	int i;
++
++	setup_xen_features();
++
++	xen_start_info = (struct start_info *)real_mode_data;
++	if (!xen_feature(XENFEAT_auto_translated_physmap))
++		phys_to_machine_mapping =
++			(unsigned long *)xen_start_info->mfn_list;
++	start_pfn = (__pa(xen_start_info->pt_base) >> PAGE_SHIFT) +
++		xen_start_info->nr_pt_frames;
++
++	machine_to_phys_mapping = (unsigned long *)MACH2PHYS_VIRT_START;
++	machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
++	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
++		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
++		machine_to_phys_nr_ents = mapping.max_mfn + 1;
++	}
++	while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents )
++		machine_to_phys_order++;
++
++#if 0
++	for (i = 0; i < 256; i++)
++		set_intr_gate(i, early_idt_handler);
++	asm volatile("lidt %0" :: "m" (idt_descr));
++#endif
++
++	/*
++	 * This must be called really, really early:
++	 */
++	lockdep_init();
++
++ 	for (i = 0; i < NR_CPUS; i++)
++ 		cpu_pda(i) = &boot_cpu_pda[i];
++
++	pda_init(0);
++	copy_bootdata(real_mode_data);
++#ifdef CONFIG_SMP
++	cpu_set(0, cpu_online_map);
++#endif
++	s = strstr(saved_command_line, "earlyprintk=");
++	if (s != NULL)
++		setup_early_printk(strchr(s, '=') + 1);
++#ifdef CONFIG_NUMA
++	s = strstr(saved_command_line, "numa=");
++	if (s != NULL)
++		numa_setup(s+5);
++#endif
++#ifdef CONFIG_X86_IO_APIC
++	if (strstr(saved_command_line, "disableapic"))
++		disable_apic = 1;
++#endif
++	/* You need early console to see that */
++	if (__pa_symbol(&_end) >= KERNEL_TEXT_SIZE)
++		panic("Kernel too big for kernel mapping\n");
++
++	setup_boot_cpu_data();
++	start_kernel();
++}
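
The machine_to_phys_order loop in x86_64_start_kernel() above is simply ceil(log2(nr_ents)): the smallest order with (1UL << order) >= machine_to_phys_nr_ents. A quick standalone check:

/* The order loop is ceil(log2(nr_ents)); verify on sample counts. */
#include <stdio.h>

static unsigned order_for(unsigned long nr_ents)
{
	unsigned order = 0;

	while ((1UL << order) < nr_ents)
		order++;
	return order;
}

int main(void)
{
	/* 16777216 = 2^24, e.g. a large machphys table */
	printf("order for %lu entries: %u\n", 16777216UL, order_for(16777216UL));
	printf("order for %lu entries: %u\n", 16777217UL, order_for(16777217UL));
	return 0;
}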
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/init_task.c
+--- a/arch/x86_64/kernel/init_task.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/x86_64/kernel/init_task.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -37,6 +37,8 @@
+ struct task_struct init_task = INIT_TASK(init_task);
+ 
+ EXPORT_SYMBOL(init_task);
++
++#ifndef CONFIG_X86_NO_TSS
+ /*
+  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
+  * no more per-task TSS's. The TSS size is kept cacheline-aligned
+@@ -50,5 +52,6 @@
+  * debugging, no special alignment required.
+  */
+ DEFINE_PER_CPU(struct orig_ist, orig_ist);
++#endif
+ 
+ #define ALIGN_TO_4K __attribute__((section(".data.init_task")))
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/io_apic-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/kernel/io_apic-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,2263 @@
++/*
++ *	Intel IO-APIC support for multi-Pentium hosts.
++ *
++ *	Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
++ *
++ *	Many thanks to Stig Venaas for trying out countless experimental
++ *	patches and reporting/debugging problems patiently!
++ *
++ *	(c) 1999, Multiple IO-APIC support, developed by
++ *	Ken-ichi Yaku <yaku at css1.kbnes.nec.co.jp> and
++ *      Hidemi Kishimoto <kisimoto at css1.kbnes.nec.co.jp>,
++ *	further tested and cleaned up by Zach Brown <zab at redhat.com>
++ *	and Ingo Molnar <mingo at redhat.com>
++ *
++ *	Fixes
++ *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
++ *					thanks to Eric Gilmore
++ *					and Rolf G. Tews
++ *					for testing these extensively
++ *	Paul Diefenbaugh	:	Added full ACPI support
++ */
++
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <linux/smp_lock.h>
++#include <linux/mc146818rtc.h>
++#include <linux/acpi.h>
++#include <linux/sysdev.h>
++#ifdef CONFIG_ACPI
++#include <acpi/acpi_bus.h>
++#endif
++
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/desc.h>
++#include <asm/proto.h>
++#include <asm/mach_apic.h>
++#include <asm/acpi.h>
++#include <asm/dma.h>
++#include <asm/nmi.h>
++
++#define __apicdebuginit  __init
++
++int sis_apic_bug; /* not actually supported, dummy for compile */
++
++static int no_timer_check;
++
++int disable_timer_pin_1 __initdata;
++
++#ifndef CONFIG_XEN
++int timer_over_8254 __initdata = 0;
++
++/* Where if anywhere is the i8259 connect in external int mode */
++static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
++#endif
++
++static DEFINE_SPINLOCK(ioapic_lock);
++static DEFINE_SPINLOCK(vector_lock);
++
++/*
++ * # of IRQ routing registers
++ */
++int nr_ioapic_registers[MAX_IO_APICS];
++
++/*
++ * Rough estimation of how many shared IRQs there are, can
++ * be changed anytime.
++ */
++#define MAX_PLUS_SHARED_IRQS NR_IRQ_VECTORS
++#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
++
++/*
++ * This is performance-critical; we want to do it in O(1).
++ *
++ * The indexing order of this array favors 1:1 mappings
++ * between pins and IRQs.
++ */
++
++static struct irq_pin_list {
++	short apic, pin, next;
++} irq_2_pin[PIN_MAP_SIZE];
++
++int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
++#ifdef CONFIG_PCI_MSI
++#define vector_to_irq(vector) 	\
++	(platform_legacy_irq(vector) ? vector : vector_irq[vector])
++#else
++#define vector_to_irq(vector)	(vector)
++#endif
++
++#ifdef CONFIG_XEN
++
++#include <xen/interface/xen.h>
++#include <xen/interface/physdev.h>
++#include <xen/evtchn.h>
++
++/* Fake i8259 */
++#define make_8259A_irq(_irq)     (io_apic_irqs &= ~(1UL<<(_irq)))
++#define disable_8259A_irq(_irq)  ((void)0)
++#define i8259A_irq_pending(_irq) (0)
++
++unsigned long io_apic_irqs;
++
++static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg)
++{
++	struct physdev_apic apic_op;
++	int ret;
++
++	apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
++	apic_op.reg = reg;
++	ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
++	if (ret)
++		return ret;
++	return apic_op.value;
++}
++
++static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
++{
++	struct physdev_apic apic_op;
++
++	apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
++	apic_op.reg = reg;
++	apic_op.value = value;
++	WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op));
++}
++
++#define io_apic_read(a,r)    xen_io_apic_read(a,r)
++#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v)
++
++#define clear_IO_APIC() ((void)0)
++
++#else
++
++#ifdef CONFIG_SMP
++static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
++{
++	unsigned long flags;
++	unsigned int dest;
++	cpumask_t tmp;
++
++	cpus_and(tmp, mask, cpu_online_map);
++	if (cpus_empty(tmp))
++		tmp = TARGET_CPUS;
++
++	cpus_and(mask, tmp, CPU_MASK_ALL);
++
++	dest = cpu_mask_to_apicid(mask);
++
++	/*
++	 * Only the high 8 bits are valid.
++	 */
++	dest = SET_APIC_LOGICAL_ID(dest);
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	__DO_ACTION(1, = dest, )
++	set_irq_info(irq, mask);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++#endif
++
++#endif /* !CONFIG_XEN */
++
++/*
++ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
++ * shared ISA-space IRQs, so we have to support them. We are super
++ * fast in the common case, and fast for shared ISA-space IRQs.
++ */
++static void add_pin_to_irq(unsigned int irq, int apic, int pin)
++{
++	static int first_free_entry = NR_IRQS;
++	struct irq_pin_list *entry = irq_2_pin + irq;
++
++	BUG_ON(irq >= NR_IRQS);
++	while (entry->next)
++		entry = irq_2_pin + entry->next;
++
++	if (entry->pin != -1) {
++		entry->next = first_free_entry;
++		entry = irq_2_pin + entry->next;
++		if (++first_free_entry >= PIN_MAP_SIZE)
++			panic("io_apic.c: ran out of irq_2_pin entries!");
++	}
++	entry->apic = apic;
++	entry->pin = pin;
++}
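
irq_2_pin above is a linked list packed into a flat array: slots 0..NR_IRQS-1 are per-IRQ list heads, and extra pins sharing an IRQ chain through 'next' into slots handed out from first_free_entry. A toy-sized sketch of the insert-and-walk pattern (sizes shrunk, overflow panic omitted):

/* Toy model of the irq_2_pin array-embedded linked list. */
#include <stdio.h>

#define NR_IRQS      4
#define PIN_MAP_SIZE 8

static struct irq_pin_list {
	short apic, pin, next;
} irq_2_pin[PIN_MAP_SIZE];

static int first_free_entry = NR_IRQS;

static void add_pin_to_irq(unsigned irq, int apic, int pin)
{
	struct irq_pin_list *entry = irq_2_pin + irq;

	while (entry->next)
		entry = irq_2_pin + entry->next;
	if (entry->pin != -1) {		/* head in use: chain a new slot */
		entry->next = first_free_entry++;
		entry = irq_2_pin + entry->next;
	}
	entry->apic = apic;
	entry->pin = pin;
}

int main(void)
{
	struct irq_pin_list *e;
	int i;

	for (i = 0; i < PIN_MAP_SIZE; i++)
		irq_2_pin[i].pin = -1;

	add_pin_to_irq(2, 0, 5);	/* IRQ 2 on apic 0 pin 5 ... */
	add_pin_to_irq(2, 1, 7);	/* ... shared with apic 1 pin 7 */

	for (e = irq_2_pin + 2; ; e = irq_2_pin + e->next) {
		printf("-> %d:%d", e->apic, e->pin);
		if (!e->next)
			break;
	}
	printf("\n");
	return 0;
}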
++
++#ifndef CONFIG_XEN
++#define __DO_ACTION(R, ACTION, FINAL)					\
++									\
++{									\
++	int pin;							\
++	struct irq_pin_list *entry = irq_2_pin + irq;			\
++									\
++	BUG_ON(irq >= NR_IRQS);						\
++	for (;;) {							\
++		unsigned int reg;					\
++		pin = entry->pin;					\
++		if (pin == -1)						\
++			break;						\
++		reg = io_apic_read(entry->apic, 0x10 + R + pin*2);	\
++		reg ACTION;						\
++		io_apic_modify(entry->apic, reg);			\
++		if (!entry->next)					\
++			break;						\
++		entry = irq_2_pin + entry->next;			\
++	}								\
++	FINAL;								\
++}
++
++#define DO_ACTION(name,R,ACTION, FINAL)					\
++									\
++	static void name##_IO_APIC_irq (unsigned int irq)		\
++	__DO_ACTION(R, ACTION, FINAL)
++
++DO_ACTION( __mask,             0, |= 0x00010000, io_apic_sync(entry->apic) )
++						/* mask = 1 */
++DO_ACTION( __unmask,           0, &= 0xfffeffff, )
++						/* mask = 0 */
++
++static void mask_IO_APIC_irq (unsigned int irq)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	__mask_IO_APIC_irq(irq);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void unmask_IO_APIC_irq (unsigned int irq)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	__unmask_IO_APIC_irq(irq);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
++{
++	struct IO_APIC_route_entry entry;
++	unsigned long flags;
++
++	/* Check delivery_mode to be sure we're not clearing an SMI pin */
++	spin_lock_irqsave(&ioapic_lock, flags);
++	*(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++	*(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++	if (entry.delivery_mode == dest_SMI)
++		return;
++	/*
++	 * Disable it in the IO-APIC irq-routing table:
++	 */
++	memset(&entry, 0, sizeof(entry));
++	entry.mask = 1;
++	spin_lock_irqsave(&ioapic_lock, flags);
++	io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
++	io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void clear_IO_APIC (void)
++{
++	int apic, pin;
++
++	for (apic = 0; apic < nr_ioapics; apic++)
++		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
++			clear_IO_APIC_pin(apic, pin);
++}
++
++#endif /* !CONFIG_XEN */
++
++static u8 gsi_2_irq[NR_IRQ_VECTORS] = { [0 ... NR_IRQ_VECTORS-1] = 0xFF };
++
++/*
++ * Support for broken MP BIOSes: enables hand-redirection of PIRQ0-7 to
++ * specific CPU-side IRQs.
++ */
++
++#define MAX_PIRQS 8
++static int pirq_entries [MAX_PIRQS];
++static int pirqs_enabled;
++int skip_ioapic_setup;
++int ioapic_force;
++
++/* dummy parsing: see setup.c */
++
++static int __init disable_ioapic_setup(char *str)
++{
++	skip_ioapic_setup = 1;
++	return 1;
++}
++
++static int __init enable_ioapic_setup(char *str)
++{
++	ioapic_force = 1;
++	skip_ioapic_setup = 0;
++	return 1;
++}
++
++__setup("noapic", disable_ioapic_setup);
++__setup("apic", enable_ioapic_setup);
++
++#ifndef CONFIG_XEN
++static int __init setup_disable_8254_timer(char *s)
++{
++	timer_over_8254 = -1;
++	return 1;
++}
++static int __init setup_enable_8254_timer(char *s)
++{
++	timer_over_8254 = 2;
++	return 1;
++}
++
++__setup("disable_8254_timer", setup_disable_8254_timer);
++__setup("enable_8254_timer", setup_enable_8254_timer);
++#endif /* !CONFIG_XEN */
++
++#include <asm/pci-direct.h>
++#include <linux/pci_ids.h>
++#include <linux/pci.h>
++
++
++#ifdef CONFIG_ACPI
++
++static int nvidia_hpet_detected __initdata;
++
++static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
++{
++	nvidia_hpet_detected = 1;
++	return 0;
++}
++#endif
++
++/* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC
++   off. Check for an Nvidia or VIA PCI bridge and turn it off.
++   Use pci direct infrastructure because this runs before the PCI subsystem. 
++
++   Can be overridden with "apic".
++
++   And another hack to disable the IOMMU on VIA chipsets.
++
++   ... and others. Really should move this somewhere else.
++
++   Kludge-O-Rama. */
++void __init check_ioapic(void) 
++{ 
++	int num,slot,func; 
++	/* Poor man's PCI discovery */
++	for (num = 0; num < 32; num++) { 
++		for (slot = 0; slot < 32; slot++) { 
++			for (func = 0; func < 8; func++) { 
++				u32 class;
++				u32 vendor;
++				u8 type;
++				class = read_pci_config(num,slot,func,
++							PCI_CLASS_REVISION);
++				if (class == 0xffffffff)
++					break; 
++
++		       		if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
++					continue; 
++
++				vendor = read_pci_config(num, slot, func, 
++							 PCI_VENDOR_ID);
++				vendor &= 0xffff;
++				switch (vendor) { 
++				case PCI_VENDOR_ID_VIA:
++#ifdef CONFIG_IOMMU
++					if ((end_pfn > MAX_DMA32_PFN ||
++					     force_iommu) &&
++					    !iommu_aperture_allowed) {
++						printk(KERN_INFO
++    "Looks like a VIA chipset. Disabling IOMMU. Override with \"iommu=allowed\"\n");
++						iommu_aperture_disabled = 1;
++					}
++#endif
++					return;
++				case PCI_VENDOR_ID_NVIDIA:
++#ifdef CONFIG_ACPI
++					/*
++					 * All timer overrides on Nvidia are
++					 * wrong unless HPET is enabled.
++					 */
++					nvidia_hpet_detected = 0;
++					acpi_table_parse(ACPI_HPET,
++							nvidia_hpet_check);
++					if (nvidia_hpet_detected == 0) {
++						acpi_skip_timer_override = 1;
++						printk(KERN_INFO "Nvidia board "
++						    "detected. Ignoring ACPI "
++						    "timer override.\n");
++					}
++#endif
++					/* RED-PEN skip them on mptables too? */
++					return;
++				case PCI_VENDOR_ID_ATI:
++
++				/* This should actually be the default, but
++				   for 2.6.16 let's do it only for ATI, where
++				   it's really needed. */
++#ifndef CONFIG_XEN
++					if (timer_over_8254 == 1) {	
++						timer_over_8254 = 0;	
++					printk(KERN_INFO
++		"ATI board detected. Disabling timer routing over 8254.\n");
++					}	
++#endif
++					return;
++				} 
++
++
++				/* No multi-function device? */
++				type = read_pci_config_byte(num,slot,func,
++							    PCI_HEADER_TYPE);
++				if (!(type & 0x80))
++					break;
++			} 
++		}
++	}
++} 
++
++static int __init ioapic_pirq_setup(char *str)
++{
++	int i, max;
++	int ints[MAX_PIRQS+1];
++
++	get_options(str, ARRAY_SIZE(ints), ints);
++
++	for (i = 0; i < MAX_PIRQS; i++)
++		pirq_entries[i] = -1;
++
++	pirqs_enabled = 1;
++	apic_printk(APIC_VERBOSE, "PIRQ redirection, working around broken MP-BIOS.\n");
++	max = MAX_PIRQS;
++	if (ints[0] < MAX_PIRQS)
++		max = ints[0];
++
++	for (i = 0; i < max; i++) {
++		apic_printk(APIC_VERBOSE, "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
++		/*
++		 * PIRQs are mapped upside down, usually.
++		 */
++		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
++	}
++	return 1;
++}
++
++__setup("pirq=", ioapic_pirq_setup);
++
++/*
++ * Find the IRQ entry number of a certain pin.
++ */
++static int find_irq_entry(int apic, int pin, int type)
++{
++	int i;
++
++	for (i = 0; i < mp_irq_entries; i++)
++		if (mp_irqs[i].mpc_irqtype == type &&
++		    (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
++		     mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
++		    mp_irqs[i].mpc_dstirq == pin)
++			return i;
++
++	return -1;
++}
++
++#ifndef CONFIG_XEN
++/*
++ * Find the pin to which IRQ[irq] (ISA) is connected
++ */
++static int __init find_isa_irq_pin(int irq, int type)
++{
++	int i;
++
++	for (i = 0; i < mp_irq_entries; i++) {
++		int lbus = mp_irqs[i].mpc_srcbus;
++
++		if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
++		     mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
++		     mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
++		    (mp_irqs[i].mpc_irqtype == type) &&
++		    (mp_irqs[i].mpc_srcbusirq == irq))
++
++			return mp_irqs[i].mpc_dstirq;
++	}
++	return -1;
++}
++
++static int __init find_isa_irq_apic(int irq, int type)
++{
++	int i;
++
++	for (i = 0; i < mp_irq_entries; i++) {
++		int lbus = mp_irqs[i].mpc_srcbus;
++
++		if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
++		     mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
++		     mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
++		    (mp_irqs[i].mpc_irqtype == type) &&
++		    (mp_irqs[i].mpc_srcbusirq == irq))
++			break;
++	}
++	if (i < mp_irq_entries) {
++		int apic;
++		for(apic = 0; apic < nr_ioapics; apic++) {
++			if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
++				return apic;
++		}
++	}
++
++	return -1;
++}
++#endif
++
++/*
++ * Find a specific PCI IRQ entry.
++ * Not an __init, possibly needed by modules
++ */
++static int pin_2_irq(int idx, int apic, int pin);
++
++int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
++{
++	int apic, i, best_guess = -1;
++
++	apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
++		bus, slot, pin);
++	if (mp_bus_id_to_pci_bus[bus] == -1) {
++		apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
++		return -1;
++	}
++	for (i = 0; i < mp_irq_entries; i++) {
++		int lbus = mp_irqs[i].mpc_srcbus;
++
++		for (apic = 0; apic < nr_ioapics; apic++)
++			if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
++			    mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
++				break;
++
++		if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
++		    !mp_irqs[i].mpc_irqtype &&
++		    (bus == lbus) &&
++		    (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
++			int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
++
++			if (!(apic || IO_APIC_IRQ(irq)))
++				continue;
++
++			if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
++				return irq;
++			/*
++			 * Use the first all-but-pin matching entry as a
++			 * best-guess fuzzy result for broken mptables.
++			 */
++			if (best_guess < 0)
++				best_guess = irq;
++		}
++	}
++	BUG_ON(best_guess >= NR_IRQS);
++	return best_guess;
++}
++
++/*
++ * EISA Edge/Level control register, ELCR
++ */
++static int EISA_ELCR(unsigned int irq)
++{
++	if (irq < 16) {
++		unsigned int port = 0x4d0 + (irq >> 3);
++		return (inb(port) >> (irq & 7)) & 1;
++	}
++	apic_printk(APIC_VERBOSE, "Broken MPtable reports ISA irq %d\n", irq);
++	return 0;
++}
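
EISA_ELCR() above reads one of the two ELCR ports (0x4d0 for IRQ 0-7, 0x4d1 for IRQ 8-15) and extracts the per-IRQ trigger bit. The same lookup with inb() stubbed by canned values, purely illustrative:

/* ELCR bit extraction with a fake inb(); IRQs 9 and 11 are "level". */
#include <stdio.h>

static unsigned char fake_inb(unsigned port)
{
	/* bits 1 and 3 of port 0x4d1 set: IRQ 9 and IRQ 11 level */
	return port == 0x4d1 ? 0x0a : 0x00;
}

static int elcr_trigger(unsigned irq)
{
	unsigned port = 0x4d0 + (irq >> 3);		/* pick the byte */

	return (fake_inb(port) >> (irq & 7)) & 1;	/* pick the bit */
}

int main(void)
{
	unsigned irq;

	for (irq = 8; irq < 12; irq++)
		printf("IRQ %u: %s\n", irq,
		       elcr_trigger(irq) ? "level" : "edge");
	return 0;
}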
++
++/* EISA interrupts are always polarity zero and can be edge or level
++ * trigger depending on the ELCR value.  If an interrupt is listed as
++ * EISA conforming in the MP table, that means its trigger type must
++ * be read in from the ELCR */
++
++#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
++#define default_EISA_polarity(idx)	(0)
++
++/* ISA interrupts are always polarity zero edge triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_ISA_trigger(idx)	(0)
++#define default_ISA_polarity(idx)	(0)
++
++/* PCI interrupts are always polarity one level triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_PCI_trigger(idx)	(1)
++#define default_PCI_polarity(idx)	(1)
++
++/* MCA interrupts are always polarity zero level triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_MCA_trigger(idx)	(1)
++#define default_MCA_polarity(idx)	(0)
++
++static int __init MPBIOS_polarity(int idx)
++{
++	int bus = mp_irqs[idx].mpc_srcbus;
++	int polarity;
++
++	/*
++	 * Determine IRQ line polarity (high active or low active):
++	 */
++	switch (mp_irqs[idx].mpc_irqflag & 3)
++	{
++		case 0: /* conforms, ie. bus-type dependent polarity */
++		{
++			switch (mp_bus_id_to_type[bus])
++			{
++				case MP_BUS_ISA: /* ISA pin */
++				{
++					polarity = default_ISA_polarity(idx);
++					break;
++				}
++				case MP_BUS_EISA: /* EISA pin */
++				{
++					polarity = default_EISA_polarity(idx);
++					break;
++				}
++				case MP_BUS_PCI: /* PCI pin */
++				{
++					polarity = default_PCI_polarity(idx);
++					break;
++				}
++				case MP_BUS_MCA: /* MCA pin */
++				{
++					polarity = default_MCA_polarity(idx);
++					break;
++				}
++				default:
++				{
++					printk(KERN_WARNING "broken BIOS!!\n");
++					polarity = 1;
++					break;
++				}
++			}
++			break;
++		}
++		case 1: /* high active */
++		{
++			polarity = 0;
++			break;
++		}
++		case 2: /* reserved */
++		{
++			printk(KERN_WARNING "broken BIOS!!\n");
++			polarity = 1;
++			break;
++		}
++		case 3: /* low active */
++		{
++			polarity = 1;
++			break;
++		}
++		default: /* invalid */
++		{
++			printk(KERN_WARNING "broken BIOS!!\n");
++			polarity = 1;
++			break;
++		}
++	}
++	return polarity;
++}
++
++static int MPBIOS_trigger(int idx)
++{
++	int bus = mp_irqs[idx].mpc_srcbus;
++	int trigger;
++
++	/*
++	 * Determine IRQ trigger mode (edge or level sensitive):
++	 */
++	switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
++	{
++		case 0: /* conforms, ie. bus-type dependent */
++		{
++			switch (mp_bus_id_to_type[bus])
++			{
++				case MP_BUS_ISA: /* ISA pin */
++				{
++					trigger = default_ISA_trigger(idx);
++					break;
++				}
++				case MP_BUS_EISA: /* EISA pin */
++				{
++					trigger = default_EISA_trigger(idx);
++					break;
++				}
++				case MP_BUS_PCI: /* PCI pin */
++				{
++					trigger = default_PCI_trigger(idx);
++					break;
++				}
++				case MP_BUS_MCA: /* MCA pin */
++				{
++					trigger = default_MCA_trigger(idx);
++					break;
++				}
++				default:
++				{
++					printk(KERN_WARNING "broken BIOS!!\n");
++					trigger = 1;
++					break;
++				}
++			}
++			break;
++		}
++		case 1: /* edge */
++		{
++			trigger = 0;
++			break;
++		}
++		case 2: /* reserved */
++		{
++			printk(KERN_WARNING "broken BIOS!!\n");
++			trigger = 1;
++			break;
++		}
++		case 3: /* level */
++		{
++			trigger = 1;
++			break;
++		}
++		default: /* invalid */
++		{
++			printk(KERN_WARNING "broken BIOS!!\n");
++			trigger = 0;
++			break;
++		}
++	}
++	return trigger;
++}
++
++static inline int irq_polarity(int idx)
++{
++	return MPBIOS_polarity(idx);
++}
++
++static inline int irq_trigger(int idx)
++{
++	return MPBIOS_trigger(idx);
++}
++
++static int next_irq = 16;
++
++/*
++ * gsi_irq_sharing -- Name overload!  "irq" can be either a legacy IRQ
++ * in the range 0-15, a linux IRQ in the range 0-223, or a GSI number
++ * from ACPI, which can reach 800 in large boxen.
++ *
++ * Compact the sparse GSI space into a sequential IRQ series and reuse
++ * vectors if possible.
++ */
++int gsi_irq_sharing(int gsi)
++{
++	int i, tries, vector;
++
++	BUG_ON(gsi >= NR_IRQ_VECTORS);
++
++	if (platform_legacy_irq(gsi))
++		return gsi;
++
++	if (gsi_2_irq[gsi] != 0xFF)
++		return (int)gsi_2_irq[gsi];
++
++	tries = NR_IRQS;
++  try_again:
++	vector = assign_irq_vector(gsi);
++
++	/*
++	 * Sharing vectors means sharing IRQs, so scan irq_vectors for previous
++	 * use of vector and if found, return that IRQ.  However, we never want
++	 * to share legacy IRQs, which usually have a different trigger mode
++	 * than PCI.
++	 */
++	for (i = 0; i < NR_IRQS; i++)
++		if (IO_APIC_VECTOR(i) == vector)
++			break;
++	if (platform_legacy_irq(i)) {
++		if (--tries >= 0) {
++			IO_APIC_VECTOR(i) = 0;
++			goto try_again;
++		}
++		panic("gsi_irq_sharing: didn't find an IRQ using vector 0x%02X for GSI %d", vector, gsi);
++	}
++	if (i < NR_IRQS) {
++		gsi_2_irq[gsi] = i;
++		printk(KERN_INFO "GSI %d sharing vector 0x%02X and IRQ %d\n",
++				gsi, vector, i);
++		return i;
++	}
++
++	i = next_irq++;
++	BUG_ON(i >= NR_IRQS);
++	gsi_2_irq[gsi] = i;
++	IO_APIC_VECTOR(i) = vector;
++	printk(KERN_INFO "GSI %d assigned vector 0x%02X and IRQ %d\n",
++			gsi, vector, i);
++	return i;
++}
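
A simplified sketch of the GSI compaction gsi_irq_sharing() performs: legacy GSIs map 1:1, anything else gets the next free Linux IRQ, cached in gsi_2_irq so repeated lookups are stable. The vector-sharing retry logic is deliberately omitted here:

/* GSI -> IRQ compaction without the vector-sharing retry path. */
#include <stdio.h>

#define NR_LEGACY 16
#define MAP_SIZE  1024		/* large enough for big-box GSIs */

static short gsi_2_irq[MAP_SIZE];	/* 0 = not yet assigned */
static int next_irq = NR_LEGACY;

static int gsi_to_irq(int gsi)
{
	if (gsi < NR_LEGACY)
		return gsi;			/* legacy IRQs map 1:1 */
	if (gsi_2_irq[gsi] == 0)
		gsi_2_irq[gsi] = next_irq++;	/* first sight: assign */
	return gsi_2_irq[gsi];
}

int main(void)
{
	printf("GSI   3 -> IRQ %d\n", gsi_to_irq(3));
	printf("GSI 480 -> IRQ %d\n", gsi_to_irq(480));
	printf("GSI 800 -> IRQ %d\n", gsi_to_irq(800));
	printf("GSI 480 -> IRQ %d (cached)\n", gsi_to_irq(480));
	return 0;
}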
++
++static int pin_2_irq(int idx, int apic, int pin)
++{
++	int irq, i;
++	int bus = mp_irqs[idx].mpc_srcbus;
++
++	/*
++	 * Debugging check, we are in big trouble if this message pops up!
++	 */
++	if (mp_irqs[idx].mpc_dstirq != pin)
++		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
++
++	switch (mp_bus_id_to_type[bus])
++	{
++		case MP_BUS_ISA: /* ISA pin */
++		case MP_BUS_EISA:
++		case MP_BUS_MCA:
++		{
++			irq = mp_irqs[idx].mpc_srcbusirq;
++			break;
++		}
++		case MP_BUS_PCI: /* PCI pin */
++		{
++			/*
++			 * PCI IRQs are mapped in order
++			 */
++			i = irq = 0;
++			while (i < apic)
++				irq += nr_ioapic_registers[i++];
++			irq += pin;
++			irq = gsi_irq_sharing(irq);
++			break;
++		}
++		default:
++		{
++			printk(KERN_ERR "unknown bus type %d.\n",bus); 
++			irq = 0;
++			break;
++		}
++	}
++	BUG_ON(irq >= NR_IRQS);
++
++	/*
++	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
++	 */
++	if ((pin >= 16) && (pin <= 23)) {
++		if (pirq_entries[pin-16] != -1) {
++			if (!pirq_entries[pin-16]) {
++				apic_printk(APIC_VERBOSE, "disabling PIRQ%d\n", pin-16);
++			} else {
++				irq = pirq_entries[pin-16];
++				apic_printk(APIC_VERBOSE, "using PIRQ%d -> IRQ %d\n",
++						pin-16, irq);
++			}
++		}
++	}
++	BUG_ON(irq >= NR_IRQS);
++	return irq;
++}
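
The PCI branch of pin_2_irq() above numbers pins globally: the resulting GSI is this pin plus the pin counts of all lower-numbered IO-APICs. A quick demonstration with made-up register counts:

/* Global pin numbering: accumulate earlier IO-APICs' pin counts. */
#include <stdio.h>

static int nr_ioapic_registers[] = { 24, 24, 16 };

static int pci_pin_to_gsi(int apic, int pin)
{
	int i, gsi = 0;

	for (i = 0; i < apic; i++)
		gsi += nr_ioapic_registers[i];	/* skip earlier IO-APICs */
	return gsi + pin;
}

int main(void)
{
	printf("apic 0 pin  5 -> GSI %d\n", pci_pin_to_gsi(0, 5));	/* 5 */
	printf("apic 1 pin  0 -> GSI %d\n", pci_pin_to_gsi(1, 0));	/* 24 */
	printf("apic 2 pin 10 -> GSI %d\n", pci_pin_to_gsi(2, 10));	/* 58 */
	return 0;
}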
++
++static inline int IO_APIC_irq_trigger(int irq)
++{
++	int apic, idx, pin;
++
++	for (apic = 0; apic < nr_ioapics; apic++) {
++		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++			idx = find_irq_entry(apic,pin,mp_INT);
++			if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
++				return irq_trigger(idx);
++		}
++	}
++	/*
++	 * nonexistent IRQs are edge default
++	 */
++	return 0;
++}
++
++/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
++u8 irq_vector[NR_IRQ_VECTORS] __read_mostly;
++
++int assign_irq_vector(int irq)
++{
++	unsigned long flags;
++	int vector;
++	struct physdev_irq irq_op;
++  
++  	BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
++
++	spin_lock_irqsave(&vector_lock, flags);
++
++  	if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
++		spin_unlock_irqrestore(&vector_lock, flags);
++  		return IO_APIC_VECTOR(irq);
++	}
++
++	irq_op.irq = irq;
++	if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
++		spin_unlock_irqrestore(&vector_lock, flags);
++		return -ENOSPC;
++	}
++
++	vector = irq_op.vector;
++	vector_irq[vector] = irq;
++	if (irq != AUTO_ASSIGN)
++		IO_APIC_VECTOR(irq) = vector;
++
++	spin_unlock_irqrestore(&vector_lock, flags);
++
++	return vector;
++}
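
In the Xen build, assign_irq_vector() above obtains the vector from the PHYSDEVOP_alloc_irq_vector hypercall instead of a local allocator, caching the result per IRQ. A sketch with the hypercall stubbed by a counter (AUTO_ASSIGN handling and locking omitted):

/* Per-IRQ vector caching around a stubbed hypervisor allocation. */
#include <stdio.h>

#define NR_IRQ_VECTORS 256

static int irq_vector[NR_IRQ_VECTORS];	/* 0 = not yet assigned */

static int fake_alloc_irq_vector_hypercall(int irq)
{
	static int next_vector = 0x20;

	(void)irq;
	return next_vector++;	/* the hypervisor picks the real vector */
}

static int assign_irq_vector(int irq)
{
	if (irq_vector[irq] > 0)
		return irq_vector[irq];		/* already bound: reuse */
	irq_vector[irq] = fake_alloc_irq_vector_hypercall(irq);
	return irq_vector[irq];
}

int main(void)
{
	printf("irq 9  -> vector 0x%02x\n", assign_irq_vector(9));
	printf("irq 12 -> vector 0x%02x\n", assign_irq_vector(12));
	printf("irq 9  -> vector 0x%02x (cached)\n", assign_irq_vector(9));
	return 0;
}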
++
++extern void (*interrupt[NR_IRQS])(void);
++#ifndef CONFIG_XEN
++static struct hw_interrupt_type ioapic_level_type;
++static struct hw_interrupt_type ioapic_edge_type;
++
++#define IOAPIC_AUTO	-1
++#define IOAPIC_EDGE	0
++#define IOAPIC_LEVEL	1
++
++static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
++{
++	unsigned idx;
++
++	idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
++
++	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
++			trigger == IOAPIC_LEVEL)
++		irq_desc[idx].chip = &ioapic_level_type;
++	else
++		irq_desc[idx].chip = &ioapic_edge_type;
++	set_intr_gate(vector, interrupt[idx]);
++}
++#else
++#define ioapic_register_intr(irq, vector, trigger) evtchn_register_pirq(irq)
++#endif /* !CONFIG_XEN */
++
++static void __init setup_IO_APIC_irqs(void)
++{
++	struct IO_APIC_route_entry entry;
++	int apic, pin, idx, irq, first_notcon = 1, vector;
++	unsigned long flags;
++
++	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
++
++	for (apic = 0; apic < nr_ioapics; apic++) {
++	for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++
++		/*
++		 * add it to the IO-APIC irq-routing table:
++		 */
++		memset(&entry,0,sizeof(entry));
++
++		entry.delivery_mode = INT_DELIVERY_MODE;
++		entry.dest_mode = INT_DEST_MODE;
++		entry.mask = 0;				/* enable IRQ */
++		entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++
++		idx = find_irq_entry(apic,pin,mp_INT);
++		if (idx == -1) {
++			if (first_notcon) {
++				apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mpc_apicid, pin);
++				first_notcon = 0;
++			} else
++				apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin);
++			continue;
++		}
++
++		entry.trigger = irq_trigger(idx);
++		entry.polarity = irq_polarity(idx);
++
++		if (irq_trigger(idx)) {
++			entry.trigger = 1;
++			entry.mask = 1;
++			entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++		}
++
++		irq = pin_2_irq(idx, apic, pin);
++		add_pin_to_irq(irq, apic, pin);
++
++		if (/* !apic && */ !IO_APIC_IRQ(irq))
++			continue;
++
++		if (IO_APIC_IRQ(irq)) {
++			vector = assign_irq_vector(irq);
++			entry.vector = vector;
++
++			ioapic_register_intr(irq, vector, IOAPIC_AUTO);
++			if (!apic && (irq < 16))
++				disable_8259A_irq(irq);
++		}
++		spin_lock_irqsave(&ioapic_lock, flags);
++		io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
++		io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
++		set_native_irq_info(irq, TARGET_CPUS);
++		spin_unlock_irqrestore(&ioapic_lock, flags);
++	}
++	}
++
++	if (!first_notcon)
++		apic_printk(APIC_VERBOSE," not connected.\n");
++}
++
++#ifndef CONFIG_XEN
++/*
++ * Set up the 8259A-master output pin as broadcast to all
++ * CPUs.
++ */
++static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
++{
++	struct IO_APIC_route_entry entry;
++	unsigned long flags;
++
++	memset(&entry,0,sizeof(entry));
++
++	disable_8259A_irq(0);
++
++	/* mask LVT0 */
++	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
++
++	/*
++	 * We use logical delivery to get the timer IRQ
++	 * to the first CPU.
++	 */
++	entry.dest_mode = INT_DEST_MODE;
++	entry.mask = 0;					/* unmask IRQ now */
++	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++	entry.delivery_mode = INT_DELIVERY_MODE;
++	entry.polarity = 0;
++	entry.trigger = 0;
++	entry.vector = vector;
++
++	/*
++	 * The timer IRQ doesn't have to know that behind the
++	 * scenes we have an 8259A master in AEOI mode ...
++	 */
++	irq_desc[0].chip = &ioapic_edge_type;
++
++	/*
++	 * Add it to the IO-APIC irq-routing table:
++	 */
++	spin_lock_irqsave(&ioapic_lock, flags);
++	io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
++	io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	enable_8259A_irq(0);
++}
++
++void __init UNEXPECTED_IO_APIC(void)
++{
++}
++
++void __apicdebuginit print_IO_APIC(void)
++{
++	int apic, i;
++	union IO_APIC_reg_00 reg_00;
++	union IO_APIC_reg_01 reg_01;
++	union IO_APIC_reg_02 reg_02;
++	unsigned long flags;
++
++	if (apic_verbosity == APIC_QUIET)
++		return;
++
++	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
++	for (i = 0; i < nr_ioapics; i++)
++		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
++		       mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
++
++	/*
++	 * We are a bit conservative about what we expect.  We have to
++	 * know about every hardware change ASAP.
++	 */
++	printk(KERN_INFO "testing the IO APIC.......................\n");
++
++	for (apic = 0; apic < nr_ioapics; apic++) {
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	reg_00.raw = io_apic_read(apic, 0);
++	reg_01.raw = io_apic_read(apic, 1);
++	if (reg_01.bits.version >= 0x10)
++		reg_02.raw = io_apic_read(apic, 2);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	printk("\n");
++	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
++	printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
++	printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
++	if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
++		UNEXPECTED_IO_APIC();
++
++	printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
++	printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);
++	if (	(reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
++		(reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
++		(reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
++		(reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
++		(reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
++		(reg_01.bits.entries != 0x2E) &&
++		(reg_01.bits.entries != 0x3F) &&
++		(reg_01.bits.entries != 0x03) 
++	)
++		UNEXPECTED_IO_APIC();
++
++	printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
++	printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);
++	if (	(reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
++		(reg_01.bits.version != 0x02) && /* 82801BA IO-APICs (ICH2) */
++		(reg_01.bits.version != 0x10) && /* oldest IO-APICs */
++		(reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
++		(reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
++		(reg_01.bits.version != 0x20)    /* Intel P64H (82806 AA) */
++	)
++		UNEXPECTED_IO_APIC();
++	if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
++		UNEXPECTED_IO_APIC();
++
++	if (reg_01.bits.version >= 0x10) {
++		printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
++		printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
++		if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
++			UNEXPECTED_IO_APIC();
++	}
++
++	printk(KERN_DEBUG ".... IRQ redirection table:\n");
++
++	printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
++			  " Stat Dest Deli Vect:   \n");
++
++	for (i = 0; i <= reg_01.bits.entries; i++) {
++		struct IO_APIC_route_entry entry;
++
++		spin_lock_irqsave(&ioapic_lock, flags);
++		*(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
++		*(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
++		spin_unlock_irqrestore(&ioapic_lock, flags);
++
++		printk(KERN_DEBUG " %02x %03X %02X  ",
++			i,
++			entry.dest.logical.logical_dest,
++			entry.dest.physical.physical_dest
++		);
++
++		printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
++			entry.mask,
++			entry.trigger,
++			entry.irr,
++			entry.polarity,
++			entry.delivery_status,
++			entry.dest_mode,
++			entry.delivery_mode,
++			entry.vector
++		);
++	}
++	}
++	if (use_pci_vector())
++		printk(KERN_INFO "Using vector-based indexing\n");
++	printk(KERN_DEBUG "IRQ to pin mappings:\n");
++	for (i = 0; i < NR_IRQS; i++) {
++		struct irq_pin_list *entry = irq_2_pin + i;
++		if (entry->pin < 0)
++			continue;
++ 		if (use_pci_vector() && !platform_legacy_irq(i))
++			printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
++		else
++			printk(KERN_DEBUG "IRQ%d ", i);
++		for (;;) {
++			printk("-> %d:%d", entry->apic, entry->pin);
++			if (!entry->next)
++				break;
++			entry = irq_2_pin + entry->next;
++		}
++		printk("\n");
++	}
++
++	printk(KERN_INFO ".................................... done.\n");
++
++	return;
++}
++
++static __apicdebuginit void print_APIC_bitfield (int base)
++{
++	unsigned int v;
++	int i, j;
++
++	if (apic_verbosity == APIC_QUIET)
++		return;
++
++	printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
++	for (i = 0; i < 8; i++) {
++		v = apic_read(base + i*0x10);
++		for (j = 0; j < 32; j++) {
++			if (v & (1<<j))
++				printk("1");
++			else
++				printk("0");
++		}
++		printk("\n");
++	}
++}
++
++void __apicdebuginit print_local_APIC(void * dummy)
++{
++	unsigned int v, ver, maxlvt;
++
++	if (apic_verbosity == APIC_QUIET)
++		return;
++
++	printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
++		smp_processor_id(), hard_smp_processor_id());
++	v = apic_read(APIC_ID);
++	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, GET_APIC_ID(v));
++	v = apic_read(APIC_LVR);
++	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
++	ver = GET_APIC_VERSION(v);
++	maxlvt = get_maxlvt();
++
++	v = apic_read(APIC_TASKPRI);
++	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
++
++	v = apic_read(APIC_ARBPRI);
++	printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
++		v & APIC_ARBPRI_MASK);
++	v = apic_read(APIC_PROCPRI);
++	printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
++
++	v = apic_read(APIC_EOI);
++	printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
++	v = apic_read(APIC_RRR);
++	printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
++	v = apic_read(APIC_LDR);
++	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
++	v = apic_read(APIC_DFR);
++	printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
++	v = apic_read(APIC_SPIV);
++	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
++
++	printk(KERN_DEBUG "... APIC ISR field:\n");
++	print_APIC_bitfield(APIC_ISR);
++	printk(KERN_DEBUG "... APIC TMR field:\n");
++	print_APIC_bitfield(APIC_TMR);
++	printk(KERN_DEBUG "... APIC IRR field:\n");
++	print_APIC_bitfield(APIC_IRR);
++
++	v = apic_read(APIC_ESR);
++	printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
++
++	v = apic_read(APIC_ICR);
++	printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
++	v = apic_read(APIC_ICR2);
++	printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
++
++	v = apic_read(APIC_LVTT);
++	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
++
++	if (maxlvt > 3) {                       /* PC is LVT#4. */
++		v = apic_read(APIC_LVTPC);
++		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
++	}
++	v = apic_read(APIC_LVT0);
++	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
++	v = apic_read(APIC_LVT1);
++	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
++
++	if (maxlvt > 2) {			/* ERR is LVT#3. */
++		v = apic_read(APIC_LVTERR);
++		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
++	}
++
++	v = apic_read(APIC_TMICT);
++	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
++	v = apic_read(APIC_TMCCT);
++	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
++	v = apic_read(APIC_TDCR);
++	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
++	printk("\n");
++}
++
++void print_all_local_APICs (void)
++{
++	on_each_cpu(print_local_APIC, NULL, 1, 1);
++}
++
++void __apicdebuginit print_PIC(void)
++{
++	unsigned int v;
++	unsigned long flags;
++
++	if (apic_verbosity == APIC_QUIET)
++		return;
++
++	printk(KERN_DEBUG "\nprinting PIC contents\n");
++
++	spin_lock_irqsave(&i8259A_lock, flags);
++
++	v = inb(0xa1) << 8 | inb(0x21);
++	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);
++
++	v = inb(0xa0) << 8 | inb(0x20);
++	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);
++
++	outb(0x0b,0xa0);
++	outb(0x0b,0x20);
++	v = inb(0xa0) << 8 | inb(0x20);
++	outb(0x0a,0xa0);
++	outb(0x0a,0x20);
++
++	spin_unlock_irqrestore(&i8259A_lock, flags);
++
++	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);
++
++	v = inb(0x4d1) << 8 | inb(0x4d0);
++	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
++}
++#endif /* !CONFIG_XEN */
++
++static void __init enable_IO_APIC(void)
++{
++	union IO_APIC_reg_01 reg_01;
++#ifndef CONFIG_XEN
++	int i8259_apic, i8259_pin;
++#endif
++	int i, apic;
++	unsigned long flags;
++
++	for (i = 0; i < PIN_MAP_SIZE; i++) {
++		irq_2_pin[i].pin = -1;
++		irq_2_pin[i].next = 0;
++	}
++	if (!pirqs_enabled)
++		for (i = 0; i < MAX_PIRQS; i++)
++			pirq_entries[i] = -1;
++
++	/*
++	 * The number of IO-APIC IRQ registers (== #pins):
++	 */
++	for (apic = 0; apic < nr_ioapics; apic++) {
++		spin_lock_irqsave(&ioapic_lock, flags);
++		reg_01.raw = io_apic_read(apic, 1);
++		spin_unlock_irqrestore(&ioapic_lock, flags);
++		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
++	}
++#ifndef CONFIG_XEN
++	for(apic = 0; apic < nr_ioapics; apic++) {
++		int pin;
++		/* See if any of the pins is in ExtINT mode */
++		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++			struct IO_APIC_route_entry entry;
++			spin_lock_irqsave(&ioapic_lock, flags);
++			*(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++			*(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++			spin_unlock_irqrestore(&ioapic_lock, flags);
++
++
++			/* If the interrupt line is enabled and in ExtInt mode
++			 * I have found the pin where the i8259 is connected.
++			 */
++			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
++				ioapic_i8259.apic = apic;
++				ioapic_i8259.pin  = pin;
++				goto found_i8259;
++			}
++		}
++	}
++ found_i8259:
++	/* Look to see if the MP table has reported the ExtINT */
++	i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
++	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
++	/* Trust the MP table if nothing is setup in the hardware */
++	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
++		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
++		ioapic_i8259.pin  = i8259_pin;
++		ioapic_i8259.apic = i8259_apic;
++	}
++	/* Complain if the MP table and the hardware disagree */
++	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
++		(i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
++	{
++		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
++	}
++#endif
++
++	/*
++	 * Do not trust the IO-APIC being empty at bootup
++	 */
++	clear_IO_APIC();
++}
++
++/*
++ * Not an __init, needed by the reboot code
++ */
++void disable_IO_APIC(void)
++{
++	/*
++	 * Clear the IO-APIC before rebooting:
++	 */
++	clear_IO_APIC();
++
++#ifndef CONFIG_XEN
++	/*
++	 * If the i8259 is routed through an IOAPIC,
++	 * put that IOAPIC in virtual wire mode
++	 * so legacy interrupts can be delivered.
++	 */
++	if (ioapic_i8259.pin != -1) {
++		struct IO_APIC_route_entry entry;
++		unsigned long flags;
++
++		memset(&entry, 0, sizeof(entry));
++		entry.mask            = 0; /* Enabled */
++		entry.trigger         = 0; /* Edge */
++		entry.irr             = 0;
++		entry.polarity        = 0; /* High */
++		entry.delivery_status = 0;
++		entry.dest_mode       = 0; /* Physical */
++		entry.delivery_mode   = dest_ExtINT; /* ExtInt */
++		entry.vector          = 0;
++		entry.dest.physical.physical_dest =
++					GET_APIC_ID(apic_read(APIC_ID));
++
++		/*
++		 * Add it to the IO-APIC irq-routing table:
++		 */
++		spin_lock_irqsave(&ioapic_lock, flags);
++		io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
++			*(((int *)&entry)+1));
++		io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
++			*(((int *)&entry)+0));
++		spin_unlock_irqrestore(&ioapic_lock, flags);
++	}
++
++	disconnect_bsp_APIC(ioapic_i8259.pin != -1);
++#endif
++}
++
++/*
++ * function to set the IO-APIC physical IDs based on the
++ * values stored in the MPC table.
++ *
++ * by Matt Domsch <Matt_Domsch at dell.com>  Tue Dec 21 12:25:05 CST 1999
++ */
++
++#ifndef CONFIG_XEN
++static void __init setup_ioapic_ids_from_mpc (void)
++{
++	union IO_APIC_reg_00 reg_00;
++	int apic;
++	int i;
++	unsigned char old_id;
++	unsigned long flags;
++
++	/*
++	 * Set the IOAPIC ID to the value stored in the MPC table.
++	 */
++	for (apic = 0; apic < nr_ioapics; apic++) {
++
++		/* Read the register 0 value */
++		spin_lock_irqsave(&ioapic_lock, flags);
++		reg_00.raw = io_apic_read(apic, 0);
++		spin_unlock_irqrestore(&ioapic_lock, flags);
++		
++		old_id = mp_ioapics[apic].mpc_apicid;
++
++
++		printk(KERN_INFO "Using IO-APIC %d\n", mp_ioapics[apic].mpc_apicid);
++
++
++		/*
++		 * We need to adjust the IRQ routing table
++		 * if the ID changed.
++		 */
++		if (old_id != mp_ioapics[apic].mpc_apicid)
++			for (i = 0; i < mp_irq_entries; i++)
++				if (mp_irqs[i].mpc_dstapic == old_id)
++					mp_irqs[i].mpc_dstapic
++						= mp_ioapics[apic].mpc_apicid;
++
++		/*
++		 * Read the right value from the MPC table and
++		 * write it into the ID register.
++	 	 */
++		apic_printk(APIC_VERBOSE,KERN_INFO "...changing IO-APIC physical APIC ID to %d ...",
++				mp_ioapics[apic].mpc_apicid);
++
++		reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
++		spin_lock_irqsave(&ioapic_lock, flags);
++		io_apic_write(apic, 0, reg_00.raw);
++		spin_unlock_irqrestore(&ioapic_lock, flags);
++
++		/*
++		 * Sanity check
++		 */
++		spin_lock_irqsave(&ioapic_lock, flags);
++		reg_00.raw = io_apic_read(apic, 0);
++		spin_unlock_irqrestore(&ioapic_lock, flags);
++		if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
++			printk("could not set ID!\n");
++		else
++			apic_printk(APIC_VERBOSE," ok.\n");
++	}
++}
++#else
++static void __init setup_ioapic_ids_from_mpc(void) { }
++#endif
++
++/*
++ * There is a nasty bug in some older SMP boards: their mptable lies
++ * about the timer IRQ. We do the following to work around the situation:
++ *
++ *	- timer IRQ defaults to IO-APIC IRQ
++ *	- if this function detects that timer IRQs are defunct, then we fall
++ *	  back to ISA timer IRQs
++ */
++#ifndef CONFIG_XEN
++static int __init timer_irq_works(void)
++{
++	unsigned long t1 = jiffies;
++
++	local_irq_enable();
++	/* Let ten ticks pass... */
++	mdelay((10 * 1000) / HZ);
++
++	/*
++	 * Expect a few ticks at least, to be sure some possible
++	 * glue logic does not lock up after one or two first
++	 * ticks in a non-ExtINT mode.  Also the local APIC
++	 * might have cached one ExtINT interrupt.  Finally, at
++	 * least one tick may be lost due to delays.
++	 */
++
++	/* jiffies wrap? */
++	if (jiffies - t1 > 4)
++		return 1;
++	return 0;
++}
++
++/*
++ * In the SMP+IOAPIC case it might happen that an unspecified number
++ * of pending IRQ events are left unhandled. These cases are very rare,
++ * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
++ * better to do it this way as thus we do not have to be aware of
++ * 'pending' interrupts in the IRQ path, except at this point.
++ */
++/*
++ * Edge-triggered interrupts need to resend any interrupt
++ * that was delayed, but this is now handled in the
++ * device-independent code.
++ */
++
++/*
++ * Starting up an edge-triggered IO-APIC interrupt is
++ * nasty - we need to make sure that we get the edge.
++ * If it is already asserted for some reason, we need to
++ * return 1 to indicate that it was pending.
++ *
++ * This is not complete - we should be able to fake
++ * an edge even if it isn't on the 8259A...
++ */
++
++static unsigned int startup_edge_ioapic_irq(unsigned int irq)
++{
++	int was_pending = 0;
++	unsigned long flags;
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	if (irq < 16) {
++		disable_8259A_irq(irq);
++		if (i8259A_irq_pending(irq))
++			was_pending = 1;
++	}
++	__unmask_IO_APIC_irq(irq);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	return was_pending;
++}
++
++/*
++ * Once we have recorded IRQ_PENDING already, we can mask the
++ * interrupt for real. This prevents IRQ storms from unhandled
++ * devices.
++ */
++static void ack_edge_ioapic_irq(unsigned int irq)
++{
++	move_irq(irq);
++	if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
++					== (IRQ_PENDING | IRQ_DISABLED))
++		mask_IO_APIC_irq(irq);
++	ack_APIC_irq();
++}
++
++/*
++ * Level triggered interrupts can just be masked,
++ * and shutting down and starting up the interrupt
++ * is the same as enabling and disabling them -- except
++ * that startup needs to return a "was pending" value.
++ *
++ * Level triggered interrupts are special because we
++ * do not touch any IO-APIC register while handling
++ * them. We ack the APIC in the end-IRQ handler, not
++ * in the start-IRQ-handler. Protection against reentrance
++ * from the same interrupt is still provided, both by the
++ * generic IRQ layer and by the fact that an unacked local
++ * APIC does not accept IRQs.
++ */
++static unsigned int startup_level_ioapic_irq (unsigned int irq)
++{
++	unmask_IO_APIC_irq(irq);
++
++	return 0; /* don't check for pending */
++}
++
++static void end_level_ioapic_irq (unsigned int irq)
++{
++	move_irq(irq);
++	ack_APIC_irq();
++}
++
++#ifdef CONFIG_PCI_MSI
++static unsigned int startup_edge_ioapic_vector(unsigned int vector)
++{
++	int irq = vector_to_irq(vector);
++
++	return startup_edge_ioapic_irq(irq);
++}
++
++static void ack_edge_ioapic_vector(unsigned int vector)
++{
++	int irq = vector_to_irq(vector);
++
++	move_native_irq(vector);
++	ack_edge_ioapic_irq(irq);
++}
++
++static unsigned int startup_level_ioapic_vector (unsigned int vector)
++{
++	int irq = vector_to_irq(vector);
++
++	return startup_level_ioapic_irq (irq);
++}
++
++static void end_level_ioapic_vector (unsigned int vector)
++{
++	int irq = vector_to_irq(vector);
++
++	move_native_irq(vector);
++	end_level_ioapic_irq(irq);
++}
++
++static void mask_IO_APIC_vector (unsigned int vector)
++{
++	int irq = vector_to_irq(vector);
++
++	mask_IO_APIC_irq(irq);
++}
++
++static void unmask_IO_APIC_vector (unsigned int vector)
++{
++	int irq = vector_to_irq(vector);
++
++	unmask_IO_APIC_irq(irq);
++}
++
++#ifdef CONFIG_SMP
++static void set_ioapic_affinity_vector (unsigned int vector,
++					cpumask_t cpu_mask)
++{
++	int irq = vector_to_irq(vector);
++
++	set_native_irq_info(vector, cpu_mask);
++	set_ioapic_affinity_irq(irq, cpu_mask);
++}
++#endif // CONFIG_SMP
++#endif // CONFIG_PCI_MSI
++
++static int ioapic_retrigger(unsigned int irq)
++{
++	send_IPI_self(IO_APIC_VECTOR(irq));
++
++	return 1;
++}
++
++/*
++ * Level and edge triggered IO-APIC interrupts need different handling,
++ * so we use two separate IRQ descriptors. Edge triggered IRQs can be
++ * handled with the level-triggered descriptor, but that one has slightly
++ * more overhead. Level-triggered interrupts cannot be handled with the
++ * edge-triggered handler, without risking IRQ storms and other ugly
++ * races.
++ */
++
++static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
++	.typename = "IO-APIC-edge",
++	.startup 	= startup_edge_ioapic,
++	.shutdown 	= shutdown_edge_ioapic,
++	.enable 	= enable_edge_ioapic,
++	.disable 	= disable_edge_ioapic,
++	.ack 		= ack_edge_ioapic,
++	.end 		= end_edge_ioapic,
++#ifdef CONFIG_SMP
++	.set_affinity = set_ioapic_affinity,
++#endif
++	.retrigger	= ioapic_retrigger,
++};
++
++static struct hw_interrupt_type ioapic_level_type __read_mostly = {
++	.typename = "IO-APIC-level",
++	.startup 	= startup_level_ioapic,
++	.shutdown 	= shutdown_level_ioapic,
++	.enable 	= enable_level_ioapic,
++	.disable 	= disable_level_ioapic,
++	.ack 		= mask_and_ack_level_ioapic,
++	.end 		= end_level_ioapic,
++#ifdef CONFIG_SMP
++	.set_affinity = set_ioapic_affinity,
++#endif
++	.retrigger	= ioapic_retrigger,
++};
++#endif /* !CONFIG_XEN */
++
++static inline void init_IO_APIC_traps(void)
++{
++	int irq;
++
++	/*
++	 * NOTE! The local APIC isn't very good at handling
++	 * multiple interrupts at the same interrupt level.
++	 * As the interrupt level is determined by taking the
++	 * vector number and shifting that right by 4, we
++	 * want to spread these out a bit so that they don't
++	 * all fall in the same interrupt level.
++	 *
++	 * Also, we've got to be careful not to trash gate
++	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
++	 */
++	for (irq = 0; irq < NR_IRQS ; irq++) {
++		int tmp = irq;
++		if (use_pci_vector()) {
++			if (!platform_legacy_irq(tmp))
++				if ((tmp = vector_to_irq(tmp)) == -1)
++					continue;
++		}
++		if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
++			/*
++			 * Hmm.. We don't have an entry for this,
++			 * so default to an old-fashioned 8259
++			 * interrupt if we can..
++			 */
++			if (irq < 16)
++				make_8259A_irq(irq);
++#ifndef CONFIG_XEN
++			else
++				/* Strange. Oh, well.. */
++				irq_desc[irq].chip = &no_irq_type;
++#endif
++		}
++	}
++}
++
++#ifndef CONFIG_XEN
++static void enable_lapic_irq (unsigned int irq)
++{
++	unsigned long v;
++
++	v = apic_read(APIC_LVT0);
++	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
++}
++
++static void disable_lapic_irq (unsigned int irq)
++{
++	unsigned long v;
++
++	v = apic_read(APIC_LVT0);
++	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
++}
++
++static void ack_lapic_irq (unsigned int irq)
++{
++	ack_APIC_irq();
++}
++
++static void end_lapic_irq (unsigned int i) { /* nothing */ }
++
++static struct hw_interrupt_type lapic_irq_type __read_mostly = {
++	.typename = "local-APIC-edge",
++	.startup = NULL, /* startup_irq() not used for IRQ0 */
++	.shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
++	.enable = enable_lapic_irq,
++	.disable = disable_lapic_irq,
++	.ack = ack_lapic_irq,
++	.end = end_lapic_irq,
++};
++
++static void setup_nmi (void)
++{
++	/*
++ 	 * Dirty trick to enable the NMI watchdog ...
++	 * We put the 8259A master into AEOI mode and
++	 * unmask on all local APICs LVT0 as NMI.
++	 *
++	 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
++	 * is from Maciej W. Rozycki - so we do not have to EOI from
++	 * the NMI handler or the timer interrupt.
++	 */ 
++	printk(KERN_INFO "activating NMI Watchdog ...");
++
++	enable_NMI_through_LVT0(NULL);
++
++	printk(" done.\n");
++}
++
++/*
++ * This looks a bit hackish, but it's about the only way of sending
++ * a few INTA cycles to 8259As and any associated glue logic.  ICR does
++ * not support the ExtINT mode, unfortunately.  We need to send these
++ * cycles as some i82489DX-based boards have glue logic that keeps the
++ * 8259A interrupt line asserted until INTA.  --macro
++ */
++static inline void unlock_ExtINT_logic(void)
++{
++	int apic, pin, i;
++	struct IO_APIC_route_entry entry0, entry1;
++	unsigned char save_control, save_freq_select;
++	unsigned long flags;
++
++	pin  = find_isa_irq_pin(8, mp_INT);
++	apic = find_isa_irq_apic(8, mp_INT);
++	if (pin == -1)
++		return;
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	*(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++	*(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++	clear_IO_APIC_pin(apic, pin);
++
++	memset(&entry1, 0, sizeof(entry1));
++
++	entry1.dest_mode = 0;			/* physical delivery */
++	entry1.mask = 0;			/* unmask IRQ now */
++	entry1.dest.physical.physical_dest = hard_smp_processor_id();
++	entry1.delivery_mode = dest_ExtINT;
++	entry1.polarity = entry0.polarity;
++	entry1.trigger = 0;
++	entry1.vector = 0;
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
++	io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	save_control = CMOS_READ(RTC_CONTROL);
++	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
++	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
++		   RTC_FREQ_SELECT);
++	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
++
++	i = 100;
++	while (i-- > 0) {
++		mdelay(10);
++		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
++			i -= 10;
++	}
++
++	CMOS_WRITE(save_control, RTC_CONTROL);
++	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
++	clear_IO_APIC_pin(apic, pin);
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
++	io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++int timer_uses_ioapic_pin_0;
++
++/*
++ * This code may look a bit paranoid, but it's supposed to cooperate with
++ * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
++ * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
++ * fanatically on his truly buggy board.
++ *
++ * FIXME: really need to revamp this for modern platforms only.
++ */
++static inline void check_timer(void)
++{
++	int apic1, pin1, apic2, pin2;
++	int vector;
++
++	/*
++	 * get/set the timer IRQ vector:
++	 */
++	disable_8259A_irq(0);
++	vector = assign_irq_vector(0);
++	set_intr_gate(vector, interrupt[0]);
++
++	/*
++	 * Subtle, code in do_timer_interrupt() expects an AEOI
++	 * mode for the 8259A whenever interrupts are routed
++	 * through I/O APICs.  Also IRQ0 has to be enabled in
++	 * the 8259A which implies the virtual wire has to be
++	 * disabled in the local APIC.
++	 */
++	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
++	init_8259A(1);
++	if (timer_over_8254 > 0)
++		enable_8259A_irq(0);
++
++	pin1  = find_isa_irq_pin(0, mp_INT);
++	apic1 = find_isa_irq_apic(0, mp_INT);
++	pin2  = ioapic_i8259.pin;
++	apic2 = ioapic_i8259.apic;
++
++	if (pin1 == 0)
++		timer_uses_ioapic_pin_0 = 1;
++
++	apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
++		vector, apic1, pin1, apic2, pin2);
++
++	if (pin1 != -1) {
++		/*
++		 * Ok, does IRQ0 through the IOAPIC work?
++		 */
++		unmask_IO_APIC_irq(0);
++		if (!no_timer_check && timer_irq_works()) {
++			nmi_watchdog_default();
++			if (nmi_watchdog == NMI_IO_APIC) {
++				disable_8259A_irq(0);
++				setup_nmi();
++				enable_8259A_irq(0);
++			}
++			if (disable_timer_pin_1 > 0)
++				clear_IO_APIC_pin(0, pin1);
++			return;
++		}
++		clear_IO_APIC_pin(apic1, pin1);
++		apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not "
++				"connected to IO-APIC\n");
++	}
++
++	apic_printk(APIC_VERBOSE,KERN_INFO "...trying to set up timer (IRQ0) "
++				"through the 8259A ... ");
++	if (pin2 != -1) {
++		apic_printk(APIC_VERBOSE,"\n..... (found apic %d pin %d) ...",
++			apic2, pin2);
++		/*
++		 * legacy devices should be connected to IO APIC #0
++		 */
++		setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
++		if (timer_irq_works()) {
++			apic_printk(APIC_VERBOSE," works.\n");
++			nmi_watchdog_default();
++			if (nmi_watchdog == NMI_IO_APIC) {
++				setup_nmi();
++			}
++			return;
++		}
++		/*
++		 * Cleanup, just in case ...
++		 */
++		clear_IO_APIC_pin(apic2, pin2);
++	}
++	apic_printk(APIC_VERBOSE," failed.\n");
++
++	if (nmi_watchdog == NMI_IO_APIC) {
++		printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
++		nmi_watchdog = 0;
++	}
++
++	apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
++
++	disable_8259A_irq(0);
++	irq_desc[0].chip = &lapic_irq_type;
++	apic_write(APIC_LVT0, APIC_DM_FIXED | vector);	/* Fixed mode */
++	enable_8259A_irq(0);
++
++	if (timer_irq_works()) {
++		apic_printk(APIC_VERBOSE," works.\n");
++		return;
++	}
++	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
++	apic_printk(APIC_VERBOSE," failed.\n");
++
++	apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as ExtINT IRQ...");
++
++	init_8259A(0);
++	make_8259A_irq(0);
++	apic_write(APIC_LVT0, APIC_DM_EXTINT);
++
++	unlock_ExtINT_logic();
++
++	if (timer_irq_works()) {
++		apic_printk(APIC_VERBOSE," works.\n");
++		return;
++	}
++	apic_printk(APIC_VERBOSE," failed :(.\n");
++	panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
++}
++#else
++#define check_timer() ((void)0)
++int timer_uses_ioapic_pin_0 = 0;
++#endif /* !CONFIG_XEN */
++
++static int __init notimercheck(char *s)
++{
++	no_timer_check = 1;
++	return 1;
++}
++__setup("no_timer_check", notimercheck);
++
++/*
++ * IRQs that are handled by the PIC in the MPS IOAPIC case.
++ * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
++ *   Linux doesn't really care, as it's not actually used
++ *   for any interrupt handling anyway.
++ */
++#define PIC_IRQS	(1<<2)
++
++void __init setup_IO_APIC(void)
++{
++	enable_IO_APIC();
++
++	if (acpi_ioapic)
++		io_apic_irqs = ~0;	/* all IRQs go through IOAPIC */
++	else
++		io_apic_irqs = ~PIC_IRQS;
++
++	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
++
++	/*
++	 * Set up the IO-APIC IRQ routing table.
++	 */
++	if (!acpi_ioapic)
++		setup_ioapic_ids_from_mpc();
++#ifndef CONFIG_XEN
++	sync_Arb_IDs();
++#endif /* !CONFIG_XEN */
++	setup_IO_APIC_irqs();
++	init_IO_APIC_traps();
++	check_timer();
++	if (!acpi_ioapic)
++		print_IO_APIC();
++}
++
++struct sysfs_ioapic_data {
++	struct sys_device dev;
++	struct IO_APIC_route_entry entry[0];
++};
++static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
++
++static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
++{
++	struct IO_APIC_route_entry *entry;
++	struct sysfs_ioapic_data *data;
++	unsigned long flags;
++	int i;
++
++	data = container_of(dev, struct sysfs_ioapic_data, dev);
++	entry = data->entry;
++	spin_lock_irqsave(&ioapic_lock, flags);
++	for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
++		*(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
++		*(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
++	}
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	return 0;
++}
++
++static int ioapic_resume(struct sys_device *dev)
++{
++	struct IO_APIC_route_entry *entry;
++	struct sysfs_ioapic_data *data;
++	unsigned long flags;
++	union IO_APIC_reg_00 reg_00;
++	int i;
++
++	data = container_of(dev, struct sysfs_ioapic_data, dev);
++	entry = data->entry;
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	reg_00.raw = io_apic_read(dev->id, 0);
++	if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
++		reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
++		io_apic_write(dev->id, 0, reg_00.raw);
++	}
++	for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
++		io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
++		io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
++	}
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	return 0;
++}
++
++static struct sysdev_class ioapic_sysdev_class = {
++	set_kset_name("ioapic"),
++	.suspend = ioapic_suspend,
++	.resume = ioapic_resume,
++};
++
++static int __init ioapic_init_sysfs(void)
++{
++	struct sys_device * dev;
++	int i, size, error = 0;
++
++	error = sysdev_class_register(&ioapic_sysdev_class);
++	if (error)
++		return error;
++
++	for (i = 0; i < nr_ioapics; i++ ) {
++		size = sizeof(struct sys_device) + nr_ioapic_registers[i]
++			* sizeof(struct IO_APIC_route_entry);
++		mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
++		if (!mp_ioapic_data[i]) {
++			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
++			continue;
++		}
++		memset(mp_ioapic_data[i], 0, size);
++		dev = &mp_ioapic_data[i]->dev;
++		dev->id = i;
++		dev->cls = &ioapic_sysdev_class;
++		error = sysdev_register(dev);
++		if (error) {
++			kfree(mp_ioapic_data[i]);
++			mp_ioapic_data[i] = NULL;
++			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
++			continue;
++		}
++	}
++
++	return 0;
++}
++
++device_initcall(ioapic_init_sysfs);
++
++/* --------------------------------------------------------------------------
++                          ACPI-based IOAPIC Configuration
++   -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI
++
++#define IO_APIC_MAX_ID		0xFE
++
++int __init io_apic_get_version (int ioapic)
++{
++	union IO_APIC_reg_01	reg_01;
++	unsigned long flags;
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	reg_01.raw = io_apic_read(ioapic, 1);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	return reg_01.bits.version;
++}
++
++
++int __init io_apic_get_redir_entries (int ioapic)
++{
++	union IO_APIC_reg_01	reg_01;
++	unsigned long flags;
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	reg_01.raw = io_apic_read(ioapic, 1);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	return reg_01.bits.entries;
++}
++
++
++int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
++{
++	struct IO_APIC_route_entry entry;
++	unsigned long flags;
++
++	if (!IO_APIC_IRQ(irq)) {
++		apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
++			ioapic);
++		return -EINVAL;
++	}
++
++	/*
++	 * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
++	 * Note that we mask (disable) IRQs now -- these get enabled when the
++	 * corresponding device driver registers for this IRQ.
++	 */
++
++	memset(&entry,0,sizeof(entry));
++
++	entry.delivery_mode = INT_DELIVERY_MODE;
++	entry.dest_mode = INT_DEST_MODE;
++	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++	entry.trigger = edge_level;
++	entry.polarity = active_high_low;
++	entry.mask = 1;					 /* Disabled (masked) */
++
++	irq = gsi_irq_sharing(irq);
++	/*
++	 * IRQs < 16 are already in the irq_2_pin[] map
++	 */
++	if (irq >= 16)
++		add_pin_to_irq(irq, ioapic, pin);
++
++	entry.vector = assign_irq_vector(irq);
++
++	apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
++		"IRQ %d Mode:%i Active:%i)\n", ioapic, 
++	       mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
++	       edge_level, active_high_low);
++
++	ioapic_register_intr(irq, entry.vector, edge_level);
++
++	if (!ioapic && (irq < 16))
++		disable_8259A_irq(irq);
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
++	io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
++	set_native_irq_info(use_pci_vector() ?  entry.vector : irq, TARGET_CPUS);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	return 0;
++}
++
++#endif /* CONFIG_ACPI */
++
++
++#ifndef CONFIG_XEN
++/*
++ * This function is currently only a helper for the i386 smp boot process, where
++ * we need to reprogram the ioredtbls to cater for the cpus which have come online,
++ * so the mask in all cases should simply be TARGET_CPUS.
++ */
++#ifdef CONFIG_SMP
++void __init setup_ioapic_dest(void)
++{
++	int pin, ioapic, irq, irq_entry;
++
++	if (skip_ioapic_setup == 1)
++		return;
++
++	for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
++		for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
++			irq_entry = find_irq_entry(ioapic, pin, mp_INT);
++			if (irq_entry == -1)
++				continue;
++			irq = pin_2_irq(irq_entry, ioapic, pin);
++			set_ioapic_affinity_irq(irq, TARGET_CPUS);
++		}
++
++	}
++}
++#endif
++#endif /* !CONFIG_XEN */
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/ioport-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/kernel/ioport-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,104 @@
++/*
++ *	linux/arch/x86_64/kernel/ioport.c
++ *
++ * This contains the io-permission bitmap code - written by obz, with changes
++ * by Linus.
++ */
++
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/capability.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/ioport.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/stddef.h>
++#include <linux/slab.h>
++#include <linux/thread_info.h>
++#include <xen/interface/physdev.h>
++
++/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
++static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
++{
++	int i;
++
++	if (new_value)
++		for (i = base; i < base + extent; i++)
++			__set_bit(i, bitmap);
++	else
++		for (i = base; i < base + extent; i++)
++			clear_bit(i, bitmap);
++}
++
++/*
++ * this changes the io permissions bitmap in the current task.
++ */
++asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
++{
++	struct thread_struct * t = &current->thread;
++	unsigned long *bitmap;
++	struct physdev_set_iobitmap set_iobitmap;
++
++	if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
++		return -EINVAL;
++	if (turn_on && !capable(CAP_SYS_RAWIO))
++		return -EPERM;
++
++	/*
++	 * If it's the first ioperm() call in this thread's lifetime, set the
++	 * IO bitmap up. ioperm() is much less timing critical than clone(),
++	 * this is why we delay this operation until now:
++	 */
++	if (!t->io_bitmap_ptr) {
++		bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
++		if (!bitmap)
++			return -ENOMEM;
++
++		memset(bitmap, 0xff, IO_BITMAP_BYTES);
++		t->io_bitmap_ptr = bitmap;
++
++		set_xen_guest_handle(set_iobitmap.bitmap, (char *)bitmap);
++		set_iobitmap.nr_ports = IO_BITMAP_BITS;
++		WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap,
++					      &set_iobitmap));
++	}
++
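++	/* As in the hardware TSS I/O bitmap, a set bit denies access,
++	   so ports are granted by clearing bits. */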
++	set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
++
++	return 0;
++}
++
++/*
++ * sys_iopl has to be used when you want to access the IO ports
++ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
++ * you'd need 8kB of bitmaps/process, which is a bit excessive.
++ *
++ */
++
++asmlinkage long sys_iopl(unsigned int new_iopl, struct pt_regs *regs)
++{
++	unsigned int old_iopl = current->thread.iopl;
++	struct physdev_set_iopl set_iopl;
++
++	if (new_iopl > 3)
++		return -EINVAL;
++
++	/* Need "raw I/O" privileges for direct port access. */
++	if ((new_iopl > old_iopl) && !capable(CAP_SYS_RAWIO))
++		return -EPERM;
++
++	/* Change our version of the privilege levels. */
++	current->thread.iopl = new_iopl;
++
++	/* Force the change at ring 0. */
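++	/* An IOPL of 0 is passed as 1, presumably since the PV kernel
++	   itself does not run in ring 0. */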
++	set_iopl.iopl = (new_iopl == 0) ? 1 : new_iopl;
++	WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl));
++
++	return 0;
++}
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/irq-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/kernel/irq-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,198 @@
++/*
++ *	linux/arch/x86_64/kernel/irq.c
++ *
++ *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
++ *
++ * This file contains the lowest level x86_64-specific interrupt
++ * entry and irq statistics code. All the remaining irq logic is
++ * done by the generic kernel/irq/ code and in the
++ * x86_64-specific irq controller code. (e.g. i8259.c and
++ * io_apic.c.)
++ */
++
++#include <linux/kernel_stat.h>
++#include <linux/interrupt.h>
++#include <linux/seq_file.h>
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <asm/uaccess.h>
++#include <asm/io_apic.h>
++#include <asm/idle.h>
++
++atomic_t irq_err_count;
++#ifdef CONFIG_X86_IO_APIC
++#ifdef APIC_MISMATCH_DEBUG
++atomic_t irq_mis_count;
++#endif
++#endif
++
++#ifdef CONFIG_DEBUG_STACKOVERFLOW
++/*
++ * Probabilistic stack overflow check:
++ *
++ * Only check the stack in process context, because everything else
++ * runs on the big interrupt stacks. Checking reliably is too expensive,
++ * so we just check from interrupts.
++ */
++static inline void stack_overflow_check(struct pt_regs *regs)
++{
++	u64 curbase = (u64) current->thread_info;
++	static unsigned long warned = -60*HZ;
++
++	if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE &&
++	    regs->rsp <  curbase + sizeof(struct thread_info) + 128 &&
++	    time_after(jiffies, warned + 60*HZ)) {
++		printk("do_IRQ: %s near stack overflow (cur:%Lx,rsp:%lx)\n",
++		       current->comm, curbase, regs->rsp);
++		show_stack(NULL,NULL);
++		warned = jiffies;
++	}
++}
++#endif
++
++/*
++ * Generic, controller-independent functions:
++ */
++
++int show_interrupts(struct seq_file *p, void *v)
++{
++	int i = *(loff_t *) v, j;
++	struct irqaction * action;
++	unsigned long flags;
++
++	if (i == 0) {
++		seq_printf(p, "           ");
++		for_each_online_cpu(j)
++			seq_printf(p, "CPU%-8d",j);
++		seq_putc(p, '\n');
++	}
++
++	if (i < NR_IRQS) {
++		spin_lock_irqsave(&irq_desc[i].lock, flags);
++		action = irq_desc[i].action;
++		if (!action) 
++			goto skip;
++		seq_printf(p, "%3d: ",i);
++#ifndef CONFIG_SMP
++		seq_printf(p, "%10u ", kstat_irqs(i));
++#else
++		for_each_online_cpu(j)
++			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
++#endif
++		seq_printf(p, " %14s", irq_desc[i].chip->typename);
++
++		seq_printf(p, "  %s", action->name);
++		for (action=action->next; action; action = action->next)
++			seq_printf(p, ", %s", action->name);
++		seq_putc(p, '\n');
++skip:
++		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
++	} else if (i == NR_IRQS) {
++		seq_printf(p, "NMI: ");
++		for_each_online_cpu(j)
++			seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
++		seq_putc(p, '\n');
++#ifdef CONFIG_X86_LOCAL_APIC
++		seq_printf(p, "LOC: ");
++		for_each_online_cpu(j)
++			seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
++		seq_putc(p, '\n');
++#endif
++		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
++#ifdef CONFIG_X86_IO_APIC
++#ifdef APIC_MISMATCH_DEBUG
++		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
++#endif
++#endif
++	}
++	return 0;
++}
++
++/*
++ * do_IRQ handles all normal device IRQ's (the special
++ * SMP cross-CPU interrupts have their own specific
++ * handlers).
++ */
++asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
++{	
++	/* high bit used in ret_from_ code  */
++	unsigned irq = ~regs->orig_rax;
++
++	if (unlikely(irq >= NR_IRQS)) {
++		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
++					__FUNCTION__, irq);
++		BUG();
++	}
++
++	exit_idle();
++	irq_enter();
++#ifdef CONFIG_DEBUG_STACKOVERFLOW
++	stack_overflow_check(regs);
++#endif
++	__do_IRQ(irq, regs);
++	irq_exit();
++
++	return 1;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++void fixup_irqs(cpumask_t map)
++{
++	unsigned int irq;
++	static int warned;
++
++	for (irq = 0; irq < NR_IRQS; irq++) {
++		cpumask_t mask;
++		if (irq == 2)
++			continue;
++
++		cpus_and(mask, irq_desc[irq].affinity, map);
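++		/* any_online_cpu() returns NR_CPUS when the mask is empty. */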
++		if (any_online_cpu(mask) == NR_CPUS) {
++			/*printk("Breaking affinity for irq %i\n", irq);*/
++			mask = map;
++		}
++		if (irq_desc[irq].chip->set_affinity)
++			irq_desc[irq].chip->set_affinity(irq, mask);
++		else if (irq_desc[irq].action && !(warned++))
++			printk("Cannot set affinity for irq %i\n", irq);
++	}
++
++	/* That doesn't seem sufficient.  Give it 1ms. */
++	local_irq_enable();
++	mdelay(1);
++	local_irq_disable();
++}
++#endif
++
++extern void call_softirq(void);
++
++asmlinkage void do_softirq(void)
++{
++ 	__u32 pending;
++ 	unsigned long flags;
++
++ 	if (in_interrupt())
++ 		return;
++
++ 	local_irq_save(flags);
++ 	pending = local_softirq_pending();
++ 	/* Switch to interrupt stack */
++ 	if (pending) {
++		call_softirq();
++		WARN_ON_ONCE(softirq_count());
++	}
++ 	local_irq_restore(flags);
++}
++EXPORT_SYMBOL(do_softirq);
++
++#ifndef CONFIG_X86_LOCAL_APIC
++/*
++ * 'What should we do if we get a hw irq event on an illegal vector?'
++ * Each architecture has to answer this itself.
++ */
++void ack_bad_irq(unsigned int irq)
++{
++	printk("unexpected IRQ trap at vector %02x\n", irq);
++}
++#endif
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/ldt-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/kernel/ldt-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,284 @@
++/*
++ * linux/arch/x86_64/kernel/ldt.c
++ *
++ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
++ * Copyright (C) 1999 Ingo Molnar <mingo at redhat.com>
++ * Copyright (C) 2002 Andi Kleen
++ * 
++ * This handles calls from both 32bit and 64bit mode.
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/vmalloc.h>
++#include <linux/slab.h>
++
++#include <asm/uaccess.h>
++#include <asm/system.h>
++#include <asm/ldt.h>
++#include <asm/desc.h>
++#include <asm/proto.h>
++#include <asm/pgalloc.h>
++
++#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
++static void flush_ldt(void *null)
++{
++	if (current->active_mm)
++		load_LDT(&current->active_mm->context);
++}
++#endif
++
++static int alloc_ldt(mm_context_t *pc, unsigned mincount, int reload)
++{
++	void *oldldt;
++	void *newldt;
++	unsigned oldsize;
++
++	if (mincount <= (unsigned)pc->size)
++		return 0;
++	oldsize = pc->size;
++	mincount = (mincount+511)&(~511);
++	if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
++		newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
++	else
++		newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
++
++	if (!newldt)
++		return -ENOMEM;
++
++	if (oldsize)
++		memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
++	oldldt = pc->ldt;
++	memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
++	wmb();
++	pc->ldt = newldt;
++	wmb();
++	pc->size = mincount;
++	wmb();
++	if (reload) {
++#ifdef CONFIG_SMP
++		cpumask_t mask;
++
++		preempt_disable();
++#endif
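++		/* Xen validates descriptor tables; the LDT pages must be
++		   read-only unless writable descriptor tables are offered. */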
++		make_pages_readonly(
++			pc->ldt,
++			(pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++			XENFEAT_writable_descriptor_tables);
++		load_LDT(pc);
++#ifdef CONFIG_SMP
++		mask = cpumask_of_cpu(smp_processor_id());
++		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
++			smp_call_function(flush_ldt, NULL, 1, 1);
++		preempt_enable();
++#endif
++	}
++	if (oldsize) {
++		make_pages_writable(
++			oldldt,
++			(oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE,
++			XENFEAT_writable_descriptor_tables);
++		if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
++			vfree(oldldt);
++		else
++			kfree(oldldt);
++	}
++	return 0;
++}
++
++static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
++{
++	int err = alloc_ldt(new, old->size, 0);
++	if (err < 0)
++		return err;
++	memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
++	make_pages_readonly(
++		new->ldt,
++		(new->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++		XENFEAT_writable_descriptor_tables);
++	return 0;
++}
++
++/*
++ * we do not have to muck with descriptors here, that is
++ * done in switch_mm() as needed.
++ */
++int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
++{
++	struct mm_struct * old_mm;
++	int retval = 0;
++
++	memset(&mm->context, 0, sizeof(mm->context));
++	init_MUTEX(&mm->context.sem);
++	old_mm = current->mm;
++	if (old_mm && old_mm->context.size > 0) {
++		down(&old_mm->context.sem);
++		retval = copy_ldt(&mm->context, &old_mm->context);
++		up(&old_mm->context.sem);
++	}
++	if (retval == 0) {
++		spin_lock(&mm_unpinned_lock);
++		list_add(&mm->context.unpinned, &mm_unpinned);
++		spin_unlock(&mm_unpinned_lock);
++	}
++	return retval;
++}
++
++/*
++ * Don't touch the LDT register - we're already in the next thread.
++ */
++void destroy_context(struct mm_struct *mm)
++{
++	if (mm->context.size) {
++		if (mm == current->active_mm)
++			clear_LDT();
++		make_pages_writable(
++			mm->context.ldt,
++			(mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++			XENFEAT_writable_descriptor_tables);
++		if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
++			vfree(mm->context.ldt);
++		else
++			kfree(mm->context.ldt);
++		mm->context.size = 0;
++	}
++	if (!mm->context.pinned) {
++		spin_lock(&mm_unpinned_lock);
++		list_del(&mm->context.unpinned);
++		spin_unlock(&mm_unpinned_lock);
++	}
++}
++
++static int read_ldt(void __user * ptr, unsigned long bytecount)
++{
++	int err;
++	unsigned long size;
++	struct mm_struct * mm = current->mm;
++
++	if (!mm->context.size)
++		return 0;
++	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
++		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
++
++	down(&mm->context.sem);
++	size = mm->context.size*LDT_ENTRY_SIZE;
++	if (size > bytecount)
++		size = bytecount;
++
++	err = 0;
++	if (copy_to_user(ptr, mm->context.ldt, size))
++		err = -EFAULT;
++	up(&mm->context.sem);
++	if (err < 0)
++		goto error_return;
++	if (size != bytecount) {
++		/* zero-fill the rest */
++		if (clear_user(ptr+size, bytecount-size) != 0) {
++			err = -EFAULT;
++			goto error_return;
++		}
++	}
++	return bytecount;
++error_return:
++	return err;
++}
++
++static int read_default_ldt(void __user * ptr, unsigned long bytecount)
++{
++	/* Arbitrary number */ 
++	/* x86-64 default LDT is all zeros */
++	if (bytecount > 128) 
++		bytecount = 128; 	
++	if (clear_user(ptr, bytecount))
++		return -EFAULT;
++	return bytecount; 
++}
++
++static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
++{
++	struct task_struct *me = current;
++	struct mm_struct * mm = me->mm;
++	__u32 entry_1, entry_2, *lp;
++	unsigned long mach_lp;
++	int error;
++	struct user_desc ldt_info;
++
++	error = -EINVAL;
++
++	if (bytecount != sizeof(ldt_info))
++		goto out;
++	error = -EFAULT; 	
++	if (copy_from_user(&ldt_info, ptr, bytecount))
++		goto out;
++
++	error = -EINVAL;
++	if (ldt_info.entry_number >= LDT_ENTRIES)
++		goto out;
++	if (ldt_info.contents == 3) {
++		if (oldmode)
++			goto out;
++		if (ldt_info.seg_not_present == 0)
++			goto out;
++	}
++
++	down(&mm->context.sem);
++	if (ldt_info.entry_number >= (unsigned)mm->context.size) {
++		error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
++		if (error < 0)
++			goto out_unlock;
++	}
++
++	lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
++ 	mach_lp = arbitrary_virt_to_machine(lp);
++
++   	/* Allow LDTs to be cleared by the user. */
++   	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
++		if (oldmode || LDT_empty(&ldt_info)) {
++			entry_1 = 0;
++			entry_2 = 0;
++			goto install;
++		}
++	}
++
++	entry_1 = LDT_entry_a(&ldt_info);
++	entry_2 = LDT_entry_b(&ldt_info);
++	if (oldmode)
++		entry_2 &= ~(1 << 20);
++
++	/* Install the new entry ...  */
++install:
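++	/* The LDT page is read-only under Xen; updates go via hypercall. */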
++	error = HYPERVISOR_update_descriptor(mach_lp, (unsigned long)((entry_1 | (unsigned long) entry_2 << 32)));
++
++out_unlock:
++	up(&mm->context.sem);
++out:
++	return error;
++}
++
++asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
++{
++	int ret = -ENOSYS;
++
++	switch (func) {
++	case 0:
++		ret = read_ldt(ptr, bytecount);
++		break;
++	case 1:
++		ret = write_ldt(ptr, bytecount, 1);
++		break;
++	case 2:
++		ret = read_default_ldt(ptr, bytecount);
++		break;
++	case 0x11:
++		ret = write_ldt(ptr, bytecount, 0);
++		break;
++	}
++	return ret;
++}
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/machine_kexec.c
+--- a/arch/x86_64/kernel/machine_kexec.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/x86_64/kernel/machine_kexec.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -15,6 +15,129 @@
+ #include <asm/mmu_context.h>
+ #include <asm/io.h>
+ 
++#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
++static u64 kexec_pgd[512] PAGE_ALIGNED;
++static u64 kexec_pud0[512] PAGE_ALIGNED;
++static u64 kexec_pmd0[512] PAGE_ALIGNED;
++static u64 kexec_pte0[512] PAGE_ALIGNED;
++static u64 kexec_pud1[512] PAGE_ALIGNED;
++static u64 kexec_pmd1[512] PAGE_ALIGNED;
++static u64 kexec_pte1[512] PAGE_ALIGNED;
++
++#ifdef CONFIG_XEN
++
++/* In the case of Xen, override hypervisor functions to be able to create
++ * a regular identity mapping page table...
++ */
++
++#include <xen/interface/kexec.h>
++#include <xen/interface/memory.h>
++
++#define x__pmd(x) ((pmd_t) { (x) } )
++#define x__pud(x) ((pud_t) { (x) } )
++#define x__pgd(x) ((pgd_t) { (x) } )
++
++#define x_pmd_val(x)   ((x).pmd)
++#define x_pud_val(x)   ((x).pud)
++#define x_pgd_val(x)   ((x).pgd)
++
++static inline void x_set_pmd(pmd_t *dst, pmd_t val)
++{
++	x_pmd_val(*dst) = x_pmd_val(val);
++}
++
++static inline void x_set_pud(pud_t *dst, pud_t val)
++{
++	x_pud_val(*dst) = phys_to_machine(x_pud_val(val));
++}
++
++static inline void x_pud_clear (pud_t *pud)
++{
++	x_pud_val(*pud) = 0;
++}
++
++static inline void x_set_pgd(pgd_t *dst, pgd_t val)
++{
++	x_pgd_val(*dst) = phys_to_machine(x_pgd_val(val));
++}
++
++static inline void x_pgd_clear (pgd_t * pgd)
++{
++	x_pgd_val(*pgd) = 0;
++}
++
++#define X__PAGE_KERNEL_LARGE_EXEC \
++         _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_PSE
++#define X_KERNPG_TABLE _PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY
++
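++/* Translate a pseudo-physical kernel address to a machine address. */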
++#define __ma(x) (pfn_to_mfn(__pa((x)) >> PAGE_SHIFT) << PAGE_SHIFT)
++
++#if PAGES_NR > KEXEC_XEN_NO_PAGES
++#error PAGES_NR is greater than KEXEC_XEN_NO_PAGES - Xen support will break
++#endif
++
++#if PA_CONTROL_PAGE != 0
++#error PA_CONTROL_PAGE is non zero - Xen support will break
++#endif
++
++void machine_kexec_setup_load_arg(xen_kexec_image_t *xki, struct kimage *image)
++{
++	void *control_page;
++	void *table_page;
++
++	memset(xki->page_list, 0, sizeof(xki->page_list));
++
++	control_page = page_address(image->control_code_page) + PAGE_SIZE;
++	memcpy(control_page, relocate_kernel, PAGE_SIZE);
++
++	table_page = page_address(image->control_code_page);
++
++	xki->page_list[PA_CONTROL_PAGE] = __ma(control_page);
++	xki->page_list[PA_TABLE_PAGE] = __ma(table_page);
++
++	xki->page_list[PA_PGD] = __ma(kexec_pgd);
++	xki->page_list[PA_PUD_0] = __ma(kexec_pud0);
++	xki->page_list[PA_PUD_1] = __ma(kexec_pud1);
++	xki->page_list[PA_PMD_0] = __ma(kexec_pmd0);
++	xki->page_list[PA_PMD_1] = __ma(kexec_pmd1);
++	xki->page_list[PA_PTE_0] = __ma(kexec_pte0);
++	xki->page_list[PA_PTE_1] = __ma(kexec_pte1);
++}
++
++int __init machine_kexec_setup_resources(struct resource *hypervisor,
++					 struct resource *phys_cpus,
++					 int nr_phys_cpus)
++{
++	int k;
++
++	/* The per-cpu crash note resources belong to the hypervisor resource */
++	for (k = 0; k < nr_phys_cpus; k++)
++		request_resource(hypervisor, phys_cpus + k);
++
++	return 0;
++}
++
++void machine_kexec_register_resources(struct resource *res) { ; }
++
++#else /* CONFIG_XEN */
++
++#define x__pmd(x) __pmd(x)
++#define x__pud(x) __pud(x)
++#define x__pgd(x) __pgd(x)
++
++#define x_set_pmd(x, y) set_pmd(x, y)
++#define x_set_pud(x, y) set_pud(x, y)
++#define x_set_pgd(x, y) set_pgd(x, y)
++
++#define x_pud_clear(x) pud_clear(x)
++#define x_pgd_clear(x) pgd_clear(x)
++
++#define X__PAGE_KERNEL_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
++#define X_KERNPG_TABLE _KERNPG_TABLE
++
++#endif /* CONFIG_XEN */
++
+ static void init_level2_page(pmd_t *level2p, unsigned long addr)
+ {
+ 	unsigned long end_addr;
+@@ -22,7 +144,7 @@
+ 	addr &= PAGE_MASK;
+ 	end_addr = addr + PUD_SIZE;
+ 	while (addr < end_addr) {
+-		set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
++		x_set_pmd(level2p++, x__pmd(addr | X__PAGE_KERNEL_LARGE_EXEC));
+ 		addr += PMD_SIZE;
+ 	}
+ }
+@@ -47,12 +169,12 @@
+ 		}
+ 		level2p = (pmd_t *)page_address(page);
+ 		init_level2_page(level2p, addr);
+-		set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
++		x_set_pud(level3p++, x__pud(__pa(level2p) | X_KERNPG_TABLE));
+ 		addr += PUD_SIZE;
+ 	}
+ 	/* clear the unused entries */
+ 	while (addr < end_addr) {
+-		pud_clear(level3p++);
++		x_pud_clear(level3p++);
+ 		addr += PUD_SIZE;
+ 	}
+ out:
+@@ -83,12 +205,12 @@
+ 		if (result) {
+ 			goto out;
+ 		}
+-		set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
++		x_set_pgd(level4p++, x__pgd(__pa(level3p) | X_KERNPG_TABLE));
+ 		addr += PGDIR_SIZE;
+ 	}
+ 	/* clear the unused entries */
+ 	while (addr < end_addr) {
+-		pgd_clear(level4p++);
++		x_pgd_clear(level4p++);
+ 		addr += PGDIR_SIZE;
+ 	}
+ out:
+@@ -99,76 +221,28 @@
+ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
+ {
+ 	pgd_t *level4p;
++	unsigned long x_end_pfn = end_pfn;
++
++#ifdef CONFIG_XEN
++	x_end_pfn = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
++#endif
++
+ 	level4p = (pgd_t *)__va(start_pgtable);
+- 	return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT);
++ 	return init_level4_page(image, level4p, 0, x_end_pfn << PAGE_SHIFT);
+ }
+-
+-static void set_idt(void *newidt, u16 limit)
+-{
+-	struct desc_ptr curidt;
+-
+-	/* x86-64 supports unaliged loads & stores */
+-	curidt.size    = limit;
+-	curidt.address = (unsigned long)newidt;
+-
+-	__asm__ __volatile__ (
+-		"lidtq %0\n"
+-		: : "m" (curidt)
+-		);
+-};
+-
+-
+-static void set_gdt(void *newgdt, u16 limit)
+-{
+-	struct desc_ptr curgdt;
+-
+-	/* x86-64 supports unaligned loads & stores */
+-	curgdt.size    = limit;
+-	curgdt.address = (unsigned long)newgdt;
+-
+-	__asm__ __volatile__ (
+-		"lgdtq %0\n"
+-		: : "m" (curgdt)
+-		);
+-};
+-
+-static void load_segments(void)
+-{
+-	__asm__ __volatile__ (
+-		"\tmovl %0,%%ds\n"
+-		"\tmovl %0,%%es\n"
+-		"\tmovl %0,%%ss\n"
+-		"\tmovl %0,%%fs\n"
+-		"\tmovl %0,%%gs\n"
+-		: : "a" (__KERNEL_DS) : "memory"
+-		);
+-}
+-
+-typedef NORET_TYPE void (*relocate_new_kernel_t)(unsigned long indirection_page,
+-					unsigned long control_code_buffer,
+-					unsigned long start_address,
+-					unsigned long pgtable) ATTRIB_NORET;
+-
+-extern const unsigned char relocate_new_kernel[];
+-extern const unsigned long relocate_new_kernel_size;
+ 
+ int machine_kexec_prepare(struct kimage *image)
+ {
+-	unsigned long start_pgtable, control_code_buffer;
++	unsigned long start_pgtable;
+ 	int result;
+ 
+ 	/* Calculate the offsets */
+ 	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
+-	control_code_buffer = start_pgtable + PAGE_SIZE;
+ 
+ 	/* Setup the identity mapped 64bit page table */
+ 	result = init_pgtable(image, start_pgtable);
+ 	if (result)
+ 		return result;
+-
+-	/* Place the code in the reboot code buffer */
+-	memcpy(__va(control_code_buffer), relocate_new_kernel,
+-						relocate_new_kernel_size);
+ 
+ 	return 0;
+ }
+@@ -178,51 +252,43 @@
+ 	return;
+ }
+ 
++#ifndef CONFIG_XEN
+ /*
+  * Do not allocate memory (or fail in any way) in machine_kexec().
+  * We are past the point of no return, committed to rebooting now.
+  */
+ NORET_TYPE void machine_kexec(struct kimage *image)
+ {
+-	unsigned long page_list;
+-	unsigned long control_code_buffer;
+-	unsigned long start_pgtable;
+-	relocate_new_kernel_t rnk;
++	unsigned long page_list[PAGES_NR];
++	void *control_page;
+ 
+ 	/* Interrupts aren't acceptable while we reboot */
+ 	local_irq_disable();
+ 
+-	/* Calculate the offsets */
+-	page_list = image->head;
+-	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
+-	control_code_buffer = start_pgtable + PAGE_SIZE;
++	control_page = page_address(image->control_code_page) + PAGE_SIZE;
++	memcpy(control_page, relocate_kernel, PAGE_SIZE);
+ 
+-	/* Set the low half of the page table to my identity mapped
+-	 * page table for kexec.  Leave the high half pointing at the
+-	 * kernel pages.   Don't bother to flush the global pages
+-	 * as that will happen when I fully switch to my identity mapped
+-	 * page table anyway.
+-	 */
+-	memcpy(__va(read_cr3()), __va(start_pgtable), PAGE_SIZE/2);
+-	__flush_tlb();
++	page_list[PA_CONTROL_PAGE] = __pa(control_page);
++	page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel;
++	page_list[PA_PGD] = __pa(kexec_pgd);
++	page_list[VA_PGD] = (unsigned long)kexec_pgd;
++	page_list[PA_PUD_0] = __pa(kexec_pud0);
++	page_list[VA_PUD_0] = (unsigned long)kexec_pud0;
++	page_list[PA_PMD_0] = __pa(kexec_pmd0);
++	page_list[VA_PMD_0] = (unsigned long)kexec_pmd0;
++	page_list[PA_PTE_0] = __pa(kexec_pte0);
++	page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
++	page_list[PA_PUD_1] = __pa(kexec_pud1);
++	page_list[VA_PUD_1] = (unsigned long)kexec_pud1;
++	page_list[PA_PMD_1] = __pa(kexec_pmd1);
++	page_list[VA_PMD_1] = (unsigned long)kexec_pmd1;
++	page_list[PA_PTE_1] = __pa(kexec_pte1);
++	page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
+ 
++	page_list[PA_TABLE_PAGE] =
++	  (unsigned long)__pa(page_address(image->control_code_page));
+ 
+-	/* The segment registers are funny things, they have both a
+-	 * visible and an invisible part.  Whenever the visible part is
+-	 * set to a specific selector, the invisible part is loaded
+-	 * with from a table in memory.  At no other time is the
+-	 * descriptor table in memory accessed.
+-	 *
+-	 * I take advantage of this here by force loading the
+-	 * segments, before I zap the gdt with an invalid value.
+-	 */
+-	load_segments();
+-	/* The gdt & idt are now invalid.
+-	 * If you want to load them you must set up your own idt & gdt.
+-	 */
+-	set_gdt(phys_to_virt(0),0);
+-	set_idt(phys_to_virt(0),0);
+-	/* now call it */
+-	rnk = (relocate_new_kernel_t) control_code_buffer;
+-	(*rnk)(page_list, control_code_buffer, image->start, start_pgtable);
++	relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
++			image->start);
+ }
++#endif
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/mpparse-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/kernel/mpparse-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1012 @@
++/*
++ *	Intel Multiprocessor Specification 1.1 and 1.4
++ *	compliant MP-table parsing routines.
++ *
++ *	(c) 1995 Alan Cox, Building #3 <alan at redhat.com>
++ *	(c) 1998, 1999, 2000 Ingo Molnar <mingo at redhat.com>
++ *
++ *	Fixes
++ *		Erich Boleyn	:	MP v1.4 and additional changes.
++ *		Alan Cox	:	Added EBDA scanning
++ *		Ingo Molnar	:	various cleanups and rewrites
++ *		Maciej W. Rozycki:	Bits for default MP configurations
++ *		Paul Diefenbaugh:	Added full ACPI support
++ */
++
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/bootmem.h>
++#include <linux/smp_lock.h>
++#include <linux/kernel_stat.h>
++#include <linux/mc146818rtc.h>
++#include <linux/acpi.h>
++#include <linux/module.h>
++
++#include <asm/smp.h>
++#include <asm/mtrr.h>
++#include <asm/mpspec.h>
++#include <asm/pgalloc.h>
++#include <asm/io_apic.h>
++#include <asm/proto.h>
++#include <asm/acpi.h>
++
++/* Have we found an MP table */
++int smp_found_config;
++unsigned int __initdata maxcpus = NR_CPUS;
++
++int acpi_found_madt;
++
++/*
++ * Various Linux-internal data structures created from the
++ * MP-table.
++ */
++unsigned char apic_version [MAX_APICS];
++unsigned char mp_bus_id_to_type [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
++int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
++
++static int mp_current_pci_id = 0;
++/* I/O APIC entries */
++struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
++
++/* # of MP IRQ source entries */
++struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
++
++/* MP IRQ source entries */
++int mp_irq_entries;
++
++int nr_ioapics;
++int pic_mode;
++unsigned long mp_lapic_addr = 0;
++
++
++
++/* Processor that is doing the boot up */
++unsigned int boot_cpu_id = -1U;
++/* Internal processor count */
++unsigned int num_processors __initdata = 0;
++
++unsigned disabled_cpus __initdata;
++
++/* Bitmask of physically existing CPUs */
++physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
++
++/* ACPI MADT entry parsing functions */
++#ifdef CONFIG_ACPI
++extern struct acpi_boot_flags acpi_boot;
++#ifdef CONFIG_X86_LOCAL_APIC
++extern int acpi_parse_lapic (acpi_table_entry_header *header);
++extern int acpi_parse_lapic_addr_ovr (acpi_table_entry_header *header);
++extern int acpi_parse_lapic_nmi (acpi_table_entry_header *header);
++#endif /*CONFIG_X86_LOCAL_APIC*/
++#ifdef CONFIG_X86_IO_APIC
++extern int acpi_parse_ioapic (acpi_table_entry_header *header);
++#endif /*CONFIG_X86_IO_APIC*/
++#endif /*CONFIG_ACPI*/
++
++u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++
++
++/*
++ * Intel MP BIOS table parsing routines:
++ */
++
++/*
++ * Checksum an MP configuration block.
++ */
++
++static int __init mpf_checksum(unsigned char *mp, int len)
++{
++	int sum = 0;
++
++	while (len--)
++		sum += *mp++;
++
++	return sum & 0xFF;
++}
++
++#ifndef CONFIG_XEN
++static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
++{
++	int cpu;
++	unsigned char ver;
++	cpumask_t tmp_map;
++
++	if (!(m->mpc_cpuflag & CPU_ENABLED)) {
++		disabled_cpus++;
++		return;
++	}
++
++	printk(KERN_INFO "Processor #%d %d:%d APIC version %d\n",
++		m->mpc_apicid,
++	       (m->mpc_cpufeature & CPU_FAMILY_MASK)>>8,
++	       (m->mpc_cpufeature & CPU_MODEL_MASK)>>4,
++		m->mpc_apicver);
++
++	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
++		Dprintk("    Bootup CPU\n");
++		boot_cpu_id = m->mpc_apicid;
++	}
++	if (num_processors >= NR_CPUS) {
++		printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
++			" Processor ignored.\n", NR_CPUS);
++		return;
++	}
++
++	num_processors++;
++	cpus_complement(tmp_map, cpu_present_map);
++	cpu = first_cpu(tmp_map);
++
++#if MAX_APICS < 255	
++	if ((int)m->mpc_apicid > MAX_APICS) {
++		printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n",
++			m->mpc_apicid, MAX_APICS);
++		return;
++	}
++#endif
++	ver = m->mpc_apicver;
++
++	physid_set(m->mpc_apicid, phys_cpu_present_map);
++	/*
++	 * Validate version
++	 */
++	if (ver == 0x0) {
++		printk(KERN_ERR "BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid);
++		ver = 0x10;
++	}
++	apic_version[m->mpc_apicid] = ver;
++ 	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
++ 		/*
++ 		 * bios_cpu_apicid is required to have processors listed
++ 		 * in the same order as logical cpu numbers. Hence the first
++ 		 * entry is BSP, and so on.
++ 		 */
++		cpu = 0;
++ 	}
++	bios_cpu_apicid[cpu] = m->mpc_apicid;
++	x86_cpu_to_apicid[cpu] = m->mpc_apicid;
++
++	cpu_set(cpu, cpu_possible_map);
++	cpu_set(cpu, cpu_present_map);
++}
++#else
++static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
++{
++	num_processors++;
++}
++#endif /* CONFIG_XEN */
++
++static void __init MP_bus_info (struct mpc_config_bus *m)
++{
++	char str[7];
++
++	memcpy(str, m->mpc_bustype, 6);
++	str[6] = 0;
++	Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
++
++	if (strncmp(str, "ISA", 3) == 0) {
++		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
++	} else if (strncmp(str, "EISA", 4) == 0) {
++		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
++	} else if (strncmp(str, "PCI", 3) == 0) {
++		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
++		mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
++		mp_current_pci_id++;
++	} else if (strncmp(str, "MCA", 3) == 0) {
++		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
++	} else {
++		printk(KERN_ERR "Unknown bustype %s\n", str);
++	}
++}
++
++static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
++{
++	if (!(m->mpc_flags & MPC_APIC_USABLE))
++		return;
++
++	printk("I/O APIC #%d Version %d at 0x%X.\n",
++		m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
++	if (nr_ioapics >= MAX_IO_APICS) {
++		printk(KERN_ERR "Max # of I/O APICs (%d) exceeded (found %d).\n",
++			MAX_IO_APICS, nr_ioapics);
++		panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
++	}
++	if (!m->mpc_apicaddr) {
++		printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
++			" found in MP table, skipping!\n");
++		return;
++	}
++	mp_ioapics[nr_ioapics] = *m;
++	nr_ioapics++;
++}
++
++static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
++{
++	mp_irqs [mp_irq_entries] = *m;
++	Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
++		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
++			m->mpc_irqtype, m->mpc_irqflag & 3,
++			(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
++			m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
++	if (++mp_irq_entries >= MAX_IRQ_SOURCES)
++		panic("Max # of irq sources exceeded!!\n");
++}
++
++static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
++{
++	Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
++		" IRQ %02x, APIC ID %x, APIC LINT %02x\n",
++			m->mpc_irqtype, m->mpc_irqflag & 3,
++			(m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
++			m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
++	/*
++	 * Well it seems all SMP boards in existence
++	 * use ExtINT/LVT1 == LINT0 and
++	 * NMI/LVT2 == LINT1 - the following check
++	 * will show us if this assumption is false.
++	 * Until then we do not have to add baggage.
++	 */
++	if ((m->mpc_irqtype == mp_ExtINT) &&
++		(m->mpc_destapiclint != 0))
++			BUG();
++	if ((m->mpc_irqtype == mp_NMI) &&
++		(m->mpc_destapiclint != 1))
++			BUG();
++}
++
++/*
++ * Read/parse the MPC
++ */
++
++static int __init smp_read_mpc(struct mp_config_table *mpc)
++{
++	char str[16];
++	int count=sizeof(*mpc);
++	unsigned char *mpt=((unsigned char *)mpc)+count;
++
++	if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
++		printk("SMP mptable: bad signature [%c%c%c%c]!\n",
++			mpc->mpc_signature[0],
++			mpc->mpc_signature[1],
++			mpc->mpc_signature[2],
++			mpc->mpc_signature[3]);
++		return 0;
++	}
++	if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
++		printk("SMP mptable: checksum error!\n");
++		return 0;
++	}
++	if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
++		printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
++			mpc->mpc_spec);
++		return 0;
++	}
++	if (!mpc->mpc_lapic) {
++		printk(KERN_ERR "SMP mptable: null local APIC address!\n");
++		return 0;
++	}
++	memcpy(str,mpc->mpc_oem,8);
++	str[8]=0;
++	printk(KERN_INFO "OEM ID: %s ",str);
++
++	memcpy(str,mpc->mpc_productid,12);
++	str[12]=0;
++	printk("Product ID: %s ",str);
++
++	printk("APIC at: 0x%X\n",mpc->mpc_lapic);
++
++	/* save the local APIC address, it might be non-default */
++	if (!acpi_lapic)
++		mp_lapic_addr = mpc->mpc_lapic;
++
++	/*
++	 *	Now process the configuration blocks.
++	 */
++	while (count < mpc->mpc_length) {
++		switch(*mpt) {
++			case MP_PROCESSOR:
++			{
++				struct mpc_config_processor *m=
++					(struct mpc_config_processor *)mpt;
++				if (!acpi_lapic)
++					MP_processor_info(m);
++				mpt += sizeof(*m);
++				count += sizeof(*m);
++				break;
++			}
++			case MP_BUS:
++			{
++				struct mpc_config_bus *m=
++					(struct mpc_config_bus *)mpt;
++				MP_bus_info(m);
++				mpt += sizeof(*m);
++				count += sizeof(*m);
++				break;
++			}
++			case MP_IOAPIC:
++			{
++				struct mpc_config_ioapic *m=
++					(struct mpc_config_ioapic *)mpt;
++				MP_ioapic_info(m);
++				mpt+=sizeof(*m);
++				count+=sizeof(*m);
++				break;
++			}
++			case MP_INTSRC:
++			{
++				struct mpc_config_intsrc *m=
++					(struct mpc_config_intsrc *)mpt;
++
++				MP_intsrc_info(m);
++				mpt+=sizeof(*m);
++				count+=sizeof(*m);
++				break;
++			}
++			case MP_LINTSRC:
++			{
++				struct mpc_config_lintsrc *m=
++					(struct mpc_config_lintsrc *)mpt;
++				MP_lintsrc_info(m);
++				mpt+=sizeof(*m);
++				count+=sizeof(*m);
++				break;
++			}
++		}
++	}
++	clustered_apic_check();
++	if (!num_processors)
++		printk(KERN_ERR "SMP mptable: no processors registered!\n");
++	return num_processors;
++}
++
++static int __init ELCR_trigger(unsigned int irq)
++{
++	unsigned int port;
++
++	port = 0x4d0 + (irq >> 3);
++	return (inb(port) >> (irq & 7)) & 1;
++}
++
++static void __init construct_default_ioirq_mptable(int mpc_default_type)
++{
++	struct mpc_config_intsrc intsrc;
++	int i;
++	int ELCR_fallback = 0;
++
++	intsrc.mpc_type = MP_INTSRC;
++	intsrc.mpc_irqflag = 0;			/* conforming */
++	intsrc.mpc_srcbus = 0;
++	intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
++
++	intsrc.mpc_irqtype = mp_INT;
++
++	/*
++	 *  If true, we have an ISA/PCI system with no IRQ entries
++	 *  in the MP table. To prevent the PCI interrupts from being set up
++	 *  incorrectly, we try to use the ELCR. The sanity check to see if
++	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
++	 *  never be level sensitive, so we simply see if the ELCR agrees.
++	 *  If it does, we assume it's valid.
++	 */
++	if (mpc_default_type == 5) {
++		printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
++
++		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
++			printk(KERN_ERR "ELCR contains invalid data... not using ELCR\n");
++		else {
++			printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
++			ELCR_fallback = 1;
++		}
++	}
++
++	for (i = 0; i < 16; i++) {
++		switch (mpc_default_type) {
++		case 2:
++			if (i == 0 || i == 13)
++				continue;	/* IRQ0 & IRQ13 not connected */
++			/* fall through */
++		default:
++			if (i == 2)
++				continue;	/* IRQ2 is never connected */
++		}
++
++		if (ELCR_fallback) {
++			/*
++			 *  If the ELCR indicates a level-sensitive interrupt, we
++			 *  copy that information over to the MP table in the
++			 *  irqflag field (level sensitive, active high polarity).
++			 */
++			if (ELCR_trigger(i))
++				intsrc.mpc_irqflag = 13;
++			else
++				intsrc.mpc_irqflag = 0;
++		}
++
++		intsrc.mpc_srcbusirq = i;
++		intsrc.mpc_dstirq = i ? i : 2;		/* IRQ0 to INTIN2 */
++		MP_intsrc_info(&intsrc);
++	}
++
++	intsrc.mpc_irqtype = mp_ExtINT;
++	intsrc.mpc_srcbusirq = 0;
++	intsrc.mpc_dstirq = 0;				/* 8259A to INTIN0 */
++	MP_intsrc_info(&intsrc);
++}
++
++static inline void __init construct_default_ISA_mptable(int mpc_default_type)
++{
++	struct mpc_config_processor processor;
++	struct mpc_config_bus bus;
++	struct mpc_config_ioapic ioapic;
++	struct mpc_config_lintsrc lintsrc;
++	int linttypes[2] = { mp_ExtINT, mp_NMI };
++	int i;
++
++	/*
++	 * local APIC has default address
++	 */
++	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
++
++	/*
++	 * 2 CPUs, numbered 0 & 1.
++	 */
++	processor.mpc_type = MP_PROCESSOR;
++	/* Either an integrated APIC or a discrete 82489DX. */
++	processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
++	processor.mpc_cpuflag = CPU_ENABLED;
++	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
++				   (boot_cpu_data.x86_model << 4) |
++				   boot_cpu_data.x86_mask;
++	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
++	processor.mpc_reserved[0] = 0;
++	processor.mpc_reserved[1] = 0;
++	for (i = 0; i < 2; i++) {
++		processor.mpc_apicid = i;
++		MP_processor_info(&processor);
++	}
++
++	bus.mpc_type = MP_BUS;
++	bus.mpc_busid = 0;
++	switch (mpc_default_type) {
++		default:
++			printk(KERN_ERR "???\nUnknown standard configuration %d\n",
++				mpc_default_type);
++			/* fall through */
++		case 1:
++		case 5:
++			memcpy(bus.mpc_bustype, "ISA   ", 6);
++			break;
++		case 2:
++		case 6:
++		case 3:
++			memcpy(bus.mpc_bustype, "EISA  ", 6);
++			break;
++		case 4:
++		case 7:
++			memcpy(bus.mpc_bustype, "MCA   ", 6);
++	}
++	MP_bus_info(&bus);
++	if (mpc_default_type > 4) {
++		bus.mpc_busid = 1;
++		memcpy(bus.mpc_bustype, "PCI   ", 6);
++		MP_bus_info(&bus);
++	}
++
++	ioapic.mpc_type = MP_IOAPIC;
++	ioapic.mpc_apicid = 2;
++	ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
++	ioapic.mpc_flags = MPC_APIC_USABLE;
++	ioapic.mpc_apicaddr = 0xFEC00000;
++	MP_ioapic_info(&ioapic);
++
++	/*
++	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
++	 */
++	construct_default_ioirq_mptable(mpc_default_type);
++
++	lintsrc.mpc_type = MP_LINTSRC;
++	lintsrc.mpc_irqflag = 0;		/* conforming */
++	lintsrc.mpc_srcbusid = 0;
++	lintsrc.mpc_srcbusirq = 0;
++	lintsrc.mpc_destapic = MP_APIC_ALL;
++	for (i = 0; i < 2; i++) {
++		lintsrc.mpc_irqtype = linttypes[i];
++		lintsrc.mpc_destapiclint = i;
++		MP_lintsrc_info(&lintsrc);
++	}
++}
++
++static struct intel_mp_floating *mpf_found;
++
++/*
++ * Scan the memory blocks for an SMP configuration block.
++ */
++void __init get_smp_config (void)
++{
++	struct intel_mp_floating *mpf = mpf_found;
++
++	/*
++ 	 * ACPI supports both logical (e.g. Hyper-Threading) and physical 
++ 	 * processors, whereas MPS only supports physical.
++ 	 */
++ 	if (acpi_lapic && acpi_ioapic) {
++ 		printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
++ 		return;
++	}
++ 	else if (acpi_lapic)
++ 		printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
++
++	printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
++	if (mpf->mpf_feature2 & (1<<7)) {
++		printk(KERN_INFO "    IMCR and PIC compatibility mode.\n");
++		pic_mode = 1;
++	} else {
++		printk(KERN_INFO "    Virtual Wire compatibility mode.\n");
++		pic_mode = 0;
++	}
++
++	/*
++	 * Now see if we need to read further.
++	 */
++	if (mpf->mpf_feature1 != 0) {
++
++		printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
++		construct_default_ISA_mptable(mpf->mpf_feature1);
++
++	} else if (mpf->mpf_physptr) {
++
++		/*
++		 * Read the physical hardware table.  Anything here will
++		 * override the defaults.
++		 */
++ 		if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) {
++			smp_found_config = 0;
++			printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
++			printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
++			return;
++		}
++		/*
++		 * If there are no explicit MP IRQ entries, then we are
++		 * broken.  We set up most of the low 16 IO-APIC pins to
++		 * ISA defaults and hope it will work.
++		 */
++		if (!mp_irq_entries) {
++			struct mpc_config_bus bus;
++
++			printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
++
++			bus.mpc_type = MP_BUS;
++			bus.mpc_busid = 0;
++			memcpy(bus.mpc_bustype, "ISA   ", 6);
++			MP_bus_info(&bus);
++
++			construct_default_ioirq_mptable(0);
++		}
++
++	} else
++		BUG();
++
++	printk(KERN_INFO "Processors: %d\n", num_processors);
++	/*
++	 * Only use the first configuration found.
++	 */
++}
++
++static int __init smp_scan_config (unsigned long base, unsigned long length)
++{
++	extern void __bad_mpf_size(void); 
++	unsigned int *bp = isa_bus_to_virt(base);
++	struct intel_mp_floating *mpf;
++
++	Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
++	if (sizeof(*mpf) != 16)
++		__bad_mpf_size();
++
++	while (length > 0) {
++		mpf = (struct intel_mp_floating *)bp;
++		if ((*bp == SMP_MAGIC_IDENT) &&
++			(mpf->mpf_length == 1) &&
++			!mpf_checksum((unsigned char *)bp, 16) &&
++			((mpf->mpf_specification == 1)
++				|| (mpf->mpf_specification == 4)) ) {
++
++			smp_found_config = 1;
++			mpf_found = mpf;
++			return 1;
++		}
++		bp += 4;
++		length -= 16;
++	}
++	return 0;
++}
++
++void __init find_intel_smp (void)
++{
++	unsigned int address;
++
++	/*
++	 * FIXME: Linux assumes you have 640K of base RAM;
++	 * this continues the error...
++	 *
++	 * 1) Scan the bottom 1K for a signature
++	 * 2) Scan the top 1K of base RAM
++	 * 3) Scan the 64K of bios
++	 */
++	if (smp_scan_config(0x0,0x400) ||
++		smp_scan_config(639*0x400,0x400) ||
++			smp_scan_config(0xF0000,0x10000))
++		return;
++	/*
++	 * If it is an SMP machine we should know now, unless the
++	 * configuration is in an EISA/MCA bus machine with an
++	 * extended bios data area.
++	 *
++	 * there is a real-mode segmented pointer pointing to the
++	 * 4K EBDA area at 0x40E, calculate and scan it here.
++	 *
++	 * NOTE! There are Linux loaders that will corrupt the EBDA
++	 * area, and as such this kind of SMP config may be less
++	 * trustworthy, simply because the SMP table may have been
++	 * stomped on during early boot. These loaders are buggy and
++	 * should be fixed.
++	 */
++
++	address = *(unsigned short *)phys_to_virt(0x40E);
++	address <<= 4;
++	if (smp_scan_config(address, 0x1000))
++		return;
++
++	/* If we have come this far, we did not find an MP table  */
++	printk(KERN_INFO "No mptable found.\n");
++}
++
++/*
++ * - Intel MP Configuration Table
++ */
++void __init find_smp_config (void)
++{
++#ifdef CONFIG_X86_LOCAL_APIC
++	find_intel_smp();
++#endif
++}
++
++
++/* --------------------------------------------------------------------------
++                            ACPI-based MP Configuration
++   -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI
++
++void __init mp_register_lapic_address (
++	u64			address)
++{
++#ifndef CONFIG_XEN
++	mp_lapic_addr = (unsigned long) address;
++
++	set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
++
++	if (boot_cpu_id == -1U)
++		boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
++
++	Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
++#endif
++}
++
++
++void __cpuinit mp_register_lapic (
++	u8			id, 
++	u8			enabled)
++{
++	struct mpc_config_processor processor;
++	int			boot_cpu = 0;
++	
++	if (id >= MAX_APICS) {
++		printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
++			id, MAX_APICS);
++		return;
++	}
++
++	if (id == boot_cpu_physical_apicid)
++		boot_cpu = 1;
++
++#ifndef CONFIG_XEN
++	processor.mpc_type = MP_PROCESSOR;
++	processor.mpc_apicid = id;
++	processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
++	processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
++	processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
++	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | 
++		(boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
++	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
++	processor.mpc_reserved[0] = 0;
++	processor.mpc_reserved[1] = 0;
++#endif
++
++	MP_processor_info(&processor);
++}
++
++#ifdef CONFIG_X86_IO_APIC
++
++#define MP_ISA_BUS		0
++#define MP_MAX_IOAPIC_PIN	127
++
++static struct mp_ioapic_routing {
++	int			apic_id;
++	int			gsi_start;
++	int			gsi_end;
++	u32			pin_programmed[4];
++} mp_ioapic_routing[MAX_IO_APICS];
++
++
++static int mp_find_ioapic (
++	int			gsi)
++{
++	int			i = 0;
++
++	/* Find the IOAPIC that manages this GSI. */
++	for (i = 0; i < nr_ioapics; i++) {
++		if ((gsi >= mp_ioapic_routing[i].gsi_start)
++			&& (gsi <= mp_ioapic_routing[i].gsi_end))
++			return i;
++	}
++
++	printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
++
++	return -1;
++}
++	
++
++void __init mp_register_ioapic (
++	u8			id, 
++	u32			address,
++	u32			gsi_base)
++{
++	int			idx = 0;
++
++	if (nr_ioapics >= MAX_IO_APICS) {
++		printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
++			"(found %d)\n", MAX_IO_APICS, nr_ioapics);
++		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
++	}
++	if (!address) {
++		printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
++			" found in MADT table, skipping!\n");
++		return;
++	}
++
++	idx = nr_ioapics++;
++
++	mp_ioapics[idx].mpc_type = MP_IOAPIC;
++	mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
++	mp_ioapics[idx].mpc_apicaddr = address;
++
++#ifndef CONFIG_XEN
++	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
++#endif
++	mp_ioapics[idx].mpc_apicid = id;
++	mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
++	
++	/* 
++	 * Build basic IRQ lookup table to facilitate gsi->io_apic lookups
++	 * and to prevent reprogramming of IOAPIC pins (PCI IRQs).
++	 */
++	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
++	mp_ioapic_routing[idx].gsi_start = gsi_base;
++	mp_ioapic_routing[idx].gsi_end = gsi_base + 
++		io_apic_get_redir_entries(idx);
++
++	printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
++		"GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, 
++		mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
++		mp_ioapic_routing[idx].gsi_start,
++		mp_ioapic_routing[idx].gsi_end);
++
++	return;
++}
++
++
++void __init mp_override_legacy_irq (
++	u8			bus_irq,
++	u8			polarity, 
++	u8			trigger, 
++	u32			gsi)
++{
++	struct mpc_config_intsrc intsrc;
++	int			ioapic = -1;
++	int			pin = -1;
++
++	/* 
++	 * Convert 'gsi' to 'ioapic.pin'.
++	 */
++	ioapic = mp_find_ioapic(gsi);
++	if (ioapic < 0)
++		return;
++	pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
++
++	/*
++	 * TBD: This check is for faulty timer entries, where the override
++	 *      erroneously sets the trigger to level, resulting in a HUGE 
++	 *      increase of timer interrupts!
++	 */
++	if ((bus_irq == 0) && (trigger == 3))
++		trigger = 1;
++
++	intsrc.mpc_type = MP_INTSRC;
++	intsrc.mpc_irqtype = mp_INT;
++	intsrc.mpc_irqflag = (trigger << 2) | polarity;
++	intsrc.mpc_srcbus = MP_ISA_BUS;
++	intsrc.mpc_srcbusirq = bus_irq;				       /* IRQ */
++	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;	   /* APIC ID */
++	intsrc.mpc_dstirq = pin;				    /* INTIN# */
++
++	Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n", 
++		intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, 
++		(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, 
++		intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
++
++	mp_irqs[mp_irq_entries] = intsrc;
++	if (++mp_irq_entries == MAX_IRQ_SOURCES)
++		panic("Max # of irq sources exceeded!\n");
++
++	return;
++}
++
++
++void __init mp_config_acpi_legacy_irqs (void)
++{
++	struct mpc_config_intsrc intsrc;
++	int			i = 0;
++	int			ioapic = -1;
++
++	/* 
++	 * Fabricate the legacy ISA bus (bus #31).
++	 */
++	mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
++	Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
++
++	/* 
++	 * Locate the IOAPIC that manages the ISA IRQs (0-15). 
++	 */
++	ioapic = mp_find_ioapic(0);
++	if (ioapic < 0)
++		return;
++
++	intsrc.mpc_type = MP_INTSRC;
++	intsrc.mpc_irqflag = 0;					/* Conforming */
++	intsrc.mpc_srcbus = MP_ISA_BUS;
++	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
++
++	/* 
++	 * Use the default configuration for the IRQs 0-15.  Unless
++	 * overridden by (MADT) interrupt source override entries.
++	 */
++	for (i = 0; i < 16; i++) {
++		int idx;
++
++		for (idx = 0; idx < mp_irq_entries; idx++) {
++			struct mpc_config_intsrc *irq = mp_irqs + idx;
++
++			/* Do we already have a mapping for this ISA IRQ? */
++			if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
++				break;
++
++			/* Do we already have a mapping for this IOAPIC pin? */
++			if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
++				(irq->mpc_dstirq == i))
++				break;
++		}
++
++		if (idx != mp_irq_entries) {
++			printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
++			continue;			/* IRQ already used */
++		}
++
++		intsrc.mpc_irqtype = mp_INT;
++		intsrc.mpc_srcbusirq = i;		   /* Identity mapped */
++		intsrc.mpc_dstirq = i;
++
++		Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
++			"%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, 
++			(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, 
++			intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, 
++			intsrc.mpc_dstirq);
++
++		mp_irqs[mp_irq_entries] = intsrc;
++		if (++mp_irq_entries == MAX_IRQ_SOURCES)
++			panic("Max # of irq sources exceeded!\n");
++	}
++
++	return;
++}
++
++#define MAX_GSI_NUM	4096
++
++int mp_register_gsi(u32 gsi, int triggering, int polarity)
++{
++	int			ioapic = -1;
++	int			ioapic_pin = 0;
++	int			idx, bit = 0;
++	static int		pci_irq = 16;
++	/*
++	 * Mapping between Global System Interrupts, which
++	 * represent all possible interrupts, and the IRQs
++	 * assigned to actual devices.
++	 */
++	static int		gsi_to_irq[MAX_GSI_NUM];
++
++	if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
++		return gsi;
++
++	/* Don't set up the ACPI SCI because it's already set up */
++	if (acpi_fadt.sci_int == gsi)
++		return gsi;
++
++	ioapic = mp_find_ioapic(gsi);
++	if (ioapic < 0) {
++		printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
++		return gsi;
++	}
++
++	ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
++
++	/* 
++	 * Avoid pin reprogramming.  PRTs typically include entries  
++	 * with redundant pin->gsi mappings (but unique PCI devices);
++	 * we only program the IOAPIC on the first.
++	 */
++	bit = ioapic_pin % 32;
++	idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
++	if (idx > 3) {
++		printk(KERN_ERR "Invalid reference to IOAPIC pin "
++			"%d-%d\n", mp_ioapic_routing[ioapic].apic_id, 
++			ioapic_pin);
++		return gsi;
++	}
++	if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
++		Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
++			mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
++		return gsi_to_irq[gsi];
++	}
++
++	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
++
++	if (triggering == ACPI_LEVEL_SENSITIVE) {
++		/*
++		 * For PCI devices assign IRQs in order, avoiding gaps
++		 * due to unused I/O APIC pins.
++		 */
++		int irq = gsi;
++		if (gsi < MAX_GSI_NUM) {
++			/*
++			 * Retain the VIA chipset work-around (gsi > 15), but
++			 * avoid a problem where the 8254 timer (IRQ0) is set up
++			 * via an override (so it's not on pin 0 of the ioapic),
++			 * and at the same time, the pin 0 interrupt is a PCI
++			 * type.  The gsi > 15 test could cause these two pins
++			 * to be shared as IRQ0, and they are not shareable.
++			 * So test for this condition, and if necessary, avoid
++			 * the pin collision.
++			 */
++			if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0))
++				gsi = pci_irq++;
++			/*
++			 * Don't assign IRQ used by ACPI SCI
++			 */
++			if (gsi == acpi_fadt.sci_int)
++				gsi = pci_irq++;
++			gsi_to_irq[irq] = gsi;
++		} else {
++			printk(KERN_ERR "GSI %u is too high\n", gsi);
++			return gsi;
++		}
++	}
++
++	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
++		triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
++		polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
++	return gsi;
++}
++
++#endif /*CONFIG_X86_IO_APIC*/
++#endif /*CONFIG_ACPI*/
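
The floating-pointer scan in smp_scan_config() above accepts a 16-byte block only if it carries the "_MP_" signature (SMP_MAGIC_IDENT), a length of one 16-byte paragraph, an MP spec revision of 1 or 4, and a zero checksum residue. A self-contained user-space sketch of that checksum rule (the checksum byte offset used here is illustrative, not taken from the struct layout):

    #include <stdio.h>
    #include <string.h>

    /* Same arithmetic as mpf_checksum() above: all bytes of the block,
     * including the checksum byte itself, must sum to 0 mod 256. */
    static int mpf_checksum(const unsigned char *mp, int len)
    {
            int sum = 0;

            while (len--)
                    sum += *mp++;
            return sum & 0xFF;
    }

    int main(void)
    {
            unsigned char mpf[16];
            int i, sum = 0;

            memset(mpf, 0, sizeof(mpf));
            memcpy(mpf, "_MP_", 4);         /* SMP_MAGIC_IDENT */

            for (i = 0; i < 16; i++)
                    sum += mpf[i];
            mpf[10] = (unsigned char)-sum;  /* fix up so the sum is 0 */

            printf("checksum residue: 0x%02x (0 == valid)\n",
                   mpf_checksum(mpf, 16));
            return 0;
    }
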
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/pci-swiotlb-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/kernel/pci-swiotlb-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,55 @@
++/* Glue code to lib/swiotlb.c */
++
++#include <linux/pci.h>
++#include <linux/cache.h>
++#include <linux/module.h>
++#include <asm/dma-mapping.h>
++#include <asm/proto.h>
++#include <asm/swiotlb.h>
++#include <asm/dma.h>
++
++#if 0
++int swiotlb __read_mostly;
++EXPORT_SYMBOL(swiotlb);
++#endif
++
++struct dma_mapping_ops swiotlb_dma_ops = {
++#if 0
++	.mapping_error = swiotlb_dma_mapping_error,
++	.alloc_coherent = swiotlb_alloc_coherent,
++	.free_coherent = swiotlb_free_coherent,
++	.map_single = swiotlb_map_single,
++	.unmap_single = swiotlb_unmap_single,
++	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
++	.sync_single_for_device = swiotlb_sync_single_for_device,
++	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
++	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
++	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
++	.sync_sg_for_device = swiotlb_sync_sg_for_device,
++	.map_sg = swiotlb_map_sg,
++	.unmap_sg = swiotlb_unmap_sg,
++	.dma_supported = NULL,
++#endif
++};
++
++void pci_swiotlb_init(void)
++{
++#if 0
++	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
++	if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN)
++	       swiotlb = 1;
++	if (swiotlb_force)
++		swiotlb = 1;
++	if (swiotlb) {
++		printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
++		swiotlb_init();
++		dma_ops = &swiotlb_dma_ops;
++	}
++#else
++	swiotlb_init();
++	if (swiotlb) {
++		printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
++		dma_ops = &swiotlb_dma_ops;
++	}
++#endif
++}
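
Under Xen the native swiotlb detection logic is compiled out: pci_swiotlb_init() unconditionally calls swiotlb_init() and, if the swiotlb came up, points dma_ops at swiotlb_dma_ops (whose method table is stubbed out behind #if 0 in this revision), so DMA mapping calls dispatch through the bounce-buffer path. A minimal user-space sketch of that init-time ops-table pattern (names are illustrative, not the kernel's):

    #include <stdio.h>

    struct demo_dma_ops {
            void (*map_single)(const char *dev);
    };

    static void swiotlb_map_demo(const char *dev)
    {
            printf("%s: mapped via bounce buffer (SWIOTLB path)\n", dev);
    }

    static struct demo_dma_ops swiotlb_demo_ops = {
            .map_single = swiotlb_map_demo,
    };

    static struct demo_dma_ops *demo_dma_ops;   /* selected once at init */

    int main(void)
    {
            demo_dma_ops = &swiotlb_demo_ops;   /* as in pci_swiotlb_init() */
            demo_dma_ops->map_single("demo-dev");
            return 0;
    }
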
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/process-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/kernel/process-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,848 @@
++/*
++ *  linux/arch/x86-64/kernel/process.c
++ *
++ *  Copyright (C) 1995  Linus Torvalds
++ *
++ *  Pentium III FXSR, SSE support
++ *	Gareth Hughes <gareth at valinux.com>, May 2000
++ * 
++ *  X86-64 port
++ *	Andi Kleen.
++ *
++ *	CPU hotplug support - ashok.raj at intel.com
++ * 
++ *  Jun Nakajima <jun.nakajima at intel.com> 
++ *     Modified for Xen
++ */
++
++/*
++ * This file handles the architecture-dependent parts of process handling..
++ */
++
++#include <stdarg.h>
++
++#include <linux/cpu.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/elfcore.h>
++#include <linux/smp.h>
++#include <linux/slab.h>
++#include <linux/user.h>
++#include <linux/module.h>
++#include <linux/a.out.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/ptrace.h>
++#include <linux/utsname.h>
++#include <linux/random.h>
++#include <linux/notifier.h>
++#include <linux/kprobes.h>
++
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/system.h>
++#include <asm/io.h>
++#include <asm/processor.h>
++#include <asm/i387.h>
++#include <asm/mmu_context.h>
++#include <asm/pda.h>
++#include <asm/prctl.h>
++#include <asm/kdebug.h>
++#include <xen/interface/platform.h>
++#include <xen/interface/physdev.h>
++#include <xen/interface/vcpu.h>
++#include <asm/desc.h>
++#include <asm/proto.h>
++#include <asm/hardirq.h>
++#include <asm/ia32.h>
++#include <asm/idle.h>
++
++#include <xen/cpu_hotplug.h>
++
++asmlinkage extern void ret_from_fork(void);
++
++unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
++
++unsigned long boot_option_idle_override = 0;
++EXPORT_SYMBOL(boot_option_idle_override);
++
++/*
++ * Power management idle function, if any.
++ */
++void (*pm_idle)(void);
++EXPORT_SYMBOL(pm_idle);
++static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
++
++static ATOMIC_NOTIFIER_HEAD(idle_notifier);
++
++void idle_notifier_register(struct notifier_block *n)
++{
++	atomic_notifier_chain_register(&idle_notifier, n);
++}
++EXPORT_SYMBOL_GPL(idle_notifier_register);
++
++void idle_notifier_unregister(struct notifier_block *n)
++{
++	atomic_notifier_chain_unregister(&idle_notifier, n);
++}
++EXPORT_SYMBOL(idle_notifier_unregister);
++
++enum idle_state { CPU_IDLE, CPU_NOT_IDLE };
++static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE;
++
++void enter_idle(void)
++{
++	__get_cpu_var(idle_state) = CPU_IDLE;
++	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
++}
++
++static void __exit_idle(void)
++{
++	__get_cpu_var(idle_state) = CPU_NOT_IDLE;
++	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
++}
++
++/* Called from interrupts to signify idle end */
++void exit_idle(void)
++{
++	if (current->pid | read_pda(irqcount))
++		return;
++	__exit_idle();
++}
++
++/*
++ * On SMP it's slightly faster (but much more power-consuming!)
++ * to poll the ->need_resched flag instead of waiting for the
++ * cross-CPU IPI to arrive. Use this option with caution.
++ */
++static void poll_idle (void)
++{
++	local_irq_enable();
++
++	asm volatile(
++		"2:"
++		"testl %0,%1;"
++		"rep; nop;"
++		"je 2b;"
++		: :
++		"i" (_TIF_NEED_RESCHED),
++		"m" (current_thread_info()->flags));
++}
++
++static void xen_idle(void)
++{
++	local_irq_disable();
++
++	if (need_resched())
++		local_irq_enable();
++	else {
++		current_thread_info()->status &= ~TS_POLLING;
++		smp_mb__after_clear_bit();
++		safe_halt();
++		current_thread_info()->status |= TS_POLLING;
++	}
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static inline void play_dead(void)
++{
++	idle_task_exit();
++	local_irq_disable();
++	cpu_clear(smp_processor_id(), cpu_initialized);
++	preempt_enable_no_resched();
++	VOID(HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL));
++	cpu_bringup();
++}
++#else
++static inline void play_dead(void)
++{
++	BUG();
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++/*
++ * The idle thread. There's no useful work to be
++ * done, so just try to conserve power and have a
++ * low exit latency (i.e. sit in a loop waiting for
++ * somebody to say that they'd like to reschedule)
++ */
++void cpu_idle (void)
++{
++	current_thread_info()->status |= TS_POLLING;
++	/* endless idle loop with no priority at all */
++	while (1) {
++		while (!need_resched()) {
++			void (*idle)(void);
++
++			if (__get_cpu_var(cpu_idle_state))
++				__get_cpu_var(cpu_idle_state) = 0;
++			rmb();
++			idle = xen_idle; /* no alternatives */
++			if (cpu_is_offline(smp_processor_id()))
++				play_dead();
++			enter_idle();
++			idle();
++			__exit_idle();
++		}
++
++		preempt_enable_no_resched();
++		schedule();
++		preempt_disable();
++	}
++}
++
++void cpu_idle_wait(void)
++{
++	unsigned int cpu, this_cpu = get_cpu();
++	cpumask_t map;
++
++	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
++	put_cpu();
++
++ 	cpus_clear(map);
++	for_each_online_cpu(cpu) {
++		per_cpu(cpu_idle_state, cpu) = 1;
++		cpu_set(cpu, map);
++	}
++
++	__get_cpu_var(cpu_idle_state) = 0;
++
++	wmb();
++	do {
++		ssleep(1);
++		for_each_online_cpu(cpu) {
++			if (cpu_isset(cpu, map) &&
++					!per_cpu(cpu_idle_state, cpu))
++				cpu_clear(cpu, map);
++		}
++		cpus_and(map, map, cpu_online_map);
++	} while (!cpus_empty(map));
++}
++EXPORT_SYMBOL_GPL(cpu_idle_wait);
++
++void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) 
++{
++}
++
++static int __init idle_setup (char *str)
++{
++	if (!strncmp(str, "poll", 4)) {
++		printk("using polling idle threads.\n");
++		pm_idle = poll_idle;
++	}
++
++	boot_option_idle_override = 1;
++	return 1;
++}
++
++__setup("idle=", idle_setup);
++
++/* Prints also some state that isn't saved in the pt_regs */ 
++void __show_regs(struct pt_regs * regs)
++{
++	unsigned long fs, gs, shadowgs;
++	unsigned int fsindex,gsindex;
++	unsigned int ds,cs,es; 
++
++	printk("\n");
++	print_modules();
++	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
++		current->pid, current->comm, print_tainted(),
++		system_utsname.release,
++		(int)strcspn(system_utsname.version, " "),
++		system_utsname.version);
++	printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
++	printk_address(regs->rip); 
++	printk("RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss, regs->rsp,
++		regs->eflags);
++	printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
++	       regs->rax, regs->rbx, regs->rcx);
++	printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
++	       regs->rdx, regs->rsi, regs->rdi); 
++	printk("RBP: %016lx R08: %016lx R09: %016lx\n",
++	       regs->rbp, regs->r8, regs->r9); 
++	printk("R10: %016lx R11: %016lx R12: %016lx\n",
++	       regs->r10, regs->r11, regs->r12); 
++	printk("R13: %016lx R14: %016lx R15: %016lx\n",
++	       regs->r13, regs->r14, regs->r15); 
++
++	asm("mov %%ds,%0" : "=r" (ds)); 
++	asm("mov %%cs,%0" : "=r" (cs)); 
++	asm("mov %%es,%0" : "=r" (es)); 
++	asm("mov %%fs,%0" : "=r" (fsindex));
++	asm("mov %%gs,%0" : "=r" (gsindex));
++
++	rdmsrl(MSR_FS_BASE, fs);
++	rdmsrl(MSR_GS_BASE, gs); 
++	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); 
++
++	printk("FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", 
++	       fs,fsindex,gs,gsindex,shadowgs); 
++	printk("CS:  %04x DS: %04x ES: %04x\n", cs, ds, es); 
++
++}
++
++void show_regs(struct pt_regs *regs)
++{
++	printk("CPU %d:", smp_processor_id());
++	__show_regs(regs);
++	show_trace(NULL, regs, (void *)(regs + 1));
++}
++
++/*
++ * Free current thread data structures etc..
++ */
++void exit_thread(void)
++{
++	struct task_struct *me = current;
++	struct thread_struct *t = &me->thread;
++
++	if (me->thread.io_bitmap_ptr) { 
++#ifndef CONFIG_X86_NO_TSS
++		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
++#endif
++#ifdef CONFIG_XEN
++		struct physdev_set_iobitmap iobmp_op;
++		memset(&iobmp_op, 0, sizeof(iobmp_op));
++#endif
++
++		kfree(t->io_bitmap_ptr);
++		t->io_bitmap_ptr = NULL;
++		/*
++		 * Careful, clear this in the TSS too:
++		 */
++#ifndef CONFIG_X86_NO_TSS
++		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
++		put_cpu();
++#endif
++#ifdef CONFIG_XEN
++		WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap,
++					      &iobmp_op));
++#endif
++		t->io_bitmap_max = 0;
++	}
++}
++
++void load_gs_index(unsigned gs)
++{
++	WARN_ON(HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gs));
++}
++
++void flush_thread(void)
++{
++	struct task_struct *tsk = current;
++	struct thread_info *t = current_thread_info();
++
++	if (t->flags & _TIF_ABI_PENDING) {
++		t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
++		if (t->flags & _TIF_IA32)
++			current_thread_info()->status |= TS_COMPAT;
++	}
++
++	tsk->thread.debugreg0 = 0;
++	tsk->thread.debugreg1 = 0;
++	tsk->thread.debugreg2 = 0;
++	tsk->thread.debugreg3 = 0;
++	tsk->thread.debugreg6 = 0;
++	tsk->thread.debugreg7 = 0;
++	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));	
++	/*
++	 * Forget coprocessor state..
++	 */
++	clear_fpu(tsk);
++	clear_used_math();
++}
++
++void release_thread(struct task_struct *dead_task)
++{
++	if (dead_task->mm) {
++		if (dead_task->mm->context.size) {
++			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
++					dead_task->comm,
++					dead_task->mm->context.ldt,
++					dead_task->mm->context.size);
++			BUG();
++		}
++	}
++}
++
++static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
++{
++	struct user_desc ud = { 
++		.base_addr = addr,
++		.limit = 0xfffff,
++		.seg_32bit = 1,
++		.limit_in_pages = 1,
++		.useable = 1,
++	};
++	struct n_desc_struct *desc = (void *)t->thread.tls_array;
++	desc += tls;
++	desc->a = LDT_entry_a(&ud); 
++	desc->b = LDT_entry_b(&ud); 
++}
++
++static inline u32 read_32bit_tls(struct task_struct *t, int tls)
++{
++	struct desc_struct *desc = (void *)t->thread.tls_array;
++	desc += tls;
++	return desc->base0 | 
++		(((u32)desc->base1) << 16) | 
++		(((u32)desc->base2) << 24);
++}
++
++/*
++ * This gets called before we allocate a new thread and copy
++ * the current task into it.
++ */
++void prepare_to_copy(struct task_struct *tsk)
++{
++	unlazy_fpu(tsk);
++}
++
++int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp, 
++		unsigned long unused,
++	struct task_struct * p, struct pt_regs * regs)
++{
++	int err;
++	struct pt_regs * childregs;
++	struct task_struct *me = current;
++
++	childregs = ((struct pt_regs *)
++			(THREAD_SIZE + task_stack_page(p))) - 1;
++	*childregs = *regs;
++
++	childregs->rax = 0;
++	childregs->rsp = rsp;
++	if (rsp == ~0UL)
++		childregs->rsp = (unsigned long)childregs;
++
++	p->thread.rsp = (unsigned long) childregs;
++	p->thread.rsp0 = (unsigned long) (childregs+1);
++	p->thread.userrsp = me->thread.userrsp; 
++
++	set_tsk_thread_flag(p, TIF_FORK);
++
++	p->thread.fs = me->thread.fs;
++	p->thread.gs = me->thread.gs;
++
++	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
++	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
++	asm("mov %%es,%0" : "=m" (p->thread.es));
++	asm("mov %%ds,%0" : "=m" (p->thread.ds));
++
++	if (unlikely(me->thread.io_bitmap_ptr != NULL)) { 
++		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
++		if (!p->thread.io_bitmap_ptr) {
++			p->thread.io_bitmap_max = 0;
++			return -ENOMEM;
++		}
++		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
++				IO_BITMAP_BYTES);
++	} 
++
++	/*
++	 * Set a new TLS for the child thread?
++	 */
++	if (clone_flags & CLONE_SETTLS) {
++#ifdef CONFIG_IA32_EMULATION
++		if (test_thread_flag(TIF_IA32))
++			err = ia32_child_tls(p, childregs); 
++		else 			
++#endif	 
++			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); 
++		if (err) 
++			goto out;
++	}
++        p->thread.iopl = current->thread.iopl;
++
++	err = 0;
++out:
++	if (err && p->thread.io_bitmap_ptr) {
++		kfree(p->thread.io_bitmap_ptr);
++		p->thread.io_bitmap_max = 0;
++	}
++	return err;
++}
++
++static inline void __save_init_fpu( struct task_struct *tsk )
++{
++	asm volatile( "rex64 ; fxsave %0 ; fnclex"
++		      : "=m" (tsk->thread.i387.fxsave));
++	tsk->thread_info->status &= ~TS_USEDFPU;
++}
++
++/*
++ *	switch_to(x,y) should switch tasks from x to y.
++ *
++ * This could still be optimized: 
++ * - fold all the options into a flag word and test it with a single test.
++ * - could test fs/gs bitsliced
++ *
++ * Kprobes not supported here. Set the probe on schedule instead.
++ */
++__kprobes struct task_struct *
++__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
++{
++	struct thread_struct *prev = &prev_p->thread,
++				 *next = &next_p->thread;
++	int cpu = smp_processor_id();  
++#ifndef CONFIG_X86_NO_TSS
++	struct tss_struct *tss = &per_cpu(init_tss, cpu);
++#endif
++#if CONFIG_XEN_COMPAT > 0x030002
++	struct physdev_set_iopl iopl_op;
++	struct physdev_set_iobitmap iobmp_op;
++#else
++	struct physdev_op _pdo[2], *pdo = _pdo;
++#define iopl_op pdo->u.set_iopl
++#define iobmp_op pdo->u.set_iobitmap
++#endif
++	multicall_entry_t _mcl[8], *mcl = _mcl;
++
++	/*
++	 * This is basically '__unlazy_fpu', except that we queue a
++	 * multicall to indicate FPU task switch, rather than
++	 * synchronously trapping to Xen.
++	 * The AMD workaround requires it to be after DS reload, or
++	 * after DS has been cleared, which we do in __prepare_arch_switch.
++	 */
++	if (prev_p->thread_info->status & TS_USEDFPU) {
++		__save_init_fpu(prev_p); /* _not_ save_init_fpu() */
++		mcl->op      = __HYPERVISOR_fpu_taskswitch;
++		mcl->args[0] = 1;
++		mcl++;
++	}
++
++	/*
++	 * Reload esp0, LDT and the page table pointer:
++	 */
++	mcl->op      = __HYPERVISOR_stack_switch;
++	mcl->args[0] = __KERNEL_DS;
++	mcl->args[1] = next->rsp0;
++	mcl++;
++
++	/*
++	 * Load the per-thread Thread-Local Storage descriptor.
++	 * This is load_TLS(next, cpu) with multicalls.
++	 */
++#define C(i) do {							\
++	if (unlikely(next->tls_array[i] != prev->tls_array[i])) {	\
++		mcl->op      = __HYPERVISOR_update_descriptor;		\
++		mcl->args[0] = virt_to_machine(				\
++			&cpu_gdt(cpu)[GDT_ENTRY_TLS_MIN + i]);		\
++		mcl->args[1] = next->tls_array[i];			\
++		mcl++;							\
++	}								\
++} while (0)
++	C(0); C(1); C(2);
++#undef C
++
++	if (unlikely(prev->iopl != next->iopl)) {
++		iopl_op.iopl = (next->iopl == 0) ? 1 : next->iopl;
++#if CONFIG_XEN_COMPAT > 0x030002
++		mcl->op      = __HYPERVISOR_physdev_op;
++		mcl->args[0] = PHYSDEVOP_set_iopl;
++		mcl->args[1] = (unsigned long)&iopl_op;
++#else
++		mcl->op      = __HYPERVISOR_physdev_op_compat;
++		pdo->cmd     = PHYSDEVOP_set_iopl;
++		mcl->args[0] = (unsigned long)pdo++;
++#endif
++		mcl++;
++	}
++
++	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
++		set_xen_guest_handle(iobmp_op.bitmap,
++				     (char *)next->io_bitmap_ptr);
++		iobmp_op.nr_ports = next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
++#if CONFIG_XEN_COMPAT > 0x030002
++		mcl->op      = __HYPERVISOR_physdev_op;
++		mcl->args[0] = PHYSDEVOP_set_iobitmap;
++		mcl->args[1] = (unsigned long)&iobmp_op;
++#else
++		mcl->op      = __HYPERVISOR_physdev_op_compat;
++		pdo->cmd     = PHYSDEVOP_set_iobitmap;
++		mcl->args[0] = (unsigned long)pdo++;
++#endif
++		mcl++;
++	}
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++	BUG_ON(pdo > _pdo + ARRAY_SIZE(_pdo));
++#endif
++	BUG_ON(mcl > _mcl + ARRAY_SIZE(_mcl));
++	if (unlikely(HYPERVISOR_multicall_check(_mcl, mcl - _mcl, NULL)))
++		BUG();
++
++	/* 
++	 * Switch DS and ES.
++	 * This won't pick up thread selector changes, but I guess that is ok.
++	 */
++	if (unlikely(next->es))
++		loadsegment(es, next->es); 
++	
++	if (unlikely(next->ds))
++		loadsegment(ds, next->ds);
++
++	/* 
++	 * Switch FS and GS.
++	 */
++	if (unlikely(next->fsindex))
++		loadsegment(fs, next->fsindex);
++
++	if (next->fs)
++		WARN_ON(HYPERVISOR_set_segment_base(SEGBASE_FS, next->fs));
++	
++	if (unlikely(next->gsindex))
++		load_gs_index(next->gsindex);
++
++	if (next->gs)
++		WARN_ON(HYPERVISOR_set_segment_base(SEGBASE_GS_USER, next->gs));
++
++	/* 
++	 * Switch the PDA context.
++	 */
++	prev->userrsp = read_pda(oldrsp); 
++	write_pda(oldrsp, next->userrsp); 
++	write_pda(pcurrent, next_p); 
++	write_pda(kernelstack,
++		  task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
++
++	/*
++	 * Now maybe reload the debug registers
++	 */
++	if (unlikely(next->debugreg7)) {
++		set_debugreg(next->debugreg0, 0);
++		set_debugreg(next->debugreg1, 1);
++		set_debugreg(next->debugreg2, 2);
++		set_debugreg(next->debugreg3, 3);
++		/* no 4 and 5 */
++		set_debugreg(next->debugreg6, 6);
++		set_debugreg(next->debugreg7, 7);
++	}
++
++	return prev_p;
++}
++
++/*
++ * sys_execve() executes a new program.
++ */
++asmlinkage 
++long sys_execve(char __user *name, char __user * __user *argv,
++		char __user * __user *envp, struct pt_regs regs)
++{
++	long error;
++	char * filename;
++
++	filename = getname(name);
++	error = PTR_ERR(filename);
++	if (IS_ERR(filename)) 
++		return error;
++	error = do_execve(filename, argv, envp, &regs); 
++	if (error == 0) {
++		task_lock(current);
++		current->ptrace &= ~PT_DTRACE;
++		task_unlock(current);
++	}
++	putname(filename);
++	return error;
++}
++
++void set_personality_64bit(void)
++{
++	/* inherit personality from parent */
++
++	/* Make sure to be in 64bit mode */
++	clear_thread_flag(TIF_IA32); 
++
++	/* TBD: overwrites user setup. Should have two bits.
++	   But 64bit processes have always behaved this way,
++	   so it's not too bad. The main problem is just that
++   	   32bit children are affected again. */
++	current->personality &= ~READ_IMPLIES_EXEC;
++}
++
++asmlinkage long sys_fork(struct pt_regs *regs)
++{
++	return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
++}
++
++asmlinkage long
++sys_clone(unsigned long clone_flags, unsigned long newsp,
++	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
++{
++	if (!newsp)
++		newsp = regs->rsp;
++	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
++}
++
++/*
++ * This is trivial, and on the face of it looks like it
++ * could equally well be done in user mode.
++ *
++ * Not so, for quite unobvious reasons - register pressure.
++ * In user mode vfork() cannot have a stack frame, and if
++ * done by calling the "clone()" system call directly, you
++ * do not have enough call-clobbered registers to hold all
++ * the information you need.
++ */
++asmlinkage long sys_vfork(struct pt_regs *regs)
++{
++	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
++		    NULL, NULL);
++}
++
++unsigned long get_wchan(struct task_struct *p)
++{
++	unsigned long stack;
++	u64 fp,rip;
++	int count = 0;
++
++	if (!p || p == current || p->state==TASK_RUNNING)
++		return 0; 
++	stack = (unsigned long)task_stack_page(p);
++	if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
++		return 0;
++	fp = *(u64 *)(p->thread.rsp);
++	do { 
++		if (fp < (unsigned long)stack ||
++		    fp > (unsigned long)stack+THREAD_SIZE)
++			return 0; 
++		rip = *(u64 *)(fp+8); 
++		if (!in_sched_functions(rip))
++			return rip; 
++		fp = *(u64 *)fp; 
++	} while (count++ < 16); 
++	return 0;
++}
++
++long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
++{ 
++	int ret = 0; 
++	int doit = task == current;
++	int cpu;
++
++	switch (code) { 
++	case ARCH_SET_GS:
++		if (addr >= TASK_SIZE_OF(task))
++			return -EPERM; 
++		cpu = get_cpu();
++		/* handle small bases via the GDT because that's faster to 
++		   switch. */
++		if (addr <= 0xffffffff) {  
++			set_32bit_tls(task, GS_TLS, addr); 
++			if (doit) { 
++				load_TLS(&task->thread, cpu);
++				load_gs_index(GS_TLS_SEL); 
++			}
++			task->thread.gsindex = GS_TLS_SEL; 
++			task->thread.gs = 0;
++		} else { 
++			task->thread.gsindex = 0;
++			task->thread.gs = addr;
++			if (doit) {
++				load_gs_index(0);
++				ret = HYPERVISOR_set_segment_base(
++					SEGBASE_GS_USER, addr);
++			} 
++		}
++		put_cpu();
++		break;
++	case ARCH_SET_FS:
++		/* Not strictly needed for fs, but do it for symmetry
++		   with gs */
++		if (addr >= TASK_SIZE_OF(task))
++			return -EPERM; 
++		cpu = get_cpu();
++		/* handle small bases via the GDT because that's faster to 
++		   switch. */
++		if (addr <= 0xffffffff) { 
++			set_32bit_tls(task, FS_TLS, addr);
++			if (doit) { 
++				load_TLS(&task->thread, cpu); 
++				asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
++			}
++			task->thread.fsindex = FS_TLS_SEL;
++			task->thread.fs = 0;
++		} else { 
++			task->thread.fsindex = 0;
++			task->thread.fs = addr;
++			if (doit) {
++				/* set the selector to 0 to not confuse
++				   __switch_to */
++				asm volatile("movl %0,%%fs" :: "r" (0));
++                                ret = HYPERVISOR_set_segment_base(SEGBASE_FS,
++								  addr);
++			}
++		}
++		put_cpu();
++		break;
++	case ARCH_GET_FS: { 
++		unsigned long base; 
++		if (task->thread.fsindex == FS_TLS_SEL)
++			base = read_32bit_tls(task, FS_TLS);
++		else if (doit)
++			rdmsrl(MSR_FS_BASE, base);
++		else
++			base = task->thread.fs;
++		ret = put_user(base, (unsigned long __user *)addr); 
++		break; 
++	}
++	case ARCH_GET_GS: { 
++		unsigned long base;
++		unsigned gsindex;
++		if (task->thread.gsindex == GS_TLS_SEL)
++			base = read_32bit_tls(task, GS_TLS);
++		else if (doit) {
++ 			asm("movl %%gs,%0" : "=r" (gsindex));
++			if (gsindex)
++				rdmsrl(MSR_KERNEL_GS_BASE, base);
++			else
++				base = task->thread.gs;
++		}
++		else
++			base = task->thread.gs;
++		ret = put_user(base, (unsigned long __user *)addr); 
++		break;
++	}
++
++	default:
++		ret = -EINVAL;
++		break;
++	} 
++
++	return ret;	
++} 
++
++long sys_arch_prctl(int code, unsigned long addr)
++{
++	return do_arch_prctl(current, code, addr);
++} 
++
++/* 
++ * Capture the user space registers if the task is not running (in user space)
++ */
++int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
++{
++	struct pt_regs *pp, ptregs;
++
++	pp = task_pt_regs(tsk);
++
++	ptregs = *pp; 
++	ptregs.cs &= 0xffff;
++	ptregs.ss &= 0xffff;
++
++	elf_core_copy_regs(regs, &ptregs);
++ 
++        boot_option_idle_override = 1;
++	return 1;
++}
++
++unsigned long arch_align_stack(unsigned long sp)
++{
++	if (randomize_va_space)
++		sp -= get_random_int() % 8192;
++	return sp & ~0xf;
++}
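
The interesting part of process-xen.c is __switch_to(): instead of trapping into Xen once per privileged operation, it queues the FPU task switch, stack switch, TLS descriptor updates, and iopl/iobitmap changes as entries in a fixed local multicall array and submits the whole batch with a single HYPERVISOR_multicall_check(). A user-space sketch of that batching pattern (types and op numbers are illustrative):

    #include <assert.h>
    #include <stdio.h>

    struct mc_entry {
            int op;
            unsigned long args[2];
    };

    /* Stands in for the single hypercall that flushes the whole batch. */
    static void submit_batch(const struct mc_entry *mcl, int n)
    {
            int i;

            for (i = 0; i < n; i++)
                    printf("op=%d arg0=%lu\n", mcl[i].op, mcl[i].args[0]);
    }

    int main(void)
    {
            struct mc_entry _mcl[8], *mcl = _mcl;

            mcl->op = 1; mcl->args[0] = 42; mcl++;  /* e.g. fpu_taskswitch */
            mcl->op = 2; mcl->args[0] = 7;  mcl++;  /* e.g. stack_switch   */

            assert(mcl <= _mcl + 8);        /* the BUG_ON() bound above */
            submit_batch(_mcl, (int)(mcl - _mcl));
            return 0;
    }
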
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/relocate_kernel.S
+--- a/arch/x86_64/kernel/relocate_kernel.S	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/x86_64/kernel/relocate_kernel.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -7,31 +7,195 @@
+  */
+ 
+ #include <linux/linkage.h>
++#include <asm/page.h>
++#include <asm/kexec.h>
+ 
+-	/*
+-	 * Must be relocatable PIC code callable as a C function, that once
+-	 * it starts can not use the previous processes stack.
++/*
++ * Must be relocatable PIC code callable as a C function
++ */
++
++#define PTR(x) (x << 3)
++#define PAGE_ALIGNED (1 << PAGE_SHIFT)
++#define PAGE_ATTR 0x63 /* _PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY */
++
++	.text
++	.align PAGE_ALIGNED
++	.code64
++	.globl relocate_kernel
++relocate_kernel:
++	/* %rdi indirection_page
++	 * %rsi page_list
++	 * %rdx start address
+ 	 */
+-	.globl relocate_new_kernel
+-	.code64
++
++	/* map the control page at its virtual address */
++
++	movq	$0x0000ff8000000000, %r10        /* mask */
++	mov	$(39 - 3), %cl                   /* bits to shift */
++	movq	PTR(VA_CONTROL_PAGE)(%rsi), %r11 /* address to map */
++
++	movq	%r11, %r9
++	andq	%r10, %r9
++	shrq	%cl, %r9
++
++	movq	PTR(VA_PGD)(%rsi), %r8
++	addq	%r8, %r9
++	movq	PTR(PA_PUD_0)(%rsi), %r8
++	orq	$PAGE_ATTR, %r8
++	movq	%r8, (%r9)
++
++	shrq	$9, %r10
++	sub	$9, %cl
++
++	movq	%r11, %r9
++	andq	%r10, %r9
++	shrq	%cl, %r9
++
++	movq	PTR(VA_PUD_0)(%rsi), %r8
++	addq	%r8, %r9
++	movq	PTR(PA_PMD_0)(%rsi), %r8
++	orq	$PAGE_ATTR, %r8
++	movq	%r8, (%r9)
++
++	shrq	$9, %r10
++	sub	$9, %cl
++
++	movq	%r11, %r9
++	andq	%r10, %r9
++	shrq	%cl, %r9
++
++	movq	PTR(VA_PMD_0)(%rsi), %r8
++	addq	%r8, %r9
++	movq	PTR(PA_PTE_0)(%rsi), %r8
++	orq	$PAGE_ATTR, %r8
++	movq	%r8, (%r9)
++
++	shrq	$9, %r10
++	sub	$9, %cl
++
++	movq	%r11, %r9
++	andq	%r10, %r9
++	shrq	%cl, %r9
++
++	movq	PTR(VA_PTE_0)(%rsi), %r8
++	addq	%r8, %r9
++	movq	PTR(PA_CONTROL_PAGE)(%rsi), %r8
++	orq	$PAGE_ATTR, %r8
++	movq	%r8, (%r9)
++
++	/* identity map the control page at its physical address */
++
++	movq	$0x0000ff8000000000, %r10        /* mask */
++	mov	$(39 - 3), %cl                   /* bits to shift */
++	movq	PTR(PA_CONTROL_PAGE)(%rsi), %r11 /* address to map */
++
++	movq	%r11, %r9
++	andq	%r10, %r9
++	shrq	%cl, %r9
++
++	movq	PTR(VA_PGD)(%rsi), %r8
++	addq	%r8, %r9
++	movq	PTR(PA_PUD_1)(%rsi), %r8
++	orq	$PAGE_ATTR, %r8
++	movq	%r8, (%r9)
++
++	shrq	$9, %r10
++	sub	$9, %cl
++
++	movq	%r11, %r9
++	andq	%r10, %r9
++	shrq	%cl, %r9
++
++	movq	PTR(VA_PUD_1)(%rsi), %r8
++	addq	%r8, %r9
++	movq	PTR(PA_PMD_1)(%rsi), %r8
++	orq	$PAGE_ATTR, %r8
++	movq	%r8, (%r9)
++
++	shrq	$9, %r10
++	sub	$9, %cl
++
++	movq	%r11, %r9
++	andq	%r10, %r9
++	shrq	%cl, %r9
++
++	movq	PTR(VA_PMD_1)(%rsi), %r8
++	addq	%r8, %r9
++	movq	PTR(PA_PTE_1)(%rsi), %r8
++	orq	$PAGE_ATTR, %r8
++	movq	%r8, (%r9)
++
++	shrq	$9, %r10
++	sub	$9, %cl
++
++	movq	%r11, %r9
++	andq	%r10, %r9
++	shrq	%cl, %r9
++
++	movq	PTR(VA_PTE_1)(%rsi), %r8
++	addq	%r8, %r9
++	movq	PTR(PA_CONTROL_PAGE)(%rsi), %r8
++	orq	$PAGE_ATTR, %r8
++	movq	%r8, (%r9)
++
+ relocate_new_kernel:
+-	/* %rdi page_list
+-	 * %rsi reboot_code_buffer
++	/* %rdi indirection_page
++	 * %rsi page_list
+ 	 * %rdx start address
+-	 * %rcx page_table
+-	 * %r8  arg5
+-	 * %r9  arg6
+ 	 */
+ 
+ 	/* zero out flags, and disable interrupts */
+ 	pushq $0
+ 	popfq
+ 
+-	/* set a new stack at the bottom of our page... */
+-	lea   4096(%rsi), %rsp
++	/* get physical address of control page now */
++	/* this is impossible after page table switch */
++	movq	PTR(PA_CONTROL_PAGE)(%rsi), %r8
+ 
+-	/* store the parameters back on the stack */
+-	pushq	%rdx /* store the start address */
++	/* get physical address of page table now too */
++	movq	PTR(PA_TABLE_PAGE)(%rsi), %rcx
++
++	/* switch to new set of page tables */
++	movq	PTR(PA_PGD)(%rsi), %r9
++	movq	%r9, %cr3
++
++	/* setup idt */
++	movq    %r8, %rax
++	addq    $(idt_80 - relocate_kernel), %rax
++	lidtq   (%rax)
++
++	/* setup gdt */
++	movq    %r8, %rax
++	addq    $(gdt - relocate_kernel), %rax
++	movq    %r8, %r9
++	addq    $((gdt_80 - relocate_kernel) + 2), %r9
++	movq    %rax, (%r9)
++
++	movq    %r8, %rax
++	addq    $(gdt_80 - relocate_kernel), %rax
++	lgdtq   (%rax)
++
++	/* setup data segment registers */
++	xorl	%eax, %eax
++	movl    %eax, %ds
++	movl    %eax, %es
++	movl    %eax, %fs
++	movl    %eax, %gs
++	movl    %eax, %ss
++	
++	/* setup a new stack at the end of the physical control page */
++	lea	4096(%r8), %rsp
++
++	/* load new code segment and jump to identity mapped page */
++	movq	%r8, %rax
++	addq    $(identity_mapped - relocate_kernel), %rax
++	pushq	$(gdt_cs - gdt)
++	pushq	%rax
++	lretq
++
++identity_mapped:
++	/* store the start address on the stack */
++	pushq   %rdx
+ 
+ 	/* Set cr0 to a known state:
+ 	 * 31 1 == Paging enabled
+@@ -134,10 +298,19 @@
+ 	xorq	%r13, %r13
+ 	xorq	%r14, %r14
+ 	xorq	%r15, %r15
++	ret
+ 
+-	ret
+-relocate_new_kernel_end:
++	.align  16
++gdt:
++	.quad	0x0000000000000000	/* NULL descriptor */
++gdt_cs:
++	.quad   0x00af9a000000ffff
++gdt_end:
+ 
+-	.globl relocate_new_kernel_size
+-relocate_new_kernel_size:
+-	.quad relocate_new_kernel_end - relocate_new_kernel
++gdt_80:
++	.word	gdt_end - gdt - 1	/* limit */
++	.quad	0			/* base - filled in by code above */
++
++idt_80:
++	.word	0			/* limit */
++	.quad	0			/* base */
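
The new mapping preamble in relocate_kernel walks one page-table level per step: mask out a 9-bit index field, shift it down so it becomes a byte offset into the table (the 39 - 3 accounts for the 8-byte entry size), then shift mask and count down by 9 for the next level. The same walk rendered in C, for a sample address (assumes LP64, as on x86_64):

    #include <stdio.h>

    int main(void)
    {
            unsigned long va = 0xffffffff81000000UL;    /* sample address */
            unsigned long mask = 0x0000ff8000000000UL;  /* bits 47..39 */
            int shift = 39 - 3;     /* pre-scaled by the 8-byte entry size */
            int level;

            for (level = 4; level >= 1; level--) {
                    unsigned long off = (va & mask) >> shift;

                    printf("level %d: entry byte offset 0x%lx (index %lu)\n",
                           level, off, off >> 3);
                    mask >>= 9;
                    shift -= 9;
            }
            return 0;
    }
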
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/setup-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/kernel/setup-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1654 @@
++/*
++ *  linux/arch/x86-64/kernel/setup.c
++ *
++ *  Copyright (C) 1995  Linus Torvalds
++ *
++ *  Nov 2001 Dave Jones <davej at suse.de>
++ *  Forked from i386 setup code.
++ */
++
++/*
++ * This file handles the architecture-dependent parts of initialization
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/stddef.h>
++#include <linux/unistd.h>
++#include <linux/ptrace.h>
++#include <linux/slab.h>
++#include <linux/user.h>
++#include <linux/a.out.h>
++#include <linux/screen_info.h>
++#include <linux/ioport.h>
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/initrd.h>
++#include <linux/highmem.h>
++#include <linux/bootmem.h>
++#include <linux/module.h>
++#include <asm/processor.h>
++#include <linux/console.h>
++#include <linux/seq_file.h>
++#include <linux/crash_dump.h>
++#include <linux/root_dev.h>
++#include <linux/pci.h>
++#include <linux/acpi.h>
++#include <linux/kallsyms.h>
++#include <linux/edd.h>
++#include <linux/mmzone.h>
++#include <linux/kexec.h>
++#include <linux/cpufreq.h>
++#include <linux/dmi.h>
++#include <linux/dma-mapping.h>
++#include <linux/ctype.h>
++
++#include <asm/mtrr.h>
++#include <asm/uaccess.h>
++#include <asm/system.h>
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/msr.h>
++#include <asm/desc.h>
++#include <video/edid.h>
++#include <asm/e820.h>
++#include <asm/dma.h>
++#include <asm/mpspec.h>
++#include <asm/mmu_context.h>
++#include <asm/bootsetup.h>
++#include <asm/proto.h>
++#include <asm/setup.h>
++#include <asm/mach_apic.h>
++#include <asm/numa.h>
++#include <asm/sections.h>
++#include <asm/dmi.h>
++#ifdef CONFIG_XEN
++#include <linux/percpu.h>
++#include <xen/interface/physdev.h>
++#include "setup_arch_pre.h"
++#include <asm/hypervisor.h>
++#include <xen/interface/nmi.h>
++#include <xen/features.h>
++#include <xen/firmware.h>
++#include <xen/xencons.h>
++#define PFN_UP(x)       (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
++#define PFN_PHYS(x)     ((x) << PAGE_SHIFT)
++#include <asm/mach-xen/setup_arch_post.h>
++#include <xen/interface/memory.h>
++
++#ifdef CONFIG_XEN
++#include <xen/interface/kexec.h>
++#endif
++
++extern unsigned long start_pfn;
++extern struct edid_info edid_info;
++
++shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
++EXPORT_SYMBOL(HYPERVISOR_shared_info);
++
++extern char hypercall_page[PAGE_SIZE];
++EXPORT_SYMBOL(hypercall_page);
++
++static int xen_panic_event(struct notifier_block *, unsigned long, void *);
++static struct notifier_block xen_panic_block = {
++	xen_panic_event, NULL, 0 /* try to go last */
++};
++
++unsigned long *phys_to_machine_mapping;
++unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[512];
++
++EXPORT_SYMBOL(phys_to_machine_mapping);
++
++DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
++DEFINE_PER_CPU(int, nr_multicall_ents);
++
++/* Raw start-of-day parameters from the hypervisor. */
++start_info_t *xen_start_info;
++EXPORT_SYMBOL(xen_start_info);
++#endif
++
++/*
++ * Machine setup..
++ */
++
++struct cpuinfo_x86 boot_cpu_data __read_mostly;
++EXPORT_SYMBOL(boot_cpu_data);
++
++unsigned long mmu_cr4_features;
++
++int acpi_disabled;
++EXPORT_SYMBOL(acpi_disabled);
++#ifdef	CONFIG_ACPI
++extern int __initdata acpi_ht;
++extern acpi_interrupt_flags	acpi_sci_flags;
++int __initdata acpi_force = 0;
++#endif
++
++int acpi_numa __initdata;
++
++/* Boot loader ID as an integer, for the benefit of proc_dointvec */
++int bootloader_type;
++
++unsigned long saved_video_mode;
++
++/* 
++ * Early DMI memory
++ */
++int dmi_alloc_index;
++char dmi_alloc_data[DMI_MAX_DATA];
++
++/*
++ * Setup options
++ */
++struct screen_info screen_info;
++EXPORT_SYMBOL(screen_info);
++struct sys_desc_table_struct {
++	unsigned short length;
++	unsigned char table[0];
++};
++
++struct edid_info edid_info;
++EXPORT_SYMBOL_GPL(edid_info);
++struct e820map e820;
++#ifdef CONFIG_XEN
++struct e820map machine_e820;
++#endif
++
++extern int root_mountflags;
++
++char command_line[COMMAND_LINE_SIZE];
++
++struct resource standard_io_resources[] = {
++	{ .name = "dma1", .start = 0x00, .end = 0x1f,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "pic1", .start = 0x20, .end = 0x21,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "timer0", .start = 0x40, .end = 0x43,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "timer1", .start = 0x50, .end = 0x53,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "keyboard", .start = 0x60, .end = 0x60,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "keyboard", .start = 0x64, .end = 0x64,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "fpu", .start = 0xf0, .end = 0xff,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
++};
++
++#define STANDARD_IO_RESOURCES \
++	(sizeof standard_io_resources / sizeof standard_io_resources[0])
++
++#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
++
++struct resource data_resource = {
++	.name = "Kernel data",
++	.start = 0,
++	.end = 0,
++	.flags = IORESOURCE_RAM,
++};
++struct resource code_resource = {
++	.name = "Kernel code",
++	.start = 0,
++	.end = 0,
++	.flags = IORESOURCE_RAM,
++};
++
++#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
++
++static struct resource system_rom_resource = {
++	.name = "System ROM",
++	.start = 0xf0000,
++	.end = 0xfffff,
++	.flags = IORESOURCE_ROM,
++};
++
++static struct resource extension_rom_resource = {
++	.name = "Extension ROM",
++	.start = 0xe0000,
++	.end = 0xeffff,
++	.flags = IORESOURCE_ROM,
++};
++
++static struct resource adapter_rom_resources[] = {
++	{ .name = "Adapter ROM", .start = 0xc8000, .end = 0,
++		.flags = IORESOURCE_ROM },
++	{ .name = "Adapter ROM", .start = 0, .end = 0,
++		.flags = IORESOURCE_ROM },
++	{ .name = "Adapter ROM", .start = 0, .end = 0,
++		.flags = IORESOURCE_ROM },
++	{ .name = "Adapter ROM", .start = 0, .end = 0,
++		.flags = IORESOURCE_ROM },
++	{ .name = "Adapter ROM", .start = 0, .end = 0,
++		.flags = IORESOURCE_ROM },
++	{ .name = "Adapter ROM", .start = 0, .end = 0,
++		.flags = IORESOURCE_ROM }
++};
++
++#define ADAPTER_ROM_RESOURCES \
++	(sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
++
++static struct resource video_rom_resource = {
++	.name = "Video ROM",
++	.start = 0xc0000,
++	.end = 0xc7fff,
++	.flags = IORESOURCE_ROM,
++};
++
++static struct resource video_ram_resource = {
++	.name = "Video RAM area",
++	.start = 0xa0000,
++	.end = 0xbffff,
++	.flags = IORESOURCE_RAM,
++};
++
++#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
++
++static int __init romchecksum(unsigned char *rom, unsigned long length)
++{
++	unsigned char *p, sum = 0;
++
++	for (p = rom; p < rom + length; p++)
++		sum += *p;
++	return sum == 0;
++}
++
++static void __init probe_roms(void)
++{
++	unsigned long start, length, upper;
++	unsigned char *rom;
++	int	      i;
++
++#ifdef CONFIG_XEN
++	/* Nothing to do if not running in dom0. */
++	if (!is_initial_xendomain())
++		return;
++#endif
++
++	/* video rom */
++	upper = adapter_rom_resources[0].start;
++	for (start = video_rom_resource.start; start < upper; start += 2048) {
++		rom = isa_bus_to_virt(start);
++		if (!romsignature(rom))
++			continue;
++
++		video_rom_resource.start = start;
++
++		/* 0 < length <= 0x7f * 512, historically */
++		length = rom[2] * 512;
++
++		/* if checksum okay, trust length byte */
++		if (length && romchecksum(rom, length))
++			video_rom_resource.end = start + length - 1;
++
++		request_resource(&iomem_resource, &video_rom_resource);
++		break;
++	}
++
++	start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
++	if (start < upper)
++		start = upper;
++
++	/* system rom */
++	request_resource(&iomem_resource, &system_rom_resource);
++	upper = system_rom_resource.start;
++
++	/* check for extension rom (ignore length byte!) */
++	rom = isa_bus_to_virt(extension_rom_resource.start);
++	if (romsignature(rom)) {
++		length = extension_rom_resource.end - extension_rom_resource.start + 1;
++		if (romchecksum(rom, length)) {
++			request_resource(&iomem_resource, &extension_rom_resource);
++			upper = extension_rom_resource.start;
++		}
++	}
++
++	/* check for adapter roms on 2k boundaries */
++	for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
++		rom = isa_bus_to_virt(start);
++		if (!romsignature(rom))
++			continue;
++
++		/* 0 < length <= 0x7f * 512, historically */
++		length = rom[2] * 512;
++
++		/* but accept any length that fits if checksum okay */
++		if (!length || start + length > upper || !romchecksum(rom, length))
++			continue;
++
++		adapter_rom_resources[i].start = start;
++		adapter_rom_resources[i].end = start + length - 1;
++		request_resource(&iomem_resource, &adapter_rom_resources[i]);
++
++		start = adapter_rom_resources[i++].end & ~2047UL;
++	}
++}
++
++/* Check for full argument with no trailing characters */
++static int fullarg(char *p, char *arg)
++{
++	int l = strlen(arg);
++	return !memcmp(p, arg, l) && (p[l] == 0 || isspace(p[l]));
++}
++
++static __init void parse_cmdline_early (char ** cmdline_p)
++{
++	char c = ' ', *to = command_line, *from = COMMAND_LINE;
++	int len = 0;
++	int userdef = 0;
++
++	for (;;) {
++		if (c != ' ') 
++			goto next_char; 
++
++#ifdef  CONFIG_SMP
++		/*
++		 * If the BIOS enumerates physical processors before logical,
++		 * maxcpus=N at enumeration-time can be used to disable HT.
++		 */
++		else if (!memcmp(from, "maxcpus=", 8)) {
++			extern unsigned int maxcpus;
++
++			maxcpus = simple_strtoul(from + 8, NULL, 0);
++		}
++#endif
++#ifdef CONFIG_ACPI
++		/* "acpi=off" disables both ACPI table parsing and interpreter init */
++		if (fullarg(from,"acpi=off"))
++			disable_acpi();
++
++		if (fullarg(from, "acpi=force")) { 
++			/* add later when we do DMI horrors: */
++			acpi_force = 1;
++			acpi_disabled = 0;
++		}
++
++		/* acpi=ht just means: do ACPI MADT parsing 
++		   at bootup, but don't enable the full ACPI interpreter */
++		if (fullarg(from, "acpi=ht")) { 
++			if (!acpi_force)
++				disable_acpi();
++			acpi_ht = 1; 
++		}
++                else if (fullarg(from, "pci=noacpi")) 
++			acpi_disable_pci();
++		else if (fullarg(from, "acpi=noirq"))
++			acpi_noirq_set();
++
++		else if (fullarg(from, "acpi_sci=edge"))
++			acpi_sci_flags.trigger =  1;
++		else if (fullarg(from, "acpi_sci=level"))
++			acpi_sci_flags.trigger = 3;
++		else if (fullarg(from, "acpi_sci=high"))
++			acpi_sci_flags.polarity = 1;
++		else if (fullarg(from, "acpi_sci=low"))
++			acpi_sci_flags.polarity = 3;
++
++		/* acpi=strict disables out-of-spec workarounds */
++		else if (fullarg(from, "acpi=strict")) {
++			acpi_strict = 1;
++		}
++#ifdef CONFIG_X86_IO_APIC
++		else if (fullarg(from, "acpi_skip_timer_override"))
++			acpi_skip_timer_override = 1;
++#endif
++#endif
++
++#ifndef CONFIG_XEN
++		if (fullarg(from, "nolapic") || fullarg(from, "disableapic")) {
++			clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
++			disable_apic = 1;
++		}
++
++		if (fullarg(from, "noapic"))
++			skip_ioapic_setup = 1;
++
++		if (fullarg(from,"apic")) {
++			skip_ioapic_setup = 0;
++			ioapic_force = 1;
++		}
++#endif
++			
++		if (!memcmp(from, "mem=", 4))
++			parse_memopt(from+4, &from); 
++
++		if (!memcmp(from, "memmap=", 7)) {
++			/* exactmap option is for user-defined memory */
++			if (!memcmp(from+7, "exactmap", 8)) {
++#ifdef CONFIG_CRASH_DUMP
++				/* If we are doing a crash dump, we
++				 * still need to know the real mem
++				 * size before original memory map is
++				 * reset.
++				 */
++				saved_max_pfn = e820_end_of_ram();
++#endif
++				from += 8+7;
++				end_pfn_map = 0;
++				e820.nr_map = 0;
++				userdef = 1;
++			}
++			else {
++				parse_memmapopt(from+7, &from);
++				userdef = 1;
++			}
++		}
++
++#ifdef CONFIG_NUMA
++		if (!memcmp(from, "numa=", 5))
++			numa_setup(from+5); 
++#endif
++
++		if (!memcmp(from,"iommu=",6)) { 
++			iommu_setup(from+6); 
++		}
++
++		if (fullarg(from,"oops=panic"))
++			panic_on_oops = 1;
++
++		if (!memcmp(from, "noexec=", 7))
++			nonx_setup(from + 7);
++
++#ifdef CONFIG_KEXEC
++		/* crashkernel=size@addr specifies the location to reserve for
++		 * a crash kernel.  By reserving this memory we guarantee
++		 * that linux never sets it up as a DMA target.
++		 * Useful for holding code to do something appropriate
++		 * after a kernel panic.
++		 */
++		else if (!memcmp(from, "crashkernel=", 12)) {
++#ifndef CONFIG_XEN
++			unsigned long size, base;
++			size = memparse(from+12, &from);
++			if (*from == '@') {
++				base = memparse(from+1, &from);
++				/* FIXME: Do I want a sanity check
++				 * to validate the memory range?
++				 */
++				crashk_res.start = base;
++				crashk_res.end   = base + size - 1;
++			}
++#else
++			printk("Ignoring crashkernel command line, "
++			       "parameter will be supplied by xen\n");
++#endif
++		}
++#endif
++
++#ifdef CONFIG_PROC_VMCORE
++		/* elfcorehdr= specifies the location of elf core header
++		 * stored by the crashed kernel. This option will be passed
++		 * by kexec loader to the capture kernel.
++		 */
++		else if(!memcmp(from, "elfcorehdr=", 11))
++			elfcorehdr_addr = memparse(from+11, &from);
++#endif
++
++#if defined(CONFIG_HOTPLUG_CPU) && !defined(CONFIG_XEN)
++		else if (!memcmp(from, "additional_cpus=", 16))
++			setup_additional_cpus(from+16);
++#endif
++
++	next_char:
++		c = *(from++);
++		if (!c)
++			break;
++		if (COMMAND_LINE_SIZE <= ++len)
++			break;
++		*(to++) = c;
++	}
++	if (userdef) {
++		printk(KERN_INFO "user-defined physical RAM map:\n");
++		e820_print_map("user");
++	}
++	*to = '\0';
++	*cmdline_p = command_line;
++}
++
++#ifndef CONFIG_NUMA
++static void __init
++contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
++{
++	unsigned long bootmap_size, bootmap;
++
++	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
++	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
++	if (bootmap == -1L)
++		panic("Cannot find bootmem map of size %ld\n",bootmap_size);
++	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
++#ifdef CONFIG_XEN
++	e820_bootmem_free(NODE_DATA(0), 0, xen_start_info->nr_pages<<PAGE_SHIFT);
++#else
++	e820_bootmem_free(NODE_DATA(0), 0, end_pfn << PAGE_SHIFT);
++#endif
++	reserve_bootmem(bootmap, bootmap_size);
++} 
++#endif
++
++#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
++struct edd edd;
++#ifdef CONFIG_EDD_MODULE
++EXPORT_SYMBOL(edd);
++#endif
++#ifndef CONFIG_XEN
++/**
++ * copy_edd() - Copy the BIOS EDD information
++ *              from boot_params into a safe place.
++ *
++ */
++static inline void copy_edd(void)
++{
++     memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
++     memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
++     edd.mbr_signature_nr = EDD_MBR_SIG_NR;
++     edd.edd_info_nr = EDD_NR;
++}
++#endif
++#else
++static inline void copy_edd(void)
++{
++}
++#endif
++
++#ifndef CONFIG_XEN
++#define EBDA_ADDR_POINTER 0x40E
++
++unsigned __initdata ebda_addr;
++unsigned __initdata ebda_size;
++
++static void discover_ebda(void)
++{
++	/*
++	 * there is a real-mode segmented pointer pointing to the 
++	 * 4K EBDA area at 0x40E
++	 */
++	ebda_addr = *(unsigned short *)EBDA_ADDR_POINTER;
++	ebda_addr <<= 4;
++
++	ebda_size = *(unsigned short *)(unsigned long)ebda_addr;
++
++	/* Round EBDA up to pages */
++	if (ebda_size == 0)
++		ebda_size = 1;
++	ebda_size <<= 10;
++	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
++	if (ebda_size > 64*1024)
++		ebda_size = 64*1024;
++}
++#else
++#define discover_ebda() ((void)0)
++#endif
++
++void __init setup_arch(char **cmdline_p)
++{
++#ifdef CONFIG_XEN
++	/* Register a call for panic conditions. */
++	atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
++
++ 	ROOT_DEV = MKDEV(RAMDISK_MAJOR,0); 
++ 	screen_info = SCREEN_INFO;
++
++	if (is_initial_xendomain()) {
++		const struct dom0_vga_console_info *info =
++			(void *)((char *)xen_start_info +
++			         xen_start_info->console.dom0.info_off);
++
++		dom0_init_screen_info(info,
++		                      xen_start_info->console.dom0.info_size);
++		xen_start_info->console.domU.mfn = 0;
++		xen_start_info->console.domU.evtchn = 0;
++	} else
++		screen_info.orig_video_isVGA = 0;
++
++	copy_edid();
++
++	WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
++				     VMASST_TYPE_writable_pagetables));
++
++	ARCH_SETUP
++#else
++ 	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
++ 	screen_info = SCREEN_INFO;
++	edid_info = EDID_INFO;
++#endif	/* !CONFIG_XEN */
++	saved_video_mode = SAVED_VIDEO_MODE;
++	bootloader_type = LOADER_TYPE;
++
++#ifdef CONFIG_BLK_DEV_RAM
++	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
++	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
++	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
++#endif
++	setup_memory_region();
++	copy_edd();
++
++	if (!MOUNT_ROOT_RDONLY)
++		root_mountflags &= ~MS_RDONLY;
++	init_mm.start_code = (unsigned long) &_text;
++	init_mm.end_code = (unsigned long) &_etext;
++	init_mm.end_data = (unsigned long) &_edata;
++	init_mm.brk = (unsigned long) &_end;
++
++	code_resource.start = virt_to_phys(&_text);
++	code_resource.end = virt_to_phys(&_etext)-1;
++	data_resource.start = virt_to_phys(&_etext);
++	data_resource.end = virt_to_phys(&_edata)-1;
++
++	parse_cmdline_early(cmdline_p);
++
++	early_identify_cpu(&boot_cpu_data);
++
++	/*
++	 * partially used pages are not usable - thus
++	 * we are rounding upwards:
++	 */
++	end_pfn = e820_end_of_ram();
++	num_physpages = end_pfn;		/* for pfn_valid */
++
++	check_efer();
++
++	discover_ebda();
++
++	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
++
++	if (is_initial_xendomain())
++		dmi_scan_machine();
++
++#ifdef CONFIG_ACPI_NUMA
++	/*
++	 * Parse SRAT to discover nodes.
++	 */
++	acpi_numa_init();
++#endif
++
++#ifdef CONFIG_NUMA
++	numa_initmem_init(0, end_pfn); 
++#else
++	contig_initmem_init(0, end_pfn);
++#endif
++
++#ifdef CONFIG_XEN
++	/*
++	 * Reserve kernel, physmap, start info, initial page tables, and
++	 * direct mapping.
++	 */
++	reserve_bootmem_generic(__pa_symbol(&_text),
++	                        (table_end << PAGE_SHIFT) - __pa_symbol(&_text));
++#else
++	/* Reserve direct mapping */
++	reserve_bootmem_generic(table_start << PAGE_SHIFT, 
++				(table_end - table_start) << PAGE_SHIFT);
++
++	/* reserve kernel */
++	reserve_bootmem_generic(__pa_symbol(&_text),
++				__pa_symbol(&_end) - __pa_symbol(&_text));
++
++	/*
++	 * reserve physical page 0 - it's a special BIOS page on many boxes,
++	 * enabling clean reboots, SMP operation, laptop functions.
++	 */
++	reserve_bootmem_generic(0, PAGE_SIZE);
++
++	/* reserve ebda region */
++	if (ebda_addr)
++		reserve_bootmem_generic(ebda_addr, ebda_size);
++
++#ifdef CONFIG_SMP
++	/*
++	 * But first pinch a few for the stack/trampoline stuff
++	 * FIXME: Don't need the extra page at 4K, but need to fix
++	 * trampoline before removing it. (see the GDT stuff)
++	 */
++	reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
++
++	/* Reserve SMP trampoline */
++	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
++#endif
++#endif
++
++#ifdef CONFIG_ACPI_SLEEP
++       /*
++        * Reserve low memory region for sleep support.
++        */
++       acpi_reserve_bootmem();
++#endif
++#ifdef CONFIG_XEN
++#ifdef CONFIG_BLK_DEV_INITRD
++	if (xen_start_info->mod_start) {
++		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
++			/*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
++			initrd_start = INITRD_START + PAGE_OFFSET;
++			initrd_end = initrd_start+INITRD_SIZE;
++			initrd_below_start_ok = 1;
++		} else {
++			printk(KERN_ERR "initrd extends beyond end of memory "
++				"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
++				(unsigned long)(INITRD_START + INITRD_SIZE),
++				(unsigned long)(end_pfn << PAGE_SHIFT));
++			initrd_start = 0;
++		}
++	}
++#endif
++#else	/* CONFIG_XEN */
++#ifdef CONFIG_BLK_DEV_INITRD
++	if (LOADER_TYPE && INITRD_START) {
++		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
++			reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
++			initrd_start =
++				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
++			initrd_end = initrd_start+INITRD_SIZE;
++		}
++		else {
++			printk(KERN_ERR "initrd extends beyond end of memory "
++			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
++			    (unsigned long)(INITRD_START + INITRD_SIZE),
++			    (unsigned long)(end_pfn << PAGE_SHIFT));
++			initrd_start = 0;
++		}
++	}
++#endif
++#endif	/* !CONFIG_XEN */
++#ifdef CONFIG_KEXEC
++#ifdef CONFIG_XEN
++	xen_machine_kexec_setup_resources();
++#else
++	if (crashk_res.start != crashk_res.end) {
++		reserve_bootmem_generic(crashk_res.start,
++			crashk_res.end - crashk_res.start + 1);
++	}
++#endif
++#endif
++
++	paging_init();
++#ifdef CONFIG_X86_LOCAL_APIC
++	/*
++	 * Find and reserve possible boot-time SMP configuration:
++	 */
++	find_smp_config();
++#endif
++#ifdef CONFIG_XEN
++	{
++		int i, j, k, fpp;
++		unsigned long p2m_pages;
++
++		p2m_pages = end_pfn;
++		if (xen_start_info->nr_pages > end_pfn) {
++			/*
++			 * the end_pfn was shrunk (probably by mem= or highmem=
++			 * kernel parameter); shrink reservation with the HV
++			 */
++			struct xen_memory_reservation reservation = {
++				.address_bits = 0,
++				.extent_order = 0,
++				.domid = DOMID_SELF
++			};
++			unsigned int difference;
++			int ret;
++			
++			difference = xen_start_info->nr_pages - end_pfn;
++			
++			set_xen_guest_handle(reservation.extent_start,
++					     ((unsigned long *)xen_start_info->mfn_list) + end_pfn);
++			reservation.nr_extents = difference;
++			ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++						   &reservation);
++			BUG_ON (ret != difference);
++		}
++		else if (end_pfn > xen_start_info->nr_pages)
++			p2m_pages = xen_start_info->nr_pages;
++
++		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++			/* Make sure we have a large enough P->M table. */
++			phys_to_machine_mapping = alloc_bootmem_pages(
++				end_pfn * sizeof(unsigned long));
++			memset(phys_to_machine_mapping, ~0,
++			       end_pfn * sizeof(unsigned long));
++			memcpy(phys_to_machine_mapping,
++			       (unsigned long *)xen_start_info->mfn_list,
++			       p2m_pages * sizeof(unsigned long));
++			free_bootmem(
++				__pa(xen_start_info->mfn_list),
++				PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
++						sizeof(unsigned long))));
++
++			/*
++			 * Initialise the list of the frames that specify the
++			 * list of frames that make up the p2m table. Used by
++			 * save/restore.
++			 */
++			pfn_to_mfn_frame_list_list = alloc_bootmem_pages(PAGE_SIZE);
++
++			fpp = PAGE_SIZE/sizeof(unsigned long);
++			for (i=0, j=0, k=-1; i< end_pfn; i+=fpp, j++) {
++				if ((j % fpp) == 0) {
++					k++;
++					BUG_ON(k>=fpp);
++					pfn_to_mfn_frame_list[k] =
++						alloc_bootmem_pages(PAGE_SIZE);
++					pfn_to_mfn_frame_list_list[k] =
++						virt_to_mfn(pfn_to_mfn_frame_list[k]);
++					j=0;
++				}
++				pfn_to_mfn_frame_list[k][j] =
++					virt_to_mfn(&phys_to_machine_mapping[i]);
++			}
++			HYPERVISOR_shared_info->arch.max_pfn = end_pfn;
++			HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
++				virt_to_mfn(pfn_to_mfn_frame_list_list);
++		}
++
++		/* Mark all ISA DMA channels in-use - using them wouldn't work. */
++		for (i = 0; i < MAX_DMA_CHANNELS; ++i)
++			if (i != 4 && request_dma(i, "xen") != 0)
++				BUG();
++	}
++
++	if (!is_initial_xendomain()) {
++		acpi_disabled = 1;
++#ifdef  CONFIG_ACPI
++		acpi_ht = 0;
++#endif
++	}
++#endif
++
++#ifndef CONFIG_XEN
++	check_ioapic();
++#endif
++
++	zap_low_mappings(0);
++
++	/*
++	 * set this early, so we don't allocate cpu0
++	 * if the MADT list doesn't list the BSP first
++	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
++	 */
++	cpu_set(0, cpu_present_map);
++#ifdef CONFIG_ACPI
++	/*
++	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
++	 * Call this early for SRAT node setup.
++	 */
++	acpi_boot_table_init();
++
++	/*
++	 * Read APIC and some other early information from ACPI tables.
++	 */
++	acpi_boot_init();
++#endif
++
++	init_cpu_to_node();
++
++#ifdef CONFIG_X86_LOCAL_APIC
++	/*
++	 * get boot-time SMP configuration:
++	 */
++	if (smp_found_config)
++		get_smp_config();
++#ifndef CONFIG_XEN
++	init_apic_mappings();
++#endif
++#endif
++#if defined(CONFIG_XEN) && defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
++	prefill_possible_map();
++#endif
++
++	/*
++	 * Request address space for all standard RAM and ROM resources
++	 * and also for regions reported as reserved by the e820.
++	 */
++	probe_roms();
++#ifdef CONFIG_XEN
++	if (is_initial_xendomain())
++		e820_reserve_resources(machine_e820.map, machine_e820.nr_map);
++#else
++	e820_reserve_resources(e820.map, e820.nr_map);
++#endif
++
++	request_resource(&iomem_resource, &video_ram_resource);
++
++	{
++	unsigned i;
++	/* request I/O space for devices used on all i[345]86 PCs */
++	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
++		request_resource(&ioport_resource, &standard_io_resources[i]);
++	}
++
++#ifdef CONFIG_XEN
++	if (is_initial_xendomain())
++		e820_setup_gap(machine_e820.map, machine_e820.nr_map);
++#else
++	e820_setup_gap(e820.map, e820.nr_map);
++#endif
++
++#ifdef CONFIG_XEN
++	{
++		struct physdev_set_iopl set_iopl;
++
++		set_iopl.iopl = 1;
++		WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl));
++
++		if (is_initial_xendomain()) {
++#ifdef CONFIG_VT
++#if defined(CONFIG_VGA_CONSOLE)
++			conswitchp = &vga_con;
++#elif defined(CONFIG_DUMMY_CONSOLE)
++			conswitchp = &dummy_con;
++#endif
++#endif
++		} else {
++#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
++			conswitchp = &dummy_con;
++#endif
++		}
++	}
++#else	/* CONFIG_XEN */
++
++#ifdef CONFIG_VT
++#if defined(CONFIG_VGA_CONSOLE)
++	conswitchp = &vga_con;
++#elif defined(CONFIG_DUMMY_CONSOLE)
++	conswitchp = &dummy_con;
++#endif
++#endif
++
++#endif /* !CONFIG_XEN */
++}
++
++#ifdef CONFIG_XEN
++static int
++xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
++{
++	HYPERVISOR_shutdown(SHUTDOWN_crash);
++	/* we're never actually going to get here... */
++	return NOTIFY_DONE;
++}
++#endif /* CONFIG_XEN */
++
++
++static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
++{
++	unsigned int *v;
++
++	if (c->extended_cpuid_level < 0x80000004)
++		return 0;
++
++	v = (unsigned int *) c->x86_model_id;
++	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
++	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
++	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
++	c->x86_model_id[48] = 0;
++	return 1;
++}
++
++
++static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
++{
++	unsigned int n, dummy, eax, ebx, ecx, edx;
++
++	n = c->extended_cpuid_level;
++
++	if (n >= 0x80000005) {
++		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
++		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
++			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
++		c->x86_cache_size=(ecx>>24)+(edx>>24);
++		/* On K8 L1 TLB is inclusive, so don't count it */
++		c->x86_tlbsize = 0;
++	}
++
++	if (n >= 0x80000006) {
++		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
++		ecx = cpuid_ecx(0x80000006);
++		c->x86_cache_size = ecx >> 16;
++		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
++
++		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
++		c->x86_cache_size, ecx & 0xFF);
++	}
++
++	if (n >= 0x80000007)
++		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power); 
++	if (n >= 0x80000008) {
++		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy); 
++		c->x86_virt_bits = (eax >> 8) & 0xff;
++		c->x86_phys_bits = eax & 0xff;
++	}
++}
++
++#ifdef CONFIG_NUMA
++static int nearby_node(int apicid)
++{
++	int i;
++	for (i = apicid - 1; i >= 0; i--) {
++		int node = apicid_to_node[i];
++		if (node != NUMA_NO_NODE && node_online(node))
++			return node;
++	}
++	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
++		int node = apicid_to_node[i];
++		if (node != NUMA_NO_NODE && node_online(node))
++			return node;
++	}
++	return first_node(node_online_map); /* Shouldn't happen */
++}
++#endif
++
++/*
++ * On an AMD dual-core setup the lower bits of the APIC id distinguish the cores.
++ * Assumes number of cores is a power of two.
++ */
++static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
++{
++#ifdef CONFIG_SMP
++	unsigned bits;
++#ifdef CONFIG_NUMA
++	int cpu = smp_processor_id();
++	int node = 0;
++	unsigned apicid = hard_smp_processor_id();
++#endif
++	unsigned ecx = cpuid_ecx(0x80000008);
++
++	c->x86_max_cores = (ecx & 0xff) + 1;
++
++	/* CPU telling us the core id bits shift? */
++	bits = (ecx >> 12) & 0xF;
++
++	/* Otherwise recompute */
++	if (bits == 0) {
++		while ((1 << bits) < c->x86_max_cores)
++			bits++;
++	}
++
++	/* Low order bits define the core id (index of core in socket) */
++	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
++	/* Convert the APIC ID into the socket ID */
++	c->phys_proc_id = phys_pkg_id(bits);
++
++#ifdef CONFIG_NUMA
++  	node = c->phys_proc_id;
++ 	if (apicid_to_node[apicid] != NUMA_NO_NODE)
++ 		node = apicid_to_node[apicid];
++ 	if (!node_online(node)) {
++ 		/* Two possibilities here:
++ 		   - The CPU is missing memory and no node was created.
++ 		   In that case try picking one from a nearby CPU
++ 		   - The APIC IDs differ from the HyperTransport node IDs
++ 		   which the K8 northbridge parsing fills in.
++ 		   Assume they are all increased by a constant offset,
++ 		   but in the same order as the HT nodeids.
++ 		   If that doesn't result in a usable node fall back to the
++ 		   path for the previous case.  */
++ 		int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
++ 		if (ht_nodeid >= 0 &&
++ 		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
++ 			node = apicid_to_node[ht_nodeid];
++ 		/* Pick a nearby node */
++ 		if (!node_online(node))
++ 			node = nearby_node(apicid);
++ 	}
++	numa_set_node(cpu, node);
++
++	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
++#endif
++#endif
++}
++
++static void __init init_amd(struct cpuinfo_x86 *c)
++{
++	unsigned level;
++
++#ifdef CONFIG_SMP
++	unsigned long value;
++
++	/*
++	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
++	 * bit 6 of msr C001_0015
++ 	 *
++	 * Errata 63 for SH-B3 steppings
++	 * Errata 122 for all steppings (F+ have it disabled by default)
++	 */
++	if (c->x86 == 15) {
++		rdmsrl(MSR_K8_HWCR, value);
++		value |= 1 << 6;
++		wrmsrl(MSR_K8_HWCR, value);
++	}
++#endif
++
++	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
++	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
++	clear_bit(0*32+31, &c->x86_capability);
++	
++	/* On C+ stepping K8 rep microcode works well for copy/memset */
++	level = cpuid_eax(1);
++	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
++		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
++
++	/* Enable workaround for FXSAVE leak */
++	if (c->x86 >= 6)
++		set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);
++
++	level = get_model_name(c);
++	if (!level) {
++		switch (c->x86) { 
++		case 15:
++			/* Should distinguish Models here, but this is only
++			   a fallback anyway. */
++			strcpy(c->x86_model_id, "Hammer");
++			break; 
++		} 
++	} 
++	display_cacheinfo(c);
++
++	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
++	if (c->x86_power & (1<<8))
++		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
++
++	/* Multi core CPU? */
++	if (c->extended_cpuid_level >= 0x80000008)
++		amd_detect_cmp(c);
++
++	/* Fix cpuid4 emulation for more */
++	num_cache_leaves = 3;
++}
++
++static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
++{
++#ifdef CONFIG_SMP
++	u32 	eax, ebx, ecx, edx;
++	int 	index_msb, core_bits;
++
++	cpuid(1, &eax, &ebx, &ecx, &edx);
++
++
++	if (!cpu_has(c, X86_FEATURE_HT))
++		return;
++ 	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
++		goto out;
++
++	smp_num_siblings = (ebx & 0xff0000) >> 16;
++
++	if (smp_num_siblings == 1) {
++		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
++	} else if (smp_num_siblings > 1 ) {
++
++		if (smp_num_siblings > NR_CPUS) {
++			printk(KERN_WARNING "CPU: Unsupported number of siblings %d", smp_num_siblings);
++			smp_num_siblings = 1;
++			return;
++		}
++
++		index_msb = get_count_order(smp_num_siblings);
++		c->phys_proc_id = phys_pkg_id(index_msb);
++
++		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
++
++		index_msb = get_count_order(smp_num_siblings) ;
++
++		core_bits = get_count_order(c->x86_max_cores);
++
++		c->cpu_core_id = phys_pkg_id(index_msb) &
++					       ((1 << core_bits) - 1);
++	}
++out:
++	if ((c->x86_max_cores * smp_num_siblings) > 1) {
++		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
++		printk(KERN_INFO  "CPU: Processor Core ID: %d\n", c->cpu_core_id);
++	}
++
++#endif
++}
++
++/*
++ * find out the number of processor cores on the die
++ */
++static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
++{
++	unsigned int eax, t;
++
++	if (c->cpuid_level < 4)
++		return 1;
++
++	cpuid_count(4, 0, &eax, &t, &t, &t);
++
++	if (eax & 0x1f)
++		return ((eax >> 26) + 1);
++	else
++		return 1;
++}
++
++static void srat_detect_node(void)
++{
++#ifdef CONFIG_NUMA
++	unsigned node;
++	int cpu = smp_processor_id();
++	int apicid = hard_smp_processor_id();
++
++	/* Don't do the funky fallback heuristics the AMD version employs
++	   for now. */
++	node = apicid_to_node[apicid];
++	if (node == NUMA_NO_NODE)
++		node = first_node(node_online_map);
++	numa_set_node(cpu, node);
++
++	if (acpi_numa > 0)
++		printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
++#endif
++}
++
++static void __cpuinit init_intel(struct cpuinfo_x86 *c)
++{
++	/* Cache sizes */
++	unsigned n;
++
++	init_intel_cacheinfo(c);
++	if (c->cpuid_level > 9 ) {
++		unsigned eax = cpuid_eax(10);
++		/* Check for version and the number of counters */
++		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
++			set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
++	}
++
++	n = c->extended_cpuid_level;
++	if (n >= 0x80000008) {
++		unsigned eax = cpuid_eax(0x80000008);
++		c->x86_virt_bits = (eax >> 8) & 0xff;
++		c->x86_phys_bits = eax & 0xff;
++		/* CPUID workaround for Intel 0F34 CPU */
++		if (c->x86_vendor == X86_VENDOR_INTEL &&
++		    c->x86 == 0xF && c->x86_model == 0x3 &&
++		    c->x86_mask == 0x4)
++			c->x86_phys_bits = 36;
++	}
++
++	if (c->x86 == 15)
++		c->x86_cache_alignment = c->x86_clflush_size * 2;
++	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
++	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
++		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
++	set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
++ 	c->x86_max_cores = intel_num_cpu_cores(c);
++
++	srat_detect_node();
++}
++
++static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
++{
++	char *v = c->x86_vendor_id;
++
++	if (!strcmp(v, "AuthenticAMD"))
++		c->x86_vendor = X86_VENDOR_AMD;
++	else if (!strcmp(v, "GenuineIntel"))
++		c->x86_vendor = X86_VENDOR_INTEL;
++	else
++		c->x86_vendor = X86_VENDOR_UNKNOWN;
++}
++
++struct cpu_model_info {
++	int vendor;
++	int family;
++	char *model_names[16];
++};
++
++/* Do some early cpuid on the boot CPU to get some parameters that are
++   needed before check_bugs. Everything advanced is in identify_cpu
++   below. */
++void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
++{
++	u32 tfms;
++
++	c->loops_per_jiffy = loops_per_jiffy;
++	c->x86_cache_size = -1;
++	c->x86_vendor = X86_VENDOR_UNKNOWN;
++	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
++	c->x86_vendor_id[0] = '\0'; /* Unset */
++	c->x86_model_id[0] = '\0';  /* Unset */
++	c->x86_clflush_size = 64;
++	c->x86_cache_alignment = c->x86_clflush_size;
++	c->x86_max_cores = 1;
++	c->extended_cpuid_level = 0;
++	memset(&c->x86_capability, 0, sizeof c->x86_capability);
++
++	/* Get vendor name */
++	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
++	      (unsigned int *)&c->x86_vendor_id[0],
++	      (unsigned int *)&c->x86_vendor_id[8],
++	      (unsigned int *)&c->x86_vendor_id[4]);
++		
++	get_cpu_vendor(c);
++
++	/* Initialize the standard set of capabilities */
++	/* Note that the vendor-specific code below might override */
++
++	/* Intel-defined flags: level 0x00000001 */
++	if (c->cpuid_level >= 0x00000001) {
++		__u32 misc;
++		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
++		      &c->x86_capability[0]);
++		c->x86 = (tfms >> 8) & 0xf;
++		c->x86_model = (tfms >> 4) & 0xf;
++		c->x86_mask = tfms & 0xf;
++		if (c->x86 == 0xf)
++			c->x86 += (tfms >> 20) & 0xff;
++		if (c->x86 >= 0x6)
++			c->x86_model += ((tfms >> 16) & 0xF) << 4;
++		if (c->x86_capability[0] & (1<<19)) 
++			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
++	} else {
++		/* Have CPUID level 0 only - unheard of */
++		c->x86 = 4;
++	}
++
++#ifdef CONFIG_SMP
++	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
++#endif
++}
++
++/*
++ * This does the hard work of actually picking apart the CPU stuff...
++ */
++void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
++{
++	int i;
++	u32 xlvl;
++
++	early_identify_cpu(c);
++
++	/* AMD-defined flags: level 0x80000001 */
++	xlvl = cpuid_eax(0x80000000);
++	c->extended_cpuid_level = xlvl;
++	if ((xlvl & 0xffff0000) == 0x80000000) {
++		if (xlvl >= 0x80000001) {
++			c->x86_capability[1] = cpuid_edx(0x80000001);
++			c->x86_capability[6] = cpuid_ecx(0x80000001);
++		}
++		if (xlvl >= 0x80000004)
++			get_model_name(c); /* Default name */
++	}
++
++	/* Transmeta-defined flags: level 0x80860001 */
++	xlvl = cpuid_eax(0x80860000);
++	if ((xlvl & 0xffff0000) == 0x80860000) {
++		/* Don't set x86_cpuid_level here for now, to avoid confusion. */
++		if (xlvl >= 0x80860001)
++			c->x86_capability[2] = cpuid_edx(0x80860001);
++	}
++
++	c->apicid = phys_pkg_id(0);
++
++	/*
++	 * Vendor-specific initialization.  In this section we
++	 * canonicalize the feature flags, meaning if there are
++	 * features a certain CPU supports which CPUID doesn't
++	 * tell us, CPUID claiming incorrect flags, or other bugs,
++	 * we handle them here.
++	 *
++	 * At the end of this section, c->x86_capability better
++	 * indicate the features this CPU genuinely supports!
++	 */
++	switch (c->x86_vendor) {
++	case X86_VENDOR_AMD:
++		init_amd(c);
++		break;
++
++	case X86_VENDOR_INTEL:
++		init_intel(c);
++		break;
++
++	case X86_VENDOR_UNKNOWN:
++	default:
++		display_cacheinfo(c);
++		break;
++	}
++
++	select_idle_routine(c);
++	detect_ht(c); 
++
++	/*
++	 * On SMP, boot_cpu_data holds the common feature set between
++	 * all CPUs; so make sure that we indicate which features are
++	 * common between the CPUs.  The first time this routine gets
++	 * executed, c == &boot_cpu_data.
++	 */
++	if (c != &boot_cpu_data) {
++		/* AND the already accumulated flags with these */
++		for (i = 0 ; i < NCAPINTS ; i++)
++			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
++	}
++
++#ifdef CONFIG_X86_MCE
++	mcheck_init(c);
++#endif
++	if (c == &boot_cpu_data)
++		mtrr_bp_init();
++	else
++		mtrr_ap_init();
++#ifdef CONFIG_NUMA
++	numa_add_cpu(smp_processor_id());
++#endif
++}
++ 
++
++void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
++{
++	if (c->x86_model_id[0])
++		printk("%s", c->x86_model_id);
++
++	if (c->x86_mask || c->cpuid_level >= 0) 
++		printk(" stepping %02x\n", c->x86_mask);
++	else
++		printk("\n");
++}
++
++/*
++ *	Get CPU information for use by the procfs.
++ */
++
++static int show_cpuinfo(struct seq_file *m, void *v)
++{
++	struct cpuinfo_x86 *c = v;
++
++	/* 
++	 * These flag bits must match the definitions in <asm/cpufeature.h>.
++	 * NULL means this bit is undefined or reserved; either way it doesn't
++	 * have meaning as far as Linux is concerned.  Note that it's important
++	 * to realize there is a difference between this table and CPUID -- if
++	 * applications want to get the raw CPUID data, they should access
++	 * /dev/cpu/<cpu_nr>/cpuid instead.
++	 */
++	static char *x86_cap_flags[] = {
++		/* Intel-defined */
++	        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
++	        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
++	        "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
++	        "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
++
++		/* AMD-defined */
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
++		NULL, "fxsr_opt", NULL, "rdtscp", NULL, "lm", "3dnowext", "3dnow",
++
++		/* Transmeta-defined */
++		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++
++		/* Other (Linux-defined) */
++		"cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
++		"constant_tsc", NULL, NULL,
++		"up", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++
++		/* Intel-defined (#2) */
++		"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
++		"tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++
++		/* VIA/Cyrix/Centaur-defined */
++		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++
++		/* AMD-defined (#2) */
++		"lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++	};
++	static char *x86_power_flags[] = { 
++		"ts",	/* temperature sensor */
++		"fid",  /* frequency id control */
++		"vid",  /* voltage id control */
++		"ttp",  /* thermal trip */
++		"tm",
++		"stc",
++		NULL,
++		/* nothing */	/* constant_tsc - moved to flags */
++	};
++
++
++#ifdef CONFIG_SMP
++	if (!cpu_online(c-cpu_data))
++		return 0;
++#endif
++
++	seq_printf(m,"processor\t: %u\n"
++		     "vendor_id\t: %s\n"
++		     "cpu family\t: %d\n"
++		     "model\t\t: %d\n"
++		     "model name\t: %s\n",
++		     (unsigned)(c-cpu_data),
++		     c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
++		     c->x86,
++		     (int)c->x86_model,
++		     c->x86_model_id[0] ? c->x86_model_id : "unknown");
++	
++	if (c->x86_mask || c->cpuid_level >= 0)
++		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
++	else
++		seq_printf(m, "stepping\t: unknown\n");
++	
++	if (cpu_has(c,X86_FEATURE_TSC)) {
++		unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
++		if (!freq)
++			freq = cpu_khz;
++		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
++			     freq / 1000, (freq % 1000));
++	}
++
++	/* Cache size */
++	if (c->x86_cache_size >= 0) 
++		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
++	
++#ifdef CONFIG_SMP
++	if (smp_num_siblings * c->x86_max_cores > 1) {
++		int cpu = c - cpu_data;
++		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
++		seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
++		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
++		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
++	}
++#endif	
++
++	seq_printf(m,
++	        "fpu\t\t: yes\n"
++	        "fpu_exception\t: yes\n"
++	        "cpuid level\t: %d\n"
++	        "wp\t\t: yes\n"
++	        "flags\t\t:",
++		   c->cpuid_level);
++
++	{ 
++		int i; 
++		for ( i = 0 ; i < 32*NCAPINTS ; i++ )
++			if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
++				seq_printf(m, " %s", x86_cap_flags[i]);
++	}
++		
++	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
++		   c->loops_per_jiffy/(500000/HZ),
++		   (c->loops_per_jiffy/(5000/HZ)) % 100);
++
++	if (c->x86_tlbsize > 0) 
++		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
++	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
++	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
++
++	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", 
++		   c->x86_phys_bits, c->x86_virt_bits);
++
++	seq_printf(m, "power management:");
++	{
++		unsigned i;
++		for (i = 0; i < 32; i++) 
++			if (c->x86_power & (1 << i)) {
++				if (i < ARRAY_SIZE(x86_power_flags) &&
++					x86_power_flags[i])
++					seq_printf(m, "%s%s",
++						x86_power_flags[i][0]?" ":"",
++						x86_power_flags[i]);
++				else
++					seq_printf(m, " [%d]", i);
++			}
++	}
++
++	seq_printf(m, "\n\n");
++
++	return 0;
++}
++
++static void *c_start(struct seq_file *m, loff_t *pos)
++{
++	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
++}
++
++static void *c_next(struct seq_file *m, void *v, loff_t *pos)
++{
++	++*pos;
++	return c_start(m, pos);
++}
++
++static void c_stop(struct seq_file *m, void *v)
++{
++}
++
++struct seq_operations cpuinfo_op = {
++	.start =c_start,
++	.next =	c_next,
++	.stop =	c_stop,
++	.show =	show_cpuinfo,
++};
++
++#if defined(CONFIG_INPUT_PCSPKR) || defined(CONFIG_INPUT_PCSPKR_MODULE)
++#include <linux/platform_device.h>
++static __init int add_pcspkr(void)
++{
++	struct platform_device *pd;
++	int ret;
++
++	if (!is_initial_xendomain())
++		return 0;
++
++	pd = platform_device_alloc("pcspkr", -1);
++	if (!pd)
++		return -ENOMEM;
++
++	ret = platform_device_add(pd);
++	if (ret)
++		platform_device_put(pd);
++
++	return ret;
++}
++device_initcall(add_pcspkr);
++#endif
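+
The frame-list bookkeeping built in setup_arch() above is easier to see as
index arithmetic: with 4 KiB pages and 8-byte entries there are 512 entries
per page (fpp), and the MFN of the p2m page covering a given pfn can be
looked up as in this sketch (the helper name is hypothetical):

    /* Hypothetical helper showing how the structures above fit together:
     * pfn_to_mfn_frame_list_list holds the MFNs of the frame-list pages,
     * and each frame-list page holds the MFNs of the p2m pages. */
    static unsigned long p2m_page_mfn(unsigned long pfn)
    {
            unsigned long fpp = PAGE_SIZE / sizeof(unsigned long); /* 512 */
            unsigned long frame = pfn / fpp;   /* p2m page covering pfn */

            return pfn_to_mfn_frame_list[frame / fpp][frame % fpp];
    }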
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/setup.c
+--- a/arch/x86_64/kernel/setup.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/x86_64/kernel/setup.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -122,7 +122,9 @@
+ 		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ 	{ .name = "timer1", .start = 0x50, .end = 0x53,
+ 		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
+-	{ .name = "keyboard", .start = 0x60, .end = 0x6f,
++	{ .name = "keyboard", .start = 0x60, .end = 0x60,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "keyboard", .start = 0x64, .end = 0x64,
+ 		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ 	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
+ 		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
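+
The narrowed claim presumably matters because the old 0x60-0x6f range also
swallowed port 0x61 (system control port B, the PC speaker gate), which
other code may want; after this change a speaker driver could claim the
port separately, along these lines (a hedged sketch, hypothetical call
site):

    /* Sketch: port 0x61 is no longer covered by the "keyboard" resource,
     * so a driver such as pcspkr can request it on its own. */
    if (!request_region(0x61, 1, "pcspkr"))
            printk(KERN_WARNING "pcspkr: port 0x61 busy\n");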
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/setup64-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/kernel/setup64-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,367 @@
++/* 
++ * X86-64 specific CPU setup.
++ * Copyright (C) 1995  Linus Torvalds
++ * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
++ * See setup.c for older changelog.
++ *
++ * Jun Nakajima <jun.nakajima at intel.com> 
++ *   Modified for Xen
++ *
++ */ 
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/bootmem.h>
++#include <linux/bitops.h>
++#include <linux/module.h>
++#include <asm/bootsetup.h>
++#include <asm/pda.h>
++#include <asm/pgtable.h>
++#include <asm/processor.h>
++#include <asm/desc.h>
++#include <asm/atomic.h>
++#include <asm/mmu_context.h>
++#include <asm/smp.h>
++#include <asm/i387.h>
++#include <asm/percpu.h>
++#include <asm/proto.h>
++#include <asm/sections.h>
++#ifdef CONFIG_XEN
++#include <asm/hypervisor.h>
++#endif
++
++char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,};
++
++cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
++
++struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly;
++EXPORT_SYMBOL(_cpu_pda);
++struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned;
++
++#ifndef CONFIG_X86_NO_IDT
++struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; 
++#endif
++
++char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
++
++unsigned long __supported_pte_mask __read_mostly = ~0UL;
++EXPORT_SYMBOL(__supported_pte_mask);
++static int do_not_nx __cpuinitdata = 0;
++
++/* noexec=on|off
++Control non executable mappings for 64bit processes.
++
++on	Enable (default)
++off	Disable
++*/ 
++int __init nonx_setup(char *str)
++{
++	if (!strncmp(str, "on", 2)) {
++                __supported_pte_mask |= _PAGE_NX; 
++ 		do_not_nx = 0; 
++	} else if (!strncmp(str, "off", 3)) {
++		do_not_nx = 1;
++		__supported_pte_mask &= ~_PAGE_NX;
++        }
++	return 1;
++} 
++__setup("noexec=", nonx_setup);	/* parsed early actually */
++
++int force_personality32 = 0; 
++
++/* noexec32=on|off
++Control non executable heap for 32bit processes.
++To control the stack too use noexec=off
++
++on	PROT_READ does not imply PROT_EXEC for 32bit processes
++off	PROT_READ implies PROT_EXEC (default)
++*/
++static int __init nonx32_setup(char *str)
++{
++	if (!strcmp(str, "on"))
++		force_personality32 &= ~READ_IMPLIES_EXEC;
++	else if (!strcmp(str, "off"))
++		force_personality32 |= READ_IMPLIES_EXEC;
++	return 1;
++}
++__setup("noexec32=", nonx32_setup);
++
++/*
++ * Great future plan:
++ * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
++ * Always point %gs to its beginning
++ */
++void __init setup_per_cpu_areas(void)
++{ 
++	int i;
++	unsigned long size;
++
++#ifdef CONFIG_HOTPLUG_CPU
++	prefill_possible_map();
++#endif
++
++	/* Copy section for each CPU (we discard the original) */
++	size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
++#ifdef CONFIG_MODULES
++	if (size < PERCPU_ENOUGH_ROOM)
++		size = PERCPU_ENOUGH_ROOM;
++#endif
++
++	for_each_cpu_mask (i, cpu_possible_map) {
++		char *ptr;
++
++		if (!NODE_DATA(cpu_to_node(i))) {
++			printk("cpu with no node %d, num_online_nodes %d\n",
++			       i, num_online_nodes());
++			ptr = alloc_bootmem(size);
++		} else { 
++			ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
++		}
++		if (!ptr)
++			panic("Cannot allocate cpu data for CPU %d\n", i);
++		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
++		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
++	}
++} 
++
++#ifdef CONFIG_XEN
++static void switch_pt(void)
++{
++	xen_pt_switch(__pa_symbol(init_level4_pgt));
++	xen_new_user_pt(__pa_symbol(__user_pgd(init_level4_pgt)));
++}
++
++static void __cpuinit cpu_gdt_init(const struct desc_ptr *gdt_descr)
++{
++	unsigned long frames[16];
++	unsigned long va;
++	int f;
++
++	for (va = gdt_descr->address, f = 0;
++	     va < gdt_descr->address + gdt_descr->size;
++	     va += PAGE_SIZE, f++) {
++		frames[f] = virt_to_mfn(va);
++		make_page_readonly(
++			(void *)va, XENFEAT_writable_descriptor_tables);
++	}
++	if (HYPERVISOR_set_gdt(frames, (gdt_descr->size + 1) /
++                               sizeof (struct desc_struct)))
++		BUG();
++}
++#else
++static void switch_pt(void)
++{
++	asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
++}
++
++static void __cpuinit cpu_gdt_init(const struct desc_ptr *gdt_descr)
++{
++	asm volatile("lgdt %0" :: "m" (*gdt_descr));
++	asm volatile("lidt %0" :: "m" (idt_descr));
++}
++#endif
++
++void pda_init(int cpu)
++{ 
++	struct x8664_pda *pda = cpu_pda(cpu);
++
++	/* Set up data that may be needed in __get_free_pages early */
++	asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0)); 
++#ifndef CONFIG_XEN
++	wrmsrl(MSR_GS_BASE, pda);
++#else
++	if (HYPERVISOR_set_segment_base(SEGBASE_GS_KERNEL,
++					(unsigned long)pda))
++		BUG();
++#endif
++	pda->cpunumber = cpu; 
++	pda->irqcount = -1;
++	pda->kernelstack = 
++		(unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE; 
++	pda->active_mm = &init_mm;
++	pda->mmu_state = 0;
++
++	if (cpu == 0) {
++#ifdef CONFIG_XEN
++		xen_init_pt();
++#endif
++		/* others are initialized in smpboot.c */
++		pda->pcurrent = &init_task;
++		pda->irqstackptr = boot_cpu_stack; 
++	} else {
++		pda->irqstackptr = (char *)
++			__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
++		if (!pda->irqstackptr)
++			panic("cannot allocate irqstack for cpu %d", cpu); 
++	}
++
++	switch_pt();
++
++	pda->irqstackptr += IRQSTACKSIZE-64;
++} 
++
++#ifndef CONFIG_X86_NO_TSS
++char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]
++__attribute__((section(".bss.page_aligned")));
++#endif
++
++/* May not be marked __init: used by software suspend */
++void syscall_init(void)
++{
++#ifndef CONFIG_XEN
++	/* 
++	 * LSTAR and STAR live in a somewhat strange symbiosis.
++	 * They both write to the same internal register. STAR allows setting
++	 * CS/DS but only a 32bit target. LSTAR sets the 64bit rip.
++	 */ 
++	wrmsrl(MSR_STAR,  ((u64)__USER32_CS)<<48  | ((u64)__KERNEL_CS)<<32); 
++	wrmsrl(MSR_LSTAR, system_call); 
++
++	/* Flags to clear on syscall */
++	wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000); 
++#endif
++#ifdef CONFIG_IA32_EMULATION   		
++	syscall32_cpu_init ();
++#endif
++}
++
++void __cpuinit check_efer(void)
++{
++	unsigned long efer;
++
++	rdmsrl(MSR_EFER, efer); 
++        if (!(efer & EFER_NX) || do_not_nx) { 
++                __supported_pte_mask &= ~_PAGE_NX; 
++        }       
++}
++
++unsigned long kernel_eflags;
++
++/*
++ * cpu_init() initializes state that is per-CPU. Some data is already
++ * initialized (naturally) in the bootstrap process, such as the GDT
++ * and IDT. We reload them nevertheless, this function acts as a
++ * 'CPU state barrier', nothing should get across.
++ * A lot of state is already set up in PDA init.
++ */
++void __cpuinit cpu_init (void)
++{
++	int cpu = stack_smp_processor_id();
++#ifndef CONFIG_X86_NO_TSS
++	struct tss_struct *t = &per_cpu(init_tss, cpu);
++	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
++	unsigned long v; 
++	char *estacks = NULL; 
++	unsigned i;
++#endif
++	struct task_struct *me;
++
++	/* CPU 0 is initialised in head64.c */
++	if (cpu != 0) {
++		pda_init(cpu);
++		zap_low_mappings(cpu);
++	}
++#ifndef CONFIG_X86_NO_TSS
++	else
++		estacks = boot_exception_stacks; 
++#endif
++
++	me = current;
++
++	if (cpu_test_and_set(cpu, cpu_initialized))
++		panic("CPU#%d already initialized!\n", cpu);
++
++	printk("Initializing CPU#%d\n", cpu);
++
++	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
++
++	/*
++	 * Initialize the per-CPU GDT with the boot GDT,
++	 * and set up the GDT descriptor:
++	 */
++#ifndef CONFIG_XEN 
++	if (cpu)
++ 		memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE);
++#endif
++
++	cpu_gdt_descr[cpu].size = GDT_SIZE;
++	cpu_gdt_init(&cpu_gdt_descr[cpu]);
++
++	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
++	syscall_init();
++
++	wrmsrl(MSR_FS_BASE, 0);
++	wrmsrl(MSR_KERNEL_GS_BASE, 0);
++	barrier(); 
++
++	check_efer();
++
++#ifndef CONFIG_X86_NO_TSS
++	/*
++	 * set up and load the per-CPU TSS
++	 */
++	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
++		if (cpu) {
++			static const unsigned int order[N_EXCEPTION_STACKS] = {
++				[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
++				[DEBUG_STACK - 1] = DEBUG_STACK_ORDER
++			};
++
++			estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
++			if (!estacks)
++				panic("Cannot allocate exception stack %ld %d\n",
++				      v, cpu); 
++		}
++		switch (v + 1) {
++#if DEBUG_STKSZ > EXCEPTION_STKSZ
++		case DEBUG_STACK:
++			cpu_pda(cpu)->debugstack = (unsigned long)estacks;
++			estacks += DEBUG_STKSZ;
++			break;
++#endif
++		default:
++			estacks += EXCEPTION_STKSZ;
++			break;
++		}
++		orig_ist->ist[v] = t->ist[v] = (unsigned long)estacks;
++	}
++
++	t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
++	/*
++	 * <= is required because the CPU will access up to
++	 * 8 bits beyond the end of the IO permission bitmap.
++	 */
++	for (i = 0; i <= IO_BITMAP_LONGS; i++)
++		t->io_bitmap[i] = ~0UL;
++#endif
++
++	atomic_inc(&init_mm.mm_count);
++	me->active_mm = &init_mm;
++	if (me->mm)
++		BUG();
++	enter_lazy_tlb(&init_mm, me);
++
++#ifndef CONFIG_X86_NO_TSS
++	set_tss_desc(cpu, t);
++#endif
++#ifndef CONFIG_XEN
++	load_TR_desc();
++#endif
++	load_LDT(&init_mm.context);
++
++	/*
++	 * Clear all 6 debug registers:
++	 */
++
++	set_debugreg(0UL, 0);
++	set_debugreg(0UL, 1);
++	set_debugreg(0UL, 2);
++	set_debugreg(0UL, 3);
++	set_debugreg(0UL, 6);
++	set_debugreg(0UL, 7);
++
++	fpu_init(); 
++
++	raw_local_save_flags(kernel_eflags);
++}
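+
setup_per_cpu_areas() above reduces per-CPU data to a single byte offset
per CPU, recorded in the PDA; dereferencing a per-CPU variable then amounts
to the following (a simplified sketch of what the kernel's per_cpu()
machinery does; the macro name is illustrative):

    /* Sketch: each CPU's copy of a per-CPU variable lives data_offset
     * bytes past the .data.percpu template copied above. */
    #define my_per_cpu_ptr(var, cpu) \
            ((typeof(&(var)))((char *)&(var) + cpu_pda(cpu)->data_offset))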
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/signal.c
+--- a/arch/x86_64/kernel/signal.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/x86_64/kernel/signal.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -36,37 +36,6 @@
+                sigset_t *set, struct pt_regs * regs); 
+ int ia32_setup_frame(int sig, struct k_sigaction *ka,
+             sigset_t *set, struct pt_regs * regs); 
+-
+-asmlinkage long
+-sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, struct pt_regs *regs)
+-{
+-	sigset_t saveset, newset;
+-
+-	/* XXX: Don't preclude handling different sized sigset_t's.  */
+-	if (sigsetsize != sizeof(sigset_t))
+-		return -EINVAL;
+-
+-	if (copy_from_user(&newset, unewset, sizeof(newset)))
+-		return -EFAULT;
+-	sigdelsetmask(&newset, ~_BLOCKABLE);
+-
+-	spin_lock_irq(&current->sighand->siglock);
+-	saveset = current->blocked;
+-	current->blocked = newset;
+-	recalc_sigpending();
+-	spin_unlock_irq(&current->sighand->siglock);
+-#ifdef DEBUG_SIG
+-	printk("rt_sigsuspend savset(%lx) newset(%lx) regs(%p) rip(%lx)\n",
+-		saveset, newset, regs, regs->rip);
+-#endif 
+-	regs->rax = -EINTR;
+-	while (1) {
+-		current->state = TASK_INTERRUPTIBLE;
+-		schedule();
+-		if (do_signal(regs, &saveset))
+-			return -EINTR;
+-	}
+-}
+ 
+ asmlinkage long
+ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
+@@ -341,11 +310,11 @@
+ 		current->comm, current->pid, frame, regs->rip, frame->pretcode);
+ #endif
+ 
+-	return 1;
++	return 0;
+ 
+ give_sigsegv:
+ 	force_sigsegv(sig, current);
+-	return 0;
++	return -EFAULT;
+ }
+ 
+ /*
+@@ -408,7 +377,7 @@
+ #endif
+ 	ret = setup_rt_frame(sig, ka, info, oldset, regs);
+ 
+-	if (ret) {
++	if (ret == 0) {
+ 		spin_lock_irq(&current->sighand->siglock);
+ 		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ 		if (!(ka->sa.sa_flags & SA_NODEFER))
+@@ -425,11 +394,12 @@
+  * want to handle. Thus you cannot kill init even with a SIGKILL even by
+  * mistake.
+  */
+-int do_signal(struct pt_regs *regs, sigset_t *oldset)
++static void do_signal(struct pt_regs *regs)
+ {
+ 	struct k_sigaction ka;
+ 	siginfo_t info;
+ 	int signr;
++	sigset_t *oldset;
+ 
+ 	/*
+ 	 * We want the common case to go fast, which
+@@ -438,9 +408,11 @@
+ 	 * if so.
+ 	 */
+ 	if (!user_mode(regs))
+-		return 1;
++		return;
+ 
+-	if (!oldset)
++	if (test_thread_flag(TIF_RESTORE_SIGMASK))
++		oldset = &current->saved_sigmask;
++	else
+ 		oldset = &current->blocked;
+ 
+ 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+@@ -454,30 +426,46 @@
+ 			set_debugreg(current->thread.debugreg7, 7);
+ 
+ 		/* Whee!  Actually deliver the signal.  */
+-		return handle_signal(signr, &info, &ka, oldset, regs);
++		if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
++			/* a signal was successfully delivered; the saved
++			 * sigmask will have been stored in the signal frame,
++			 * and will be restored by sigreturn, so we can simply
++			 * clear the TIF_RESTORE_SIGMASK flag */
++			clear_thread_flag(TIF_RESTORE_SIGMASK);
++		}
++		return;
+ 	}
+ 
+ 	/* Did we come from a system call? */
+ 	if ((long)regs->orig_rax >= 0) {
+ 		/* Restart the system call - no handlers present */
+ 		long res = regs->rax;
+-		if (res == -ERESTARTNOHAND ||
+-		    res == -ERESTARTSYS ||
+-		    res == -ERESTARTNOINTR) {
++		switch (res) {
++		case -ERESTARTNOHAND:
++		case -ERESTARTSYS:
++		case -ERESTARTNOINTR:
+ 			regs->rax = regs->orig_rax;
+ 			regs->rip -= 2;
+-		}
+-		if (regs->rax == (unsigned long)-ERESTART_RESTARTBLOCK) {
++			break;
++		case -ERESTART_RESTARTBLOCK:
+ 			regs->rax = test_thread_flag(TIF_IA32) ?
+ 					__NR_ia32_restart_syscall :
+ 					__NR_restart_syscall;
+ 			regs->rip -= 2;
++			break;
+ 		}
+ 	}
+-	return 0;
++
++	/* if there's no signal to deliver, we just put the saved sigmask
++	   back. */
++	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
++		clear_thread_flag(TIF_RESTORE_SIGMASK);
++		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
++	}
+ }
+ 
+-void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, __u32 thread_info_flags)
++void
++do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
+ {
+ #ifdef DEBUG_SIG
+ 	printk("do_notify_resume flags:%x rip:%lx rsp:%lx caller:%lx pending:%lx\n",
+@@ -491,8 +479,8 @@
+ 	}
+ 
+ 	/* deal with pending signal delivery */
+-	if (thread_info_flags & _TIF_SIGPENDING)
+-		do_signal(regs,oldset);
++	if (thread_info_flags & (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK))
++		do_signal(regs);
+ }
+ 
+ void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
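
The signal.c hunks above drop the arch-private sys_rt_sigsuspend() and move x86_64 onto the generic TIF_RESTORE_SIGMASK scheme: the old mask is parked in current->saved_sigmask, handle_signal() lets the signal frame carry it on success, and do_signal() restores it when nothing was delivered. The userspace contract being implemented is unchanged; the small demo of that contract below uses only standard POSIX calls and is illustrative, not kernel code:

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static void on_usr1(int sig) { (void)sig; }

    int main(void)
    {
        sigset_t block, old, wait_mask;

        signal(SIGUSR1, on_usr1);
        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block, &old); /* SIGUSR1 now held pending */

        kill(getpid(), SIGUSR1);

        wait_mask = old;        /* mask to sleep with; the kernel swaps it in */
        sigsuspend(&wait_mask); /* handler runs, then the old mask comes back,
                                   which is the saved_sigmask dance the patch
                                   delegates to generic code */
        puts("woke up with the original mask back in place");
        return 0;
    }
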
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/smp-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/kernel/smp-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,575 @@
++/*
++ *	Intel SMP support routines.
++ *
++ *	(c) 1995 Alan Cox, Building #3 <alan at redhat.com>
++ *	(c) 1998-99, 2000 Ingo Molnar <mingo at redhat.com>
++ *      (c) 2002,2003 Andi Kleen, SuSE Labs.
++ *
++ *	This code is released under the GNU General Public License version 2 or
++ *	later.
++ */
++
++#include <linux/init.h>
++
++#include <linux/mm.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/smp_lock.h>
++#include <linux/smp.h>
++#include <linux/kernel_stat.h>
++#include <linux/mc146818rtc.h>
++#include <linux/interrupt.h>
++
++#include <asm/mtrr.h>
++#include <asm/pgalloc.h>
++#include <asm/tlbflush.h>
++#include <asm/mach_apic.h>
++#include <asm/mmu_context.h>
++#include <asm/proto.h>
++#include <asm/apicdef.h>
++#include <asm/idle.h>
++#ifdef CONFIG_XEN
++#include <xen/evtchn.h>
++#endif
++
++#ifndef CONFIG_XEN
++/*
++ *	Smarter SMP flushing macros. 
++ *		c/o Linus Torvalds.
++ *
++ *	These mean you can really definitely utterly forget about
++ *	writing to user space from interrupts. (It's not allowed anyway.)
++ *
++ *	Optimizations Manfred Spraul <manfred at colorfullife.com>
++ *
++ * 	More scalable flush, from Andi Kleen
++ *
++ * 	To avoid global state use 8 different call vectors.
++ * 	Each CPU uses a specific vector to trigger flushes on other
++ * 	CPUs. Depending on the received vector the target CPUs look into
++ *	the right per cpu variable for the flush data.
++ *
++ * 	With more than 8 CPUs they are hashed to the 8 available
++ * 	vectors. The limited global vector space forces us to this right now.
++ *	In future when interrupts are split into per CPU domains this could be
++ *	fixed, at the cost of triggering multiple IPIs in some cases.
++ */
++
++union smp_flush_state {
++	struct {
++		cpumask_t flush_cpumask;
++		struct mm_struct *flush_mm;
++		unsigned long flush_va;
++#define FLUSH_ALL	-1ULL
++		spinlock_t tlbstate_lock;
++	};
++	char pad[SMP_CACHE_BYTES];
++} ____cacheline_aligned;
++
++/* State is put into the per CPU data section, but padded
++   to a full cache line because other CPUs can access it and we don't
++   want false sharing in the per cpu data segment. */
++static DEFINE_PER_CPU(union smp_flush_state, flush_state);
++
++/*
++ * We cannot call mmdrop() because we are in interrupt context, 
++ * instead update mm->cpu_vm_mask.
++ */
++static inline void leave_mm(unsigned long cpu)
++{
++	if (read_pda(mmu_state) == TLBSTATE_OK)
++		BUG();
++	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
++	load_cr3(swapper_pg_dir);
++}
++
++/*
++ *
++ * The flush IPI assumes that a thread switch happens in this order:
++ * [cpu0: the cpu that switches]
++ * 1) switch_mm() either 1a) or 1b)
++ * 1a) thread switch to a different mm
++ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
++ * 	Stop ipi delivery for the old mm. This is not synchronized with
++ * 	the other cpus, but smp_invalidate_interrupt ignores flush ipis
++ * 	for the wrong mm, and in the worst case we perform a superfluous
++ * 	tlb flush.
++ * 1a2) set cpu mmu_state to TLBSTATE_OK
++ * 	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
++ *	was in lazy tlb mode.
++ * 1a3) update cpu active_mm
++ * 	Now cpu0 accepts tlb flushes for the new mm.
++ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
++ * 	Now the other cpus will send tlb flush ipis.
++ * 1a4) change cr3.
++ * 1b) thread switch without mm change
++ *	cpu active_mm is correct, cpu0 already handles
++ *	flush ipis.
++ * 1b1) set cpu mmu_state to TLBSTATE_OK
++ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
++ * 	Atomically set the bit [other cpus will start sending flush ipis],
++ * 	and test the bit.
++ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
++ * 2) switch %%rsp, i.e. current
++ *
++ * The interrupt must handle 2 special cases:
++ * - cr3 is changed before %%rsp, i.e. it cannot use current->{active_,}mm.
++ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
++ *   runs in kernel space, the cpu could load tlb entries for user space
++ *   pages.
++ *
++ * The good news is that cpu mmu_state is local to each cpu, no
++ * write/read ordering problems.
++ */
++
++/*
++ * TLB flush IPI:
++ *
++ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
++ * 2) Leave the mm if we are in the lazy tlb mode.
++ *
++ * Interrupts are disabled.
++ */
++
++asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
++{
++	int cpu;
++	int sender;
++	union smp_flush_state *f;
++
++	cpu = smp_processor_id();
++	/*
++	 * orig_rax contains the negated interrupt vector.
++	 * Use that to determine where the sender put the data.
++	 */
++	sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START;
++	f = &per_cpu(flush_state, sender);
++
++	if (!cpu_isset(cpu, f->flush_cpumask))
++		goto out;
++		/* 
++		 * This was a BUG() but until someone can quote me the
++		 * line from the intel manual that guarantees an IPI to
++		 * multiple CPUs is retried _only_ on the erroring CPUs
++		 * it's staying as a return
++		 *
++		 * BUG();
++		 */
++		 
++	if (f->flush_mm == read_pda(active_mm)) {
++		if (read_pda(mmu_state) == TLBSTATE_OK) {
++			if (f->flush_va == FLUSH_ALL)
++				local_flush_tlb();
++			else
++				__flush_tlb_one(f->flush_va);
++		} else
++			leave_mm(cpu);
++	}
++out:
++	ack_APIC_irq();
++	cpu_clear(cpu, f->flush_cpumask);
++}
++
++static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
++						unsigned long va)
++{
++	int sender;
++	union smp_flush_state *f;
++
++	/* Caller has disabled preemption */
++	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
++	f = &per_cpu(flush_state, sender);
++
++	/* Could avoid this lock when
++	   num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
++	   probably not worth checking this for a cache-hot lock. */
++	spin_lock(&f->tlbstate_lock);
++
++	f->flush_mm = mm;
++	f->flush_va = va;
++	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
++
++	/*
++	 * We have to send the IPI only to
++	 * CPUs affected.
++	 */
++	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);
++
++	while (!cpus_empty(f->flush_cpumask))
++		cpu_relax();
++
++	f->flush_mm = NULL;
++	f->flush_va = 0;
++	spin_unlock(&f->tlbstate_lock);
++}
++
++int __cpuinit init_smp_flush(void)
++{
++	int i;
++	for_each_cpu_mask(i, cpu_possible_map) {
++		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
++	}
++	return 0;
++}
++
++core_initcall(init_smp_flush);
++	
++void flush_tlb_current_task(void)
++{
++	struct mm_struct *mm = current->mm;
++	cpumask_t cpu_mask;
++
++	preempt_disable();
++	cpu_mask = mm->cpu_vm_mask;
++	cpu_clear(smp_processor_id(), cpu_mask);
++
++	local_flush_tlb();
++	if (!cpus_empty(cpu_mask))
++		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++	preempt_enable();
++}
++EXPORT_SYMBOL(flush_tlb_current_task);
++
++void flush_tlb_mm (struct mm_struct * mm)
++{
++	cpumask_t cpu_mask;
++
++	preempt_disable();
++	cpu_mask = mm->cpu_vm_mask;
++	cpu_clear(smp_processor_id(), cpu_mask);
++
++	if (current->active_mm == mm) {
++		if (current->mm)
++			local_flush_tlb();
++		else
++			leave_mm(smp_processor_id());
++	}
++	if (!cpus_empty(cpu_mask))
++		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++
++	preempt_enable();
++}
++EXPORT_SYMBOL(flush_tlb_mm);
++
++void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
++{
++	struct mm_struct *mm = vma->vm_mm;
++	cpumask_t cpu_mask;
++
++	preempt_disable();
++	cpu_mask = mm->cpu_vm_mask;
++	cpu_clear(smp_processor_id(), cpu_mask);
++
++	if (current->active_mm == mm) {
++		if(current->mm)
++			__flush_tlb_one(va);
++		 else
++		 	leave_mm(smp_processor_id());
++	}
++
++	if (!cpus_empty(cpu_mask))
++		flush_tlb_others(cpu_mask, mm, va);
++
++	preempt_enable();
++}
++EXPORT_SYMBOL(flush_tlb_page);
++
++static void do_flush_tlb_all(void* info)
++{
++	unsigned long cpu = smp_processor_id();
++
++	__flush_tlb_all();
++	if (read_pda(mmu_state) == TLBSTATE_LAZY)
++		leave_mm(cpu);
++}
++
++void flush_tlb_all(void)
++{
++	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
++}
++#endif /* Xen */
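
As the comment block above explains, the native flush path hashes senders onto NUM_INVALIDATE_TLB_VECTORS (8) vectors, so on larger boxes several CPUs share one flush_state slot, which is why each slot carries its own tlbstate_lock. A sketch of that hashing, with the constant copied for illustration:

    #include <stdio.h>

    #define NUM_INVALIDATE_TLB_VECTORS 8

    int main(void)
    {
        int cpu;

        /* CPUs 8..15 land on the same slots as CPUs 0..7. */
        for (cpu = 0; cpu < 16; cpu++)
            printf("cpu %2d -> flush vector slot %d\n",
                   cpu, cpu % NUM_INVALIDATE_TLB_VECTORS);
        return 0;
    }
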
++
++/*
++ * this function sends a 'reschedule' IPI to another CPU.
++ * it goes straight through and wastes no time serializing
++ * anything. Worst case is that we lose a reschedule ...
++ */
++
++void smp_send_reschedule(int cpu)
++{
++	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
++}
++
++/*
++ * Structure and data for smp_call_function(). This is designed to minimise
++ * static memory requirements. It also looks cleaner.
++ */
++static DEFINE_SPINLOCK(call_lock);
++
++struct call_data_struct {
++	void (*func) (void *info);
++	void *info;
++	atomic_t started;
++	atomic_t finished;
++	int wait;
++};
++
++static struct call_data_struct * call_data;
++
++void lock_ipi_call_lock(void)
++{
++	spin_lock_irq(&call_lock);
++}
++
++void unlock_ipi_call_lock(void)
++{
++	spin_unlock_irq(&call_lock);
++}
++
++/*
++ * this function sends a 'generic call function' IPI to one other CPU
++ * in the system.
++ *
++ * cpu is a standard Linux logical CPU number.
++ */
++static void
++__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
++				int nonatomic, int wait)
++{
++	struct call_data_struct data;
++	int cpus = 1;
++
++	data.func = func;
++	data.info = info;
++	atomic_set(&data.started, 0);
++	data.wait = wait;
++	if (wait)
++		atomic_set(&data.finished, 0);
++
++	call_data = &data;
++	wmb();
++	/* Send a message to all other CPUs and wait for them to respond */
++	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
++
++	/* Wait for response */
++	while (atomic_read(&data.started) != cpus)
++		cpu_relax();
++
++	if (!wait)
++		return;
++
++	while (atomic_read(&data.finished) != cpus)
++		cpu_relax();
++}
++
++/*
++ * smp_call_function_single - Run a function on another CPU
++ * @func: The function to run. This must be fast and non-blocking.
++ * @info: An arbitrary pointer to pass to the function.
++ * @nonatomic: Currently unused.
++ * @wait: If true, wait until function has completed on other CPUs.
++ *
++ * Returns 0 on success, else a negative status code.
++ *
++ * Does not return until the remote CPU is nearly ready to execute <func>,
++ * is executing it, or has already executed it.
++ */
++
++int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
++	int nonatomic, int wait)
++{
++	/* prevent preemption and reschedule on another processor */
++	int me = get_cpu();
++	if (cpu == me) {
++		WARN_ON(1);
++		put_cpu();
++		return -EBUSY;
++	}
++	spin_lock_bh(&call_lock);
++	__smp_call_function_single(cpu, func, info, nonatomic, wait);
++	spin_unlock_bh(&call_lock);
++	put_cpu();
++	return 0;
++}
++
++/*
++ * this function sends a 'generic call function' IPI to all other CPUs
++ * in the system.
++ */
++static void __smp_call_function (void (*func) (void *info), void *info,
++				int nonatomic, int wait)
++{
++	struct call_data_struct data;
++	int cpus = num_online_cpus()-1;
++
++	if (!cpus)
++		return;
++
++	data.func = func;
++	data.info = info;
++	atomic_set(&data.started, 0);
++	data.wait = wait;
++	if (wait)
++		atomic_set(&data.finished, 0);
++
++	call_data = &data;
++	wmb();
++	/* Send a message to all other CPUs and wait for them to respond */
++	send_IPI_allbutself(CALL_FUNCTION_VECTOR);
++
++	/* Wait for response */
++	while (atomic_read(&data.started) != cpus)
++		cpu_relax();
++
++	if (!wait)
++		return;
++
++	while (atomic_read(&data.finished) != cpus)
++		cpu_relax();
++}
++
++/*
++ * smp_call_function - run a function on all other CPUs.
++ * @func: The function to run. This must be fast and non-blocking.
++ * @info: An arbitrary pointer to pass to the function.
++ * @nonatomic: currently unused.
++ * @wait: If true, wait (atomically) until function has completed on other
++ *        CPUs.
++ *
++ * Returns 0 on success, else a negative status code. Does not return until
++ * remote CPUs are nearly ready to execute func, are executing it, or have
++ * already executed it.
++ *
++ * You must not call this function with disabled interrupts or from a
++ * hardware interrupt handler or from a bottom half handler.
++ * Actually there are a few legal cases, like panic.
++ */
++int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
++			int wait)
++{
++	spin_lock(&call_lock);
++	__smp_call_function(func,info,nonatomic,wait);
++	spin_unlock(&call_lock);
++	return 0;
++}
++EXPORT_SYMBOL(smp_call_function);
++
++void smp_stop_cpu(void)
++{
++	unsigned long flags;
++	/*
++	 * Remove this CPU:
++	 */
++	cpu_clear(smp_processor_id(), cpu_online_map);
++	local_irq_save(flags);
++	disable_all_local_evtchn();
++	local_irq_restore(flags); 
++}
++
++static void smp_really_stop_cpu(void *dummy)
++{
++	smp_stop_cpu(); 
++	for (;;) 
++		halt();
++} 
++
++void smp_send_stop(void)
++{
++	int nolock = 0;
++#ifndef CONFIG_XEN
++	if (reboot_force)
++		return;
++#endif
++	/* Don't deadlock on the call lock in panic */
++	if (!spin_trylock(&call_lock)) {
++		/* ignore locking because we have panicked anyways */
++		nolock = 1;
++	}
++	__smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
++	if (!nolock)
++		spin_unlock(&call_lock);
++
++	local_irq_disable();
++	disable_all_local_evtchn();
++	local_irq_enable();
++}
++
++/*
++ * Reschedule call back. Nothing to do,
++ * all the work is done automatically when
++ * we return from the interrupt.
++ */
++#ifndef CONFIG_XEN
++asmlinkage void smp_reschedule_interrupt(void)
++#else
++asmlinkage irqreturn_t smp_reschedule_interrupt(void)
++#endif
++{
++#ifndef CONFIG_XEN
++	ack_APIC_irq();
++#else
++	return IRQ_HANDLED;
++#endif
++}
++
++#ifndef CONFIG_XEN
++asmlinkage void smp_call_function_interrupt(void)
++#else
++asmlinkage irqreturn_t smp_call_function_interrupt(void)
++#endif
++{
++	void (*func) (void *info) = call_data->func;
++	void *info = call_data->info;
++	int wait = call_data->wait;
++
++#ifndef CONFIG_XEN
++	ack_APIC_irq();
++#endif
++	/*
++	 * Notify initiating CPU that I've grabbed the data and am
++	 * about to execute the function
++	 */
++	mb();
++	atomic_inc(&call_data->started);
++	/*
++	 * At this point the info structure may be out of scope unless wait==1
++	 */
++	exit_idle();
++	irq_enter();
++	(*func)(info);
++	irq_exit();
++	if (wait) {
++		mb();
++		atomic_inc(&call_data->finished);
++	}
++#ifdef CONFIG_XEN
++	return IRQ_HANDLED;
++#endif
++}
++
++int safe_smp_processor_id(void)
++{
++#ifdef CONFIG_XEN
++	return smp_processor_id();
++#else
++	unsigned apicid, i;
++
++	if (disable_apic)
++		return 0;
++
++	apicid = hard_smp_processor_id();
++	if (apicid < NR_CPUS && x86_cpu_to_apicid[apicid] == apicid)
++		return apicid;
++
++	for (i = 0; i < NR_CPUS; ++i) {
++		if (x86_cpu_to_apicid[i] == apicid)
++			return i;
++	}
++
++	/* No entries in x86_cpu_to_apicid?  Either no MPS|ACPI,
++	 * or called too early.  Either way, we must be CPU 0. */
++	if (x86_cpu_to_apicid[0] == BAD_APICID)
++		return 0;
++
++	return 0; /* Should not happen */
++#endif
++}
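
__smp_call_function() above is a two-counter rendezvous: the initiator publishes call_data, raises the IPI, then spins on started (and on finished when wait is set), while each target bumps the counters around the callback in smp_call_function_interrupt(). A userspace analogue of the same protocol follows, with a thread standing in for the IPI target; everything here is illustrative:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct call_data {
        void (*func)(void *);
        void *info;
        atomic_int started;
        atomic_int finished;
        int wait;
    };

    static struct call_data *call_data;

    static void *target(void *arg)        /* smp_call_function_interrupt() */
    {
        struct call_data *d = call_data;
        void (*func)(void *) = d->func;
        void *info = d->info;
        int wait = d->wait;

        atomic_fetch_add(&d->started, 1); /* initiator may now proceed */
        func(info);
        if (wait)
            atomic_fetch_add(&d->finished, 1);
        return arg;
    }

    static void hello(void *info) { printf("called with %s\n", (char *)info); }

    int main(void)
    {
        struct call_data d = { .func = hello, .info = "some info", .wait = 1 };
        pthread_t t;

        atomic_init(&d.started, 0);
        atomic_init(&d.finished, 0);
        call_data = &d;

        pthread_create(&t, NULL, target, NULL); /* stands in for the IPI */
        while (atomic_load(&d.started) != 1)    /* "wait for response" */
            ;
        while (atomic_load(&d.finished) != 1)   /* wait == 1 */
            ;
        return pthread_join(t, NULL);
    }
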
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/suspend.c
+--- a/arch/x86_64/kernel/suspend.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/x86_64/kernel/suspend.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -114,12 +114,14 @@
+ 
+ void fix_processor_context(void)
+ {
++#ifndef CONFIG_X86_NO_TSS    
+ 	int cpu = smp_processor_id();
+ 	struct tss_struct *t = &per_cpu(init_tss, cpu);
+ 
+ 	set_tss_desc(cpu,t);	/* This just modifies memory; should not be necessary. But... This is necessary, because 386 hardware has concept of busy TSS or some similar stupidity. */
+ 
+ 	cpu_gdt(cpu)[GDT_ENTRY_TSS].type = 9;
++#endif
+ 
+ 	syscall_init();                         /* This sets MSR_*STAR and related */
+ 	load_TR_desc();				/* This does ltr */
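
The new CONFIG_X86_NO_TSS guard above reflects that a Xen guest has no hardware TSS of its own to fix up. On native hardware the guarded descriptor reset matters because ltr refuses a TSS whose descriptor the CPU has already marked busy; resetting the type nibble (bits 40-43 of the descriptor) from busy (11) back to available (9) makes it loadable again. A standalone illustration of that bit manipulation, under the stated layout assumptions:

    #include <assert.h>
    #include <stdint.h>

    #define TSS_TYPE_AVAILABLE 0x9ULL  /* ltr accepts this */
    #define TSS_TYPE_BUSY      0xBULL  /* set by the CPU once loaded */

    static uint64_t tss_make_available(uint64_t desc)
    {
        desc &= ~(0xFULL << 40);           /* clear the type nibble */
        desc |= TSS_TYPE_AVAILABLE << 40;  /* mark it loadable again */
        return desc;
    }

    int main(void)
    {
        uint64_t desc = TSS_TYPE_BUSY << 40;

        desc = tss_make_available(desc);
        assert(((desc >> 40) & 0xF) == TSS_TYPE_AVAILABLE);
        return 0;
    }
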
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/traps-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/kernel/traps-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1173 @@
++/*
++ *  linux/arch/x86-64/traps.c
++ *
++ *  Copyright (C) 1991, 1992  Linus Torvalds
++ *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
++ *
++ *  Pentium III FXSR, SSE support
++ *	Gareth Hughes <gareth at valinux.com>, May 2000
++ */
++
++/*
++ * 'Traps.c' handles hardware traps and faults after we have saved some
++ * state in 'entry.S'.
++ */
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/ptrace.h>
++#include <linux/timer.h>
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/interrupt.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/nmi.h>
++#include <linux/kprobes.h>
++#include <linux/kexec.h>
++#include <linux/unwind.h>
++
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/io.h>
++#include <asm/atomic.h>
++#include <asm/debugreg.h>
++#include <asm/desc.h>
++#include <asm/i387.h>
++#include <asm/kdebug.h>
++#include <asm/processor.h>
++#include <asm/unwind.h>
++#include <asm/smp.h>
++#include <asm/pgalloc.h>
++#include <asm/pda.h>
++#include <asm/proto.h>
++#include <asm/nmi.h>
++
++asmlinkage void divide_error(void);
++asmlinkage void debug(void);
++asmlinkage void nmi(void);
++asmlinkage void int3(void);
++asmlinkage void overflow(void);
++asmlinkage void bounds(void);
++asmlinkage void invalid_op(void);
++asmlinkage void device_not_available(void);
++asmlinkage void double_fault(void);
++asmlinkage void coprocessor_segment_overrun(void);
++asmlinkage void invalid_TSS(void);
++asmlinkage void segment_not_present(void);
++asmlinkage void stack_segment(void);
++asmlinkage void general_protection(void);
++asmlinkage void page_fault(void);
++asmlinkage void coprocessor_error(void);
++asmlinkage void simd_coprocessor_error(void);
++asmlinkage void reserved(void);
++asmlinkage void alignment_check(void);
++asmlinkage void machine_check(void);
++asmlinkage void spurious_interrupt_bug(void);
++
++ATOMIC_NOTIFIER_HEAD(die_chain);
++EXPORT_SYMBOL(die_chain);
++
++int register_die_notifier(struct notifier_block *nb)
++{
++	vmalloc_sync_all();
++	return atomic_notifier_chain_register(&die_chain, nb);
++}
++EXPORT_SYMBOL(register_die_notifier); /* used modular by kdb */
++
++int unregister_die_notifier(struct notifier_block *nb)
++{
++	return atomic_notifier_chain_unregister(&die_chain, nb);
++}
++EXPORT_SYMBOL(unregister_die_notifier); /* used modular by kdb */
++
++static inline void conditional_sti(struct pt_regs *regs)
++{
++	if (regs->eflags & X86_EFLAGS_IF)
++		local_irq_enable();
++}
++
++static inline void preempt_conditional_sti(struct pt_regs *regs)
++{
++	preempt_disable();
++	if (regs->eflags & X86_EFLAGS_IF)
++		local_irq_enable();
++}
++
++static inline void preempt_conditional_cli(struct pt_regs *regs)
++{
++	if (regs->eflags & X86_EFLAGS_IF)
++		local_irq_disable();
++	/* Make sure to not schedule here because we could be running
++	   on an exception stack. */
++	preempt_enable_no_resched();
++}
++
++static int kstack_depth_to_print = 12;
++#ifdef CONFIG_STACK_UNWIND
++static int call_trace = 1;
++#else
++#define call_trace (-1)
++#endif
++
++#ifdef CONFIG_KALLSYMS
++# include <linux/kallsyms.h>
++void printk_address(unsigned long address)
++{
++	unsigned long offset = 0, symsize;
++	const char *symname;
++	char *modname;
++	char *delim = ":";
++	char namebuf[128];
++
++	symname = kallsyms_lookup(address, &symsize, &offset,
++					&modname, namebuf);
++	if (!symname) {
++		printk(" [<%016lx>]\n", address);
++		return;
++	}
++	if (!modname)
++		modname = delim = ""; 		
++	printk(" [<%016lx>] %s%s%s%s+0x%lx/0x%lx\n",
++		address, delim, modname, delim, symname, offset, symsize);
++}
++#else
++void printk_address(unsigned long address)
++{
++	printk(" [<%016lx>]\n", address);
++}
++#endif
++
++static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
++					unsigned *usedp, const char **idp)
++{
++#ifndef CONFIG_X86_NO_TSS
++	static char ids[][8] = {
++		[DEBUG_STACK - 1] = "#DB",
++		[NMI_STACK - 1] = "NMI",
++		[DOUBLEFAULT_STACK - 1] = "#DF",
++		[STACKFAULT_STACK - 1] = "#SS",
++		[MCE_STACK - 1] = "#MC",
++#if DEBUG_STKSZ > EXCEPTION_STKSZ
++		[N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
++#endif
++	};
++	unsigned k;
++
++	/*
++	 * Iterate over all exception stacks, and figure out whether
++	 * 'stack' is in one of them:
++	 */
++	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
++		unsigned long end;
++
++		/*
++		 * set 'end' to the end of the exception stack.
++		 */
++		switch (k + 1) {
++		/*
++		 * TODO: this block is not needed i think, because
++		 * setup64.c:cpu_init() sets up t->ist[DEBUG_STACK]
++		 * properly too.
++		 */
++#if DEBUG_STKSZ > EXCEPTION_STKSZ
++		case DEBUG_STACK:
++			end = cpu_pda(cpu)->debugstack + DEBUG_STKSZ;
++			break;
++#endif
++		default:
++			end = per_cpu(orig_ist, cpu).ist[k];
++			break;
++		}
++		/*
++		 * Is 'stack' above this exception frame's end?
++		 * If yes then skip to the next frame.
++		 */
++		if (stack >= end)
++			continue;
++		/*
++		 * Is 'stack' above this exception frame's start address?
++		 * If yes then we found the right frame.
++		 */
++		if (stack >= end - EXCEPTION_STKSZ) {
++			/*
++			 * Make sure we only iterate through an exception
++			 * stack once. If it comes up for the second time
++			 * then there's something wrong going on - just
++			 * break out and return NULL:
++			 */
++			if (*usedp & (1U << k))
++				break;
++			*usedp |= 1U << k;
++			*idp = ids[k];
++			return (unsigned long *)end;
++		}
++		/*
++		 * If this is a debug stack, and if it has a larger size than
++		 * the usual exception stacks, then 'stack' might still
++		 * be within the lower portion of the debug stack:
++		 */
++#if DEBUG_STKSZ > EXCEPTION_STKSZ
++		if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
++			unsigned j = N_EXCEPTION_STACKS - 1;
++
++			/*
++			 * Black magic. A large debug stack is composed of
++			 * multiple exception stack entries, which we
++			 * iterate through now. Don't look:
++			 */
++			do {
++				++j;
++				end -= EXCEPTION_STKSZ;
++				ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
++			} while (stack < end - EXCEPTION_STKSZ);
++			if (*usedp & (1U << j))
++				break;
++			*usedp |= 1U << j;
++			*idp = ids[j];
++			return (unsigned long *)end;
++		}
++#endif
++	}
++#endif
++	return NULL;
++}
++
++static int show_trace_unwind(struct unwind_frame_info *info, void *context)
++{
++	int n = 0;
++
++	while (unwind(info) == 0 && UNW_PC(info)) {
++		n++;
++		printk_address(UNW_PC(info));
++		if (arch_unw_user_mode(info))
++			break;
++	}
++	return n;
++}
++
++/*
++ * x86-64 can have up to three kernel stacks:
++ * process stack
++ * interrupt stack
++ * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
++ */
++
++void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * stack)
++{
++	const unsigned cpu = safe_smp_processor_id();
++	unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
++	unsigned used = 0;
++
++	printk("\nCall Trace:\n");
++
++	if (!tsk)
++		tsk = current;
++
++	if (call_trace >= 0) {
++		int unw_ret = 0;
++		struct unwind_frame_info info;
++
++		if (regs) {
++			if (unwind_init_frame_info(&info, tsk, regs) == 0)
++				unw_ret = show_trace_unwind(&info, NULL);
++		} else if (tsk == current)
++			unw_ret = unwind_init_running(&info, show_trace_unwind, NULL);
++		else {
++			if (unwind_init_blocked(&info, tsk) == 0)
++				unw_ret = show_trace_unwind(&info, NULL);
++		}
++		if (unw_ret > 0) {
++			if (call_trace == 1 && !arch_unw_user_mode(&info)) {
++				print_symbol("DWARF2 unwinder stuck at %s\n",
++					     UNW_PC(&info));
++				if ((long)UNW_SP(&info) < 0) {
++					printk("Leftover inexact backtrace:\n");
++					stack = (unsigned long *)UNW_SP(&info);
++				} else
++					printk("Full inexact backtrace again:\n");
++			} else if (call_trace >= 1)
++				return;
++			else
++				printk("Full inexact backtrace again:\n");
++		} else
++			printk("Inexact backtrace:\n");
++	}
++
++	/*
++	 * Print function call entries within a stack. 'cond' is the
++	 * "end of stackframe" condition, that the 'stack++'
++	 * iteration will eventually trigger.
++	 */
++#define HANDLE_STACK(cond) \
++	do while (cond) { \
++		unsigned long addr = *stack++; \
++		if (kernel_text_address(addr)) { \
++			/* \
++			 * If the address is either in the text segment of the \
++			 * kernel, or in the region which contains vmalloc'ed \
++			 * memory, it *may* be the address of a calling \
++			 * routine; if so, print it so that someone tracing \
++			 * down the cause of the crash will be able to figure \
++			 * out the call path that was taken. \
++			 */ \
++			printk_address(addr); \
++		} \
++	} while (0)
++
++	/*
++	 * Print function call entries in all stacks, starting at the
++	 * current stack address. If the stacks consist of nested
++	 * exceptions
++	 */
++	for ( ; ; ) {
++		const char *id;
++		unsigned long *estack_end;
++		estack_end = in_exception_stack(cpu, (unsigned long)stack,
++						&used, &id);
++
++		if (estack_end) {
++			printk(" <%s>", id);
++			HANDLE_STACK (stack < estack_end);
++			printk(" <EOE>");
++			/*
++			 * We link to the next stack via the
++			 * second-to-last pointer (index -2 to end) in the
++			 * exception stack:
++			 */
++			stack = (unsigned long *) estack_end[-2];
++			continue;
++		}
++		if (irqstack_end) {
++			unsigned long *irqstack;
++			irqstack = irqstack_end -
++				(IRQSTACKSIZE - 64) / sizeof(*irqstack);
++
++			if (stack >= irqstack && stack < irqstack_end) {
++				printk(" <IRQ>");
++				HANDLE_STACK (stack < irqstack_end);
++				/*
++				 * We link to the next stack (which would be
++				 * the process stack normally) the last
++				 * pointer (index -1 to end) in the IRQ stack:
++				 */
++				stack = (unsigned long *) (irqstack_end[-1]);
++				irqstack_end = NULL;
++				printk(" <EOI>");
++				continue;
++			}
++		}
++		break;
++	}
++
++	/*
++	 * This prints the process stack:
++	 */
++	HANDLE_STACK (((long) stack & (THREAD_SIZE-1)) != 0);
++#undef HANDLE_STACK
++
++	printk("\n");
++}
++
++static void _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long * rsp)
++{
++	unsigned long *stack;
++	int i;
++	const int cpu = safe_smp_processor_id();
++	unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
++	unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
++
++	/* Debugging aid: "show_stack(NULL, NULL);" prints the
++	 * back trace for this cpu. */
++
++	if (rsp == NULL) {
++		if (tsk)
++			rsp = (unsigned long *)tsk->thread.rsp;
++		else
++			rsp = (unsigned long *)&rsp;
++	}
++
++	stack = rsp;
++	for(i=0; i < kstack_depth_to_print; i++) {
++		if (stack >= irqstack && stack <= irqstack_end) {
++			if (stack == irqstack_end) {
++				stack = (unsigned long *) (irqstack_end[-1]);
++				printk(" <EOI> ");
++			}
++		} else {
++		if (((long) stack & (THREAD_SIZE-1)) == 0)
++			break;
++		}
++		if (i && ((i % 4) == 0))
++			printk("\n");
++		printk(" %016lx", *stack++);
++		touch_nmi_watchdog();
++	}
++	show_trace(tsk, regs, rsp);
++}
++
++void show_stack(struct task_struct *tsk, unsigned long * rsp)
++{
++	_show_stack(tsk, NULL, rsp);
++}
++
++/*
++ * The architecture-independent dump_stack generator
++ */
++void dump_stack(void)
++{
++	unsigned long dummy;
++	show_trace(NULL, NULL, &dummy);
++}
++
++EXPORT_SYMBOL(dump_stack);
++
++void show_registers(struct pt_regs *regs)
++{
++	int i;
++	int in_kernel = !user_mode(regs);
++	unsigned long rsp;
++	const int cpu = safe_smp_processor_id(); 
++	struct task_struct *cur = cpu_pda(cpu)->pcurrent;
++
++	rsp = regs->rsp;
++
++	printk("CPU %d ", cpu);
++	__show_regs(regs);
++	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
++		cur->comm, cur->pid, task_thread_info(cur), cur);
++
++	/*
++	 * When in-kernel, we also print out the stack and code at the
++	 * time of the fault..
++	 */
++	if (in_kernel) {
++
++		printk("Stack: ");
++		_show_stack(NULL, regs, (unsigned long*)rsp);
++
++		printk("\nCode: ");
++		if (regs->rip < PAGE_OFFSET)
++			goto bad;
++
++		for (i=0; i<20; i++) {
++			unsigned char c;
++			if (__get_user(c, &((unsigned char*)regs->rip)[i])) {
++bad:
++				printk(" Bad RIP value.");
++				break;
++			}
++			printk("%02x ", c);
++		}
++	}
++	printk("\n");
++}	
++
++void handle_BUG(struct pt_regs *regs)
++{ 
++	struct bug_frame f;
++	long len;
++	const char *prefix = "";
++
++	if (user_mode(regs))
++		return; 
++	if (__copy_from_user(&f, (const void __user *) regs->rip,
++			     sizeof(struct bug_frame)))
++		return; 
++	if (f.filename >= 0 ||
++	    f.ud2[0] != 0x0f || f.ud2[1] != 0x0b) 
++		return;
++	len = __strnlen_user((char *)(long)f.filename, PATH_MAX) - 1;
++	if (len < 0 || len >= PATH_MAX)
++		f.filename = (int)(long)"unmapped filename";
++	else if (len > 50) {
++		f.filename += len - 50;
++		prefix = "...";
++	}
++	printk("----------- [cut here ] --------- [please bite here ] ---------\n");
++	printk(KERN_ALERT "Kernel BUG at %s%.50s:%d\n", prefix, (char *)(long)f.filename, f.line);
++} 
++
++#ifdef CONFIG_BUG
++void out_of_line_bug(void)
++{ 
++	BUG(); 
++} 
++EXPORT_SYMBOL(out_of_line_bug);
++#endif
++
++static DEFINE_SPINLOCK(die_lock);
++static int die_owner = -1;
++static unsigned int die_nest_count;
++
++unsigned __kprobes long oops_begin(void)
++{
++	int cpu = safe_smp_processor_id();
++	unsigned long flags;
++
++	/* racy, but better than risking deadlock. */
++	local_irq_save(flags);
++	if (!spin_trylock(&die_lock)) { 
++		if (cpu == die_owner) 
++			/* nested oops. should stop eventually */;
++		else
++			spin_lock(&die_lock);
++	}
++	die_nest_count++;
++	die_owner = cpu;
++	console_verbose();
++	bust_spinlocks(1);
++	return flags;
++}
++
++void __kprobes oops_end(unsigned long flags)
++{ 
++	die_owner = -1;
++	bust_spinlocks(0);
++	die_nest_count--;
++	if (die_nest_count)
++		/* We still own the lock */
++		local_irq_restore(flags);
++	else
++		/* Nest count reaches zero, release the lock. */
++		spin_unlock_irqrestore(&die_lock, flags);
++	if (panic_on_oops)
++		panic("Fatal exception");
++}
++
++void __kprobes __die(const char * str, struct pt_regs * regs, long err)
++{
++	static int die_counter;
++	printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff, ++die_counter);
++#ifdef CONFIG_PREEMPT
++	printk("PREEMPT ");
++#endif
++#ifdef CONFIG_SMP
++	printk("SMP ");
++#endif
++#ifdef CONFIG_DEBUG_PAGEALLOC
++	printk("DEBUG_PAGEALLOC");
++#endif
++	printk("\n");
++	notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
++	show_registers(regs);
++	/* Executive summary in case the oops scrolled away */
++	printk(KERN_ALERT "RIP ");
++	printk_address(regs->rip); 
++	printk(" RSP <%016lx>\n", regs->rsp); 
++	if (kexec_should_crash(current))
++		crash_kexec(regs);
++}
++
++void die(const char * str, struct pt_regs * regs, long err)
++{
++	unsigned long flags = oops_begin();
++
++	handle_BUG(regs);
++	__die(str, regs, err);
++	oops_end(flags);
++	do_exit(SIGSEGV); 
++}
++
++#ifdef CONFIG_X86_LOCAL_APIC
++void __kprobes die_nmi(char *str, struct pt_regs *regs)
++{
++	unsigned long flags = oops_begin();
++
++	/*
++	 * We are in trouble anyway, lets at least try
++	 * to get a message out.
++	 */
++	printk(str, safe_smp_processor_id());
++	show_registers(regs);
++	if (kexec_should_crash(current))
++		crash_kexec(regs);
++	if (panic_on_timeout || panic_on_oops)
++		panic("nmi watchdog");
++	printk("console shuts up ...\n");
++	oops_end(flags);
++	nmi_exit();
++	local_irq_enable();
++	do_exit(SIGSEGV);
++}
++#endif
++
++static void __kprobes do_trap(int trapnr, int signr, char *str,
++			      struct pt_regs * regs, long error_code,
++			      siginfo_t *info)
++{
++	struct task_struct *tsk = current;
++
++	tsk->thread.error_code = error_code;
++	tsk->thread.trap_no = trapnr;
++
++	if (user_mode(regs)) {
++		if (exception_trace && unhandled_signal(tsk, signr))
++			printk(KERN_INFO
++			       "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
++			       tsk->comm, tsk->pid, str,
++			       regs->rip, regs->rsp, error_code); 
++
++		if (info)
++			force_sig_info(signr, info, tsk);
++		else
++			force_sig(signr, tsk);
++		return;
++	}
++
++
++	/* kernel trap */ 
++	{	     
++		const struct exception_table_entry *fixup;
++		fixup = search_exception_tables(regs->rip);
++		if (fixup)
++			regs->rip = fixup->fixup;
++		else	
++			die(str, regs, error_code);
++		return;
++	}
++}
++
++#define DO_ERROR(trapnr, signr, str, name) \
++asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++							== NOTIFY_STOP) \
++		return; \
++	conditional_sti(regs);						\
++	do_trap(trapnr, signr, str, regs, error_code, NULL); \
++}
++
++#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
++asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++	siginfo_t info; \
++	info.si_signo = signr; \
++	info.si_errno = 0; \
++	info.si_code = sicode; \
++	info.si_addr = (void __user *)siaddr; \
++	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++							== NOTIFY_STOP) \
++		return; \
++	conditional_sti(regs);						\
++	do_trap(trapnr, signr, str, regs, error_code, &info); \
++}
++
++DO_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->rip)
++DO_ERROR( 4, SIGSEGV, "overflow", overflow)
++DO_ERROR( 5, SIGSEGV, "bounds", bounds)
++DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->rip)
++DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
++DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
++DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
++DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
++DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
++DO_ERROR(18, SIGSEGV, "reserved", reserved)
++
++/* Runs on IST stack */
++asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
++{
++	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
++			12, SIGBUS) == NOTIFY_STOP)
++		return;
++	preempt_conditional_sti(regs);
++	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
++	preempt_conditional_cli(regs);
++}
++
++asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
++{
++	static const char str[] = "double fault";
++	struct task_struct *tsk = current;
++
++	/* Return not checked because a double fault cannot be ignored */
++	notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);
++
++	tsk->thread.error_code = error_code;
++	tsk->thread.trap_no = 8;
++
++	/* This is always a kernel trap and never fixable (and thus must
++	   never return). */
++	for (;;)
++		die(str, regs, error_code);
++}
++
++asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
++						long error_code)
++{
++	struct task_struct *tsk = current;
++
++	conditional_sti(regs);
++
++	tsk->thread.error_code = error_code;
++	tsk->thread.trap_no = 13;
++
++	if (user_mode(regs)) {
++		if (exception_trace && unhandled_signal(tsk, SIGSEGV))
++			printk(KERN_INFO
++		       "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
++			       tsk->comm, tsk->pid,
++			       regs->rip, regs->rsp, error_code); 
++
++		force_sig(SIGSEGV, tsk);
++		return;
++	} 
++
++	/* kernel gp */
++	{
++		const struct exception_table_entry *fixup;
++		fixup = search_exception_tables(regs->rip);
++		if (fixup) {
++			regs->rip = fixup->fixup;
++			return;
++		}
++		if (notify_die(DIE_GPF, "general protection fault", regs,
++					error_code, 13, SIGSEGV) == NOTIFY_STOP)
++			return;
++		die("general protection fault", regs, error_code);
++	}
++}
++
++static __kprobes void
++mem_parity_error(unsigned char reason, struct pt_regs * regs)
++{
++	printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
++	printk("You probably have a hardware problem with your RAM chips\n");
++
++#if 0 /* XEN */
++	/* Clear and disable the memory parity error line. */
++	reason = (reason & 0xf) | 4;
++	outb(reason, 0x61);
++#endif /* XEN */
++}
++
++static __kprobes void
++io_check_error(unsigned char reason, struct pt_regs * regs)
++{
++	printk("NMI: IOCK error (debug interrupt?)\n");
++	show_registers(regs);
++
++#if 0 /* XEN */
++	/* Re-enable the IOCK line, wait for a few seconds */
++	reason = (reason & 0xf) | 8;
++	outb(reason, 0x61);
++	mdelay(2000);
++	reason &= ~8;
++	outb(reason, 0x61);
++#endif /* XEN */
++}
++
++static __kprobes void
++unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
++{	printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
++	printk("Dazed and confused, but trying to continue\n");
++	printk("Do you have a strange power saving mode enabled?\n");
++}
++
++/* Runs on IST stack. This code must keep interrupts off all the time.
++   Nested NMIs are prevented by the CPU. */
++asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
++{
++	unsigned char reason = 0;
++	int cpu;
++
++	cpu = smp_processor_id();
++
++	/* Only the BSP gets external NMIs from the system.  */
++	if (!cpu)
++		reason = get_nmi_reason();
++
++	if (!(reason & 0xc0)) {
++		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
++								== NOTIFY_STOP)
++			return;
++#ifdef CONFIG_X86_LOCAL_APIC
++		/*
++		 * Ok, so this is none of the documented NMI sources,
++		 * so it must be the NMI watchdog.
++		 */
++		if (nmi_watchdog > 0) {
++			nmi_watchdog_tick(regs,reason);
++			return;
++		}
++#endif
++		unknown_nmi_error(reason, regs);
++		return;
++	}
++	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
++		return; 
++
++	/* AK: following checks seem to be broken on modern chipsets. FIXME */
++
++	if (reason & 0x80)
++		mem_parity_error(reason, regs);
++	if (reason & 0x40)
++		io_check_error(reason, regs);
++}
++
++/* runs on IST stack. */
++asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
++{
++	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
++		return;
++	}
++	preempt_conditional_sti(regs);
++	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
++	preempt_conditional_cli(regs);
++}
++
++/* Help handler running on IST stack to switch back to user stack
++   for scheduling or signal handling. The actual stack switch is done in
++   entry.S */
++asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
++{
++	struct pt_regs *regs = eregs;
++	/* Did already sync */
++	if (eregs == (struct pt_regs *)eregs->rsp)
++		;
++	/* Exception from user space */
++	else if (user_mode(eregs))
++		regs = task_pt_regs(current);
++	/* Exception from kernel and interrupts are enabled. Move to
++ 	   kernel process stack. */
++	else if (eregs->eflags & X86_EFLAGS_IF)
++		regs = (struct pt_regs *)(eregs->rsp -= sizeof(struct pt_regs));
++	if (eregs != regs)
++		*regs = *eregs;
++	return regs;
++}
++
++/* runs on IST stack. */
++asmlinkage void __kprobes do_debug(struct pt_regs * regs,
++				   unsigned long error_code)
++{
++	unsigned long condition;
++	struct task_struct *tsk = current;
++	siginfo_t info;
++
++	get_debugreg(condition, 6);
++
++	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
++						SIGTRAP) == NOTIFY_STOP)
++		return;
++
++	preempt_conditional_sti(regs);
++
++	/* Mask out spurious debug traps due to lazy DR7 setting */
++	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
++		if (!tsk->thread.debugreg7) { 
++			goto clear_dr7;
++		}
++	}
++
++	tsk->thread.debugreg6 = condition;
++
++	/* Mask out spurious TF errors due to lazy TF clearing */
++	if (condition & DR_STEP) {
++		/*
++		 * The TF error should be masked out only if the current
++		 * process is not traced and if the TRAP flag has been set
++		 * previously by a tracing process (condition detected by
++		 * the PT_DTRACE flag); remember that the i386 TRAP flag
++		 * can be modified by the process itself in user mode,
++		 * allowing programs to debug themselves without the ptrace()
++		 * interface.
++		 */
++		if (!user_mode(regs))
++			goto clear_TF_reenable;
++		/*
++		 * Was the TF flag set by a debugger? If so, clear it now,
++		 * so that register information is correct.
++		 */
++		if (tsk->ptrace & PT_DTRACE) {
++			regs->eflags &= ~TF_MASK;
++			tsk->ptrace &= ~PT_DTRACE;
++		}
++	}
++
++	/* Ok, finally something we can handle */
++	tsk->thread.trap_no = 1;
++	tsk->thread.error_code = error_code;
++	info.si_signo = SIGTRAP;
++	info.si_errno = 0;
++	info.si_code = TRAP_BRKPT;
++	info.si_addr = user_mode(regs) ? (void __user *)regs->rip : NULL;
++	force_sig_info(SIGTRAP, &info, tsk);
++
++clear_dr7:
++	set_debugreg(0UL, 7);
++	preempt_conditional_cli(regs);
++	return;
++
++clear_TF_reenable:
++	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
++	regs->eflags &= ~TF_MASK;
++	preempt_conditional_cli(regs);
++}
++
++static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
++{
++	const struct exception_table_entry *fixup;
++	fixup = search_exception_tables(regs->rip);
++	if (fixup) {
++		regs->rip = fixup->fixup;
++		return 1;
++	}
++	notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
++	/* Illegal floating point operation in the kernel */
++	current->thread.trap_no = trapnr;
++	die(str, regs, 0);
++	return 0;
++}
++
++/*
++ * Note that we play around with the 'TS' bit in an attempt to get
++ * the correct behaviour even in the presence of the asynchronous
++ * IRQ13 behaviour
++ */
++asmlinkage void do_coprocessor_error(struct pt_regs *regs)
++{
++	void __user *rip = (void __user *)(regs->rip);
++	struct task_struct * task;
++	siginfo_t info;
++	unsigned short cwd, swd;
++
++	conditional_sti(regs);
++	if (!user_mode(regs) &&
++	    kernel_math_error(regs, "kernel x87 math error", 16))
++		return;
++
++	/*
++	 * Save the info for the exception handler and clear the error.
++	 */
++	task = current;
++	save_init_fpu(task);
++	task->thread.trap_no = 16;
++	task->thread.error_code = 0;
++	info.si_signo = SIGFPE;
++	info.si_errno = 0;
++	info.si_code = __SI_FAULT;
++	info.si_addr = rip;
++	/*
++	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
++	 * status.  0x3f is the exception bits in these regs, 0x200 is the
++	 * C1 reg you need in case of a stack fault, 0x040 is the stack
++	 * fault bit.  We should only be taking one exception at a time,
++	 * so if this combination doesn't produce any single exception,
++	 * then we have a bad program that isn't synchronizing its FPU usage
++	 * and it will suffer the consequences since we won't be able to
++	 * fully reproduce the context of the exception
++	 */
++	cwd = get_fpu_cwd(task);
++	swd = get_fpu_swd(task);
++	switch (swd & ~cwd & 0x3f) {
++		case 0x000:
++		default:
++			break;
++		case 0x001: /* Invalid Op */
++			/*
++			 * swd & 0x240 == 0x040: Stack Underflow
++			 * swd & 0x240 == 0x240: Stack Overflow
++			 * User must clear the SF bit (0x40) if set
++			 */
++			info.si_code = FPE_FLTINV;
++			break;
++		case 0x002: /* Denormalize */
++		case 0x010: /* Underflow */
++			info.si_code = FPE_FLTUND;
++			break;
++		case 0x004: /* Zero Divide */
++			info.si_code = FPE_FLTDIV;
++			break;
++		case 0x008: /* Overflow */
++			info.si_code = FPE_FLTOVF;
++			break;
++		case 0x020: /* Precision */
++			info.si_code = FPE_FLTRES;
++			break;
++	}
++	force_sig_info(SIGFPE, &info, task);
++}
++
++asmlinkage void bad_intr(void)
++{
++	printk("bad interrupt"); 
++}
++
++asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
++{
++	void __user *rip = (void __user *)(regs->rip);
++	struct task_struct * task;
++	siginfo_t info;
++	unsigned short mxcsr;
++
++	conditional_sti(regs);
++	if (!user_mode(regs) &&
++	    kernel_math_error(regs, "kernel simd math error", 19))
++		return;
++
++	/*
++	 * Save the info for the exception handler and clear the error.
++	 */
++	task = current;
++	save_init_fpu(task);
++	task->thread.trap_no = 19;
++	task->thread.error_code = 0;
++	info.si_signo = SIGFPE;
++	info.si_errno = 0;
++	info.si_code = __SI_FAULT;
++	info.si_addr = rip;
++	/*
++	 * The SIMD FPU exceptions are handled a little differently, as there
++	 * is only a single status/control register.  Thus, to determine which
++	 * unmasked exception was caught we must mask the exception mask bits
++	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
++	 */
++	mxcsr = get_fpu_mxcsr(task);
++	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
++		case 0x000:
++		default:
++			break;
++		case 0x001: /* Invalid Op */
++			info.si_code = FPE_FLTINV;
++			break;
++		case 0x002: /* Denormalize */
++		case 0x010: /* Underflow */
++			info.si_code = FPE_FLTUND;
++			break;
++		case 0x004: /* Zero Divide */
++			info.si_code = FPE_FLTDIV;
++			break;
++		case 0x008: /* Overflow */
++			info.si_code = FPE_FLTOVF;
++			break;
++		case 0x020: /* Precision */
++			info.si_code = FPE_FLTRES;
++			break;
++	}
++	force_sig_info(SIGFPE, &info, task);
++}
++
++asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs)
++{
++}
++
++#if 0
++asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
++{
++}
++#endif
++
++asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
++{
++}
++
++/*
++ *  'math_state_restore()' saves the current math information in the
++ * old math state array, and gets the new ones from the current task
++ *
++ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
++ * Don't touch unless you *really* know how it works.
++ */
++asmlinkage void math_state_restore(void)
++{
++	struct task_struct *me = current;
++        /* clts(); */ /* 'clts' is done for us by Xen during virtual trap. */
++
++	if (!used_math())
++		init_fpu(me);
++	restore_fpu_checking(&me->thread.i387.fxsave);
++	task_thread_info(me)->status |= TS_USEDFPU;
++}
++
++
++/*
++ * NB. All these are "interrupt gates" (i.e. events_mask is set) because we
++ * specify <dpl>|4 in the second field.
++ */
++static trap_info_t __cpuinitdata trap_table[] = {
++        {  0, 0|4, __KERNEL_CS, (unsigned long)divide_error               },
++        {  1, 0|4, __KERNEL_CS, (unsigned long)debug                      },
++        {  3, 3|4, __KERNEL_CS, (unsigned long)int3                       },
++        {  4, 3|4, __KERNEL_CS, (unsigned long)overflow                   },
++        {  5, 0|4, __KERNEL_CS, (unsigned long)bounds                     },
++        {  6, 0|4, __KERNEL_CS, (unsigned long)invalid_op                 },
++        {  7, 0|4, __KERNEL_CS, (unsigned long)device_not_available       },
++        {  9, 0|4, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun},
++        { 10, 0|4, __KERNEL_CS, (unsigned long)invalid_TSS                },
++        { 11, 0|4, __KERNEL_CS, (unsigned long)segment_not_present        },
++        { 12, 0|4, __KERNEL_CS, (unsigned long)stack_segment              },
++        { 13, 0|4, __KERNEL_CS, (unsigned long)general_protection         },
++        { 14, 0|4, __KERNEL_CS, (unsigned long)page_fault                 },
++        { 15, 0|4, __KERNEL_CS, (unsigned long)spurious_interrupt_bug     },
++        { 16, 0|4, __KERNEL_CS, (unsigned long)coprocessor_error          },
++        { 17, 0|4, __KERNEL_CS, (unsigned long)alignment_check            },
++#ifdef CONFIG_X86_MCE
++        { 18, 0|4, __KERNEL_CS, (unsigned long)machine_check              },
++#endif
++        { 19, 0|4, __KERNEL_CS, (unsigned long)simd_coprocessor_error     },
++#ifdef CONFIG_IA32_EMULATION
++	{ IA32_SYSCALL_VECTOR, 3, __KERNEL_CS, (unsigned long)ia32_syscall},
++#endif
++        {  0, 0,           0, 0                                              }
++};
++
++void __init trap_init(void)
++{
++        int ret;
++
++        ret = HYPERVISOR_set_trap_table(trap_table);
++        if (ret) 
++		printk("HYPERVISOR_set_trap_table failed: error %d\n", ret);
++
++	/*
++	 * Should be a barrier for any external CPU state.
++	 */
++	cpu_init();
++}
++
++void __cpuinit smp_trap_init(trap_info_t *trap_ctxt)
++{
++	const trap_info_t *t = trap_table;
++
++	for (t = trap_table; t->address; t++) {
++		trap_ctxt[t->vector].flags = t->flags;
++		trap_ctxt[t->vector].cs = t->cs;
++		trap_ctxt[t->vector].address = t->address;
++	}
++}
++
++
++/* Actual parsing is done early in setup.c. */
++static int __init oops_dummy(char *s)
++{ 
++	panic_on_oops = 1;
++	return 1;
++} 
++__setup("oops=", oops_dummy); 
++
++static int __init kstack_setup(char *s)
++{
++	kstack_depth_to_print = simple_strtoul(s,NULL,0);
++	return 1;
++}
++__setup("kstack=", kstack_setup);
++
++#ifdef CONFIG_STACK_UNWIND
++static int __init call_trace_setup(char *s)
++{
++	if (strcmp(s, "old") == 0)
++		call_trace = -1;
++	else if (strcmp(s, "both") == 0)
++		call_trace = 0;
++	else if (strcmp(s, "newfallback") == 0)
++		call_trace = 1;
++	else if (strcmp(s, "new") == 0)
++		call_trace = 2;
++	return 1;
++}
++__setup("call_trace=", call_trace_setup);
++#endif
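
trap_init() above never touches a real IDT: the guest describes its handlers in trap_table[] and hands the whole thing to Xen with a single HYPERVISOR_set_trap_table hypercall. Per the comment before the table, the second field encodes the DPL in the low two bits and sets bit 2 ("|4") to request that event delivery be masked on entry, i.e. an interrupt gate rather than a trap gate. A tiny decoder for that encoding, for illustration only:

    #include <stdio.h>

    static void decode(unsigned vector, unsigned flags)
    {
        printf("vector %3u: dpl=%u, %s gate\n", vector, flags & 3,
               (flags & 4) ? "interrupt" : "trap");
    }

    int main(void)
    {
        decode(0, 0 | 4); /* divide_error: kernel only, events masked   */
        decode(3, 3 | 4); /* int3: reachable from user space (dpl 3)    */
        decode(0x80, 3);  /* ia32_syscall entry: plain trap gate, dpl 3 */
        return 0;
    }
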
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/vmlinux.lds.S
+--- a/arch/x86_64/kernel/vmlinux.lds.S	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/x86_64/kernel/vmlinux.lds.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -13,6 +13,13 @@
+ OUTPUT_ARCH(i386:x86-64)
+ ENTRY(phys_startup_64)
+ jiffies_64 = jiffies;
++PHDRS {
++	text PT_LOAD FLAGS(5);	/* R_E */
++	data PT_LOAD FLAGS(7);	/* RWE */
++	user PT_LOAD FLAGS(7);	/* RWE */
++	data.init PT_LOAD FLAGS(7);	/* RWE */
++	note PT_NOTE FLAGS(4);	/* R__ */
++}
+ SECTIONS
+ {
+   . = __START_KERNEL;
+@@ -31,7 +38,7 @@
+ 	KPROBES_TEXT
+ 	*(.fixup)
+ 	*(.gnu.warning)
+-	} = 0x9090
++	} :text = 0x9090
+   				/* out-of-line lock text */
+   .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET) { *(.text.lock) }
+ 
+@@ -57,16 +64,9 @@
+   .data : AT(ADDR(.data) - LOAD_OFFSET) {
+ 	*(.data)
+ 	CONSTRUCTORS
+-	}
++	} :data
+ 
+   _edata = .;			/* End of data section */
+-
+-  __bss_start = .;		/* BSS */
+-  .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
+-	*(.bss.page_aligned)	
+-	*(.bss)
+-	}
+-  __bss_stop = .;
+ 
+   . = ALIGN(PAGE_SIZE);
+   . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
+@@ -89,7 +89,7 @@
+ #define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
+ 
+   . = VSYSCALL_ADDR;
+-  .vsyscall_0 :	 AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) }
++  .vsyscall_0 :	 AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) } :user
+   __vsyscall_0 = VSYSCALL_VIRT_ADDR;
+ 
+   . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
+@@ -132,7 +132,7 @@
+   . = ALIGN(8192);		/* init_task */
+   .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
+ 	*(.data.init_task)
+-  }
++  }:data.init
+ 
+   . = ALIGN(4096);
+   .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
+@@ -222,6 +222,14 @@
+   . = ALIGN(4096);
+   __nosave_end = .;
+ 
++  __bss_start = .;		/* BSS */
++  . = ALIGN(4096);
++  .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
++	*(.bss.page_aligned)
++	*(.bss)
++	}
++  __bss_stop = .;
++
+   _end = . ;
+ 
+   /* Sections to be discarded */
+@@ -235,4 +243,6 @@
+   STABS_DEBUG
+ 
+   DWARF_DEBUG
++
++  NOTES
+ }
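
The PHDRS block added to the linker script pins sections to explicit program headers, most importantly so the vsyscall page travels in its own user segment, and the BSS is relocated toward the end of the image. The usual way to check the result is `readelf -l vmlinux`; the sketch below does the same check programmatically and assumes only the standard ELF64 layout from <elf.h>:

    #include <elf.h>
    #include <stdio.h>

    int main(int argc, char **argv)
    {
        const char *path = argc > 1 ? argv[1] : "vmlinux";
        FILE *f = fopen(path, "rb");
        Elf64_Ehdr eh;
        Elf64_Phdr ph;
        int i;

        if (!f || fread(&eh, sizeof eh, 1, f) != 1)
            return 1;
        fseek(f, (long)eh.e_phoff, SEEK_SET);
        for (i = 0; i < eh.e_phnum; i++) {
            if (fread(&ph, sizeof ph, 1, f) != 1)
                break;
            printf("phdr %d: type=%u flags=%c%c%c vaddr=%#llx\n",
                   i, ph.p_type,
                   (ph.p_flags & PF_R) ? 'R' : '-',
                   (ph.p_flags & PF_W) ? 'W' : '-',
                   (ph.p_flags & PF_X) ? 'E' : '-',
                   (unsigned long long)ph.p_vaddr);
        }
        fclose(f);
        return 0;
    }
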
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/vsyscall-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/kernel/vsyscall-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,227 @@
++/*
++ *  linux/arch/x86_64/kernel/vsyscall.c
++ *
++ *  Copyright (C) 2001 Andrea Arcangeli <andrea at suse.de> SuSE
++ *  Copyright 2003 Andi Kleen, SuSE Labs.
++ *
++ *  Thanks to hpa at transmeta.com for some useful hint.
++ *  Special thanks to Ingo Molnar for his early experience with
++ *  a different vsyscall implementation for Linux/IA32 and for the name.
++ *
++ *  vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
++ *  at virtual address -10Mbyte+1024bytes etc... There are at max 4
++ *  vsyscalls. One vsyscall can reserve more than 1 slot to avoid
++ *  jumping out of line if necessary. We cannot add more with this
++ *  mechanism because older kernels won't return -ENOSYS.
++ *  If we want more than four we need a vDSO.
++ *
++ *  Note: the concept clashes with user mode linux. If you use UML and
++ *  want per guest time just set the kernel.vsyscall64 sysctl to 0.
++ */
++
++#include <linux/time.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/timer.h>
++#include <linux/seqlock.h>
++#include <linux/jiffies.h>
++#include <linux/sysctl.h>
++
++#include <asm/vsyscall.h>
++#include <asm/pgtable.h>
++#include <asm/page.h>
++#include <asm/fixmap.h>
++#include <asm/errno.h>
++#include <asm/io.h>
++
++#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
++
++int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
++seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
++
++#include <asm/unistd.h>
++
++static __always_inline void timeval_normalize(struct timeval * tv)
++{
++	time_t __sec;
++
++	__sec = tv->tv_usec / 1000000;
++	if (__sec) {
++		tv->tv_usec %= 1000000;
++		tv->tv_sec += __sec;
++	}
++}
++
++static __always_inline void do_vgettimeofday(struct timeval * tv)
++{
++	long sequence, t;
++	unsigned long sec, usec;
++
++	do {
++		sequence = read_seqbegin(&__xtime_lock);
++		
++		sec = __xtime.tv_sec;
++		usec = (__xtime.tv_nsec / 1000) +
++			(__jiffies - __wall_jiffies) * (1000000 / HZ);
++
++		if (__vxtime.mode != VXTIME_HPET) {
++			t = get_cycles_sync();
++			if (t < __vxtime.last_tsc)
++				t = __vxtime.last_tsc;
++			usec += ((t - __vxtime.last_tsc) *
++				 __vxtime.tsc_quot) >> 32;
++			/* See comment in x86_64 do_gettimeofday. */
++		} else {
++			usec += ((readl((void *)fix_to_virt(VSYSCALL_HPET) + 0xf0) -
++				  __vxtime.last) * __vxtime.quot) >> 32;
++		}
++	} while (read_seqretry(&__xtime_lock, sequence));
++
++	tv->tv_sec = sec + usec / 1000000;
++	tv->tv_usec = usec % 1000000;
++}
++
++/* RED-PEN may want to re-add seq locking, but then the variable should be write-once. */
++static __always_inline void do_get_tz(struct timezone * tz)
++{
++	*tz = __sys_tz;
++}
++
++static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
++{
++	int ret;
++	asm volatile("vsysc2: syscall"
++		: "=a" (ret)
++		: "0" (__NR_gettimeofday),"D" (tv),"S" (tz) : __syscall_clobber );
++	return ret;
++}
++
++static __always_inline long time_syscall(long *t)
++{
++	long secs;
++	asm volatile("vsysc1: syscall"
++		: "=a" (secs)
++		: "0" (__NR_time),"D" (t) : __syscall_clobber);
++	return secs;
++}
++
++int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
++{
++	if (!__sysctl_vsyscall)
++		return gettimeofday(tv,tz);
++	if (tv)
++		do_vgettimeofday(tv);
++	if (tz)
++		do_get_tz(tz);
++	return 0;
++}
++
++/* This will break when the xtime seconds get inaccurate, but that is
++ * unlikely */
++time_t __vsyscall(1) vtime(time_t *t)
++{
++	if (!__sysctl_vsyscall)
++		return time_syscall(t);
++	else if (t)
++		*t = __xtime.tv_sec;		
++	return __xtime.tv_sec;
++}
++
++long __vsyscall(2) venosys_0(void)
++{
++	return -ENOSYS;
++}
++
++long __vsyscall(3) venosys_1(void)
++{
++	return -ENOSYS;
++}
++
++#ifdef CONFIG_SYSCTL
++
++#define SYSCALL 0x050f
++#define NOP2    0x9090
++
++/*
++ * NOP out syscall in vsyscall page when not needed.
++ */
++static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
++                        void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++	extern u16 vsysc1, vsysc2;
++	u16 *map1, *map2;
++	int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
++	if (!write)
++		return ret;
++	/* gcc has some trouble with __va(__pa()), so just do it this
++	   way. */
++	map1 = ioremap(__pa_symbol(&vsysc1), 2);
++	if (!map1)
++		return -ENOMEM;
++	map2 = ioremap(__pa_symbol(&vsysc2), 2);
++	if (!map2) {
++		ret = -ENOMEM;
++		goto out;
++	}
++	if (!sysctl_vsyscall) {
++		*map1 = SYSCALL;
++		*map2 = SYSCALL;
++	} else {
++		*map1 = NOP2;
++		*map2 = NOP2;
++	}
++	iounmap(map2);
++out:
++	iounmap(map1);
++	return ret;
++}
++
++static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
++				void __user *oldval, size_t __user *oldlenp,
++				void __user *newval, size_t newlen,
++				void **context)
++{
++	return -ENOSYS;
++}
++
++static ctl_table kernel_table2[] = {
++	{ .ctl_name = 99, .procname = "vsyscall64",
++	  .data = &sysctl_vsyscall, .maxlen = sizeof(int), .mode = 0644,
++	  .strategy = vsyscall_sysctl_nostrat,
++	  .proc_handler = vsyscall_sysctl_change },
++	{ 0, }
++};
++
++static ctl_table kernel_root_table2[] = {
++	{ .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
++	  .child = kernel_table2 },
++	{ 0 },
++};
++
++#endif
++
++static void __init map_vsyscall(void)
++{
++	extern char __vsyscall_0;
++	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
++
++	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
++}
++
++static int __init vsyscall_init(void)
++{
++	BUG_ON(((unsigned long) &vgettimeofday !=
++			VSYSCALL_ADDR(__NR_vgettimeofday)));
++	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
++	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
++	map_vsyscall();
++#ifdef CONFIG_XEN
++	sysctl_vsyscall = 0; /* disable vgettimeofday() */
++#endif
++#ifdef CONFIG_SYSCTL
++	register_sysctl_table(kernel_root_table2, 0);
++#endif
++	return 0;
++}
++
++__initcall(vsyscall_init);
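
For illustration, the fixed-address layout described in the header comment
(slot n at -10MB + n*1024) means userspace can call these entry points
directly, without a kernel entry. A minimal sketch of such a caller,
assuming the conventional x86_64 vsyscall base address; this program is
illustrative and not part of the patch:

    #include <stdio.h>
    #include <sys/time.h>
    #include <time.h>

    #define VSYSCALL_BASE 0xffffffffff600000UL   /* -10MB */
    #define VSYSCALL_SLOT(n) (VSYSCALL_BASE + (n) * 1024UL)

    typedef int (*vgettimeofday_fn)(struct timeval *, struct timezone *);
    typedef time_t (*vtime_fn)(time_t *);

    int main(void)
    {
        vgettimeofday_fn vgtod = (vgettimeofday_fn)VSYSCALL_SLOT(0);
        vtime_fn vtime_p = (vtime_fn)VSYSCALL_SLOT(1);
        struct timeval tv;

        if (vgtod(&tv, NULL) == 0)               /* __vsyscall(0) above */
            printf("%ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
        printf("%ld\n", (long)vtime_p(NULL));    /* __vsyscall(1) above */
        return 0;
    }

Note that vsyscall_init() above sets sysctl_vsyscall = 0 under CONFIG_XEN,
so on these kernels both calls fall through to the real syscalls via the
vsysc1/vsysc2 stubs instead of using the fast in-page path.
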
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/kernel/xen_entry.S
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/kernel/xen_entry.S	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,36 @@
++/*
++ * Copied from arch/xen/i386/kernel/entry.S
++ */                        
++/* Offsets into shared_info_t. */                
++#define evtchn_upcall_pending		/* 0 */
++#define evtchn_upcall_mask		1
++
++#define sizeof_vcpu_shift		6
++
++#ifdef CONFIG_SMP
++//#define preempt_disable(reg)	incl threadinfo_preempt_count(reg)
++//#define preempt_enable(reg)	decl threadinfo_preempt_count(reg)
++#define preempt_disable(reg)
++#define preempt_enable(reg)
++#define XEN_GET_VCPU_INFO(reg)	preempt_disable(%rbp)			; \
++				movq %gs:pda_cpunumber,reg		; \
++				shl  $32, reg				; \
++				shr  $32-sizeof_vcpu_shift,reg		; \
++				addq HYPERVISOR_shared_info,reg
++#define XEN_PUT_VCPU_INFO(reg)	preempt_enable(%rbp)
++#define XEN_PUT_VCPU_INFO_fixup .byte 0xff,0xff,0xff
++#else
++#define XEN_GET_VCPU_INFO(reg)	movq HYPERVISOR_shared_info,reg
++#define XEN_PUT_VCPU_INFO(reg)
++#define XEN_PUT_VCPU_INFO_fixup
++#endif
++
++#define XEN_LOCKED_BLOCK_EVENTS(reg)	movb $1,evtchn_upcall_mask(reg)
++#define XEN_LOCKED_UNBLOCK_EVENTS(reg)	movb $0,evtchn_upcall_mask(reg)
++#define XEN_BLOCK_EVENTS(reg)	XEN_GET_VCPU_INFO(reg)			; \
++				XEN_LOCKED_BLOCK_EVENTS(reg)		; \
++    				XEN_PUT_VCPU_INFO(reg)
++#define XEN_UNBLOCK_EVENTS(reg)	XEN_GET_VCPU_INFO(reg)			; \
++				XEN_LOCKED_UNBLOCK_EVENTS(reg)		; \
++    				XEN_PUT_VCPU_INFO(reg)
++#define XEN_TEST_PENDING(reg)	testb $0xFF,evtchn_upcall_pending(reg)
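
In C terms, XEN_GET_VCPU_INFO indexes the shared-info page by vCPU number
(sizeof_vcpu_shift == 6 gives 64-byte vcpu_info slots) and the
BLOCK/UNBLOCK macros flip the one-byte upcall mask in that slot. A rough
equivalent, using a simplified stand-in for the real vcpu_info_t layout:

    /* Only the two bytes the assembly touches, padded to 1 << 6 bytes. */
    struct vcpu_info_sketch {
        unsigned char evtchn_upcall_pending;    /* offset 0 */
        unsigned char evtchn_upcall_mask;       /* offset 1 */
        unsigned char pad[62];
    };

    /* XEN_GET_VCPU_INFO: shared_info + (cpu << sizeof_vcpu_shift) */
    static inline struct vcpu_info_sketch *
    get_vcpu_info(void *shared_info, unsigned int cpu)
    {
        return (struct vcpu_info_sketch *)((char *)shared_info + (cpu << 6));
    }

    static inline void block_events(struct vcpu_info_sketch *v)
    {
        v->evtchn_upcall_mask = 1;              /* XEN_LOCKED_BLOCK_EVENTS */
    }

    static inline void unblock_events(struct vcpu_info_sketch *v)
    {
        v->evtchn_upcall_mask = 0;              /* XEN_LOCKED_UNBLOCK_EVENTS */
    }

    static inline int test_pending(struct vcpu_info_sketch *v)
    {
        return v->evtchn_upcall_pending != 0;   /* XEN_TEST_PENDING */
    }
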
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/lib/Makefile
+--- a/arch/x86_64/lib/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/x86_64/lib/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -10,3 +10,4 @@
+ 	usercopy.o getuser.o putuser.o  \
+ 	thunk.o clear_page.o copy_page.o bitstr.o bitops.o
+ lib-y += memcpy.o memmove.o memset.o copy_user.o
++lib-$(CONFIG_XEN_SCRUB_PAGES) += scrub.o
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/lib/scrub.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/lib/scrub.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1 @@
++#include "../../i386/lib/scrub.c"
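
The one-line scrub.c simply reuses the i386 implementation. The point of
CONFIG_XEN_SCRUB_PAGES is to wipe freed pages before they are returned to
the hypervisor so their contents cannot leak into other domains; stripped
of kernel plumbing, the helper amounts to something like this (names and
standalone form are illustrative, not the actual i386 source):

    #include <string.h>

    #define PAGE_SIZE 4096UL

    /* Zero 'count' pages starting at 'va' before releasing them. */
    static void scrub_pages_sketch(void *va, unsigned int count)
    {
        while (count--) {
            memset(va, 0, PAGE_SIZE);
            va = (char *)va + PAGE_SIZE;
        }
    }
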
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/mm/Makefile
+--- a/arch/x86_64/mm/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/x86_64/mm/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -7,5 +7,8 @@
+ obj-$(CONFIG_NUMA) += numa.o
+ obj-$(CONFIG_K8_NUMA) += k8topology.o
+ obj-$(CONFIG_ACPI_NUMA) += srat.o
++obj-$(CONFIG_XEN) += hypervisor.o
+ 
+ hugetlbpage-y = ../../i386/mm/hugetlbpage.o
++ioremap-$(CONFIG_XEN) := ../../i386/mm/ioremap.o
++hypervisor-y := ../../i386/mm/hypervisor.o
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/mm/fault-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/mm/fault-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,724 @@
++/*
++ *  linux/arch/x86-64/mm/fault.c
++ *
++ *  Copyright (C) 1995  Linus Torvalds
++ *  Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
++ */
++
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/tty.h>
++#include <linux/vt_kern.h>		/* For unblank_screen() */
++#include <linux/compiler.h>
++#include <linux/module.h>
++#include <linux/kprobes.h>
++
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/pgalloc.h>
++#include <asm/smp.h>
++#include <asm/tlbflush.h>
++#include <asm/proto.h>
++#include <asm/kdebug.h>
++#include <asm-generic/sections.h>
++
++/* Page fault error code bits */
++#define PF_PROT	(1<<0)		/* or no page found */
++#define PF_WRITE	(1<<1)
++#define PF_USER	(1<<2)
++#define PF_RSVD	(1<<3)
++#define PF_INSTR	(1<<4)
++
++#ifdef CONFIG_KPROBES
++ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
++
++/* Hook to register for page fault notifications */
++int register_page_fault_notifier(struct notifier_block *nb)
++{
++	vmalloc_sync_all();
++	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
++}
++
++int unregister_page_fault_notifier(struct notifier_block *nb)
++{
++	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
++}
++
++static inline int notify_page_fault(enum die_val val, const char *str,
++			struct pt_regs *regs, long err, int trap, int sig)
++{
++	struct die_args args = {
++		.regs = regs,
++		.str = str,
++		.err = err,
++		.trapnr = trap,
++		.signr = sig
++	};
++	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
++}
++#else
++static inline int notify_page_fault(enum die_val val, const char *str,
++			struct pt_regs *regs, long err, int trap, int sig)
++{
++	return NOTIFY_DONE;
++}
++#endif
++
++void bust_spinlocks(int yes)
++{
++	int loglevel_save = console_loglevel;
++	if (yes) {
++		oops_in_progress = 1;
++	} else {
++#ifdef CONFIG_VT
++		unblank_screen();
++#endif
++		oops_in_progress = 0;
++		/*
++		 * OK, the message is on the console.  Now we call printk()
++		 * without oops_in_progress set so that printk will give klogd
++		 * a poke.  Hold onto your hats...
++		 */
++		console_loglevel = 15;		/* NMI oopser may have shut the console up */
++		printk(" ");
++		console_loglevel = loglevel_save;
++	}
++}
++
++/* Sometimes the CPU reports invalid exceptions on prefetch.
++   Check that here and ignore.
++   Opcode checker based on code by Richard Brunner */
++static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
++				unsigned long error_code)
++{ 
++	unsigned char *instr;
++	int scan_more = 1;
++	int prefetch = 0; 
++	unsigned char *max_instr;
++
++	/* If it was an exec fault, ignore it */
++	if (error_code & PF_INSTR)
++		return 0;
++	
++	instr = (unsigned char *)convert_rip_to_linear(current, regs);
++	max_instr = instr + 15;
++
++	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
++		return 0;
++
++	while (scan_more && instr < max_instr) { 
++		unsigned char opcode;
++		unsigned char instr_hi;
++		unsigned char instr_lo;
++
++		if (__get_user(opcode, instr))
++			break; 
++
++		instr_hi = opcode & 0xf0; 
++		instr_lo = opcode & 0x0f; 
++		instr++;
++
++		switch (instr_hi) { 
++		case 0x20:
++		case 0x30:
++			/* Values 0x26,0x2E,0x36,0x3E are valid x86
++			   prefixes.  In long mode, the CPU will signal
++			   invalid opcode if some of these prefixes are
++			   present so we will never get here anyway */
++			scan_more = ((instr_lo & 7) == 0x6);
++			break;
++			
++		case 0x40:
++			/* In AMD64 long mode, 0x40 to 0x4F are valid REX prefixes
++			   Need to figure out under what instruction mode the
++			   instruction was issued ... */
++			/* Could check the LDT for lm, but for now it's good
++			   enough to assume that long mode only uses well known
++			   segments or kernel. */
++			scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
++			break;
++			
++		case 0x60:
++			/* 0x64 thru 0x67 are valid prefixes in all modes. */
++			scan_more = (instr_lo & 0xC) == 0x4;
++			break;		
++		case 0xF0:
++			/* 0xF0, 0xF2, and 0xF3 are valid prefixes in all modes. */
++			scan_more = !instr_lo || (instr_lo>>1) == 1;
++			break;			
++		case 0x00:
++			/* Prefetch instruction is 0x0F0D or 0x0F18 */
++			scan_more = 0;
++			if (__get_user(opcode, instr)) 
++				break;
++			prefetch = (instr_lo == 0xF) &&
++				(opcode == 0x0D || opcode == 0x18);
++			break;			
++		default:
++			scan_more = 0;
++			break;
++		} 
++	}
++	return prefetch;
++}
++
++static int bad_address(void *p) 
++{ 
++	unsigned long dummy;
++	return __get_user(dummy, (unsigned long *)p);
++} 
++
++void dump_pagetable(unsigned long address)
++{
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
++
++	pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
++	pgd += pgd_index(address);
++	if (bad_address(pgd)) goto bad;
++	printk("PGD %lx ", pgd_val(*pgd));
++	if (!pgd_present(*pgd)) goto ret; 
++
++	pud = pud_offset(pgd, address);
++	if (bad_address(pud)) goto bad;
++	printk("PUD %lx ", pud_val(*pud));
++	if (!pud_present(*pud))	goto ret;
++
++	pmd = pmd_offset(pud, address);
++	if (bad_address(pmd)) goto bad;
++	printk("PMD %lx ", pmd_val(*pmd));
++	if (!pmd_present(*pmd))	goto ret;	 
++
++	pte = pte_offset_kernel(pmd, address);
++	if (bad_address(pte)) goto bad;
++	printk("PTE %lx", pte_val(*pte)); 
++ret:
++	printk("\n");
++	return;
++bad:
++	printk("BAD\n");
++}
++
++static const char errata93_warning[] = 
++KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
++KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
++KERN_ERR "******* Please consider a BIOS update.\n"
++KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
++
++/* Workaround for K8 erratum #93 & buggy BIOS.
++   BIOS SMM functions are required to use a specific workaround
++   to avoid corruption of the 64bit RIP register on C stepping K8. 
++   A lot of BIOSes that didn't get tested properly miss this.
++   The OS sees this as a page fault with the upper 32bits of RIP cleared.
++   Try to work around it here.
++   Note we only handle faults in kernel here. */
++
++static int is_errata93(struct pt_regs *regs, unsigned long address) 
++{
++	static int warned;
++	if (address != regs->rip)
++		return 0;
++	if ((address >> 32) != 0) 
++		return 0;
++	address |= 0xffffffffUL << 32;
++	if ((address >= (u64)_stext && address <= (u64)_etext) || 
++	    (address >= MODULES_VADDR && address <= MODULES_END)) { 
++		if (!warned) {
++			printk(errata93_warning); 		
++			warned = 1;
++		}
++		regs->rip = address;
++		return 1;
++	}
++	return 0;
++} 
++
++int unhandled_signal(struct task_struct *tsk, int sig)
++{
++	if (tsk->pid == 1)
++		return 1;
++	if (tsk->ptrace & PT_PTRACED)
++		return 0;
++	return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
++		(tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
++}
++
++static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
++				 unsigned long error_code)
++{
++	unsigned long flags = oops_begin();
++	struct task_struct *tsk;
++
++	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
++	       current->comm, address);
++	dump_pagetable(address);
++	tsk = current;
++	tsk->thread.cr2 = address;
++	tsk->thread.trap_no = 14;
++	tsk->thread.error_code = error_code;
++	__die("Bad pagetable", regs, error_code);
++	oops_end(flags);
++	do_exit(SIGKILL);
++}
++
++/*
++ * Handle a fault on the vmalloc area
++ *
++ * This assumes no large pages in there.
++ */
++static int vmalloc_fault(unsigned long address)
++{
++	pgd_t *pgd, *pgd_ref;
++	pud_t *pud, *pud_ref;
++	pmd_t *pmd, *pmd_ref;
++	pte_t *pte, *pte_ref;
++
++	/* Copy kernel mappings over when needed. This can also
++	   happen within a race in page table update. In the latter
++	   case just flush. */
++
++	/* On Xen the line below does not always work. Needs investigating! */
++	/*pgd = pgd_offset(current->mm ?: &init_mm, address);*/
++	pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
++	pgd += pgd_index(address);
++	pgd_ref = pgd_offset_k(address);
++	if (pgd_none(*pgd_ref))
++		return -1;
++	if (pgd_none(*pgd))
++		set_pgd(pgd, *pgd_ref);
++	else
++		BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
++
++	/* Below here mismatches are bugs because these lower tables
++	   are shared */
++
++	pud = pud_offset(pgd, address);
++	pud_ref = pud_offset(pgd_ref, address);
++	if (pud_none(*pud_ref))
++		return -1;
++	if (pud_none(*pud) || pud_page(*pud) != pud_page(*pud_ref))
++		BUG();
++	pmd = pmd_offset(pud, address);
++	pmd_ref = pmd_offset(pud_ref, address);
++	if (pmd_none(*pmd_ref))
++		return -1;
++	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
++		BUG();
++	pte_ref = pte_offset_kernel(pmd_ref, address);
++	if (!pte_present(*pte_ref))
++		return -1;
++	pte = pte_offset_kernel(pmd, address);
++	/* Don't use pte_page here, because the mappings can point
++	   outside mem_map, and the NUMA hash lookup cannot handle
++	   that. */
++	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
++		BUG();
++	return 0;
++}
++
++int page_fault_trace = 0;
++int exception_trace = 1;
++
++
++#define MEM_VERBOSE 1
++
++#ifdef MEM_VERBOSE
++#define MEM_LOG(_f, _a...)			\
++	printk("fault.c:[%d]-> " _f "\n",	\
++	__LINE__ , ## _a )
++#else
++#define MEM_LOG(_f, _a...) ((void)0)
++#endif
++
++static int spurious_fault(struct pt_regs *regs,
++			  unsigned long address,
++			  unsigned long error_code)
++{
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
++
++#ifdef CONFIG_XEN
++	/* Faults in hypervisor area are never spurious. */
++	if ((address >= HYPERVISOR_VIRT_START) &&
++	    (address < HYPERVISOR_VIRT_END))
++		return 0;
++#endif
++
++	/* Reserved-bit violation or user access to kernel space? */
++	if (error_code & (PF_RSVD|PF_USER))
++		return 0;
++
++	pgd = init_mm.pgd + pgd_index(address);
++	if (!pgd_present(*pgd))
++		return 0;
++
++	pud = pud_offset(pgd, address);
++	if (!pud_present(*pud))
++		return 0;
++
++	pmd = pmd_offset(pud, address);
++	if (!pmd_present(*pmd))
++		return 0;
++
++	pte = pte_offset_kernel(pmd, address);
++	if (!pte_present(*pte))
++		return 0;
++	if ((error_code & PF_WRITE) && !pte_write(*pte))
++		return 0;
++	if ((error_code & PF_INSTR) && (__pte_val(*pte) & _PAGE_NX))
++		return 0;
++
++	return 1;
++}
++
++/*
++ * This routine handles page faults.  It determines the address,
++ * and the problem, and then passes it off to one of the appropriate
++ * routines.
++ */
++asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
++					unsigned long error_code)
++{
++	struct task_struct *tsk;
++	struct mm_struct *mm;
++	struct vm_area_struct * vma;
++	unsigned long address;
++	const struct exception_table_entry *fixup;
++	int write;
++	unsigned long flags;
++	siginfo_t info;
++
++	if (!user_mode(regs))
++		error_code &= ~PF_USER; /* means kernel */
++
++	tsk = current;
++	mm = tsk->mm;
++	prefetchw(&mm->mmap_sem);
++
++	/* get the address */
++	address = current_vcpu_info()->arch.cr2;
++
++	info.si_code = SEGV_MAPERR;
++
++
++	/*
++	 * We fault-in kernel-space virtual memory on-demand. The
++	 * 'reference' page table is init_mm.pgd.
++	 *
++	 * NOTE! We MUST NOT take any locks for this case. We may
++	 * be in an interrupt or a critical region, and should
++	 * only copy the information from the master page table,
++	 * nothing more.
++	 *
++	 * This verifies that the fault happens in kernel space
++	 * (error_code & 4) == 0, and that the fault was not a
++	 * protection error (error_code & 9) == 0.
++	 */
++	if (unlikely(address >= TASK_SIZE64)) {
++		/*
++		 * Don't check for the module range here: its PML4
++		 * is always initialized because it's shared with the main
++		 * kernel text. Only vmalloc may need PML4 syncups.
++		 */
++		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
++		      ((address >= VMALLOC_START && address < VMALLOC_END))) {
++			if (vmalloc_fault(address) >= 0)
++				return;
++		}
++		/* Can take a spurious fault if mapping changes R/O -> R/W. */
++		if (spurious_fault(regs, address, error_code))
++			return;
++		if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
++						SIGSEGV) == NOTIFY_STOP)
++			return;
++		/*
++		 * Don't take the mm semaphore here. If we fixup a prefetch
++		 * fault we could otherwise deadlock.
++		 */
++		goto bad_area_nosemaphore;
++	}
++
++	if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
++					SIGSEGV) == NOTIFY_STOP)
++		return;
++
++	if (likely(regs->eflags & X86_EFLAGS_IF))
++		local_irq_enable();
++
++	if (unlikely(page_fault_trace))
++		printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
++		       regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code); 
++
++	if (unlikely(error_code & PF_RSVD))
++		pgtable_bad(address, regs, error_code);
++
++	/*
++	 * If we're in an interrupt or have no user
++	 * context, we must not take the fault..
++	 */
++	if (unlikely(in_atomic() || !mm))
++		goto bad_area_nosemaphore;
++
++ again:
++	/* When running in the kernel we expect faults to occur only to
++	 * addresses in user space.  All other faults represent errors in the
++	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
++	 * erroneous fault occurring in a code path which already holds mmap_sem
++	 * we will deadlock attempting to validate the fault against the
++	 * address space.  Luckily the kernel only validly references user
++	 * space from well defined areas of code, which are listed in the
++	 * exceptions table.
++	 *
++	 * As the vast majority of faults will be valid we will only perform
++	 * the source reference check when there is a possibility of a deadlock.
++	 * Attempt to lock the address space, if we cannot we then validate the
++	 * source.  If this is invalid we can skip the address space check,
++	 * thus avoiding the deadlock.
++	 */
++	if (!down_read_trylock(&mm->mmap_sem)) {
++		if ((error_code & PF_USER) == 0 &&
++		    !search_exception_tables(regs->rip))
++			goto bad_area_nosemaphore;
++		down_read(&mm->mmap_sem);
++	}
++
++	vma = find_vma(mm, address);
++	if (!vma)
++		goto bad_area;
++	if (likely(vma->vm_start <= address))
++		goto good_area;
++	if (!(vma->vm_flags & VM_GROWSDOWN))
++		goto bad_area;
++	if (error_code & 4) {
++		/* Allow userspace just enough access below the stack pointer
++		 * to let the 'enter' instruction work.
++		 */
++		if (address + 65536 + 32 * sizeof(unsigned long) < regs->rsp)
++			goto bad_area;
++	}
++	if (expand_stack(vma, address))
++		goto bad_area;
++/*
++ * Ok, we have a good vm_area for this memory access, so
++ * we can handle it..
++ */
++good_area:
++	info.si_code = SEGV_ACCERR;
++	write = 0;
++	switch (error_code & (PF_PROT|PF_WRITE)) {
++		default:	/* 3: write, present */
++			/* fall through */
++		case PF_WRITE:		/* write, not present */
++			if (!(vma->vm_flags & VM_WRITE))
++				goto bad_area;
++			write++;
++			break;
++		case PF_PROT:		/* read, present */
++			goto bad_area;
++		case 0:			/* read, not present */
++			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
++				goto bad_area;
++	}
++
++	/*
++	 * If for any reason at all we couldn't handle the fault,
++	 * make sure we exit gracefully rather than endlessly redo
++	 * the fault.
++	 */
++	switch (handle_mm_fault(mm, vma, address, write)) {
++	case VM_FAULT_MINOR:
++		tsk->min_flt++;
++		break;
++	case VM_FAULT_MAJOR:
++		tsk->maj_flt++;
++		break;
++	case VM_FAULT_SIGBUS:
++		goto do_sigbus;
++	default:
++		goto out_of_memory;
++	}
++
++	up_read(&mm->mmap_sem);
++	return;
++
++/*
++ * Something tried to access memory that isn't in our memory map..
++ * Fix it, but check if it's kernel or user first..
++ */
++bad_area:
++	up_read(&mm->mmap_sem);
++
++bad_area_nosemaphore:
++	/* User mode accesses just cause a SIGSEGV */
++	if (error_code & PF_USER) {
++		if (is_prefetch(regs, address, error_code))
++			return;
++
++		/* Work around K8 erratum #100: K8 in compat mode
++		   occasionally jumps to illegal addresses >4GB.  We
++		   catch this here in the page fault handler because
++		   these addresses are not reachable. Just detect this
++		   case and return.  Any code segment in LDT is
++		   compatibility mode. */
++		if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
++		    (address >> 32))
++			return;
++
++		if (exception_trace && unhandled_signal(tsk, SIGSEGV)) {
++			printk(
++		       "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
++					tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
++					tsk->comm, tsk->pid, address, regs->rip,
++					regs->rsp, error_code);
++		}
++       
++		tsk->thread.cr2 = address;
++		/* Kernel addresses are always protection faults */
++		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
++		tsk->thread.trap_no = 14;
++		info.si_signo = SIGSEGV;
++		info.si_errno = 0;
++		/* info.si_code has been set above */
++		info.si_addr = (void __user *)address;
++		force_sig_info(SIGSEGV, &info, tsk);
++		return;
++	}
++
++no_context:
++	
++	/* Are we prepared to handle this kernel fault?  */
++	fixup = search_exception_tables(regs->rip);
++	if (fixup) {
++		regs->rip = fixup->fixup;
++		return;
++	}
++
++	/* 
++	 * Hall of shame of CPU/BIOS bugs.
++	 */
++
++ 	if (is_prefetch(regs, address, error_code))
++ 		return;
++
++	if (is_errata93(regs, address))
++		return; 
++
++/*
++ * Oops. The kernel tried to access some bad page. We'll have to
++ * terminate things with extreme prejudice.
++ */
++
++	flags = oops_begin();
++
++	if (address < PAGE_SIZE)
++		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
++	else
++		printk(KERN_ALERT "Unable to handle kernel paging request");
++	printk(" at %016lx RIP: \n" KERN_ALERT,address);
++	printk_address(regs->rip);
++	dump_pagetable(address);
++	tsk->thread.cr2 = address;
++	tsk->thread.trap_no = 14;
++	tsk->thread.error_code = error_code;
++	__die("Oops", regs, error_code);
++	/* Executive summary in case the body of the oops scrolled away */
++	printk(KERN_EMERG "CR2: %016lx\n", address);
++	oops_end(flags);
++	do_exit(SIGKILL);
++
++/*
++ * We ran out of memory, or some other thing happened to us that made
++ * us unable to handle the page fault gracefully.
++ */
++out_of_memory:
++	up_read(&mm->mmap_sem);
++	if (current->pid == 1) { 
++		yield();
++		goto again;
++	}
++	printk("VM: killing process %s\n", tsk->comm);
++	if (error_code & 4)
++		do_exit(SIGKILL);
++	goto no_context;
++
++do_sigbus:
++	up_read(&mm->mmap_sem);
++
++	/* Kernel mode? Handle exceptions or die */
++	if (!(error_code & PF_USER))
++		goto no_context;
++
++	tsk->thread.cr2 = address;
++	tsk->thread.error_code = error_code;
++	tsk->thread.trap_no = 14;
++	info.si_signo = SIGBUS;
++	info.si_errno = 0;
++	info.si_code = BUS_ADRERR;
++	info.si_addr = (void __user *)address;
++	force_sig_info(SIGBUS, &info, tsk);
++	return;
++}
++
++DEFINE_SPINLOCK(pgd_lock);
++struct page *pgd_list;
++
++void vmalloc_sync_all(void)
++{
++	/* Note that races in the updates of insync and start aren't 
++	   problematic:
++	   insync can only get set bits added, and updates to start are only
++	   improving performance (without affecting correctness if undone). */
++	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
++	static unsigned long start = VMALLOC_START & PGDIR_MASK;
++	unsigned long address;
++
++	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
++		if (!test_bit(pgd_index(address), insync)) {
++			const pgd_t *pgd_ref = pgd_offset_k(address);
++			struct page *page;
++
++			if (pgd_none(*pgd_ref))
++				continue;
++			spin_lock(&pgd_lock);
++			for (page = pgd_list; page;
++			     page = (struct page *)page->index) {
++				pgd_t *pgd;
++				pgd = (pgd_t *)page_address(page) + pgd_index(address);
++				if (pgd_none(*pgd))
++					set_pgd(pgd, *pgd_ref);
++				else
++					BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
++			}
++			spin_unlock(&pgd_lock);
++			set_bit(pgd_index(address), insync);
++		}
++		if (address == start)
++			start = address + PGDIR_SIZE;
++	}
++	/* Check that there is no need to do the same for the modules area. */
++	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
++	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == 
++				(__START_KERNEL & PGDIR_MASK)));
++}
++
++static int __init enable_pagefaulttrace(char *str)
++{
++	page_fault_trace = 1;
++	return 1;
++}
++__setup("pagefaulttrace", enable_pagefaulttrace);
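
For reference, the PF_* error-code bits that do_page_fault() dispatches on
can be decoded as below. Note how spurious_fault() bails out as soon as
PF_RSVD or PF_USER is set: such faults are never spurious. A standalone
illustration, not kernel code:

    #include <stdio.h>

    #define PF_PROT  (1 << 0)   /* 0: page not present, 1: protection fault */
    #define PF_WRITE (1 << 1)   /* 0: read access, 1: write access */
    #define PF_USER  (1 << 2)   /* fault raised in user mode */
    #define PF_RSVD  (1 << 3)   /* reserved bit set in a paging entry */
    #define PF_INSTR (1 << 4)   /* fault on instruction fetch */

    static void describe_fault(unsigned long ec)
    {
        printf("%s %s of %s page%s\n",
               (ec & PF_USER)  ? "user"  : "kernel",
               (ec & PF_INSTR) ? "fetch" : (ec & PF_WRITE) ? "write" : "read",
               (ec & PF_PROT)  ? "a present" : "a non-present",
               (ec & PF_RSVD)  ? " (reserved bit set)" : "");
    }

    int main(void)
    {
        describe_fault(PF_USER | PF_WRITE);  /* e.g. user write to unmapped page */
        describe_fault(PF_PROT | PF_WRITE);  /* e.g. kernel COW-style write fault */
        return 0;
    }
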
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/mm/init-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/mm/init-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1213 @@
++/*
++ *  linux/arch/x86_64/mm/init.c
++ *
++ *  Copyright (C) 1995  Linus Torvalds
++ *  Copyright (C) 2000  Pavel Machek <pavel at suse.cz>
++ *  Copyright (C) 2002,2003 Andi Kleen <ak at suse.de>
++ *
++ *  Jun Nakajima <jun.nakajima at intel.com>
++ *	Modified for Xen.
++ */
++
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/swap.h>
++#include <linux/smp.h>
++#include <linux/init.h>
++#include <linux/pagemap.h>
++#include <linux/bootmem.h>
++#include <linux/proc_fs.h>
++#include <linux/pci.h>
++#include <linux/poison.h>
++#include <linux/dma-mapping.h>
++#include <linux/module.h>
++#include <linux/memory_hotplug.h>
++
++#include <asm/processor.h>
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
++#include <asm/dma.h>
++#include <asm/fixmap.h>
++#include <asm/e820.h>
++#include <asm/apic.h>
++#include <asm/tlb.h>
++#include <asm/mmu_context.h>
++#include <asm/proto.h>
++#include <asm/smp.h>
++#include <asm/sections.h>
++
++#include <xen/features.h>
++
++#ifndef Dprintk
++#define Dprintk(x...)
++#endif
++
++struct dma_mapping_ops* dma_ops;
++EXPORT_SYMBOL(dma_ops);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++unsigned int __kernel_page_user;
++EXPORT_SYMBOL(__kernel_page_user);
++#endif
++
++int after_bootmem;
++
++extern unsigned long *contiguous_bitmap;
++
++static unsigned long dma_reserve __initdata;
++
++DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
++extern unsigned long start_pfn;
++
++/*
++ * Use this until direct mapping is established, i.e. before __va() is 
++ * available in init_memory_mapping().
++ */
++
++#define addr_to_page(addr, page)				\
++	(addr) &= PHYSICAL_PAGE_MASK;				\
++	(page) = ((unsigned long *) ((unsigned long)		\
++	(((mfn_to_pfn((addr) >> PAGE_SHIFT)) << PAGE_SHIFT) +	\
++	__START_KERNEL_map)))
++
++static void __meminit early_make_page_readonly(void *va, unsigned int feature)
++{
++	unsigned long addr, _va = (unsigned long)va;
++	pte_t pte, *ptep;
++	unsigned long *page = (unsigned long *) init_level4_pgt;
++
++	BUG_ON(after_bootmem);
++
++	if (xen_feature(feature))
++		return;
++
++	addr = (unsigned long) page[pgd_index(_va)];
++	addr_to_page(addr, page);
++
++	addr = page[pud_index(_va)];
++	addr_to_page(addr, page);
++
++	addr = page[pmd_index(_va)];
++	addr_to_page(addr, page);
++
++	ptep = (pte_t *) &page[pte_index(_va)];
++
++	pte.pte = ptep->pte & ~_PAGE_RW;
++	if (HYPERVISOR_update_va_mapping(_va, pte, 0))
++		BUG();
++}
++
++static void __make_page_readonly(void *va)
++{
++	pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t pte, *ptep;
++	unsigned long addr = (unsigned long) va;
++
++	pgd = pgd_offset_k(addr);
++	pud = pud_offset(pgd, addr);
++	pmd = pmd_offset(pud, addr);
++	ptep = pte_offset_kernel(pmd, addr);
++
++	pte.pte = ptep->pte & ~_PAGE_RW;
++	if (HYPERVISOR_update_va_mapping(addr, pte, 0))
++		xen_l1_entry_update(ptep, pte); /* fallback */
++
++	if ((addr >= VMALLOC_START) && (addr < VMALLOC_END))
++		__make_page_readonly(__va(pte_pfn(pte) << PAGE_SHIFT));
++}
++
++static void __make_page_writable(void *va)
++{
++	pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t pte, *ptep;
++	unsigned long addr = (unsigned long) va;
++
++	pgd = pgd_offset_k(addr);
++	pud = pud_offset(pgd, addr);
++	pmd = pmd_offset(pud, addr);
++	ptep = pte_offset_kernel(pmd, addr);
++
++	pte.pte = ptep->pte | _PAGE_RW;
++	if (HYPERVISOR_update_va_mapping(addr, pte, 0))
++		xen_l1_entry_update(ptep, pte); /* fallback */
++
++	if ((addr >= VMALLOC_START) && (addr < VMALLOC_END))
++		__make_page_writable(__va(pte_pfn(pte) << PAGE_SHIFT));
++}
++
++void make_page_readonly(void *va, unsigned int feature)
++{
++	if (!xen_feature(feature))
++		__make_page_readonly(va);
++}
++
++void make_page_writable(void *va, unsigned int feature)
++{
++	if (!xen_feature(feature))
++		__make_page_writable(va);
++}
++
++void make_pages_readonly(void *va, unsigned nr, unsigned int feature)
++{
++	if (xen_feature(feature))
++		return;
++
++	while (nr-- != 0) {
++		__make_page_readonly(va);
++		va = (void*)((unsigned long)va + PAGE_SIZE);
++	}
++}
++
++void make_pages_writable(void *va, unsigned nr, unsigned int feature)
++{
++	if (xen_feature(feature))
++		return;
++
++	while (nr-- != 0) {
++		__make_page_writable(va);
++		va = (void*)((unsigned long)va + PAGE_SIZE);
++	}
++}
++
++/*
++ * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
++ * in physical space so we can cache the location of the first one and
++ * move around without checking the pgd every time.
++ */
++
++void show_mem(void)
++{
++	long i, total = 0, reserved = 0;
++	long shared = 0, cached = 0;
++	pg_data_t *pgdat;
++	struct page *page;
++
++	printk(KERN_INFO "Mem-info:\n");
++	show_free_areas();
++	printk(KERN_INFO "Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
++
++	for_each_online_pgdat(pgdat) {
++		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
++			page = pfn_to_page(pgdat->node_start_pfn + i);
++			total++;
++			if (PageReserved(page))
++				reserved++;
++			else if (PageSwapCache(page))
++				cached++;
++			else if (page_count(page))
++				shared += page_count(page) - 1;
++		}
++	}
++	printk(KERN_INFO "%lu pages of RAM\n", total);
++	printk(KERN_INFO "%lu reserved pages\n",reserved);
++	printk(KERN_INFO "%lu pages shared\n",shared);
++	printk(KERN_INFO "%lu pages swap cached\n",cached);
++}
++
++
++static __init void *spp_getpage(void)
++{ 
++	void *ptr;
++	if (after_bootmem)
++		ptr = (void *) get_zeroed_page(GFP_ATOMIC); 
++	else if (start_pfn < table_end) {
++		ptr = __va(start_pfn << PAGE_SHIFT);
++		start_pfn++;
++		memset(ptr, 0, PAGE_SIZE);
++	} else
++		ptr = alloc_bootmem_pages(PAGE_SIZE);
++	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
++		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");
++
++	Dprintk("spp_getpage %p\n", ptr);
++	return ptr;
++} 
++
++#define pgd_offset_u(address) (__user_pgd(init_level4_pgt) + pgd_index(address))
++#define pud_offset_u(address) (level3_user_pgt + pud_index(address))
++
++static __init void set_pte_phys(unsigned long vaddr,
++			 unsigned long phys, pgprot_t prot, int user_mode)
++{
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte, new_pte;
++
++	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
++
++	pgd = (user_mode ? pgd_offset_u(vaddr) : pgd_offset_k(vaddr));
++	if (pgd_none(*pgd)) {
++		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
++		return;
++	}
++	pud = (user_mode ? pud_offset_u(vaddr) : pud_offset(pgd, vaddr));
++	if (pud_none(*pud)) {
++		pmd = (pmd_t *) spp_getpage(); 
++		make_page_readonly(pmd, XENFEAT_writable_page_tables);
++		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
++		if (pmd != pmd_offset(pud, 0)) {
++			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
++			return;
++		}
++	}
++	pmd = pmd_offset(pud, vaddr);
++	if (pmd_none(*pmd)) {
++		pte = (pte_t *) spp_getpage();
++		make_page_readonly(pte, XENFEAT_writable_page_tables);
++		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
++		if (pte != pte_offset_kernel(pmd, 0)) {
++			printk("PAGETABLE BUG #02!\n");
++			return;
++		}
++	}
++	if (pgprot_val(prot))
++		new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);
++	else
++		new_pte = __pte(0);
++
++	pte = pte_offset_kernel(pmd, vaddr);
++	if (!pte_none(*pte) &&
++	    __pte_val(*pte) != (__pte_val(new_pte) & __supported_pte_mask))
++		pte_ERROR(*pte);
++	set_pte(pte, new_pte);
++
++	/*
++	 * It's enough to flush this one mapping.
++	 * (PGE mappings get flushed as well)
++	 */
++	__flush_tlb_one(vaddr);
++}
++
++static __init void set_pte_phys_ma(unsigned long vaddr,
++				   unsigned long phys, pgprot_t prot)
++{
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte, new_pte;
++
++	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
++
++	pgd = pgd_offset_k(vaddr);
++	if (pgd_none(*pgd)) {
++		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
++		return;
++	}
++	pud = pud_offset(pgd, vaddr);
++	if (pud_none(*pud)) {
++
++		pmd = (pmd_t *) spp_getpage(); 
++		make_page_readonly(pmd, XENFEAT_writable_page_tables);
++		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
++		if (pmd != pmd_offset(pud, 0)) {
++			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
++			return;
++		}
++	}
++	pmd = pmd_offset(pud, vaddr);
++	if (pmd_none(*pmd)) {
++		pte = (pte_t *) spp_getpage();
++		make_page_readonly(pte, XENFEAT_writable_page_tables);
++		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
++		if (pte != pte_offset_kernel(pmd, 0)) {
++			printk("PAGETABLE BUG #02!\n");
++			return;
++		}
++	}
++	new_pte = pfn_pte_ma(phys >> PAGE_SHIFT, prot);
++
++	pte = pte_offset_kernel(pmd, vaddr);
++	if (!pte_none(*pte) &&
++#ifdef CONFIG_ACPI
++	    /* __acpi_map_table() fails to properly call clear_fixmap() */
++	    (vaddr < __fix_to_virt(FIX_ACPI_END) ||
++	     vaddr > __fix_to_virt(FIX_ACPI_BEGIN)) &&
++#endif
++	    __pte_val(*pte) != (__pte_val(new_pte) & __supported_pte_mask))
++		pte_ERROR(*pte);
++	set_pte(pte, new_pte);
++
++	/*
++	 * It's enough to flush this one mapping.
++	 * (PGE mappings get flushed as well)
++	 */
++	__flush_tlb_one(vaddr);
++}
++
++/* NOTE: this is meant to be run only at boot */
++void __init 
++__set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
++{
++	unsigned long address = __fix_to_virt(idx);
++
++	if (idx >= __end_of_fixed_addresses) {
++		printk("Invalid __set_fixmap\n");
++		return;
++	}
++	switch (idx) {
++	case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
++		set_pte_phys(address, phys, prot, 0);
++		set_pte_phys(address, phys, prot, 1);
++		break;
++	default:
++		set_pte_phys_ma(address, phys, prot);
++		break;
++	}
++}
++
++unsigned long __initdata table_start, table_end; 
++
++static __meminit void *alloc_static_page(unsigned long *phys)
++{
++	unsigned long va = (start_pfn << PAGE_SHIFT) + __START_KERNEL_map;
++
++	if (after_bootmem) {
++		void *adr = (void *)get_zeroed_page(GFP_ATOMIC);
++
++		*phys = __pa(adr);
++		return adr;
++	}
++
++	*phys = start_pfn << PAGE_SHIFT;
++	start_pfn++;
++	memset((void *)va, 0, PAGE_SIZE);
++	return (void *)va;
++} 
++
++#define PTE_SIZE PAGE_SIZE
++
++static inline int make_readonly(unsigned long paddr)
++{
++	extern char __vsyscall_0;
++	int readonly = 0;
++
++	/* Make new page tables read-only. */
++	if (!xen_feature(XENFEAT_writable_page_tables)
++	    && (paddr >= (table_start << PAGE_SHIFT))
++	    && (paddr < (table_end << PAGE_SHIFT)))
++		readonly = 1;
++	/* Make old page tables read-only. */
++	if (!xen_feature(XENFEAT_writable_page_tables)
++	    && (paddr >= (xen_start_info->pt_base - __START_KERNEL_map))
++	    && (paddr < (start_pfn << PAGE_SHIFT)))
++		readonly = 1;
++
++	/*
++	 * No need for writable mapping of kernel image. This also ensures that
++	 * page and descriptor tables embedded inside don't have writable
++	 * mappings. Exclude the vsyscall area here, allowing alternative
++	 * instruction patching to work.
++	 */
++	if ((paddr >= __pa_symbol(&_text)) && (paddr < __pa_symbol(&_end))
++	    && !(paddr >= __pa_symbol(&__vsyscall_0)
++	         && paddr < __pa_symbol(&__vsyscall_0) + PAGE_SIZE))
++		readonly = 1;
++
++	return readonly;
++}
++
++#ifndef CONFIG_XEN
++/* Must run before zap_low_mappings */
++__init void *early_ioremap(unsigned long addr, unsigned long size)
++{
++	unsigned long map = round_down(addr, LARGE_PAGE_SIZE);
++
++	/* actually usually some more */
++	if (size >= LARGE_PAGE_SIZE) {
++		printk("SMBIOS area too long %lu\n", size);
++		return NULL;
++	}
++	set_pmd(temp_mappings[0].pmd,  __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
++	map += LARGE_PAGE_SIZE;
++	set_pmd(temp_mappings[1].pmd,  __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
++	__flush_tlb();
++	return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
++}
++
++/* To avoid virtual aliases later */
++__init void early_iounmap(void *addr, unsigned long size)
++{
++	if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
++		printk("early_iounmap: bad address %p\n", addr);
++	set_pmd(temp_mappings[0].pmd, __pmd(0));
++	set_pmd(temp_mappings[1].pmd, __pmd(0));
++	__flush_tlb();
++}
++#endif
++
++static void __meminit
++phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
++{
++	int i, k;
++
++	for (i = 0; i < PTRS_PER_PMD; pmd++, i++) {
++		unsigned long pte_phys;
++		pte_t *pte, *pte_save;
++
++		if (address >= end)
++			break;
++		pte = alloc_static_page(&pte_phys);
++		pte_save = pte;
++		for (k = 0; k < PTRS_PER_PTE; pte++, k++, address += PTE_SIZE) {
++			unsigned long pteval = address | _PAGE_NX | _KERNPG_TABLE;
++
++			if (address >= (after_bootmem
++			                ? end
++			                : xen_start_info->nr_pages << PAGE_SHIFT))
++				pteval = 0;
++			else if (make_readonly(address))
++				pteval &= ~_PAGE_RW;
++			set_pte(pte, __pte(pteval & __supported_pte_mask));
++		}
++		if (!after_bootmem) {
++			early_make_page_readonly(pte_save, XENFEAT_writable_page_tables);
++			*pmd = __pmd(pte_phys | _KERNPG_TABLE);
++		} else {
++			make_page_readonly(pte_save, XENFEAT_writable_page_tables);
++			set_pmd(pmd, __pmd(pte_phys | _KERNPG_TABLE));
++		}
++	}
++}
++
++static void __meminit
++phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
++{
++	pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));
++
++	if (pmd_none(*pmd)) {
++		spin_lock(&init_mm.page_table_lock);
++		phys_pmd_init(pmd, address, end);
++		spin_unlock(&init_mm.page_table_lock);
++		__flush_tlb_all();
++	}
++}
++
++static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
++{ 
++	long i = pud_index(address);
++
++	pud = pud + i;
++
++	if (after_bootmem && pud_val(*pud)) {
++		phys_pmd_update(pud, address, end);
++		return;
++	}
++
++	for (; i < PTRS_PER_PUD; pud++, i++) {
++		unsigned long paddr, pmd_phys;
++		pmd_t *pmd;
++
++		paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
++		if (paddr >= end)
++			break;
++
++		pmd = alloc_static_page(&pmd_phys);
++
++		spin_lock(&init_mm.page_table_lock);
++		*pud = __pud(pmd_phys | _KERNPG_TABLE);
++		phys_pmd_init(pmd, paddr, end);
++		spin_unlock(&init_mm.page_table_lock);
++
++		early_make_page_readonly(pmd, XENFEAT_writable_page_tables);
++	}
++	__flush_tlb();
++} 
++
++void __init xen_init_pt(void)
++{
++	unsigned long addr, *page;
++
++	/* Find the initial pte page that was built for us. */
++	page = (unsigned long *)xen_start_info->pt_base;
++	addr = page[pgd_index(__START_KERNEL_map)];
++	addr_to_page(addr, page);
++	addr = page[pud_index(__START_KERNEL_map)];
++	addr_to_page(addr, page);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++	/* On Xen 3.0.2 and older we may need to explicitly specify _PAGE_USER
++	   in kernel PTEs. We check that here. */
++	if (HYPERVISOR_xen_version(XENVER_version, NULL) <= 0x30000) {
++		unsigned long *pg;
++		pte_t pte;
++
++		/* Mess with the initial mapping of page 0. It's not needed. */
++		BUILD_BUG_ON(__START_KERNEL <= __START_KERNEL_map);
++		addr = page[pmd_index(__START_KERNEL_map)];
++		addr_to_page(addr, pg);
++		pte.pte = pg[pte_index(__START_KERNEL_map)];
++		BUG_ON(!(pte.pte & _PAGE_PRESENT));
++
++		/* If _PAGE_USER isn't set, we obviously do not need it. */
++		if (pte.pte & _PAGE_USER) {
++			/* _PAGE_USER is needed, but is it set implicitly? */
++			pte.pte &= ~_PAGE_USER;
++			if ((HYPERVISOR_update_va_mapping(__START_KERNEL_map,
++							  pte, 0) != 0) ||
++			    !(pg[pte_index(__START_KERNEL_map)] & _PAGE_USER))
++				/* We need to explicitly specify _PAGE_USER. */
++				__kernel_page_user = _PAGE_USER;
++		}
++	}
++#endif
++
++	/* Construct mapping of initial pte page in our own directories. */
++	init_level4_pgt[pgd_index(__START_KERNEL_map)] = 
++		__pgd(__pa_symbol(level3_kernel_pgt) | _PAGE_TABLE);
++	level3_kernel_pgt[pud_index(__START_KERNEL_map)] = 
++		__pud(__pa_symbol(level2_kernel_pgt) | _PAGE_TABLE);
++	memcpy(level2_kernel_pgt, page, PAGE_SIZE);
++
++	__user_pgd(init_level4_pgt)[pgd_index(VSYSCALL_START)] =
++		__pgd(__pa_symbol(level3_user_pgt) | _PAGE_TABLE);
++
++	early_make_page_readonly(init_level4_pgt,
++				 XENFEAT_writable_page_tables);
++	early_make_page_readonly(__user_pgd(init_level4_pgt),
++				 XENFEAT_writable_page_tables);
++	early_make_page_readonly(level3_kernel_pgt,
++				 XENFEAT_writable_page_tables);
++	early_make_page_readonly(level3_user_pgt,
++				 XENFEAT_writable_page_tables);
++	early_make_page_readonly(level2_kernel_pgt,
++				 XENFEAT_writable_page_tables);
++
++	if (!xen_feature(XENFEAT_writable_page_tables)) {
++		xen_pgd_pin(__pa_symbol(init_level4_pgt));
++		xen_pgd_pin(__pa_symbol(__user_pgd(init_level4_pgt)));
++	}
++}
++
++static void __init extend_init_mapping(unsigned long tables_space)
++{
++	unsigned long va = __START_KERNEL_map;
++	unsigned long phys, addr, *pte_page;
++	pmd_t *pmd;
++	pte_t *pte, new_pte;
++	unsigned long *page = (unsigned long *)init_level4_pgt;
++
++	addr = page[pgd_index(va)];
++	addr_to_page(addr, page);
++	addr = page[pud_index(va)];
++	addr_to_page(addr, page);
++
++	/* Kill mapping of low 1MB. */
++	while (va < (unsigned long)&_text) {
++		if (HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0))
++			BUG();
++		va += PAGE_SIZE;
++	}
++
++	/* Ensure init mappings cover kernel text/data and initial tables. */
++	while (va < (__START_KERNEL_map
++		     + (start_pfn << PAGE_SHIFT)
++		     + tables_space)) {
++		pmd = (pmd_t *)&page[pmd_index(va)];
++		if (pmd_none(*pmd)) {
++			pte_page = alloc_static_page(&phys);
++			early_make_page_readonly(
++				pte_page, XENFEAT_writable_page_tables);
++			set_pmd(pmd, __pmd(phys | _KERNPG_TABLE));
++		} else {
++			addr = page[pmd_index(va)];
++			addr_to_page(addr, pte_page);
++		}
++		pte = (pte_t *)&pte_page[pte_index(va)];
++		if (pte_none(*pte)) {
++			new_pte = pfn_pte(
++				(va - __START_KERNEL_map) >> PAGE_SHIFT, 
++				__pgprot(_KERNPG_TABLE));
++			xen_l1_entry_update(pte, new_pte);
++		}
++		va += PAGE_SIZE;
++	}
++
++	/* Finally, blow away any spurious initial mappings. */
++	while (1) {
++		pmd = (pmd_t *)&page[pmd_index(va)];
++		if (pmd_none(*pmd))
++			break;
++		if (HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0))
++			BUG();
++		va += PAGE_SIZE;
++	}
++}
++
++static void __init find_early_table_space(unsigned long end)
++{
++	unsigned long puds, pmds, ptes, tables;
++
++	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
++	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
++	ptes = (end + PTE_SIZE - 1) >> PAGE_SHIFT;
++
++	tables = round_up(puds * 8, PAGE_SIZE) + 
++		round_up(pmds * 8, PAGE_SIZE) + 
++		round_up(ptes * 8, PAGE_SIZE); 
++
++	extend_init_mapping(tables);
++
++	table_start = start_pfn;
++	table_end = table_start + (tables>>PAGE_SHIFT);
++
++	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
++		end, table_start << PAGE_SHIFT,
++		(table_start << PAGE_SHIFT) + tables);
++}
++
++static void xen_finish_init_mapping(void)
++{
++	unsigned long i, start, end;
++
++	/* Re-vector virtual addresses pointing into the initial
++	   mapping to the just-established permanent ones. */
++	xen_start_info = __va(__pa(xen_start_info));
++	xen_start_info->pt_base = (unsigned long)
++		__va(__pa(xen_start_info->pt_base));
++	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++		phys_to_machine_mapping =
++			__va(__pa(xen_start_info->mfn_list));
++		xen_start_info->mfn_list = (unsigned long)
++			phys_to_machine_mapping;
++	}
++	if (xen_start_info->mod_start)
++		xen_start_info->mod_start = (unsigned long)
++			__va(__pa(xen_start_info->mod_start));
++
++	/* Destroy the Xen-created mappings beyond the kernel image as
++	 * well as the temporary mappings created above. Prevents
++	 * overlap with modules area (if init mapping is very big).
++	 */
++	start = PAGE_ALIGN((unsigned long)_end);
++	end   = __START_KERNEL_map + (table_end << PAGE_SHIFT);
++	for (; start < end; start += PAGE_SIZE)
++		if (HYPERVISOR_update_va_mapping(start, __pte_ma(0), 0))
++			BUG();
++
++	/* Allocate pte's for initial fixmaps from 'start_pfn' allocator. */
++	table_end = ~0UL;
++
++	/*
++	 * Prefetch pte's for the bt_ioremap() area. It gets used before the
++	 * boot-time allocator is online, so allocate-on-demand would fail.
++	 */
++	for (i = FIX_BTMAP_END; i <= FIX_BTMAP_BEGIN; i++)
++		__set_fixmap(i, 0, __pgprot(0));
++
++	/* Switch to the real shared_info page, and clear the dummy page. */
++	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
++	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
++	memset(empty_zero_page, 0, sizeof(empty_zero_page));
++
++	/* Set up mapping of lowest 1MB of physical memory. */
++	for (i = 0; i < NR_FIX_ISAMAPS; i++)
++		if (is_initial_xendomain())
++			set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
++		else
++			__set_fixmap(FIX_ISAMAP_BEGIN - i,
++				     virt_to_mfn(empty_zero_page)
++				     << PAGE_SHIFT,
++				     PAGE_KERNEL_RO);
++
++	/* Disable the 'start_pfn' allocator. */
++	table_end = start_pfn;
++}
++
++/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
++   This runs before bootmem is initialized and gets pages directly from the 
++   physical memory. To access them they are temporarily mapped. */
++void __meminit init_memory_mapping(unsigned long start, unsigned long end)
++{ 
++	unsigned long next;
++
++	Dprintk("init_memory_mapping\n");
++
++	/* 
++	 * Find space for the kernel direct mapping tables.
++	 * Later we should allocate these tables in the local node of the memory
++	 * mapped.  Unfortunately this is done currently before the nodes are 
++	 * discovered.
++	 */
++	if (!after_bootmem)
++		find_early_table_space(end);
++
++	start = (unsigned long)__va(start);
++	end = (unsigned long)__va(end);
++
++	for (; start < end; start = next) {
++		unsigned long pud_phys; 
++		pgd_t *pgd = pgd_offset_k(start);
++		pud_t *pud;
++
++		if (after_bootmem)
++			pud = pud_offset(pgd, start & PGDIR_MASK);
++		else
++			pud = alloc_static_page(&pud_phys);
++		next = start + PGDIR_SIZE;
++		if (next > end) 
++			next = end; 
++		phys_pud_init(pud, __pa(start), __pa(next));
++		if (!after_bootmem) {
++			early_make_page_readonly(pud, XENFEAT_writable_page_tables);
++			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
++		}
++	}
++
++	if (!after_bootmem) {
++		BUG_ON(start_pfn != table_end);
++		xen_finish_init_mapping();
++	}
++
++	__flush_tlb_all();
++}
++
++void __cpuinit zap_low_mappings(int cpu)
++{
++	/* this is not required for Xen */
++#if 0
++	swap_low_mappings();
++#endif
++}
++
++/* Compute zone sizes for the DMA and DMA32 zones in a node. */
++__init void
++size_zones(unsigned long *z, unsigned long *h,
++	   unsigned long start_pfn, unsigned long end_pfn)
++{
++ 	int i;
++ 	unsigned long w;
++
++ 	for (i = 0; i < MAX_NR_ZONES; i++)
++ 		z[i] = 0;
++
++ 	if (start_pfn < MAX_DMA_PFN)
++ 		z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
++ 	if (start_pfn < MAX_DMA32_PFN) {
++ 		unsigned long dma32_pfn = MAX_DMA32_PFN;
++ 		if (dma32_pfn > end_pfn)
++ 			dma32_pfn = end_pfn;
++ 		z[ZONE_DMA32] = dma32_pfn - start_pfn;
++ 	}
++ 	z[ZONE_NORMAL] = end_pfn - start_pfn;
++
++ 	/* Remove lower zones from higher ones. */
++ 	w = 0;
++ 	for (i = 0; i < MAX_NR_ZONES; i++) {
++ 		if (z[i])
++ 			z[i] -= w;
++ 		w += z[i];
++	}
++
++	/* Compute holes */
++	w = start_pfn;
++	for (i = 0; i < MAX_NR_ZONES; i++) {
++		unsigned long s = w;
++		w += z[i];
++		h[i] = e820_hole_size(s, w);
++	}
++
++	/* Add the space needed for mem_map to the holes too. */
++	for (i = 0; i < MAX_NR_ZONES; i++)
++		h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;
++
++	/* The 16MB DMA zone has the kernel and other misc mappings.
++ 	   Account them too */
++	if (h[ZONE_DMA]) {
++		h[ZONE_DMA] += dma_reserve;
++		if (h[ZONE_DMA] >= z[ZONE_DMA]) {
++			printk(KERN_WARNING
++				"Kernel too large and filling up ZONE_DMA?\n");
++			h[ZONE_DMA] = z[ZONE_DMA];
++		}
++	}
++}
++
++#ifndef CONFIG_NUMA
++void __init paging_init(void)
++{
++	unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];
++
++	memory_present(0, 0, end_pfn);
++	sparse_init();
++	size_zones(zones, holes, 0, end_pfn);
++	free_area_init_node(0, NODE_DATA(0), zones,
++			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
++
++	init_mm.context.pinned = 1;
++}
++#endif
++
++/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
++   from the CPU leading to inconsistent cache lines. address and size
++   must be aligned to 2MB boundaries. 
++   Does nothing when the mapping doesn't exist. */
++void __init clear_kernel_mapping(unsigned long address, unsigned long size) 
++{
++	unsigned long end = address + size;
++
++	BUG_ON(address & ~LARGE_PAGE_MASK);
++	BUG_ON(size & ~LARGE_PAGE_MASK); 
++	
++	for (; address < end; address += LARGE_PAGE_SIZE) { 
++		pgd_t *pgd = pgd_offset_k(address);
++		pud_t *pud;
++		pmd_t *pmd;
++		if (pgd_none(*pgd))
++			continue;
++		pud = pud_offset(pgd, address);
++		if (pud_none(*pud))
++			continue; 
++		pmd = pmd_offset(pud, address);
++		if (!pmd || pmd_none(*pmd))
++			continue; 
++		if (0 == (__pmd_val(*pmd) & _PAGE_PSE)) {
++			/* Could handle this, but it should not happen currently. */
++			printk(KERN_ERR 
++	       "clear_kernel_mapping: mapping has been split. will leak memory\n"); 
++			pmd_ERROR(*pmd); 
++		}
++		set_pmd(pmd, __pmd(0)); 		
++	}
++	__flush_tlb_all();
++} 
++
++/*
++ * Memory hotplug specific functions
++ */
++void online_page(struct page *page)
++{
++	ClearPageReserved(page);
++	init_page_count(page);
++	__free_page(page);
++	totalram_pages++;
++	num_physpages++;
++}
++
++#ifdef CONFIG_MEMORY_HOTPLUG
++/*
++ * XXX: memory_add_physaddr_to_nid() is meant to find the node id for a
++ *	physical address via the sysfs probe interface. When ACPI signals a
++ *	hot-add event the node id can be found by searching the DSDT, but the
++ *	probe interface carries no node id, so return 0 for now.
++ */
++#ifdef CONFIG_NUMA
++int memory_add_physaddr_to_nid(u64 start)
++{
++	return 0;
++}
++#endif
++
++/*
++ * Memory is always added to the NORMAL zone. This means you will never get
++ * additional DMA/DMA32 memory.
++ */
++int arch_add_memory(int nid, u64 start, u64 size)
++{
++	struct pglist_data *pgdat = NODE_DATA(nid);
++	struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
++	unsigned long start_pfn = start >> PAGE_SHIFT;
++	unsigned long nr_pages = size >> PAGE_SHIFT;
++	int ret;
++
++	ret = __add_pages(zone, start_pfn, nr_pages);
++	if (ret)
++		goto error;
++
++	init_memory_mapping(start, (start + size -1));
++
++	return ret;
++error:
++	printk("%s: Problem encountered in __add_pages!\n", __func__);
++	return ret;
++}
++EXPORT_SYMBOL_GPL(arch_add_memory);
++
++int remove_memory(u64 start, u64 size)
++{
++	return -EINVAL;
++}
++EXPORT_SYMBOL_GPL(remove_memory);
++
++#else /* CONFIG_MEMORY_HOTPLUG */
++/*
++ * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
++ * just online the pages.
++ */
++int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
++{
++	int err = -EIO;
++	unsigned long pfn;
++	unsigned long total = 0, mem = 0;
++	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
++		if (pfn_valid(pfn)) {
++			online_page(pfn_to_page(pfn));
++			err = 0;
++			mem++;
++		}
++		total++;
++	}
++	if (!err) {
++		z->spanned_pages += total;
++		z->present_pages += mem;
++		z->zone_pgdat->node_spanned_pages += total;
++		z->zone_pgdat->node_present_pages += mem;
++	}
++	return err;
++}
++#endif /* CONFIG_MEMORY_HOTPLUG */
++
++static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
++			 kcore_vsyscall;
++
++void __init mem_init(void)
++{
++	long codesize, reservedpages, datasize, initsize;
++	unsigned long pfn;
++
++	contiguous_bitmap = alloc_bootmem_low_pages(
++		(end_pfn + 2*BITS_PER_LONG) >> 3);
++	BUG_ON(!contiguous_bitmap);
++	memset(contiguous_bitmap, 0, (end_pfn + 2*BITS_PER_LONG) >> 3);
++
++	pci_iommu_alloc();
++
++	/* How many end-of-memory variables you have, grandma! */
++	max_low_pfn = end_pfn;
++	max_pfn = end_pfn;
++	num_physpages = end_pfn;
++	high_memory = (void *) __va(end_pfn * PAGE_SIZE);
++
++	/* clear the zero-page */
++	memset(empty_zero_page, 0, PAGE_SIZE);
++
++	reservedpages = 0;
++
++	/* this will put all low memory onto the freelists */
++#ifdef CONFIG_NUMA
++	totalram_pages = numa_free_all_bootmem();
++#else
++	totalram_pages = free_all_bootmem();
++#endif
++	/* XEN: init and count pages outside initial allocation. */
++	for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
++		ClearPageReserved(pfn_to_page(pfn));
++		init_page_count(pfn_to_page(pfn));
++		totalram_pages++;
++	}
++	reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);
++
++	after_bootmem = 1;
++
++	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
++	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
++	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
++
++	/* Register memory areas for /proc/kcore */
++	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); 
++	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, 
++		   VMALLOC_END-VMALLOC_START);
++	kclist_add(&kcore_kernel, &_stext, _end - _stext);
++	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
++	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START, 
++				 VSYSCALL_END - VSYSCALL_START);
++
++	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
++		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
++		end_pfn << (PAGE_SHIFT-10),
++		codesize >> 10,
++		reservedpages << (PAGE_SHIFT-10),
++		datasize >> 10,
++		initsize >> 10);
++
++#ifndef CONFIG_XEN
++#ifdef CONFIG_SMP
++	/*
++	 * Sync boot_level4_pgt mappings with the init_level4_pgt
++	 * except for the low identity mappings which are already zapped
++	 * in init_level4_pgt. This sync-up is essential for AP's bringup
++	 */
++	memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
++#endif
++#endif
++}
++
++void free_init_pages(char *what, unsigned long begin, unsigned long end)
++{
++	unsigned long addr;
++
++	if (begin >= end)
++		return;
++
++	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
++	for (addr = begin; addr < end; addr += PAGE_SIZE) {
++		ClearPageReserved(virt_to_page(addr));
++		init_page_count(virt_to_page(addr));
++		memset((void *)(addr & ~(PAGE_SIZE-1)),
++		       POISON_FREE_INITMEM, PAGE_SIZE); 
++		if (addr >= __START_KERNEL_map) {
++			/* make_readonly() reports all kernel addresses. */
++			__make_page_writable(__va(__pa(addr)));
++			if (HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) {
++				pgd_t *pgd = pgd_offset_k(addr);
++				pud_t *pud = pud_offset(pgd, addr);
++				pmd_t *pmd = pmd_offset(pud, addr);
++				pte_t *pte = pte_offset_kernel(pmd, addr);
++
++				xen_l1_entry_update(pte, __pte(0)); /* fallback */
++			}
++		}
++		free_page(addr);
++		totalram_pages++;
++	}
++}
++
++void free_initmem(void)
++{
++	memset(__initdata_begin, POISON_FREE_INITDATA,
++		__initdata_end - __initdata_begin);
++	free_init_pages("unused kernel memory",
++			(unsigned long)(&__init_begin),
++			(unsigned long)(&__init_end));
++}
++
++#ifdef CONFIG_DEBUG_RODATA
++
++void mark_rodata_ro(void)
++{
++	unsigned long addr = (unsigned long)__start_rodata;
++
++	for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
++		change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);
++
++	printk ("Write protecting the kernel read-only data: %luk\n",
++			(__end_rodata - __start_rodata) >> 10);
++
++	/*
++	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
++	 * We do it after the printk so that, if something went wrong in the
++	 * change, the printk at least gets out and gives a better debug
++	 * hint as to the culprit.
++	 */
++	global_flush_tlb();
++}
++#endif
++
++#ifdef CONFIG_BLK_DEV_INITRD
++void free_initrd_mem(unsigned long start, unsigned long end)
++{
++	free_init_pages("initrd memory", start, end);
++}
++#endif
++
++void __init reserve_bootmem_generic(unsigned long phys, unsigned len) 
++{ 
++	/* Should check here against the e820 map to avoid double free */ 
++#ifdef CONFIG_NUMA
++	int nid = phys_to_nid(phys);
++	reserve_bootmem_node(NODE_DATA(nid), phys, len);
++#else
++	reserve_bootmem(phys, len);
++#endif
++	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
++		dma_reserve += len / PAGE_SIZE;
++}
++
++int kern_addr_valid(unsigned long addr) 
++{ 
++	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
++
++	if (above != 0 && above != -1UL)
++		return 0; 
++	
++	pgd = pgd_offset_k(addr);
++	if (pgd_none(*pgd))
++		return 0;
++
++	pud = pud_offset(pgd, addr);
++	if (pud_none(*pud))
++		return 0; 
++
++	pmd = pmd_offset(pud, addr);
++	if (pmd_none(*pmd))
++		return 0;
++	if (pmd_large(*pmd))
++		return pfn_valid(pmd_pfn(*pmd));
++
++	pte = pte_offset_kernel(pmd, addr);
++	if (pte_none(*pte))
++		return 0;
++	return pfn_valid(pte_pfn(*pte));
++}
++
++#ifdef CONFIG_SYSCTL
++#include <linux/sysctl.h>
++
++extern int exception_trace, page_fault_trace;
++
++static ctl_table debug_table2[] = {
++	{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
++	  proc_dointvec },
++	{ 0, }
++}; 
++
++static ctl_table debug_root_table2[] = { 
++	{ .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555, 
++	   .child = debug_table2 }, 
++	{ 0 }, 
++}; 
++
++static __init int x8664_sysctl_init(void)
++{ 
++	register_sysctl_table(debug_root_table2, 1);
++	return 0;
++}
++__initcall(x8664_sysctl_init);
++#endif
++
++/* A pseudo VMA to allow ptrace access to the vsyscall page.  This only
++   covers the 64-bit vsyscall page now; 32-bit has a real VMA and no
++   longer needs special handling. */
++
++static struct vm_area_struct gate_vma = {
++	.vm_start = VSYSCALL_START,
++	.vm_end = VSYSCALL_END,
++	.vm_page_prot = PAGE_READONLY
++};
++
++struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
++{
++#ifdef CONFIG_IA32_EMULATION
++	if (test_tsk_thread_flag(tsk, TIF_IA32))
++		return NULL;
++#endif
++	return &gate_vma;
++}
++
++int in_gate_area(struct task_struct *task, unsigned long addr)
++{
++	struct vm_area_struct *vma = get_gate_vma(task);
++	if (!vma)
++		return 0;
++	return (addr >= vma->vm_start) && (addr < vma->vm_end);
++}
++
++/* Use this when you have no reliable task/vma, typically from interrupt
++ * context.  It is less reliable than using the task's vma and may give
++ * false positives.
++ */
++int in_gate_area_no_task(unsigned long addr)
++{
++	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
++}
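
The non-sparsemem __add_pages() above is the whole hot-add story for this
configuration: walk the pfn range, online every valid page, then credit the
zone with the spanned (all) and present (backed) counts. A minimal userspace
sketch of that bookkeeping, with zone_stub and pfn_usable() as hypothetical
stand-ins for struct zone and pfn_valid():

#include <stdio.h>

/* Hypothetical stand-ins for struct zone and pfn_valid(); not kernel API. */
struct zone_stub {
	unsigned long spanned_pages;	/* pfns covered, holes included */
	unsigned long present_pages;	/* pfns actually backed by memory */
};

static int pfn_usable(unsigned long pfn)
{
	return pfn % 5 != 0;		/* pretend every 5th pfn is a hole */
}

/* Same shape as the non-sparsemem __add_pages() above: online what is
 * valid, then credit the zone with both counts. */
static int add_pages_stub(struct zone_stub *z, unsigned long start_pfn,
			  unsigned long nr_pages)
{
	unsigned long pfn, total = 0, mem = 0;
	int err = -5;			/* -EIO in the kernel version */

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		if (pfn_usable(pfn)) {
			/* kernel: online_page(pfn_to_page(pfn)); */
			err = 0;
			mem++;
		}
		total++;
	}
	if (!err) {
		z->spanned_pages += total;
		z->present_pages += mem;
	}
	return err;
}

int main(void)
{
	struct zone_stub z = { 0, 0 };

	if (!add_pages_stub(&z, 100, 20))
		printf("spanned=%lu present=%lu\n",
		       z.spanned_pages, z.present_pages);
	return 0;
}

As in the kernel loop, the call succeeds as long as at least one pfn in the
range was usable.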
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/mm/pageattr-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/x86_64/mm/pageattr-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,502 @@
++/* 
++ * Copyright 2002 Andi Kleen, SuSE Labs. 
++ * Thanks to Ben LaHaise for precious feedback.
++ */ 
++
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/highmem.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <asm/uaccess.h>
++#include <asm/processor.h>
++#include <asm/tlbflush.h>
++#include <asm/io.h>
++
++#ifdef CONFIG_XEN
++#include <asm/pgalloc.h>
++#include <asm/mmu_context.h>
++
++LIST_HEAD(mm_unpinned);
++DEFINE_SPINLOCK(mm_unpinned_lock);
++
++static void _pin_lock(struct mm_struct *mm, int lock) {
++	if (lock)
++		spin_lock(&mm->page_table_lock);
++#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
++	/* While mm->page_table_lock protects us against insertions and
++	 * removals of higher level page table pages, it doesn't protect
++	 * against updates of PTEs. Such updates, however, require the
++	 * pte pages to be in a consistent state (unpinned+writable or
++	 * pinned+readonly). The pinning and attribute changes, however,
++	 * cannot be done atomically, which is why such updates must be
++	 * prevented from happening concurrently.
++	 * Note that no pte lock may ever be acquired elsewhere nesting
++	 * with an already acquired one in the same mm, or with the mm's
++	 * page_table_lock already held, as that would break in the
++	 * non-split case (where all of these actually resolve to the
++	 * one page_table_lock). Thus acquiring all of them here is not
++	 * going to result in deadlocks, and the order of acquisition
++	 * doesn't matter.
++	 */
++	{
++		pgd_t *pgd = mm->pgd;
++		unsigned g;
++
++		for (g = 0; g <= ((TASK_SIZE64-1) / PGDIR_SIZE); g++, pgd++) {
++			pud_t *pud;
++			unsigned u;
++
++			if (pgd_none(*pgd))
++				continue;
++			pud = pud_offset(pgd, 0);
++			for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
++				pmd_t *pmd;
++				unsigned m;
++
++				if (pud_none(*pud))
++					continue;
++				pmd = pmd_offset(pud, 0);
++				for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
++					spinlock_t *ptl;
++
++					if (pmd_none(*pmd))
++						continue;
++					ptl = pte_lockptr(0, pmd);
++					if (lock)
++						spin_lock(ptl);
++					else
++						spin_unlock(ptl);
++				}
++			}
++		}
++	}
++#endif
++	if (!lock)
++		spin_unlock(&mm->page_table_lock);
++}
++#define pin_lock(mm) _pin_lock(mm, 1)
++#define pin_unlock(mm) _pin_lock(mm, 0)
++
++#define PIN_BATCH 8
++static DEFINE_PER_CPU(multicall_entry_t[PIN_BATCH], pb_mcl);
++
++static inline unsigned int mm_walk_set_prot(void *pt, pgprot_t flags,
++                                            unsigned int cpu, unsigned int seq)
++{
++	struct page *page = virt_to_page(pt);
++	unsigned long pfn = page_to_pfn(page);
++
++	MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq,
++		(unsigned long)__va(pfn << PAGE_SHIFT),
++		pfn_pte(pfn, flags), 0);
++	if (unlikely(++seq == PIN_BATCH)) {
++		if (unlikely(HYPERVISOR_multicall_check(per_cpu(pb_mcl, cpu),
++	                                                PIN_BATCH, NULL)))
++			BUG();
++		seq = 0;
++	}
++
++	return seq;
++}
++
++static void mm_walk(struct mm_struct *mm, pgprot_t flags)
++{
++	pgd_t       *pgd;
++	pud_t       *pud;
++	pmd_t       *pmd;
++	pte_t       *pte;
++	int          g,u,m;
++	unsigned int cpu, seq;
++	multicall_entry_t *mcl;
++
++	pgd = mm->pgd;
++	cpu = get_cpu();
++
++	/*
++	 * Cannot iterate up to USER_PTRS_PER_PGD as these pagetables may not
++	 * be the 'current' task's pagetables (e.g., current may be 32-bit,
++	 * but the pagetables may be for a 64-bit task).
++	 * Subtracting 1 from TASK_SIZE64 means the loop limit is correct
++	 * regardless of whether TASK_SIZE64 is a multiple of PGDIR_SIZE.
++	 */
++	for (g = 0, seq = 0; g <= ((TASK_SIZE64-1) / PGDIR_SIZE); g++, pgd++) {
++		if (pgd_none(*pgd))
++			continue;
++		pud = pud_offset(pgd, 0);
++		if (PTRS_PER_PUD > 1) /* not folded */ 
++			seq = mm_walk_set_prot(pud,flags,cpu,seq);
++		for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
++			if (pud_none(*pud))
++				continue;
++			pmd = pmd_offset(pud, 0);
++			if (PTRS_PER_PMD > 1) /* not folded */ 
++				seq = mm_walk_set_prot(pmd,flags,cpu,seq);
++			for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
++				if (pmd_none(*pmd))
++					continue;
++				pte = pte_offset_kernel(pmd,0);
++				seq = mm_walk_set_prot(pte,flags,cpu,seq);
++			}
++		}
++	}
++
++	mcl = per_cpu(pb_mcl, cpu);
++	if (unlikely(seq > PIN_BATCH - 2)) {
++		if (unlikely(HYPERVISOR_multicall_check(mcl, seq, NULL)))
++			BUG();
++		seq = 0;
++	}
++	MULTI_update_va_mapping(mcl + seq,
++	       (unsigned long)__user_pgd(mm->pgd),
++	       pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT, flags),
++	       0);
++	MULTI_update_va_mapping(mcl + seq + 1,
++	       (unsigned long)mm->pgd,
++	       pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, flags),
++	       UVMF_TLB_FLUSH);
++	if (unlikely(HYPERVISOR_multicall_check(mcl, seq + 2, NULL)))
++		BUG();
++
++	put_cpu();
++}
++
++void mm_pin(struct mm_struct *mm)
++{
++	if (xen_feature(XENFEAT_writable_page_tables))
++		return;
++
++	pin_lock(mm);
++
++	mm_walk(mm, PAGE_KERNEL_RO);
++	xen_pgd_pin(__pa(mm->pgd)); /* kernel */
++	xen_pgd_pin(__pa(__user_pgd(mm->pgd))); /* user */
++	mm->context.pinned = 1;
++	spin_lock(&mm_unpinned_lock);
++	list_del(&mm->context.unpinned);
++	spin_unlock(&mm_unpinned_lock);
++
++	pin_unlock(mm);
++}
++
++void mm_unpin(struct mm_struct *mm)
++{
++	if (xen_feature(XENFEAT_writable_page_tables))
++		return;
++
++	pin_lock(mm);
++
++	xen_pgd_unpin(__pa(mm->pgd));
++	xen_pgd_unpin(__pa(__user_pgd(mm->pgd)));
++	mm_walk(mm, PAGE_KERNEL);
++	mm->context.pinned = 0;
++	spin_lock(&mm_unpinned_lock);
++	list_add(&mm->context.unpinned, &mm_unpinned);
++	spin_unlock(&mm_unpinned_lock);
++
++	pin_unlock(mm);
++}
++
++void mm_pin_all(void)
++{
++	if (xen_feature(XENFEAT_writable_page_tables))
++		return;
++
++	/*
++	 * Allow uninterrupted access to the mm_unpinned list. We don't
++	 * actually take the mm_unpinned_lock as it is taken inside mm_pin().
++	 * All other CPUs must be at a safe point (e.g., in stop_machine
++	 * or offlined entirely).
++	 */
++	preempt_disable();
++	while (!list_empty(&mm_unpinned))	
++		mm_pin(list_entry(mm_unpinned.next, struct mm_struct,
++				  context.unpinned));
++	preempt_enable();
++}
++
++void _arch_dup_mmap(struct mm_struct *mm)
++{
++	if (!mm->context.pinned)
++		mm_pin(mm);
++}
++
++void _arch_exit_mmap(struct mm_struct *mm)
++{
++	struct task_struct *tsk = current;
++
++	task_lock(tsk);
++
++	/*
++	 * We aggressively remove the defunct pgd from cr3: we execute
++	 * unmap_vmas() *much* faster this way, as avoiding TLB flushes
++	 * allows bigger writable-pagetable (wrpt) batches.
++	 */
++	if (tsk->active_mm == mm) {
++		tsk->active_mm = &init_mm;
++		atomic_inc(&init_mm.mm_count);
++
++		switch_mm(mm, &init_mm, tsk);
++
++		atomic_dec(&mm->mm_count);
++		BUG_ON(atomic_read(&mm->mm_count) == 0);
++	}
++
++	task_unlock(tsk);
++
++	if ( mm->context.pinned && (atomic_read(&mm->mm_count) == 1) &&
++	     !mm->context.has_foreign_mappings )
++		mm_unpin(mm);
++}
++
++struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
++{
++	struct page *pte;
++
++	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
++	if (pte) {
++		SetPageForeign(pte, pte_free);
++		init_page_count(pte);
++	}
++	return pte;
++}
++
++void pte_free(struct page *pte)
++{
++	unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);
++
++	if (!pte_write(*virt_to_ptep(va)))
++		if (HYPERVISOR_update_va_mapping(
++			va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0))
++			BUG();
++
++	ClearPageForeign(pte);
++	init_page_count(pte);
++
++	__free_page(pte);
++}
++#endif	/* CONFIG_XEN */
++
++pte_t *lookup_address(unsigned long address) 
++{ 
++	pgd_t *pgd = pgd_offset_k(address);
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
++	if (pgd_none(*pgd))
++		return NULL;
++	pud = pud_offset(pgd, address);
++	if (!pud_present(*pud))
++		return NULL; 
++	pmd = pmd_offset(pud, address);
++	if (!pmd_present(*pmd))
++		return NULL; 
++	if (pmd_large(*pmd))
++		return (pte_t *)pmd;
++	pte = pte_offset_kernel(pmd, address);
++	if (pte && !pte_present(*pte))
++		pte = NULL; 
++	return pte;
++} 
++
++static struct page *split_large_page(unsigned long address, pgprot_t prot,
++				     pgprot_t ref_prot)
++{ 
++	int i; 
++	unsigned long addr;
++	struct page *base = alloc_pages(GFP_KERNEL, 0);
++	pte_t *pbase;
++	if (!base) 
++		return NULL;
++	/*
++	 * page_private is used to track the number of entries in
++	 * the page table page that have non-standard attributes.
++	 */
++	SetPagePrivate(base);
++	page_private(base) = 0;
++
++	address = __pa(address);
++	addr = address & LARGE_PAGE_MASK; 
++	pbase = (pte_t *)page_address(base);
++	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
++		pbase[i] = pfn_pte(addr >> PAGE_SHIFT, 
++				   addr == address ? prot : ref_prot);
++	}
++	return base;
++} 
++
++
++static void flush_kernel_map(void *address) 
++{
++	if (0 && address && cpu_has_clflush) {
++		/* is this worth it? */ 
++		int i;
++		for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) 
++			asm volatile("clflush (%0)" :: "r" (address + i)); 
++	} else
++		asm volatile("wbinvd":::"memory"); 
++	if (address)
++		__flush_tlb_one(address);
++	else
++		__flush_tlb_all();
++}
++
++
++static inline void flush_map(unsigned long address)
++{	
++	on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
++}
++
++static struct page *deferred_pages; /* protected by init_mm.mmap_sem */
++
++static inline void save_page(struct page *fpage)
++{
++	fpage->lru.next = (struct list_head *)deferred_pages;
++	deferred_pages = fpage;
++}
++
++/* 
++ * No more special protections in this 2/4MB area - revert to a
++ * large page again. 
++ */
++static void revert_page(unsigned long address, pgprot_t ref_prot)
++{
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t large_pte;
++
++	pgd = pgd_offset_k(address);
++	BUG_ON(pgd_none(*pgd));
++	pud = pud_offset(pgd,address);
++	BUG_ON(pud_none(*pud));
++	pmd = pmd_offset(pud, address);
++	BUG_ON(__pmd_val(*pmd) & _PAGE_PSE);
++	pgprot_val(ref_prot) |= _PAGE_PSE;
++	large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
++	set_pte((pte_t *)pmd, large_pte);
++}      
++
++static int
++__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
++				   pgprot_t ref_prot)
++{ 
++	pte_t *kpte; 
++	struct page *kpte_page;
++	unsigned kpte_flags;
++	pgprot_t ref_prot2;
++	kpte = lookup_address(address);
++	if (!kpte) return 0;
++	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
++	kpte_flags = pte_val(*kpte); 
++	if (pgprot_val(prot) != pgprot_val(ref_prot)) { 
++		if ((kpte_flags & _PAGE_PSE) == 0) { 
++			set_pte(kpte, pfn_pte(pfn, prot));
++		} else {
++ 			/*
++			 * split_large_page will take the reference for this
++			 * change_page_attr on the split page.
++ 			 */
++
++			struct page *split;
++			ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address))) & ~(1<<_PAGE_BIT_PSE));
++
++			split = split_large_page(address, prot, ref_prot2);
++			if (!split)
++				return -ENOMEM;
++			set_pte(kpte,mk_pte(split, ref_prot2));
++			kpte_page = split;
++		}	
++		page_private(kpte_page)++;
++	} else if ((kpte_flags & _PAGE_PSE) == 0) { 
++		set_pte(kpte, pfn_pte(pfn, ref_prot));
++		BUG_ON(page_private(kpte_page) == 0);
++		page_private(kpte_page)--;
++	} else
++		BUG();
++
++	/* on x86-64 the direct mapping set at boot is not using 4k pages */
++	/*
++	 * ..., but the XEN guest kernels (currently) do:
++	 * If the pte was reserved, it means it was created at boot
++	 * time (not via split_large_page) and in turn we must not
++	 * replace it with a large page.
++	 */
++#ifndef CONFIG_XEN
++ 	BUG_ON(PageReserved(kpte_page));
++#else
++	if (PageReserved(kpte_page))
++		return 0;
++#endif
++
++	if (page_private(kpte_page) == 0) {
++		save_page(kpte_page);
++		revert_page(address, ref_prot);
++	}
++	return 0;
++} 
++
++/*
++ * Change the page attributes of a page in the linear mapping.
++ *
++ * This should be used when a page is mapped with a different caching policy
++ * than write-back somewhere - some CPUs do not like it when mappings with
++ * different caching policies exist. This changes the page attributes of the
++ * in-kernel linear mapping too.
++ *
++ * The caller needs to ensure that there are no conflicting mappings elsewhere.
++ * This function only deals with the kernel linear map.
++ *
++ * The caller must call global_flush_tlb() after this.
++ */
++int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
++{
++	int err = 0; 
++	int i; 
++
++	down_write(&init_mm.mmap_sem);
++	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
++		unsigned long pfn = __pa(address) >> PAGE_SHIFT;
++
++		err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
++		if (err) 
++			break; 
++		/* Handle kernel mapping too which aliases part of the
++		 * lowmem */
++		if (__pa(address) < KERNEL_TEXT_SIZE) {
++			unsigned long addr2;
++			pgprot_t prot2 = prot;
++			addr2 = __START_KERNEL_map + __pa(address);
++ 			pgprot_val(prot2) &= ~_PAGE_NX;
++			err = __change_page_attr(addr2, pfn, prot2, PAGE_KERNEL_EXEC);
++		} 
++	} 	
++	up_write(&init_mm.mmap_sem); 
++	return err;
++}
++
++/* Don't call this for MMIO areas that may not have a mem_map entry */
++int change_page_attr(struct page *page, int numpages, pgprot_t prot)
++{
++	unsigned long addr = (unsigned long)page_address(page);
++	return change_page_attr_addr(addr, numpages, prot);
++}
++
++void global_flush_tlb(void)
++{ 
++	struct page *dpage;
++
++	down_read(&init_mm.mmap_sem);
++	dpage = xchg(&deferred_pages, NULL);
++	up_read(&init_mm.mmap_sem);
++
++	flush_map((dpage && !dpage->lru.next) ? (unsigned long)page_address(dpage) : 0);
++	while (dpage) {
++		struct page *tmp = dpage;
++		dpage = (struct page *)dpage->lru.next;
++		ClearPagePrivate(tmp);
++		__free_page(tmp);
++	} 
++} 
++
++EXPORT_SYMBOL(change_page_attr);
++EXPORT_SYMBOL(global_flush_tlb);
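
global_flush_tlb() above amortises one expensive flush over every page whose
attributes were reverted: __change_page_attr() chains such pages through
page->lru.next (save_page()), and the flusher detaches the whole chain,
flushes once, and frees. A userspace sketch of the same defer-then-batch
pattern (node, save_node() and global_flush_stub() are illustrative, not
kernel API):

#include <stdio.h>
#include <stdlib.h>

/* In the kernel the chain runs through page->lru.next and the "flush"
 * is a real TLB flush on every CPU. */
struct node {
	struct node *next;
	int id;
};

static struct node *deferred;		/* plays the role of deferred_pages */

static void save_node(struct node *n)	/* cf. save_page() */
{
	n->next = deferred;
	deferred = n;
}

static void global_flush_stub(void)	/* cf. global_flush_tlb() */
{
	struct node *d = deferred;	/* kernel detaches with xchg() */
	deferred = NULL;

	printf("one flush for the whole batch\n");
	while (d) {
		struct node *tmp = d;

		d = d->next;
		printf("freeing deferred node %d\n", tmp->id);
		free(tmp);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->id = i;
		save_node(n);
	}
	global_flush_stub();
	return 0;
}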
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/oprofile/Makefile
+--- a/arch/x86_64/oprofile/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/x86_64/oprofile/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -11,9 +11,15 @@
+ 	oprofilefs.o oprofile_stats.o \
+ 	timer_int.o )
+ 
++ifdef CONFIG_XEN
++XENOPROF_COMMON_OBJS = $(addprefix ../../../drivers/xen/xenoprof/, \
++			 xenoprofile.o)
++OPROFILE-y := xenoprof.o
++else
+ OPROFILE-y := init.o backtrace.o
+ OPROFILE-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_athlon.o op_model_p4.o \
+ 				     op_model_ppro.o
+ OPROFILE-$(CONFIG_X86_IO_APIC)    += nmi_timer_int.o 
+-
+-oprofile-y = $(DRIVER_OBJS) $(addprefix ../../i386/oprofile/, $(OPROFILE-y))
++endif
++oprofile-y = $(DRIVER_OBJS) $(XENOPROF_COMMON_OBJS) \
++	     $(addprefix ../../i386/oprofile/, $(OPROFILE-y))
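
The effect of the conditional above: with CONFIG_XEN set, the native
profiling drivers (init.o, backtrace.o and the APIC/IO-APIC model files) drop
out entirely and oprofile is built from xenoprof.o (still taken from
../../i386/oprofile via the addprefix) plus the shared
drivers/xen/xenoprof/xenoprofile.o; without Xen, XENOPROF_COMMON_OBJS stays
empty and the object list is unchanged from before.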
+diff -r d894e36cfc30 -r 0aa021803deb arch/x86_64/pci/Makefile
+--- a/arch/x86_64/pci/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/arch/x86_64/pci/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -15,8 +15,13 @@
+ 
+ obj-$(CONFIG_NUMA)	+= k8-bus.o
+ 
++# pcifront should be after mmconfig.o and direct.o as it should only
++# take over if direct access to the PCI bus is unavailable
++obj-$(CONFIG_XEN_PCIDEV_FRONTEND)	+= pcifront.o
++
+ direct-y += ../../i386/pci/direct.o
+ acpi-y   += ../../i386/pci/acpi.o
++pcifront-y += ../../i386/pci/pcifront.o
+ legacy-y += ../../i386/pci/legacy.o
+ irq-y    += ../../i386/pci/irq.o
+ common-y += ../../i386/pci/common.o
+diff -r d894e36cfc30 -r 0aa021803deb block/elevator.c
+--- a/block/elevator.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/block/elevator.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -493,6 +493,16 @@
+ 	int ret;
+ 
+ 	while ((rq = __elv_next_request(q)) != NULL) {
++		/*
++		 * Kill the empty barrier place holder, the driver must
++		 * not ever see it.
++		 */
++		if (blk_empty_barrier(rq)) {
++			blkdev_dequeue_request(rq);
++			end_that_request_chunk(rq, 1, 0);
++			end_that_request_last(rq, 1);
++			continue;
++		}
+ 		if (!(rq->flags & REQ_STARTED)) {
+ 			elevator_t *e = q->elevator;
+ 
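
An empty barrier is a zero-length request queued purely for ordering, so the
hunk above completes it inside the elevator rather than handing it to the
driver. A standalone sketch of that dispatch-time filtering (struct req and
is_empty_barrier() are illustrative stand-ins for struct request and
blk_empty_barrier()):

#include <stdio.h>

struct req {
	int barrier;
	unsigned int nr_sectors;
};

static int is_empty_barrier(const struct req *r)
{
	return r->barrier && r->nr_sectors == 0;
}

int main(void)
{
	struct req queue[] = {
		{ 0, 8 },	/* ordinary write */
		{ 1, 0 },	/* empty barrier: ordering only, no payload */
		{ 1, 8 },	/* barrier that also carries data */
	};
	unsigned int i;

	for (i = 0; i < sizeof(queue) / sizeof(queue[0]); i++) {
		if (is_empty_barrier(&queue[i])) {
			printf("req %u: completed in the elevator\n", i);
			continue;	/* the driver never sees it */
		}
		printf("req %u: dispatched to the driver\n", i);
	}
	return 0;
}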
+diff -r d894e36cfc30 -r 0aa021803deb block/ll_rw_blk.c
+--- a/block/ll_rw_blk.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/block/ll_rw_blk.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -483,9 +483,12 @@
+ 	 * Queue ordered sequence.  As we stack them at the head, we
+ 	 * need to queue in reverse order.  Note that we rely on that
+ 	 * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
+-	 * request gets inbetween ordered sequence.
++	 * request gets in between the ordered sequence. If this request is
++	 * an empty barrier, we never need a postflush, since there is no
++	 * data written between the pre- and post-flush. Hence a single
++	 * flush suffices.
+ 	 */
+-	if (q->ordered & QUEUE_ORDERED_POSTFLUSH)
++	if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
+ 		queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
+ 	else
+ 		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
+@@ -2967,7 +2970,7 @@
+ {
+ 	struct block_device *bdev = bio->bi_bdev;
+ 
+-	if (bdev != bdev->bd_contains) {
++	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
+ 		struct hd_struct *p = bdev->bd_part;
+ 		const int rw = bio_data_dir(bio);
+ 
+@@ -3028,7 +3031,7 @@
+ 	might_sleep();
+ 	/* Test device or partition size, when known. */
+ 	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+-	if (maxsector) {
++	if (maxsector && nr_sectors) {
+ 		sector_t sector = bio->bi_sector;
+ 
+ 		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
+@@ -3094,7 +3097,7 @@
+ 		old_dev = bio->bi_bdev->bd_dev;
+ 
+ 		maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+-		if (maxsector) {
++		if (maxsector && nr_sectors) {
+ 			sector_t sector = bio->bi_sector;
+ 
+ 			if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
+@@ -3128,21 +3131,25 @@
+ {
+ 	int count = bio_sectors(bio);
+ 
+-	BIO_BUG_ON(!bio->bi_size);
+-	BIO_BUG_ON(!bio->bi_io_vec);
+ 	bio->bi_rw |= rw;
+-	if (rw & WRITE)
+-		count_vm_events(PGPGOUT, count);
+-	else
+-		count_vm_events(PGPGIN, count);
+ 
+-	if (unlikely(block_dump)) {
+-		char b[BDEVNAME_SIZE];
+-		printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
+-			current->comm, current->pid,
+-			(rw & WRITE) ? "WRITE" : "READ",
+-			(unsigned long long)bio->bi_sector,
+-			bdevname(bio->bi_bdev,b));
++	if (!bio_empty_barrier(bio)) {
++		BIO_BUG_ON(!bio->bi_size);
++		BIO_BUG_ON(!bio->bi_io_vec);
++
++		if (rw & WRITE)
++			count_vm_events(PGPGOUT, count);
++		else
++			count_vm_events(PGPGIN, count);
++
++		if (unlikely(block_dump)) {
++			char b[BDEVNAME_SIZE];
++			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
++				current->comm, current->pid,
++				(rw & WRITE) ? "WRITE" : "READ",
++				(unsigned long long)bio->bi_sector,
++				bdevname(bio->bi_bdev,b));
++		}
+ 	}
+ 
+ 	generic_make_request(bio);
+@@ -3259,6 +3266,13 @@
+ 	total_bytes = bio_nbytes = 0;
+ 	while ((bio = req->bio) != NULL) {
+ 		int nbytes;
++
++		/* For an empty barrier request, the low-level driver must
++		 * store a potential error location in ->sector. We pass
++		 * that back up in ->bi_sector.
++		 */
++		if (blk_empty_barrier(req))
++			bio->bi_sector = req->sector;
+ 
+ 		if (nr_bytes >= bio->bi_size) {
+ 			req->bio = bio->bi_next;
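
The recurring "maxsector && nr_sectors" change above has one purpose: the
end-of-device range check is meaningless for a zero-sector bio, and running
it anyway could fail an empty barrier whose ->bi_sector carries no real
position. A small sketch of the guarded check (check_bio() is illustrative,
not the kernel function):

#include <stdio.h>

static int check_bio(unsigned long long maxsector,
		     unsigned long long sector, unsigned int nr_sectors)
{
	if (maxsector && nr_sectors) {	/* old code tested maxsector only */
		if (maxsector < nr_sectors || maxsector - nr_sectors < sector)
			return -1;	/* would run past end of device */
	}
	return 0;
}

int main(void)
{
	printf("data bio in range:         %d\n", check_bio(1000, 10, 8));
	printf("data bio past the end:     %d\n", check_bio(1000, 999, 8));
	printf("empty barrier, any sector: %d\n", check_bio(1000, 2000, 0));
	return 0;
}

With the old test, the last call would have been flagged as beyond the end
of the device even though it moves no data.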
+diff -r d894e36cfc30 -r 0aa021803deb buildconfigs/conf.linux-native/00_xen_to_native
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/buildconfigs/conf.linux-native/00_xen_to_native	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,86 @@
++# Linux kernel version: 2.6.16.13-native
++# Mon May 15 10:59:54 2006
++#
++CONFIG_X86_PC=y
++# CONFIG_X86_XEN is not set
++# CONFIG_HPET_TIMER is not set
++# CONFIG_SCHED_SMT is not set
++# CONFIG_X86_MCE is not set
++# CONFIG_X86_MSR is not set
++# CONFIG_SWIOTLB is not set
++# CONFIG_EDD is not set
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_SPARSEMEM_ENABLE=y
++CONFIG_ARCH_SELECT_MEMORY_MODEL=y
++CONFIG_SPARSEMEM_STATIC=y
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_HIGHPTE is not set
++# CONFIG_MATH_EMULATION is not set
++# CONFIG_EFI is not set
++CONFIG_IRQBALANCE=y
++# CONFIG_KEXEC is not set
++CONFIG_DOUBLEFAULT=y
++CONFIG_PM_LEGACY=y
++# CONFIG_PM_DEBUG is not set
++# CONFIG_SOFTWARE_SUSPEND is not set
++CONFIG_SUSPEND_SMP=y
++CONFIG_ACPI_SLEEP=y
++CONFIG_ACPI_SLEEP_PROC_FS=y
++# CONFIG_ACPI_SLEEP_PROC_SLEEP is not set
++CONFIG_X86_PM_TIMER=y
++# APM (Advanced Power Management) BIOS Support
++#
++# CONFIG_APM is not set
++
++#
++CONFIG_PCI_BIOS=y
++# CONFIG_XEN_PCIDEV_FRONTEND is not set
++# CONFIG_XEN_PCIDEV_FE_DEBUG is not set
++# CONFIG_PCI_MSI is not set
++# CONFIG_ISA is not set
++# CONFIG_MCA is not set
++# CONFIG_HOTPLUG_PCI_COMPAQ is not set
++# CONFIG_HOTPLUG_PCI_IBM is not set
++# CONFIG_I2O_EXT_ADAPTEC_DMA64 is not set
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_TCG_XEN is not set
++# CONFIG_HUGETLBFS is not set
++# CONFIG_XEN is not set
++# CONFIG_XEN_INTERFACE_VERSION is not set
++
++#
++# XEN
++#
++# CONFIG_XEN_PRIVILEGED_GUEST is not set
++# CONFIG_XEN_UNPRIVILEGED_GUEST is not set
++# CONFIG_XEN_BACKEND is not set
++# CONFIG_XEN_PCIDEV_BACKEND is not set
++# CONFIG_XEN_PCIDEV_BACKEND_VPCI is not set
++# CONFIG_XEN_PCIDEV_BACKEND_PASS is not set
++# CONFIG_XEN_PCIDEV_BE_DEBUG is not set
++# CONFIG_XEN_BLKDEV_BACKEND is not set
++# CONFIG_XEN_BLKDEV_TAP_BE is not set
++# CONFIG_XEN_NETDEV_BACKEND is not set
++# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
++# CONFIG_XEN_NETDEV_LOOPBACK is not set
++# CONFIG_XEN_TPMDEV_BACKEND is not set
++# CONFIG_XEN_BLKDEV_FRONTEND is not set
++# CONFIG_XEN_NETDEV_FRONTEND is not set
++# CONFIG_XEN_BLKDEV_TAP is not set
++# CONFIG_XEN_SCRUB_PAGES is not set
++# CONFIG_XEN_DISABLE_SERIAL is not set
++# CONFIG_XEN_SYSFS is not set
++# CONFIG_XEN_COMPAT_030002_AND_LATER is not set
++# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
++# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
++# CONFIG_XEN_COMPAT_030002 is not set
++# CONFIG_XEN_COMPAT_030004 is not set
++# CONFIG_HAVE_ARCH_ALLOC_SKB is not set
++# CONFIG_HAVE_ARCH_DEV_ALLOC_SKB is not set
++# CONFIG_NO_IDLE_HZ is not set
++CONFIG_X86_HT=y
++# CONFIG_X86_NO_TSS is not set
++# CONFIG_X86_NO_IDT is not set
+diff -r d894e36cfc30 -r 0aa021803deb buildconfigs/create_config.sh
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/buildconfigs/create_config.sh	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,54 @@
++#!/bin/sh
++set -e
++
++
++# Parse arguments
++#
++if [ $# -lt 1 -o $# -gt 4 ]; then
++    echo "Usage: $0 config-file EXTRAVERSION XEN_TARGET_ARCH XEN_SYSTYPE"
++    exit 1
++fi
++
++config_file=$1
++extraversion=$2
++target_arch=$3
++systype=$4
++
++
++# Start with initial config skeleton file, if any.
++# Derive from linux-defconfig_xen_x86_32 otherwise.
++#
++skeleton=buildconfigs/linux-defconfig_${extraversion#-}_${target_arch}${systype}
++[ -r $skeleton ] || skeleton=buildconfigs/linux-defconfig_xen_x86_32
++[ -r $skeleton.local ] && skeleton=$skeleton.local
++cp $skeleton $config_file
++
++echo "Using $skeleton as base config"
++
++# Update
++#
++filter_template="s/^#\{0,1\} *\(CONFIG[^= ]*\).*/\/^#\\\{0,1\\\} *\1[= ].*\/d/p"
++config_dirs="buildconfigs/conf.linux buildconfigs/conf.linux-${target_arch} buildconfigs/conf.linux${extraversion} buildconfigs/conf.linux-${target_arch}${extraversion}"
++
++for config_dir in $config_dirs
++do
++    if [ -d $config_dir ]; then
++        echo "Processing $config_dir..." 1>&2
++        # processing is done in alphanumeric order
++        find $config_dir -type f | sort | while read update
++        do
++            echo "   ... $update" 1>&2
++            # create the filter rules in a temp file
++            filter_rules=`mktemp -t xenupdateconf.XXXXXXXXXX`
++            sed -n "${filter_template}" < $update > $filter_rules
++
++            # filter the config file in place, removing any options that
++            # will be updated.
++            sed -f $filter_rules -i $config_file
++            cat $update >> $config_file
++
++            # clean up
++            rm -f $filter_rules
++        done
++    fi
++done
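
Given the argument parsing at the top of the script, a typical invocation is

    sh buildconfigs/create_config.sh .config -xen x86_32

(XEN_SYSTYPE may be omitted), which starts from
buildconfigs/linux-defconfig_xen_x86_32 and then folds in each conf.linux*
directory in turn. Because every fragment first generates sed rules that
delete any option it mentions and only then appends its own settings, later
(more specific) directories always override earlier ones.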
+diff -r d894e36cfc30 -r 0aa021803deb buildconfigs/linux-defconfig_xen0_ia64
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/buildconfigs/linux-defconfig_xen0_ia64	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1703 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.18.8
++# Tue Feb 19 11:20:09 2008
++#
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_BSD_PROCESS_ACCT=y
++# CONFIG_BSD_PROCESS_ACCT_V3 is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_AUDIT is not set
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++# CONFIG_CPUSETS is not set
++# CONFIG_RELAY is not set
++CONFIG_INITRAMFS_SOURCE=""
++CONFIG_CC_OPTIMIZE_FOR_SIZE=y
++# CONFIG_EMBEDDED is not set
++CONFIG_SYSCTL=y
++CONFIG_KALLSYMS=y
++CONFIG_KALLSYMS_ALL=y
++CONFIG_KALLSYMS_EXTRA_PASS=y
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_SLAB=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_SLOB is not set
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++CONFIG_KMOD=y
++CONFIG_STOP_MACHINE=y
++
++#
++# Block layer
++#
++# CONFIG_BLK_DEV_IO_TRACE is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Processor type and features
++#
++CONFIG_IA64=y
++CONFIG_64BIT=y
++CONFIG_MMU=y
++CONFIG_SWIOTLB=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_TIME_INTERPOLATION=y
++CONFIG_DMI=y
++CONFIG_EFI=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_XEN=y
++CONFIG_XEN_IA64_EXPOSE_P2M=y
++CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_DMA_IS_DMA32=y
++CONFIG_AUDIT_ARCH=y
++CONFIG_IA64_GENERIC=y
++# CONFIG_IA64_DIG is not set
++# CONFIG_IA64_HP_ZX1 is not set
++# CONFIG_IA64_HP_ZX1_SWIOTLB is not set
++# CONFIG_IA64_SGI_SN2 is not set
++# CONFIG_IA64_HP_SIM is not set
++# CONFIG_IA64_XEN is not set
++# CONFIG_ITANIUM is not set
++CONFIG_MCKINLEY=y
++# CONFIG_IA64_PAGE_SIZE_4KB is not set
++# CONFIG_IA64_PAGE_SIZE_8KB is not set
++CONFIG_IA64_PAGE_SIZE_16KB=y
++# CONFIG_IA64_PAGE_SIZE_64KB is not set
++CONFIG_PGTABLE_3=y
++# CONFIG_PGTABLE_4 is not set
++CONFIG_HZ_100=y
++# CONFIG_HZ_250 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=100
++CONFIG_IA64_L1_CACHE_SHIFT=7
++CONFIG_IA64_CYCLONE=y
++CONFIG_IOSAPIC=y
++# CONFIG_IA64_SGI_SN_XP is not set
++CONFIG_FORCE_MAX_ZONEORDER=11
++CONFIG_SMP=y
++CONFIG_NR_CPUS=16
++CONFIG_HOTPLUG_CPU=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++# CONFIG_SCHED_SMT is not set
++# CONFIG_PERMIT_BSP_REMOVE is not set
++# CONFIG_PREEMPT is not set
++CONFIG_SELECT_MEMORY_MODEL=y
++# CONFIG_FLATMEM_MANUAL is not set
++CONFIG_DISCONTIGMEM_MANUAL=y
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_DISCONTIGMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++CONFIG_NEED_MULTIPLE_NODES=y
++# CONFIG_SPARSEMEM_STATIC is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_MIGRATION is not set
++CONFIG_RESOURCES_64BIT=y
++CONFIG_ARCH_SELECT_MEMORY_MODEL=y
++CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_SPARSEMEM_ENABLE=y
++CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
++CONFIG_NUMA=y
++CONFIG_NODES_SHIFT=10
++CONFIG_VIRTUAL_MEM_MAP=y
++CONFIG_HOLES_IN_ZONE=y
++CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
++CONFIG_HAVE_ARCH_NODEDATA_EXTENSION=y
++# CONFIG_IA32_SUPPORT is not set
++# CONFIG_IA64_MCA_RECOVERY is not set
++CONFIG_PERFMON=y
++CONFIG_IA64_PALINFO=y
++CONFIG_SGI_SN=y
++
++#
++# SN Devices
++#
++# CONFIG_SGI_IOC3 is not set
++CONFIG_KEXEC=y
++
++#
++# Firmware Drivers
++#
++CONFIG_EFI_VARS=y
++CONFIG_EFI_PCDP=y
++CONFIG_BINFMT_ELF=y
++CONFIG_BINFMT_MISC=y
++
++#
++# Power management and ACPI
++#
++CONFIG_PM=y
++CONFIG_PM_LEGACY=y
++# CONFIG_PM_DEBUG is not set
++
++#
++# ACPI (Advanced Configuration and Power Interface) Support
++#
++CONFIG_ACPI=y
++CONFIG_ACPI_BUTTON=y
++CONFIG_ACPI_FAN=y
++# CONFIG_ACPI_DOCK is not set
++CONFIG_ACPI_PROCESSOR=y
++CONFIG_ACPI_HOTPLUG_CPU=y
++CONFIG_ACPI_THERMAL=y
++CONFIG_ACPI_NUMA=y
++CONFIG_ACPI_BLACKLIST_YEAR=0
++# CONFIG_ACPI_DEBUG is not set
++CONFIG_ACPI_EC=y
++CONFIG_ACPI_POWER=y
++CONFIG_ACPI_SYSTEM=y
++CONFIG_ACPI_CONTAINER=y
++
++#
++# CPU Frequency scaling
++#
++# CONFIG_CPU_FREQ is not set
++
++#
++# Bus options (PCI, PCMCIA)
++#
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_XEN_PCIDEV_FRONTEND=y
++# CONFIG_XEN_PCIDEV_FE_DEBUG is not set
++# CONFIG_PCIEPORTBUS is not set
++# CONFIG_PCI_DEBUG is not set
++
++#
++# PCI Hotplug Support
++#
++CONFIG_HOTPLUG_PCI=y
++# CONFIG_HOTPLUG_PCI_FAKE is not set
++CONFIG_HOTPLUG_PCI_ACPI=y
++# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set
++# CONFIG_HOTPLUG_PCI_CPCI is not set
++# CONFIG_HOTPLUG_PCI_SHPC is not set
++# CONFIG_HOTPLUG_PCI_SGI is not set
++
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++# CONFIG_PCCARD is not set
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++# CONFIG_NETDEBUG is not set
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++# CONFIG_IP_PNP_BOOTP is not set
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_IP_MROUTE is not set
++CONFIG_ARPD=y
++CONFIG_SYN_COOKIES=y
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_BIC=y
++
++#
++# IP: Virtual Server Configuration
++#
++# CONFIG_IP_VS is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_BRIDGE_NETFILTER=y
++
++#
++# Core Netfilter Configuration
++#
++# CONFIG_NETFILTER_NETLINK is not set
++# CONFIG_NF_CONNTRACK is not set
++# CONFIG_NETFILTER_XTABLES is not set
++
++#
++# IP: Netfilter Configuration
++#
++# CONFIG_IP_NF_CONNTRACK is not set
++# CONFIG_IP_NF_QUEUE is not set
++
++#
++# Bridge: Netfilter Configuration
++#
++# CONFIG_BRIDGE_NF_EBTABLES is not set
++
++#
++# DCCP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_DCCP is not set
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_SCTP is not set
++
++#
++# TIPC Configuration (EXPERIMENTAL)
++#
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++CONFIG_BRIDGE=y
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++CONFIG_LLC=y
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++
++#
++# QoS and/or fair queueing
++#
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_IEEE80211 is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=y
++# CONFIG_DEBUG_DRIVER is not set
++CONFIG_SYS_HYPERVISOR=y
++
++#
++# Connector - unified userspace <-> kernelspace linker
++#
++# CONFIG_CONNECTOR is not set
++
++#
++# Memory Technology Devices (MTD)
++#
++# CONFIG_MTD is not set
++
++#
++# Parallel port support
++#
++# CONFIG_PARPORT is not set
++
++#
++# Plug and Play support
++#
++# CONFIG_PNP is not set
++
++#
++# Block devices
++#
++# CONFIG_BLK_CPQ_DA is not set
++CONFIG_BLK_CPQ_CISS_DA=y
++# CONFIG_CISS_SCSI_TAPE is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_CRYPTOLOOP=y
++CONFIG_BLK_DEV_NBD=m
++# CONFIG_BLK_DEV_SX8 is not set
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=4096
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++CONFIG_BLK_DEV_INITRD=y
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++CONFIG_IDE=y
++CONFIG_IDE_MAX_HWIFS=4
++CONFIG_BLK_DEV_IDE=y
++
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++CONFIG_BLK_DEV_IDEDISK=y
++# CONFIG_IDEDISK_MULTI_MODE is not set
++CONFIG_BLK_DEV_IDECD=y
++# CONFIG_BLK_DEV_IDETAPE is not set
++CONFIG_BLK_DEV_IDEFLOPPY=y
++CONFIG_BLK_DEV_IDESCSI=y
++# CONFIG_IDE_TASK_IOCTL is not set
++
++#
++# IDE chipset support/bugfixes
++#
++# CONFIG_IDE_GENERIC is not set
++CONFIG_BLK_DEV_IDEPCI=y
++# CONFIG_IDEPCI_SHARE_IRQ is not set
++# CONFIG_BLK_DEV_OFFBOARD is not set
++CONFIG_BLK_DEV_GENERIC=y
++# CONFIG_BLK_DEV_OPTI621 is not set
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
++CONFIG_IDEDMA_PCI_AUTO=y
++# CONFIG_IDEDMA_ONLYDISK is not set
++# CONFIG_BLK_DEV_AEC62XX is not set
++# CONFIG_BLK_DEV_ALI15X3 is not set
++# CONFIG_BLK_DEV_AMD74XX is not set
++CONFIG_BLK_DEV_CMD64X=y
++# CONFIG_BLK_DEV_TRIFLEX is not set
++# CONFIG_BLK_DEV_CY82C693 is not set
++# CONFIG_BLK_DEV_CS5520 is not set
++# CONFIG_BLK_DEV_CS5530 is not set
++# CONFIG_BLK_DEV_HPT34X is not set
++# CONFIG_BLK_DEV_HPT366 is not set
++# CONFIG_BLK_DEV_SC1200 is not set
++CONFIG_BLK_DEV_PIIX=y
++# CONFIG_BLK_DEV_IT821X is not set
++# CONFIG_BLK_DEV_NS87415 is not set
++# CONFIG_BLK_DEV_PDC202XX_OLD is not set
++# CONFIG_BLK_DEV_PDC202XX_NEW is not set
++# CONFIG_BLK_DEV_SVWKS is not set
++# CONFIG_BLK_DEV_SIIMAGE is not set
++# CONFIG_BLK_DEV_SLC90E66 is not set
++# CONFIG_BLK_DEV_TRM290 is not set
++# CONFIG_BLK_DEV_VIA82CXXX is not set
++# CONFIG_IDE_ARM is not set
++CONFIG_BLK_DEV_IDEDMA=y
++# CONFIG_IDEDMA_IVB is not set
++CONFIG_IDEDMA_AUTO=y
++# CONFIG_BLK_DEV_HD is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++CONFIG_CHR_DEV_ST=y
++CONFIG_CHR_DEV_OSST=y
++CONFIG_BLK_DEV_SR=y
++CONFIG_BLK_DEV_SR_VENDOR=y
++CONFIG_CHR_DEV_SG=y
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++
++#
++# SCSI Transport Attributes
++#
++CONFIG_SCSI_SPI_ATTRS=y
++CONFIG_SCSI_FC_ATTRS=y
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++CONFIG_SCSI_SAS_ATTRS=y
++
++#
++# SCSI low-level drivers
++#
++# CONFIG_ISCSI_TCP is not set
++# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
++# CONFIG_SCSI_3W_9XXX is not set
++# CONFIG_SCSI_ACARD is not set
++# CONFIG_SCSI_AACRAID is not set
++# CONFIG_SCSI_AIC7XXX is not set
++# CONFIG_SCSI_AIC7XXX_OLD is not set
++# CONFIG_SCSI_AIC79XX is not set
++# CONFIG_MEGARAID_NEWGEN is not set
++# CONFIG_MEGARAID_LEGACY is not set
++# CONFIG_MEGARAID_SAS is not set
++# CONFIG_SCSI_SATA is not set
++# CONFIG_SCSI_HPTIOP is not set
++# CONFIG_SCSI_DMX3191D is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++# CONFIG_SCSI_IPS is not set
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++CONFIG_SCSI_SYM53C8XX_2=y
++CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
++CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
++CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
++CONFIG_SCSI_SYM53C8XX_MMIO=y
++# CONFIG_SCSI_IPR is not set
++CONFIG_SCSI_QLOGIC_1280=y
++# CONFIG_SCSI_QLA_FC is not set
++# CONFIG_SCSI_LPFC is not set
++# CONFIG_SCSI_DC395x is not set
++# CONFIG_SCSI_DC390T is not set
++# CONFIG_SCSI_DEBUG is not set
++
++#
++# Multi-device support (RAID and LVM)
++#
++CONFIG_MD=y
++# CONFIG_BLK_DEV_MD is not set
++CONFIG_BLK_DEV_DM=y
++CONFIG_DM_CRYPT=m
++CONFIG_DM_SNAPSHOT=y
++CONFIG_DM_MIRROR=m
++CONFIG_DM_ZERO=m
++CONFIG_DM_MULTIPATH=m
++CONFIG_DM_MULTIPATH_EMC=m
++
++#
++# Fusion MPT device support
++#
++CONFIG_FUSION=y
++CONFIG_FUSION_SPI=y
++# CONFIG_FUSION_FC is not set
++CONFIG_FUSION_SAS=y
++CONFIG_FUSION_MAX_SGE=128
++# CONFIG_FUSION_CTL is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_IEEE1394 is not set
++
++#
++# I2O device support
++#
++# CONFIG_I2O is not set
++
++#
++# Network device support
++#
++CONFIG_NETDEVICES=y
++CONFIG_DUMMY=y
++# CONFIG_BONDING is not set
++# CONFIG_EQUALIZER is not set
++CONFIG_TUN=y
++
++#
++# ARCnet devices
++#
++CONFIG_ARCNET=y
++# CONFIG_ARCNET_1201 is not set
++# CONFIG_ARCNET_1051 is not set
++# CONFIG_ARCNET_RAW is not set
++# CONFIG_ARCNET_CAP is not set
++# CONFIG_ARCNET_COM90xx is not set
++# CONFIG_ARCNET_COM90xxIO is not set
++# CONFIG_ARCNET_RIM_I is not set
++# CONFIG_ARCNET_COM20020 is not set
++
++#
++# PHY device support
++#
++# CONFIG_PHYLIB is not set
++
++#
++# Ethernet (10 or 100Mbit)
++#
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
++
++#
++# Tulip family network device support
++#
++CONFIG_NET_TULIP=y
++# CONFIG_DE2104X is not set
++CONFIG_TULIP=y
++CONFIG_TULIP_MWI=y
++CONFIG_TULIP_MMIO=y
++CONFIG_TULIP_NAPI=y
++CONFIG_TULIP_NAPI_HW_MITIGATION=y
++# CONFIG_DE4X5 is not set
++# CONFIG_WINBOND_840 is not set
++# CONFIG_DM9102 is not set
++# CONFIG_ULI526X is not set
++# CONFIG_HP100 is not set
++CONFIG_NET_PCI=y
++# CONFIG_PCNET32 is not set
++# CONFIG_AMD8111_ETH is not set
++# CONFIG_ADAPTEC_STARFIRE is not set
++# CONFIG_B44 is not set
++# CONFIG_FORCEDETH is not set
++# CONFIG_DGRS is not set
++CONFIG_EEPRO100=y
++CONFIG_E100=y
++# CONFIG_FEALNX is not set
++# CONFIG_NATSEMI is not set
++# CONFIG_NE2K_PCI is not set
++# CONFIG_8139CP is not set
++# CONFIG_8139TOO is not set
++# CONFIG_SIS900 is not set
++# CONFIG_EPIC100 is not set
++# CONFIG_SUNDANCE is not set
++# CONFIG_VIA_RHINE is not set
++
++#
++# Ethernet (1000 Mbit)
++#
++# CONFIG_ACENIC is not set
++# CONFIG_DL2K is not set
++CONFIG_E1000=y
++# CONFIG_E1000_NAPI is not set
++# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++# CONFIG_SK98LIN is not set
++# CONFIG_VIA_VELOCITY is not set
++CONFIG_TIGON3=y
++# CONFIG_BNX2 is not set
++
++#
++# Ethernet (10000 Mbit)
++#
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++# CONFIG_SFC is not set
++
++#
++# Token Ring devices
++#
++# CONFIG_TR is not set
++
++#
++# Wireless LAN (non-hamradio)
++#
++# CONFIG_NET_RADIO is not set
++
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_NET_FC is not set
++# CONFIG_SHAPER is not set
++CONFIG_NETCONSOLE=y
++CONFIG_NETPOLL=y
++# CONFIG_NETPOLL_RX is not set
++# CONFIG_NETPOLL_TRAP is not set
++CONFIG_NET_POLL_CONTROLLER=y
++
++#
++# ISDN subsystem
++#
++CONFIG_ISDN=m
++
++#
++# Old ISDN4Linux
++#
++# CONFIG_ISDN_I4L is not set
++
++#
++# CAPI subsystem
++#
++# CONFIG_ISDN_CAPI is not set
++
++#
++# Telephony Support
++#
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++CONFIG_INPUT_JOYDEV=y
++# CONFIG_INPUT_TSDEV is not set
++CONFIG_INPUT_EVDEV=y
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ATKBD=y
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++# CONFIG_MOUSE_SERIAL is not set
++# CONFIG_MOUSE_VSXXXAA is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++# CONFIG_SERIO_SERPORT is not set
++# CONFIG_SERIO_PCIPS2 is not set
++CONFIG_SERIO_LIBPS2=y
++# CONFIG_SERIO_RAW is not set
++CONFIG_GAMEPORT=y
++# CONFIG_GAMEPORT_NS558 is not set
++# CONFIG_GAMEPORT_L4 is not set
++# CONFIG_GAMEPORT_EMU10K1 is not set
++# CONFIG_GAMEPORT_FM801 is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++# CONFIG_VT_HW_CONSOLE_BINDING is not set
++CONFIG_SERIAL_NONSTANDARD=y
++# CONFIG_COMPUTONE is not set
++# CONFIG_ROCKETPORT is not set
++# CONFIG_CYCLADES is not set
++# CONFIG_DIGIEPCA is not set
++# CONFIG_MOXA_INTELLIO is not set
++# CONFIG_MOXA_SMARTIO is not set
++# CONFIG_ISI is not set
++# CONFIG_SYNCLINKMP is not set
++# CONFIG_SYNCLINK_GT is not set
++# CONFIG_N_HDLC is not set
++# CONFIG_SPECIALIX is not set
++# CONFIG_SX is not set
++# CONFIG_RIO is not set
++# CONFIG_STALDRV is not set
++# CONFIG_SGI_SNSC is not set
++# CONFIG_SGI_TIOCX is not set
++
++#
++# Serial drivers
++#
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_SGI_L1_CONSOLE is not set
++# CONFIG_SERIAL_JSM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++
++#
++# IPMI
++#
++# CONFIG_IPMI_HANDLER is not set
++
++#
++# Watchdog Cards
++#
++# CONFIG_WATCHDOG is not set
++# CONFIG_HW_RANDOM is not set
++CONFIG_EFI_RTC=y
++# CONFIG_DTLK is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++
++#
++# Ftape, the floppy tape device driver
++#
++CONFIG_AGP=y
++# CONFIG_AGP_SIS is not set
++# CONFIG_AGP_VIA is not set
++CONFIG_AGP_I460=y
++# CONFIG_AGP_HP_ZX1 is not set
++# CONFIG_AGP_SGI_TIOCA is not set
++CONFIG_DRM=y
++# CONFIG_DRM_TDFX is not set
++# CONFIG_DRM_R128 is not set
++# CONFIG_DRM_RADEON is not set
++# CONFIG_DRM_MGA is not set
++# CONFIG_DRM_SIS is not set
++# CONFIG_DRM_VIA is not set
++# CONFIG_DRM_SAVAGE is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_HPET is not set
++# CONFIG_HANGCHECK_TIMER is not set
++# CONFIG_MMTIMER is not set
++
++#
++# TPM devices
++#
++# CONFIG_TCG_TPM is not set
++# CONFIG_TELCLOCK is not set
++
++#
++# I2C support
++#
++CONFIG_I2C=y
++CONFIG_I2C_CHARDEV=y
++
++#
++# I2C Algorithms
++#
++CONFIG_I2C_ALGOBIT=y
++CONFIG_I2C_ALGOPCF=y
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++# CONFIG_I2C_AMD8111 is not set
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_I810 is not set
++# CONFIG_I2C_PIIX4 is not set
++# CONFIG_I2C_NFORCE2 is not set
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_PROSAVAGE is not set
++# CONFIG_I2C_SAVAGE4 is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++# CONFIG_I2C_STUB is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++# CONFIG_I2C_VOODOO3 is not set
++# CONFIG_I2C_PCA_ISA is not set
++
++#
++# Miscellaneous I2C Chip support
++#
++# CONFIG_SENSORS_DS1337 is not set
++# CONFIG_SENSORS_DS1374 is not set
++# CONFIG_SENSORS_EEPROM is not set
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCA9539 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_MAX6875 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++
++#
++# Dallas's 1-wire bus
++#
++
++#
++# Hardware Monitoring support
++#
++CONFIG_HWMON=y
++# CONFIG_HWMON_VID is not set
++# CONFIG_SENSORS_ABITUGURU is not set
++# CONFIG_SENSORS_ADM1021 is not set
++# CONFIG_SENSORS_ADM1025 is not set
++# CONFIG_SENSORS_ADM1026 is not set
++# CONFIG_SENSORS_ADM1031 is not set
++# CONFIG_SENSORS_ADM9240 is not set
++# CONFIG_SENSORS_ASB100 is not set
++# CONFIG_SENSORS_ATXP1 is not set
++# CONFIG_SENSORS_DS1621 is not set
++# CONFIG_SENSORS_F71805F is not set
++# CONFIG_SENSORS_FSCHER is not set
++# CONFIG_SENSORS_FSCPOS is not set
++# CONFIG_SENSORS_GL518SM is not set
++# CONFIG_SENSORS_GL520SM is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_LM63 is not set
++# CONFIG_SENSORS_LM75 is not set
++# CONFIG_SENSORS_LM77 is not set
++# CONFIG_SENSORS_LM78 is not set
++# CONFIG_SENSORS_LM80 is not set
++# CONFIG_SENSORS_LM83 is not set
++# CONFIG_SENSORS_LM85 is not set
++# CONFIG_SENSORS_LM87 is not set
++# CONFIG_SENSORS_LM90 is not set
++# CONFIG_SENSORS_LM92 is not set
++# CONFIG_SENSORS_MAX1619 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_SIS5595 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_SMSC47M192 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_VIA686A is not set
++# CONFIG_SENSORS_VT8231 is not set
++# CONFIG_SENSORS_W83781D is not set
++# CONFIG_SENSORS_W83791D is not set
++# CONFIG_SENSORS_W83792D is not set
++# CONFIG_SENSORS_W83L785TS is not set
++# CONFIG_SENSORS_W83627HF is not set
++# CONFIG_SENSORS_W83627EHF is not set
++# CONFIG_HWMON_DEBUG_CHIP is not set
++
++#
++# Misc devices
++#
++
++#
++# Multimedia devices
++#
++CONFIG_VIDEO_DEV=y
++CONFIG_VIDEO_V4L1=y
++CONFIG_VIDEO_V4L1_COMPAT=y
++CONFIG_VIDEO_V4L2=y
++
++#
++# Video Capture Adapters
++#
++
++#
++# Video Capture Adapters
++#
++# CONFIG_VIDEO_ADV_DEBUG is not set
++# CONFIG_VIDEO_VIVI is not set
++# CONFIG_VIDEO_BT848 is not set
++# CONFIG_VIDEO_CPIA is not set
++# CONFIG_VIDEO_CPIA2 is not set
++# CONFIG_VIDEO_SAA5246A is not set
++# CONFIG_VIDEO_SAA5249 is not set
++# CONFIG_TUNER_3036 is not set
++# CONFIG_VIDEO_STRADIS is not set
++# CONFIG_VIDEO_ZORAN is not set
++# CONFIG_VIDEO_SAA7134 is not set
++# CONFIG_VIDEO_MXB is not set
++# CONFIG_VIDEO_DPC is not set
++# CONFIG_VIDEO_HEXIUM_ORION is not set
++# CONFIG_VIDEO_HEXIUM_GEMINI is not set
++# CONFIG_VIDEO_CX88 is not set
++
++#
++# Encoders and Decoders
++#
++# CONFIG_VIDEO_MSP3400 is not set
++# CONFIG_VIDEO_CS53L32A is not set
++# CONFIG_VIDEO_TLV320AIC23B is not set
++# CONFIG_VIDEO_WM8775 is not set
++# CONFIG_VIDEO_WM8739 is not set
++# CONFIG_VIDEO_CX2341X is not set
++# CONFIG_VIDEO_CX25840 is not set
++# CONFIG_VIDEO_SAA711X is not set
++# CONFIG_VIDEO_SAA7127 is not set
++# CONFIG_VIDEO_UPD64031A is not set
++# CONFIG_VIDEO_UPD64083 is not set
++
++#
++# V4L USB devices
++#
++# CONFIG_VIDEO_PVRUSB2 is not set
++# CONFIG_VIDEO_EM28XX is not set
++# CONFIG_USB_VICAM is not set
++# CONFIG_USB_IBMCAM is not set
++# CONFIG_USB_KONICAWC is not set
++# CONFIG_USB_QUICKCAM_MESSENGER is not set
++# CONFIG_USB_ET61X251 is not set
++# CONFIG_VIDEO_OVCAMCHIP is not set
++# CONFIG_USB_W9968CF is not set
++# CONFIG_USB_OV511 is not set
++# CONFIG_USB_SE401 is not set
++# CONFIG_USB_SN9C102 is not set
++# CONFIG_USB_STV680 is not set
++# CONFIG_USB_ZC0301 is not set
++# CONFIG_USB_PWC is not set
++
++#
++# Radio Adapters
++#
++# CONFIG_RADIO_GEMTEK_PCI is not set
++# CONFIG_RADIO_MAXIRADIO is not set
++# CONFIG_RADIO_MAESTRO is not set
++# CONFIG_USB_DSBR is not set
++
++#
++# Digital Video Broadcasting Devices
++#
++# CONFIG_DVB is not set
++# CONFIG_USB_DABUSB is not set
++
++#
++# Graphics support
++#
++CONFIG_FIRMWARE_EDID=y
++CONFIG_FB=y
++CONFIG_FB_CFB_FILLRECT=y
++CONFIG_FB_CFB_COPYAREA=y
++CONFIG_FB_CFB_IMAGEBLIT=y
++# CONFIG_FB_MACMODES is not set
++# CONFIG_FB_BACKLIGHT is not set
++CONFIG_FB_MODE_HELPERS=y
++# CONFIG_FB_TILEBLITTING is not set
++# CONFIG_FB_CIRRUS is not set
++# CONFIG_FB_PM2 is not set
++# CONFIG_FB_CYBER2000 is not set
++# CONFIG_FB_ASILIANT is not set
++# CONFIG_FB_IMSTT is not set
++# CONFIG_FB_S1D13XXX is not set
++# CONFIG_FB_NVIDIA is not set
++# CONFIG_FB_RIVA is not set
++# CONFIG_FB_MATROX is not set
++# CONFIG_FB_RADEON is not set
++# CONFIG_FB_ATY128 is not set
++# CONFIG_FB_ATY is not set
++# CONFIG_FB_SAVAGE is not set
++# CONFIG_FB_SIS is not set
++# CONFIG_FB_NEOMAGIC is not set
++# CONFIG_FB_KYRO is not set
++# CONFIG_FB_3DFX is not set
++# CONFIG_FB_VOODOO1 is not set
++# CONFIG_FB_TRIDENT is not set
++# CONFIG_FB_VIRTUAL is not set
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++# CONFIG_VGACON_SOFT_SCROLLBACK is not set
++CONFIG_DUMMY_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
++# CONFIG_FONTS is not set
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++
++#
++# Logo configuration
++#
++CONFIG_LOGO=y
++# CONFIG_LOGO_LINUX_MONO is not set
++# CONFIG_LOGO_LINUX_VGA16 is not set
++CONFIG_LOGO_LINUX_CLUT224=y
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Sound
++#
++CONFIG_SOUND=y
++
++#
++# Advanced Linux Sound Architecture
++#
++CONFIG_SND=y
++CONFIG_SND_TIMER=y
++CONFIG_SND_PCM=y
++CONFIG_SND_HWDEP=y
++CONFIG_SND_RAWMIDI=y
++CONFIG_SND_SEQUENCER=y
++CONFIG_SND_SEQ_DUMMY=y
++CONFIG_SND_OSSEMUL=y
++CONFIG_SND_MIXER_OSS=y
++CONFIG_SND_PCM_OSS=y
++CONFIG_SND_PCM_OSS_PLUGINS=y
++CONFIG_SND_SEQUENCER_OSS=y
++# CONFIG_SND_DYNAMIC_MINORS is not set
++CONFIG_SND_SUPPORT_OLD_API=y
++CONFIG_SND_VERBOSE_PROCFS=y
++# CONFIG_SND_VERBOSE_PRINTK is not set
++# CONFIG_SND_DEBUG is not set
++
++#
++# Generic devices
++#
++CONFIG_SND_MPU401_UART=y
++CONFIG_SND_OPL3_LIB=y
++CONFIG_SND_AC97_CODEC=y
++CONFIG_SND_AC97_BUS=y
++CONFIG_SND_DUMMY=y
++CONFIG_SND_VIRMIDI=y
++# CONFIG_SND_MTPAV is not set
++CONFIG_SND_SERIAL_U16550=y
++CONFIG_SND_MPU401=y
++
++#
++# PCI devices
++#
++# CONFIG_SND_AD1889 is not set
++# CONFIG_SND_ALS300 is not set
++# CONFIG_SND_ALI5451 is not set
++CONFIG_SND_ATIIXP=y
++# CONFIG_SND_ATIIXP_MODEM is not set
++# CONFIG_SND_AU8810 is not set
++# CONFIG_SND_AU8820 is not set
++# CONFIG_SND_AU8830 is not set
++# CONFIG_SND_AZT3328 is not set
++# CONFIG_SND_BT87X is not set
++# CONFIG_SND_CA0106 is not set
++# CONFIG_SND_CMIPCI is not set
++# CONFIG_SND_CS4281 is not set
++# CONFIG_SND_CS46XX is not set
++# CONFIG_SND_DARLA20 is not set
++# CONFIG_SND_GINA20 is not set
++# CONFIG_SND_LAYLA20 is not set
++# CONFIG_SND_DARLA24 is not set
++# CONFIG_SND_GINA24 is not set
++# CONFIG_SND_LAYLA24 is not set
++# CONFIG_SND_MONA is not set
++# CONFIG_SND_MIA is not set
++# CONFIG_SND_ECHO3G is not set
++# CONFIG_SND_INDIGO is not set
++# CONFIG_SND_INDIGOIO is not set
++# CONFIG_SND_INDIGODJ is not set
++# CONFIG_SND_EMU10K1 is not set
++# CONFIG_SND_EMU10K1X is not set
++# CONFIG_SND_ENS1370 is not set
++# CONFIG_SND_ENS1371 is not set
++# CONFIG_SND_ES1938 is not set
++# CONFIG_SND_ES1968 is not set
++CONFIG_SND_FM801=y
++# CONFIG_SND_FM801_TEA575X_BOOL is not set
++# CONFIG_SND_HDA_INTEL is not set
++# CONFIG_SND_HDSP is not set
++# CONFIG_SND_HDSPM is not set
++# CONFIG_SND_ICE1712 is not set
++# CONFIG_SND_ICE1724 is not set
++# CONFIG_SND_INTEL8X0 is not set
++# CONFIG_SND_INTEL8X0M is not set
++# CONFIG_SND_KORG1212 is not set
++# CONFIG_SND_MAESTRO3 is not set
++# CONFIG_SND_MIXART is not set
++# CONFIG_SND_NM256 is not set
++# CONFIG_SND_PCXHR is not set
++# CONFIG_SND_RIPTIDE is not set
++# CONFIG_SND_RME32 is not set
++# CONFIG_SND_RME96 is not set
++# CONFIG_SND_RME9652 is not set
++# CONFIG_SND_SONICVIBES is not set
++# CONFIG_SND_TRIDENT is not set
++# CONFIG_SND_VIA82XX is not set
++# CONFIG_SND_VIA82XX_MODEM is not set
++# CONFIG_SND_VX222 is not set
++# CONFIG_SND_YMFPCI is not set
++
++#
++# USB devices
++#
++# CONFIG_SND_USB_AUDIO is not set
++
++#
++# Open Sound System
++#
++CONFIG_SOUND_PRIME=y
++# CONFIG_OSS_OBSOLETE_DRIVER is not set
++# CONFIG_SOUND_BT878 is not set
++# CONFIG_SOUND_ES1371 is not set
++# CONFIG_SOUND_ICH is not set
++# CONFIG_SOUND_TRIDENT is not set
++# CONFIG_SOUND_MSNDCLAS is not set
++# CONFIG_SOUND_MSNDPIN is not set
++# CONFIG_SOUND_VIA82CXXX is not set
++# CONFIG_SOUND_TVMIXER is not set
++
++#
++# USB support
++#
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++CONFIG_USB=y
++# CONFIG_USB_DEBUG is not set
++
++#
++# Miscellaneous USB options
++#
++CONFIG_USB_DEVICEFS=y
++CONFIG_USB_BANDWIDTH=y
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_SUSPEND is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++CONFIG_USB_EHCI_HCD=y
++# CONFIG_USB_EHCI_SPLIT_ISO is not set
++# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
++# CONFIG_USB_EHCI_TT_NEWSCHED is not set
++# CONFIG_USB_ISP116X_HCD is not set
++CONFIG_USB_OHCI_HCD=y
++# CONFIG_USB_OHCI_BIG_ENDIAN is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_UHCI_HCD=y
++# CONFIG_USB_SL811_HCD is not set
++
++#
++# USB Device Class drivers
++#
++# CONFIG_USB_ACM is not set
++# CONFIG_USB_PRINTER is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# may also be needed; see USB_STORAGE Help for more information
++#
++CONFIG_USB_STORAGE=y
++# CONFIG_USB_STORAGE_DEBUG is not set
++# CONFIG_USB_STORAGE_DATAFAB is not set
++# CONFIG_USB_STORAGE_FREECOM is not set
++# CONFIG_USB_STORAGE_ISD200 is not set
++# CONFIG_USB_STORAGE_DPCM is not set
++# CONFIG_USB_STORAGE_USBAT is not set
++# CONFIG_USB_STORAGE_SDDR09 is not set
++# CONFIG_USB_STORAGE_SDDR55 is not set
++# CONFIG_USB_STORAGE_JUMPSHOT is not set
++# CONFIG_USB_STORAGE_ALAUDA is not set
++# CONFIG_USB_LIBUSUAL is not set
++
++#
++# USB Input Devices
++#
++CONFIG_USB_HID=y
++CONFIG_USB_HIDINPUT=y
++# CONFIG_USB_HIDINPUT_POWERBOOK is not set
++# CONFIG_HID_FF is not set
++CONFIG_USB_HIDDEV=y
++# CONFIG_USB_AIPTEK is not set
++# CONFIG_USB_WACOM is not set
++# CONFIG_USB_ACECAD is not set
++# CONFIG_USB_KBTAB is not set
++# CONFIG_USB_POWERMATE is not set
++# CONFIG_USB_TOUCHSCREEN is not set
++# CONFIG_USB_YEALINK is not set
++# CONFIG_USB_XPAD is not set
++# CONFIG_USB_ATI_REMOTE is not set
++# CONFIG_USB_ATI_REMOTE2 is not set
++# CONFIG_USB_KEYSPAN_REMOTE is not set
++# CONFIG_USB_APPLETOUCH is not set
++
++#
++# USB Imaging devices
++#
++# CONFIG_USB_MDC800 is not set
++# CONFIG_USB_MICROTEK is not set
++
++#
++# USB Network Adapters
++#
++# CONFIG_USB_CATC is not set
++# CONFIG_USB_KAWETH is not set
++# CONFIG_USB_PEGASUS is not set
++# CONFIG_USB_RTL8150 is not set
++# CONFIG_USB_USBNET is not set
++CONFIG_USB_MON=y
++
++#
++# USB port drivers
++#
++
++#
++# USB Serial Converter support
++#
++# CONFIG_USB_SERIAL is not set
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++# CONFIG_USB_AUERSWALD is not set
++# CONFIG_USB_RIO500 is not set
++# CONFIG_USB_LEGOTOWER is not set
++# CONFIG_USB_LCD is not set
++# CONFIG_USB_LED is not set
++# CONFIG_USB_CYPRESS_CY7C63 is not set
++# CONFIG_USB_CYTHERM is not set
++# CONFIG_USB_PHIDGETKIT is not set
++# CONFIG_USB_PHIDGETSERVO is not set
++# CONFIG_USB_IDMOUSE is not set
++# CONFIG_USB_APPLEDISPLAY is not set
++# CONFIG_USB_SISUSBVGA is not set
++# CONFIG_USB_LD is not set
++# CONFIG_USB_TEST is not set
++
++#
++# USB DSL modem support
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++
++#
++# MMC/SD Card support
++#
++# CONFIG_MMC is not set
++
++#
++# LED devices
++#
++# CONFIG_NEW_LEDS is not set
++
++#
++# LED drivers
++#
++
++#
++# LED Triggers
++#
++
++#
++# InfiniBand support
++#
++# CONFIG_INFINIBAND is not set
++
++#
++# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
++#
++
++#
++# Real Time Clock
++#
++# CONFIG_RTC_CLASS is not set
++
++#
++# DMA Engine support
++#
++# CONFIG_DMA_ENGINE is not set
++
++#
++# DMA Clients
++#
++
++#
++# DMA Devices
++#
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++CONFIG_EXT2_FS_POSIX_ACL=y
++CONFIG_EXT2_FS_SECURITY=y
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++CONFIG_EXT3_FS_POSIX_ACL=y
++CONFIG_EXT3_FS_SECURITY=y
++CONFIG_JBD=y
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=y
++# CONFIG_REISERFS_CHECK is not set
++# CONFIG_REISERFS_PROC_INFO is not set
++CONFIG_REISERFS_FS_XATTR=y
++CONFIG_REISERFS_FS_POSIX_ACL=y
++CONFIG_REISERFS_FS_SECURITY=y
++# CONFIG_JFS_FS is not set
++CONFIG_FS_POSIX_ACL=y
++CONFIG_XFS_FS=y
++# CONFIG_XFS_QUOTA is not set
++# CONFIG_XFS_SECURITY is not set
++# CONFIG_XFS_POSIX_ACL is not set
++# CONFIG_XFS_RT is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=y
++CONFIG_AUTOFS4_FS=y
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++CONFIG_JOLIET=y
++# CONFIG_ZISOFS is not set
++CONFIG_UDF_FS=y
++CONFIG_UDF_NLS=y
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=y
++CONFIG_MSDOS_FS=y
++CONFIG_VFAT_FS=y
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_IOMEM_MACHINE=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++# CONFIG_CRAMFS is not set
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++CONFIG_NFS_V4=y
++CONFIG_NFS_DIRECTIO=y
++CONFIG_NFSD=y
++CONFIG_NFSD_V3=y
++# CONFIG_NFSD_V3_ACL is not set
++CONFIG_NFSD_V4=y
++CONFIG_NFSD_TCP=y
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++CONFIG_SUNRPC_GSS=y
++CONFIG_RPCSEC_GSS_KRB5=y
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++CONFIG_SMB_FS=y
++CONFIG_SMB_NLS_DEFAULT=y
++CONFIG_SMB_NLS_REMOTE="cp437"
++CONFIG_CIFS=y
++# CONFIG_CIFS_STATS is not set
++# CONFIG_CIFS_WEAK_PW_HASH is not set
++# CONFIG_CIFS_XATTR is not set
++# CONFIG_CIFS_DEBUG2 is not set
++# CONFIG_CIFS_EXPERIMENTAL is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++# CONFIG_9P_FS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++# CONFIG_MAC_PARTITION is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_BSD_DISKLABEL is not set
++# CONFIG_MINIX_SUBPARTITION is not set
++# CONFIG_SOLARIS_X86_PARTITION is not set
++# CONFIG_UNIXWARE_DISKLABEL is not set
++# CONFIG_LDM_PARTITION is not set
++CONFIG_SGI_PARTITION=y
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++CONFIG_EFI_PARTITION=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++CONFIG_NLS_CODEPAGE_737=y
++CONFIG_NLS_CODEPAGE_775=y
++CONFIG_NLS_CODEPAGE_850=y
++CONFIG_NLS_CODEPAGE_852=y
++CONFIG_NLS_CODEPAGE_855=y
++CONFIG_NLS_CODEPAGE_857=y
++CONFIG_NLS_CODEPAGE_860=y
++CONFIG_NLS_CODEPAGE_861=y
++CONFIG_NLS_CODEPAGE_862=y
++CONFIG_NLS_CODEPAGE_863=y
++CONFIG_NLS_CODEPAGE_864=y
++CONFIG_NLS_CODEPAGE_865=y
++CONFIG_NLS_CODEPAGE_866=y
++CONFIG_NLS_CODEPAGE_869=y
++CONFIG_NLS_CODEPAGE_936=y
++CONFIG_NLS_CODEPAGE_950=y
++CONFIG_NLS_CODEPAGE_932=y
++CONFIG_NLS_CODEPAGE_949=y
++CONFIG_NLS_CODEPAGE_874=y
++CONFIG_NLS_ISO8859_8=y
++# CONFIG_NLS_CODEPAGE_1250 is not set
++CONFIG_NLS_CODEPAGE_1251=y
++# CONFIG_NLS_ASCII is not set
++CONFIG_NLS_ISO8859_1=y
++CONFIG_NLS_ISO8859_2=y
++CONFIG_NLS_ISO8859_3=y
++CONFIG_NLS_ISO8859_4=y
++CONFIG_NLS_ISO8859_5=y
++CONFIG_NLS_ISO8859_6=y
++CONFIG_NLS_ISO8859_7=y
++CONFIG_NLS_ISO8859_9=y
++CONFIG_NLS_ISO8859_13=y
++CONFIG_NLS_ISO8859_14=y
++CONFIG_NLS_ISO8859_15=y
++CONFIG_NLS_KOI8_R=y
++CONFIG_NLS_KOI8_U=y
++CONFIG_NLS_UTF8=y
++
++#
++# Library routines
++#
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++CONFIG_CRC32=y
++# CONFIG_LIBCRC32C is not set
++CONFIG_PLIST=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_GENERIC_PENDING_IRQ=y
++CONFIG_IRQ_PER_CPU=y
++
++#
++# HP Simulator drivers
++#
++# CONFIG_HP_SIMETH is not set
++# CONFIG_HP_SIMSERIAL is not set
++# CONFIG_HP_SIMSCSI is not set
++
++#
++# Instrumentation Support
++#
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_UNUSED_SYMBOLS=y
++CONFIG_DEBUG_KERNEL=y
++CONFIG_LOG_BUF_SHIFT=20
++CONFIG_DETECT_SOFTLOCKUP=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++CONFIG_DEBUG_MUTEXES=y
++# CONFIG_DEBUG_RWSEMS is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_DEBUG_VM is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_RCU_TORTURE_TEST is not set
++CONFIG_IA64_GRANULE_16MB=y
++# CONFIG_IA64_GRANULE_64MB is not set
++CONFIG_IA64_PRINT_HAZARDS=y
++# CONFIG_DISABLE_VHPT is not set
++# CONFIG_IA64_DEBUG_CMPXCHG is not set
++# CONFIG_IA64_DEBUG_IRQ is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_CRC32C is not set
++# CONFIG_CRYPTO_TEST is not set
++
++#
++# Hardware crypto devices
++#
++# CONFIG_XEN_SMPBOOT is not set
++# CONFIG_XEN_DEVMEM is not set
++CONFIG_XEN_INTERFACE_VERSION=0x00030207
++
++#
++# XEN
++#
++CONFIG_XEN_PRIVILEGED_GUEST=y
++# CONFIG_XEN_UNPRIVILEGED_GUEST is not set
++CONFIG_XEN_PRIVCMD=y
++CONFIG_XEN_XENBUS_DEV=y
++CONFIG_XEN_BACKEND=y
++CONFIG_XEN_BLKDEV_BACKEND=y
++CONFIG_XEN_BLKDEV_TAP=y
++CONFIG_XEN_NETDEV_BACKEND=y
++# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_BACKEND is not set
++# CONFIG_XEN_NETDEV_LOOPBACK is not set
++CONFIG_XEN_PCIDEV_BACKEND=y
++# CONFIG_XEN_PCIDEV_BACKEND_VPCI is not set
++# CONFIG_XEN_PCIDEV_BACKEND_PASS is not set
++# CONFIG_XEN_PCIDEV_BACKEND_SLOT is not set
++CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER=y
++# CONFIG_XEN_PCIDEV_BE_DEBUG is not set
++CONFIG_XEN_TPMDEV_BACKEND=m
++CONFIG_XEN_SCSI_BACKEND=m
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++CONFIG_XEN_SCSI_FRONTEND=m
++CONFIG_XEN_GRANT_DEV=y
++# CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND is not set
++CONFIG_XEN_FRAMEBUFFER=y
++CONFIG_XEN_KEYBOARD=y
++# CONFIG_XEN_SCRUB_PAGES is not set
++CONFIG_XEN_DISABLE_SERIAL=y
++CONFIG_XEN_SYSFS=y
++CONFIG_XEN_COMPAT_030002_AND_LATER=y
++# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
++# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
++# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
++CONFIG_XEN_COMPAT=0x030002
++CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
++CONFIG_NO_IDLE_HZ=y
++CONFIG_XEN_BALLOON=y
++CONFIG_XEN_XENCOMM=y
+diff -r d894e36cfc30 -r 0aa021803deb buildconfigs/linux-defconfig_xen0_x86_32
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/buildconfigs/linux-defconfig_xen0_x86_32	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1458 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.18.8
++# Tue Oct 16 09:31:19 2007
++#
++CONFIG_X86_32=y
++CONFIG_LOCKDEP_SUPPORT=y
++CONFIG_STACKTRACE_SUPPORT=y
++CONFIG_SEMAPHORE_SLEEPERS=y
++CONFIG_X86=y
++CONFIG_MMU=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_DMI=y
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++# CONFIG_LOCALVERSION_AUTO is not set
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++# CONFIG_CPUSETS is not set
++# CONFIG_RELAY is not set
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++# CONFIG_EMBEDDED is not set
++CONFIG_UID16=y
++CONFIG_SYSCTL=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_SLAB=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_SLOB is not set
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++CONFIG_STOP_MACHINE=y
++
++#
++# Block layer
++#
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++CONFIG_LSF=y
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Processor type and features
++#
++CONFIG_SMP=y
++# CONFIG_X86_PC is not set
++CONFIG_X86_XEN=y
++# CONFIG_X86_ELAN is not set
++# CONFIG_X86_VOYAGER is not set
++# CONFIG_X86_NUMAQ is not set
++# CONFIG_X86_SUMMIT is not set
++# CONFIG_X86_BIGSMP is not set
++# CONFIG_X86_VISWS is not set
++# CONFIG_X86_GENERICARCH is not set
++# CONFIG_X86_ES7000 is not set
++# CONFIG_M386 is not set
++# CONFIG_M486 is not set
++# CONFIG_M586 is not set
++# CONFIG_M586TSC is not set
++# CONFIG_M586MMX is not set
++CONFIG_M686=y
++# CONFIG_MPENTIUMII is not set
++# CONFIG_MPENTIUMIII is not set
++# CONFIG_MPENTIUMM is not set
++# CONFIG_MPENTIUM4 is not set
++# CONFIG_MK6 is not set
++# CONFIG_MK7 is not set
++# CONFIG_MK8 is not set
++# CONFIG_MCRUSOE is not set
++# CONFIG_MEFFICEON is not set
++# CONFIG_MWINCHIPC6 is not set
++# CONFIG_MWINCHIP2 is not set
++# CONFIG_MWINCHIP3D is not set
++# CONFIG_MGEODEGX1 is not set
++# CONFIG_MGEODE_LX is not set
++# CONFIG_MCYRIXIII is not set
++# CONFIG_MVIAC3_2 is not set
++# CONFIG_X86_GENERIC is not set
++CONFIG_X86_CMPXCHG=y
++CONFIG_X86_XADD=y
++CONFIG_X86_L1_CACHE_SHIFT=5
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_X86_PPRO_FENCE=y
++CONFIG_X86_WP_WORKS_OK=y
++CONFIG_X86_INVLPG=y
++CONFIG_X86_BSWAP=y
++CONFIG_X86_POPAD_OK=y
++CONFIG_X86_CMPXCHG64=y
++CONFIG_X86_GOOD_APIC=y
++CONFIG_X86_USE_PPRO_CHECKSUM=y
++CONFIG_NR_CPUS=8
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_PREEMPT_BKL=y
++CONFIG_X86_LOCAL_APIC=y
++CONFIG_X86_IO_APIC=y
++CONFIG_VM86=y
++# CONFIG_TOSHIBA is not set
++# CONFIG_I8K is not set
++# CONFIG_X86_REBOOTFIXUPS is not set
++CONFIG_MICROCODE=y
++CONFIG_X86_MSR=y
++CONFIG_X86_CPUID=y
++CONFIG_SWIOTLB=y
++
++#
++# Firmware Drivers
++#
++CONFIG_EDD=y
++# CONFIG_DELL_RBU is not set
++# CONFIG_DCDBAS is not set
++# CONFIG_NOHIGHMEM is not set
++CONFIG_HIGHMEM4G=y
++# CONFIG_HIGHMEM64G is not set
++CONFIG_PAGE_OFFSET=0xC0000000
++CONFIG_HIGHMEM=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_RESOURCES_64BIT=y
++# CONFIG_HIGHPTE is not set
++CONFIG_MTRR=y
++# CONFIG_REGPARM is not set
++CONFIG_SECCOMP=y
++CONFIG_HZ_100=y
++# CONFIG_HZ_250 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=100
++CONFIG_KEXEC=y
++# CONFIG_CRASH_DUMP is not set
++CONFIG_PHYSICAL_START=0x100000
++CONFIG_HOTPLUG_CPU=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++
++#
++# Power management options (ACPI, APM)
++#
++CONFIG_PM=y
++CONFIG_PM_LEGACY=y
++# CONFIG_PM_DEBUG is not set
++# CONFIG_SOFTWARE_SUSPEND is not set
++CONFIG_SUSPEND_SMP=y
++
++#
++# ACPI (Advanced Configuration and Power Interface) Support
++#
++CONFIG_ACPI=y
++CONFIG_ACPI_SLEEP=y
++CONFIG_ACPI_SLEEP_PROC_FS=y
++# CONFIG_ACPI_SLEEP_PROC_SLEEP is not set
++CONFIG_ACPI_AC=m
++CONFIG_ACPI_BATTERY=m
++CONFIG_ACPI_BUTTON=m
++CONFIG_ACPI_VIDEO=m
++CONFIG_ACPI_HOTKEY=m
++CONFIG_ACPI_FAN=m
++CONFIG_ACPI_DOCK=m
++CONFIG_ACPI_PROCESSOR=m
++CONFIG_ACPI_HOTPLUG_CPU=y
++CONFIG_ACPI_THERMAL=m
++CONFIG_ACPI_ASUS=m
++CONFIG_ACPI_IBM=m
++CONFIG_ACPI_TOSHIBA=m
++# CONFIG_ACPI_CUSTOM_DSDT is not set
++CONFIG_ACPI_BLACKLIST_YEAR=0
++# CONFIG_ACPI_DEBUG is not set
++CONFIG_ACPI_EC=y
++CONFIG_ACPI_POWER=y
++CONFIG_ACPI_SYSTEM=y
++CONFIG_ACPI_CONTAINER=m
++CONFIG_ACPI_PV_SLEEP=y
++
++#
++# CPU Frequency scaling
++#
++# CONFIG_CPU_FREQ is not set
++
++#
++# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
++#
++CONFIG_PCI=y
++# CONFIG_PCI_GOBIOS is not set
++# CONFIG_PCI_GOMMCONFIG is not set
++# CONFIG_PCI_GODIRECT is not set
++# CONFIG_PCI_GOXEN_FE is not set
++CONFIG_PCI_GOANY=y
++CONFIG_PCI_DIRECT=y
++CONFIG_PCI_MMCONFIG=y
++CONFIG_XEN_PCIDEV_FRONTEND=y
++# CONFIG_XEN_PCIDEV_FE_DEBUG is not set
++# CONFIG_PCIEPORTBUS is not set
++CONFIG_PCI_MSI=y
++# CONFIG_PCI_DEBUG is not set
++CONFIG_ISA_DMA_API=y
++# CONFIG_SCx200 is not set
++CONFIG_K8_NB=y
++
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++# CONFIG_PCCARD is not set
++
++#
++# PCI Hotplug Support
++#
++# CONFIG_HOTPLUG_PCI is not set
++
++#
++# Executable file formats
++#
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_AOUT is not set
++# CONFIG_BINFMT_MISC is not set
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++# CONFIG_NETDEBUG is not set
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++# CONFIG_IP_MULTICAST is not set
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++# CONFIG_IP_PNP_BOOTP is not set
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++# CONFIG_INET_DIAG is not set
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_BIC=y
++
++#
++# IP: Virtual Server Configuration
++#
++# CONFIG_IP_VS is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_BRIDGE_NETFILTER=y
++
++#
++# Core Netfilter Configuration
++#
++# CONFIG_NETFILTER_NETLINK is not set
++# CONFIG_NETFILTER_XTABLES is not set
++
++#
++# IP: Netfilter Configuration
++#
++CONFIG_IP_NF_CONNTRACK=m
++CONFIG_IP_NF_CT_ACCT=y
++# CONFIG_IP_NF_CONNTRACK_MARK is not set
++# CONFIG_IP_NF_CONNTRACK_EVENTS is not set
++# CONFIG_IP_NF_CT_PROTO_SCTP is not set
++CONFIG_IP_NF_FTP=m
++# CONFIG_IP_NF_IRC is not set
++# CONFIG_IP_NF_NETBIOS_NS is not set
++# CONFIG_IP_NF_TFTP is not set
++# CONFIG_IP_NF_AMANDA is not set
++# CONFIG_IP_NF_PPTP is not set
++# CONFIG_IP_NF_H323 is not set
++# CONFIG_IP_NF_SIP is not set
++# CONFIG_IP_NF_QUEUE is not set
++
++#
++# Bridge: Netfilter Configuration
++#
++# CONFIG_BRIDGE_NF_EBTABLES is not set
++
++#
++# DCCP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_DCCP is not set
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_SCTP is not set
++
++#
++# TIPC Configuration (EXPERIMENTAL)
++#
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++CONFIG_BRIDGE=y
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++CONFIG_LLC=y
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++
++#
++# QoS and/or fair queueing
++#
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_IEEE80211 is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++# CONFIG_STANDALONE is not set
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_DEBUG_DRIVER is not set
++CONFIG_SYS_HYPERVISOR=y
++
++#
++# Connector - unified userspace <-> kernelspace linker
++#
++# CONFIG_CONNECTOR is not set
++
++#
++# Memory Technology Devices (MTD)
++#
++# CONFIG_MTD is not set
++
++#
++# Parallel port support
++#
++# CONFIG_PARPORT is not set
++
++#
++# Plug and Play support
++#
++CONFIG_PNP=y
++CONFIG_PNP_DEBUG=y
++
++#
++# Protocols
++#
++CONFIG_PNPACPI=y
++
++#
++# Block devices
++#
++CONFIG_BLK_DEV_FD=y
++# CONFIG_BLK_CPQ_DA is not set
++CONFIG_BLK_CPQ_CISS_DA=y
++# CONFIG_CISS_SCSI_TAPE is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=4096
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++CONFIG_BLK_DEV_INITRD=y
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++CONFIG_IDE=y
++CONFIG_BLK_DEV_IDE=y
++
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++# CONFIG_BLK_DEV_HD_IDE is not set
++CONFIG_BLK_DEV_IDEDISK=y
++# CONFIG_IDEDISK_MULTI_MODE is not set
++CONFIG_BLK_DEV_IDECD=y
++# CONFIG_BLK_DEV_IDETAPE is not set
++# CONFIG_BLK_DEV_IDEFLOPPY is not set
++# CONFIG_BLK_DEV_IDESCSI is not set
++# CONFIG_IDE_TASK_IOCTL is not set
++
++#
++# IDE chipset support/bugfixes
++#
++CONFIG_IDE_GENERIC=y
++# CONFIG_BLK_DEV_CMD640 is not set
++# CONFIG_BLK_DEV_IDEPNP is not set
++CONFIG_BLK_DEV_IDEPCI=y
++# CONFIG_IDEPCI_SHARE_IRQ is not set
++# CONFIG_BLK_DEV_OFFBOARD is not set
++CONFIG_BLK_DEV_GENERIC=y
++# CONFIG_BLK_DEV_OPTI621 is not set
++# CONFIG_BLK_DEV_RZ1000 is not set
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
++CONFIG_IDEDMA_PCI_AUTO=y
++# CONFIG_IDEDMA_ONLYDISK is not set
++# CONFIG_BLK_DEV_AEC62XX is not set
++# CONFIG_BLK_DEV_ALI15X3 is not set
++# CONFIG_BLK_DEV_AMD74XX is not set
++# CONFIG_BLK_DEV_ATIIXP is not set
++# CONFIG_BLK_DEV_CMD64X is not set
++# CONFIG_BLK_DEV_TRIFLEX is not set
++# CONFIG_BLK_DEV_CY82C693 is not set
++# CONFIG_BLK_DEV_CS5520 is not set
++# CONFIG_BLK_DEV_CS5530 is not set
++# CONFIG_BLK_DEV_CS5535 is not set
++# CONFIG_BLK_DEV_HPT34X is not set
++# CONFIG_BLK_DEV_HPT366 is not set
++# CONFIG_BLK_DEV_SC1200 is not set
++CONFIG_BLK_DEV_PIIX=y
++# CONFIG_BLK_DEV_IT821X is not set
++# CONFIG_BLK_DEV_NS87415 is not set
++# CONFIG_BLK_DEV_PDC202XX_OLD is not set
++# CONFIG_BLK_DEV_PDC202XX_NEW is not set
++CONFIG_BLK_DEV_SVWKS=y
++# CONFIG_BLK_DEV_SIIMAGE is not set
++# CONFIG_BLK_DEV_SIS5513 is not set
++# CONFIG_BLK_DEV_SLC90E66 is not set
++# CONFIG_BLK_DEV_TRM290 is not set
++# CONFIG_BLK_DEV_VIA82CXXX is not set
++# CONFIG_IDE_ARM is not set
++CONFIG_BLK_DEV_IDEDMA=y
++# CONFIG_IDEDMA_IVB is not set
++CONFIG_IDEDMA_AUTO=y
++# CONFIG_BLK_DEV_HD is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++# CONFIG_BLK_DEV_SR is not set
++# CONFIG_CHR_DEV_SG is not set
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++# CONFIG_SCSI_MULTI_LUN is not set
++# CONFIG_SCSI_CONSTANTS is not set
++# CONFIG_SCSI_LOGGING is not set
++
++#
++# SCSI Transport Attributes
++#
++CONFIG_SCSI_SPI_ATTRS=y
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++# CONFIG_SCSI_SAS_ATTRS is not set
++
++#
++# SCSI low-level drivers
++#
++# CONFIG_ISCSI_TCP is not set
++CONFIG_BLK_DEV_3W_XXXX_RAID=y
++# CONFIG_SCSI_3W_9XXX is not set
++# CONFIG_SCSI_ACARD is not set
++CONFIG_SCSI_AACRAID=y
++CONFIG_SCSI_AIC7XXX=y
++CONFIG_AIC7XXX_CMDS_PER_DEVICE=32
++CONFIG_AIC7XXX_RESET_DELAY_MS=15000
++CONFIG_AIC7XXX_DEBUG_ENABLE=y
++CONFIG_AIC7XXX_DEBUG_MASK=0
++CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
++# CONFIG_SCSI_AIC7XXX_OLD is not set
++CONFIG_SCSI_AIC79XX=y
++CONFIG_AIC79XX_CMDS_PER_DEVICE=32
++CONFIG_AIC79XX_RESET_DELAY_MS=15000
++# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
++CONFIG_AIC79XX_DEBUG_ENABLE=y
++CONFIG_AIC79XX_DEBUG_MASK=0
++CONFIG_AIC79XX_REG_PRETTY_PRINT=y
++# CONFIG_SCSI_DPT_I2O is not set
++CONFIG_SCSI_ADVANSYS=y
++CONFIG_MEGARAID_NEWGEN=y
++# CONFIG_MEGARAID_MM is not set
++# CONFIG_MEGARAID_LEGACY is not set
++# CONFIG_MEGARAID_SAS is not set
++CONFIG_SCSI_SATA=y
++CONFIG_SCSI_SATA_AHCI=y
++# CONFIG_SCSI_SATA_SVW is not set
++CONFIG_SCSI_ATA_PIIX=y
++# CONFIG_SCSI_SATA_MV is not set
++# CONFIG_SCSI_SATA_NV is not set
++# CONFIG_SCSI_PDC_ADMA is not set
++# CONFIG_SCSI_HPTIOP is not set
++# CONFIG_SCSI_SATA_QSTOR is not set
++CONFIG_SCSI_SATA_PROMISE=y
++CONFIG_SCSI_SATA_SX4=y
++CONFIG_SCSI_SATA_SIL=y
++CONFIG_SCSI_SATA_SIL24=y
++# CONFIG_SCSI_SATA_SIS is not set
++# CONFIG_SCSI_SATA_ULI is not set
++# CONFIG_SCSI_SATA_VIA is not set
++# CONFIG_SCSI_SATA_VITESSE is not set
++CONFIG_SCSI_SATA_INTEL_COMBINED=y
++# CONFIG_SCSI_BUSLOGIC is not set
++# CONFIG_SCSI_DMX3191D is not set
++# CONFIG_SCSI_EATA is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++# CONFIG_SCSI_GDTH is not set
++# CONFIG_SCSI_IPS is not set
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++# CONFIG_SCSI_SYM53C8XX_2 is not set
++# CONFIG_SCSI_IPR is not set
++# CONFIG_SCSI_QLOGIC_1280 is not set
++# CONFIG_SCSI_QLA_FC is not set
++# CONFIG_SCSI_LPFC is not set
++# CONFIG_SCSI_DC395x is not set
++# CONFIG_SCSI_DC390T is not set
++# CONFIG_SCSI_NSP32 is not set
++# CONFIG_SCSI_DEBUG is not set
++
++#
++# Multi-device support (RAID and LVM)
++#
++CONFIG_MD=y
++CONFIG_BLK_DEV_MD=y
++# CONFIG_MD_LINEAR is not set
++CONFIG_MD_RAID0=y
++CONFIG_MD_RAID1=y
++# CONFIG_MD_RAID10 is not set
++# CONFIG_MD_RAID456 is not set
++# CONFIG_MD_MULTIPATH is not set
++# CONFIG_MD_FAULTY is not set
++CONFIG_BLK_DEV_DM=y
++# CONFIG_DM_CRYPT is not set
++CONFIG_DM_SNAPSHOT=y
++CONFIG_DM_MIRROR=y
++# CONFIG_DM_ZERO is not set
++# CONFIG_DM_MULTIPATH is not set
++
++#
++# Fusion MPT device support
++#
++CONFIG_FUSION=y
++CONFIG_FUSION_SPI=y
++# CONFIG_FUSION_FC is not set
++# CONFIG_FUSION_SAS is not set
++CONFIG_FUSION_MAX_SGE=128
++# CONFIG_FUSION_CTL is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_IEEE1394 is not set
++
++#
++# I2O device support
++#
++# CONFIG_I2O is not set
++
++#
++# Network device support
++#
++CONFIG_NETDEVICES=y
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_EQUALIZER is not set
++CONFIG_TUN=y
++# CONFIG_NET_SB1000 is not set
++
++#
++# ARCnet devices
++#
++# CONFIG_ARCNET is not set
++
++#
++# PHY device support
++#
++# CONFIG_PHYLIB is not set
++
++#
++# Ethernet (10 or 100Mbit)
++#
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++CONFIG_NET_VENDOR_3COM=y
++CONFIG_VORTEX=y
++# CONFIG_TYPHOON is not set
++
++#
++# Tulip family network device support
++#
++CONFIG_NET_TULIP=y
++# CONFIG_DE2104X is not set
++CONFIG_TULIP=y
++# CONFIG_TULIP_MWI is not set
++# CONFIG_TULIP_MMIO is not set
++# CONFIG_TULIP_NAPI is not set
++# CONFIG_DE4X5 is not set
++# CONFIG_WINBOND_840 is not set
++# CONFIG_DM9102 is not set
++# CONFIG_ULI526X is not set
++# CONFIG_HP100 is not set
++CONFIG_NET_PCI=y
++CONFIG_PCNET32=y
++# CONFIG_AMD8111_ETH is not set
++# CONFIG_ADAPTEC_STARFIRE is not set
++# CONFIG_B44 is not set
++# CONFIG_FORCEDETH is not set
++# CONFIG_DGRS is not set
++# CONFIG_EEPRO100 is not set
++CONFIG_E100=y
++# CONFIG_FEALNX is not set
++# CONFIG_NATSEMI is not set
++CONFIG_NE2K_PCI=y
++# CONFIG_8139CP is not set
++CONFIG_8139TOO=y
++CONFIG_8139TOO_PIO=y
++# CONFIG_8139TOO_TUNE_TWISTER is not set
++# CONFIG_8139TOO_8129 is not set
++# CONFIG_8139_OLD_RX_RESET is not set
++# CONFIG_SIS900 is not set
++# CONFIG_EPIC100 is not set
++# CONFIG_SUNDANCE is not set
++# CONFIG_TLAN is not set
++CONFIG_VIA_RHINE=y
++# CONFIG_VIA_RHINE_MMIO is not set
++# CONFIG_VIA_RHINE_NAPI is not set
++
++#
++# Ethernet (1000 Mbit)
++#
++CONFIG_ACENIC=y
++# CONFIG_ACENIC_OMIT_TIGON_I is not set
++# CONFIG_DL2K is not set
++CONFIG_E1000=y
++# CONFIG_E1000_NAPI is not set
++# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++CONFIG_SK98LIN=y
++# CONFIG_VIA_VELOCITY is not set
++CONFIG_TIGON3=y
++CONFIG_BNX2=y
++
++#
++# Ethernet (10000 Mbit)
++#
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++# CONFIG_SFC is not set
++
++#
++# Token Ring devices
++#
++# CONFIG_TR is not set
++
++#
++# Wireless LAN (non-hamradio)
++#
++# CONFIG_NET_RADIO is not set
++
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_NET_FC is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++
++#
++# ISDN subsystem
++#
++# CONFIG_ISDN is not set
++
++#
++# Telephony Support
++#
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_TSDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ATKBD=y
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++# CONFIG_MOUSE_SERIAL is not set
++# CONFIG_MOUSE_VSXXXAA is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_SERPORT=y
++# CONFIG_SERIO_CT82C710 is not set
++# CONFIG_SERIO_PCIPS2 is not set
++CONFIG_SERIO_LIBPS2=y
++# CONFIG_SERIO_RAW is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++CONFIG_VT_HW_CONSOLE_BINDING=y
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_JSM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++
++#
++# IPMI
++#
++# CONFIG_IPMI_HANDLER is not set
++
++#
++# Watchdog Cards
++#
++# CONFIG_WATCHDOG is not set
++# CONFIG_HW_RANDOM is not set
++# CONFIG_NVRAM is not set
++# CONFIG_RTC is not set
++# CONFIG_GEN_RTC is not set
++# CONFIG_DTLK is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++# CONFIG_SONYPI is not set
++
++#
++# Ftape, the floppy tape device driver
++#
++CONFIG_AGP=m
++CONFIG_AGP_ALI=m
++CONFIG_AGP_ATI=m
++CONFIG_AGP_AMD=m
++CONFIG_AGP_AMD64=m
++CONFIG_AGP_INTEL=m
++CONFIG_AGP_NVIDIA=m
++CONFIG_AGP_SIS=m
++CONFIG_AGP_SWORKS=m
++CONFIG_AGP_VIA=m
++# CONFIG_AGP_EFFICEON is not set
++CONFIG_DRM=m
++CONFIG_DRM_TDFX=m
++CONFIG_DRM_R128=m
++CONFIG_DRM_RADEON=m
++CONFIG_DRM_I810=m
++CONFIG_DRM_I830=m
++CONFIG_DRM_I915=m
++CONFIG_DRM_MGA=m
++CONFIG_DRM_SIS=m
++# CONFIG_DRM_VIA is not set
++# CONFIG_DRM_SAVAGE is not set
++# CONFIG_MWAVE is not set
++# CONFIG_PC8736x_GPIO is not set
++# CONFIG_NSC_GPIO is not set
++# CONFIG_CS5535_GPIO is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_HPET is not set
++# CONFIG_HANGCHECK_TIMER is not set
++
++#
++# TPM devices
++#
++# CONFIG_TCG_TPM is not set
++# CONFIG_TELCLOCK is not set
++
++#
++# I2C support
++#
++# CONFIG_I2C is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++
++#
++# Dallas's 1-wire bus
++#
++
++#
++# Hardware Monitoring support
++#
++# CONFIG_HWMON is not set
++# CONFIG_HWMON_VID is not set
++
++#
++# Misc devices
++#
++# CONFIG_IBM_ASM is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++
++#
++# Digital Video Broadcasting Devices
++#
++# CONFIG_DVB is not set
++# CONFIG_USB_DABUSB is not set
++
++#
++# Graphics support
++#
++CONFIG_FIRMWARE_EDID=y
++# CONFIG_FB is not set
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++# CONFIG_VGACON_SOFT_SCROLLBACK is not set
++CONFIG_DUMMY_CONSOLE=y
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++
++#
++# USB support
++#
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++CONFIG_USB=y
++# CONFIG_USB_DEBUG is not set
++
++#
++# Miscellaneous USB options
++#
++# CONFIG_USB_DEVICEFS is not set
++# CONFIG_USB_BANDWIDTH is not set
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_SUSPEND is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++# CONFIG_USB_EHCI_HCD is not set
++# CONFIG_USB_ISP116X_HCD is not set
++CONFIG_USB_OHCI_HCD=y
++# CONFIG_USB_OHCI_BIG_ENDIAN is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_UHCI_HCD=y
++# CONFIG_USB_SL811_HCD is not set
++
++#
++# USB Device Class drivers
++#
++# CONFIG_USB_ACM is not set
++# CONFIG_USB_PRINTER is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# may also be needed; see USB_STORAGE Help for more information
++#
++# CONFIG_USB_STORAGE is not set
++# CONFIG_USB_LIBUSUAL is not set
++
++#
++# USB Input Devices
++#
++CONFIG_USB_HID=y
++CONFIG_USB_HIDINPUT=y
++# CONFIG_USB_HIDINPUT_POWERBOOK is not set
++# CONFIG_HID_FF is not set
++# CONFIG_USB_HIDDEV is not set
++# CONFIG_USB_AIPTEK is not set
++# CONFIG_USB_WACOM is not set
++# CONFIG_USB_ACECAD is not set
++# CONFIG_USB_KBTAB is not set
++# CONFIG_USB_POWERMATE is not set
++# CONFIG_USB_TOUCHSCREEN is not set
++# CONFIG_USB_YEALINK is not set
++# CONFIG_USB_XPAD is not set
++# CONFIG_USB_ATI_REMOTE is not set
++# CONFIG_USB_ATI_REMOTE2 is not set
++# CONFIG_USB_KEYSPAN_REMOTE is not set
++# CONFIG_USB_APPLETOUCH is not set
++
++#
++# USB Imaging devices
++#
++# CONFIG_USB_MDC800 is not set
++# CONFIG_USB_MICROTEK is not set
++
++#
++# USB Network Adapters
++#
++# CONFIG_USB_CATC is not set
++# CONFIG_USB_KAWETH is not set
++# CONFIG_USB_PEGASUS is not set
++# CONFIG_USB_RTL8150 is not set
++# CONFIG_USB_USBNET is not set
++CONFIG_USB_MON=y
++
++#
++# USB port drivers
++#
++
++#
++# USB Serial Converter support
++#
++# CONFIG_USB_SERIAL is not set
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++# CONFIG_USB_AUERSWALD is not set
++# CONFIG_USB_RIO500 is not set
++# CONFIG_USB_LEGOTOWER is not set
++# CONFIG_USB_LCD is not set
++# CONFIG_USB_LED is not set
++# CONFIG_USB_CYPRESS_CY7C63 is not set
++# CONFIG_USB_CYTHERM is not set
++# CONFIG_USB_PHIDGETKIT is not set
++# CONFIG_USB_PHIDGETSERVO is not set
++# CONFIG_USB_IDMOUSE is not set
++# CONFIG_USB_APPLEDISPLAY is not set
++# CONFIG_USB_LD is not set
++
++#
++# USB DSL modem support
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++
++#
++# MMC/SD Card support
++#
++# CONFIG_MMC is not set
++
++#
++# LED devices
++#
++# CONFIG_NEW_LEDS is not set
++
++#
++# LED drivers
++#
++
++#
++# LED Triggers
++#
++
++#
++# InfiniBand support
++#
++# CONFIG_INFINIBAND is not set
++
++#
++# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
++#
++# CONFIG_EDAC is not set
++
++#
++# Real Time Clock
++#
++CONFIG_RTC_LIB=m
++CONFIG_RTC_CLASS=m
++
++#
++# RTC interfaces
++#
++CONFIG_RTC_INTF_SYSFS=m
++CONFIG_RTC_INTF_PROC=m
++CONFIG_RTC_INTF_DEV=m
++# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
++
++#
++# RTC drivers
++#
++# CONFIG_RTC_DRV_DS1553 is not set
++# CONFIG_RTC_DRV_DS1742 is not set
++CONFIG_RTC_DRV_M48T86=m
++CONFIG_RTC_DRV_TEST=m
++# CONFIG_RTC_DRV_V3020 is not set
++
++#
++# DMA Engine support
++#
++# CONFIG_DMA_ENGINE is not set
++
++#
++# DMA Clients
++#
++
++#
++# DMA Devices
++#
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++CONFIG_JBD=y
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=y
++# CONFIG_REISERFS_CHECK is not set
++# CONFIG_REISERFS_PROC_INFO is not set
++# CONFIG_REISERFS_FS_XATTR is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=y
++CONFIG_AUTOFS4_FS=y
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_ZISOFS_FS=y
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=m
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++CONFIG_NFSD=m
++CONFIG_NFSD_V3=y
++# CONFIG_NFSD_V3_ACL is not set
++# CONFIG_NFSD_V4 is not set
++CONFIG_NFSD_TCP=y
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=m
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++# CONFIG_9P_FS is not set
++
++#
++# Partition Types
++#
++# CONFIG_PARTITION_ADVANCED is not set
++CONFIG_MSDOS_PARTITION=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++# CONFIG_NLS_CODEPAGE_737 is not set
++# CONFIG_NLS_CODEPAGE_775 is not set
++# CONFIG_NLS_CODEPAGE_850 is not set
++# CONFIG_NLS_CODEPAGE_852 is not set
++# CONFIG_NLS_CODEPAGE_855 is not set
++# CONFIG_NLS_CODEPAGE_857 is not set
++# CONFIG_NLS_CODEPAGE_860 is not set
++# CONFIG_NLS_CODEPAGE_861 is not set
++# CONFIG_NLS_CODEPAGE_862 is not set
++# CONFIG_NLS_CODEPAGE_863 is not set
++# CONFIG_NLS_CODEPAGE_864 is not set
++# CONFIG_NLS_CODEPAGE_865 is not set
++# CONFIG_NLS_CODEPAGE_866 is not set
++# CONFIG_NLS_CODEPAGE_869 is not set
++# CONFIG_NLS_CODEPAGE_936 is not set
++# CONFIG_NLS_CODEPAGE_950 is not set
++# CONFIG_NLS_CODEPAGE_932 is not set
++# CONFIG_NLS_CODEPAGE_949 is not set
++# CONFIG_NLS_CODEPAGE_874 is not set
++# CONFIG_NLS_ISO8859_8 is not set
++# CONFIG_NLS_CODEPAGE_1250 is not set
++# CONFIG_NLS_CODEPAGE_1251 is not set
++# CONFIG_NLS_ASCII is not set
++CONFIG_NLS_ISO8859_1=y
++# CONFIG_NLS_ISO8859_2 is not set
++# CONFIG_NLS_ISO8859_3 is not set
++# CONFIG_NLS_ISO8859_4 is not set
++# CONFIG_NLS_ISO8859_5 is not set
++# CONFIG_NLS_ISO8859_6 is not set
++# CONFIG_NLS_ISO8859_7 is not set
++# CONFIG_NLS_ISO8859_9 is not set
++# CONFIG_NLS_ISO8859_13 is not set
++# CONFIG_NLS_ISO8859_14 is not set
++# CONFIG_NLS_ISO8859_15 is not set
++# CONFIG_NLS_KOI8_R is not set
++# CONFIG_NLS_KOI8_U is not set
++# CONFIG_NLS_UTF8 is not set
++
++#
++# Instrumentation Support
++#
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
++
++#
++# Kernel hacking
++#
++CONFIG_TRACE_IRQFLAGS_SUPPORT=y
++# CONFIG_PRINTK_TIME is not set
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_UNUSED_SYMBOLS=y
++CONFIG_DEBUG_KERNEL=y
++CONFIG_LOG_BUF_SHIFT=14
++CONFIG_DETECT_SOFTLOCKUP=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_RWSEMS is not set
++# CONFIG_DEBUG_LOCK_ALLOC is not set
++# CONFIG_PROVE_LOCKING is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_HIGHMEM is not set
++CONFIG_DEBUG_BUGVERBOSE=y
++CONFIG_DEBUG_INFO=y
++# CONFIG_DEBUG_FS is not set
++# CONFIG_DEBUG_VM is not set
++CONFIG_FRAME_POINTER=y
++# CONFIG_UNWIND_INFO is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_RCU_TORTURE_TEST is not set
++CONFIG_EARLY_PRINTK=y
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_DEBUG_RODATA is not set
++# CONFIG_4KSTACKS is not set
++CONFIG_X86_FIND_SMP_CONFIG=y
++CONFIG_X86_MPPARSE=y
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_HMAC=y
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=m
++CONFIG_CRYPTO_SHA1=m
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++CONFIG_CRYPTO_DES=m
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_AES_586 is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++CONFIG_CRYPTO_CRC32C=m
++# CONFIG_CRYPTO_TEST is not set
++
++#
++# Hardware crypto devices
++#
++# CONFIG_CRYPTO_DEV_PADLOCK is not set
++CONFIG_XEN=y
++CONFIG_XEN_INTERFACE_VERSION=0x00030207
++
++#
++# XEN
++#
++CONFIG_XEN_PRIVILEGED_GUEST=y
++# CONFIG_XEN_UNPRIVILEGED_GUEST is not set
++CONFIG_XEN_PRIVCMD=y
++CONFIG_XEN_XENBUS_DEV=y
++CONFIG_XEN_BACKEND=y
++CONFIG_XEN_BLKDEV_BACKEND=y
++CONFIG_XEN_BLKDEV_TAP=y
++CONFIG_XEN_NETDEV_BACKEND=y
++# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_BACKEND is not set
++# CONFIG_XEN_NETDEV_LOOPBACK is not set
++CONFIG_XEN_PCIDEV_BACKEND=y
++# CONFIG_XEN_PCIDEV_BACKEND_VPCI is not set
++CONFIG_XEN_PCIDEV_BACKEND_PASS=y
++# CONFIG_XEN_PCIDEV_BACKEND_SLOT is not set
++# CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER is not set
++# CONFIG_XEN_PCIDEV_BE_DEBUG is not set
++CONFIG_XEN_TPMDEV_BACKEND=m
++CONFIG_XEN_SCSI_BACKEND=m
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++CONFIG_XEN_SCSI_FRONTEND=m
++CONFIG_XEN_GRANT_DEV=y
++# CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND is not set
++CONFIG_XEN_SCRUB_PAGES=y
++CONFIG_XEN_DISABLE_SERIAL=y
++CONFIG_XEN_SYSFS=y
++CONFIG_XEN_COMPAT_030002_AND_LATER=y
++# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
++# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
++# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
++CONFIG_XEN_COMPAT=0x030002
++CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
++CONFIG_NO_IDLE_HZ=y
++CONFIG_XEN_SMPBOOT=y
++CONFIG_XEN_BALLOON=y
++CONFIG_XEN_DEVMEM=y
++
++#
++# Library routines
++#
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++CONFIG_CRC32=y
++CONFIG_LIBCRC32C=y
++CONFIG_ZLIB_INFLATE=y
++CONFIG_PLIST=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_GENERIC_PENDING_IRQ=y
++CONFIG_X86_SMP=y
++CONFIG_X86_BIOS_REBOOT=y
++CONFIG_X86_TRAMPOLINE=y
++CONFIG_X86_NO_TSS=y
++CONFIG_X86_NO_IDT=y
++CONFIG_KTIME_SCALAR=y
+diff -r d894e36cfc30 -r 0aa021803deb buildconfigs/linux-defconfig_xen0_x86_64
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/buildconfigs/linux-defconfig_xen0_x86_64	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1390 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.18.8
++# Mon Feb 18 10:41:04 2008
++#
++CONFIG_X86_64=y
++CONFIG_64BIT=y
++CONFIG_X86=y
++CONFIG_LOCKDEP_SUPPORT=y
++CONFIG_STACKTRACE_SUPPORT=y
++CONFIG_SEMAPHORE_SLEEPERS=y
++CONFIG_MMU=y
++CONFIG_RWSEM_GENERIC_SPINLOCK=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_X86_CMPXCHG=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_DMI=y
++CONFIG_AUDIT_ARCH=y
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++# CONFIG_LOCALVERSION_AUTO is not set
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++# CONFIG_CPUSETS is not set
++# CONFIG_RELAY is not set
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++# CONFIG_EMBEDDED is not set
++CONFIG_UID16=y
++CONFIG_SYSCTL=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_SLAB=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_SLOB is not set
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++CONFIG_STOP_MACHINE=y
++
++#
++# Block layer
++#
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++CONFIG_LSF=y
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Processor type and features
++#
++CONFIG_X86_PC=y
++# CONFIG_X86_VSMP is not set
++# CONFIG_MK8 is not set
++# CONFIG_MPSC is not set
++CONFIG_GENERIC_CPU=y
++CONFIG_X86_64_XEN=y
++CONFIG_X86_NO_TSS=y
++CONFIG_X86_NO_IDT=y
++CONFIG_X86_L1_CACHE_BYTES=128
++CONFIG_X86_L1_CACHE_SHIFT=7
++CONFIG_X86_INTERNODE_CACHE_BYTES=128
++CONFIG_X86_GOOD_APIC=y
++CONFIG_MICROCODE=y
++CONFIG_X86_MSR=y
++CONFIG_X86_CPUID=y
++CONFIG_X86_IO_APIC=y
++CONFIG_X86_XEN_GENAPIC=y
++CONFIG_X86_LOCAL_APIC=y
++CONFIG_MTRR=y
++CONFIG_SMP=y
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_PREEMPT_BKL=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_RESOURCES_64BIT=y
++CONFIG_NR_CPUS=8
++CONFIG_HOTPLUG_CPU=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_SWIOTLB=y
++CONFIG_KEXEC=y
++# CONFIG_CRASH_DUMP is not set
++CONFIG_PHYSICAL_START=0x200000
++CONFIG_SECCOMP=y
++CONFIG_HZ_100=y
++# CONFIG_HZ_250 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=100
++# CONFIG_REORDER is not set
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_ISA_DMA_API=y
++CONFIG_GENERIC_PENDING_IRQ=y
++
++#
++# Power management options
++#
++CONFIG_PM=y
++CONFIG_PM_LEGACY=y
++# CONFIG_PM_DEBUG is not set
++# CONFIG_SOFTWARE_SUSPEND is not set
++CONFIG_SUSPEND_SMP=y
++
++#
++# ACPI (Advanced Configuration and Power Interface) Support
++#
++CONFIG_ACPI=y
++CONFIG_ACPI_SLEEP=y
++CONFIG_ACPI_SLEEP_PROC_FS=y
++# CONFIG_ACPI_SLEEP_PROC_SLEEP is not set
++CONFIG_ACPI_AC=m
++CONFIG_ACPI_BATTERY=m
++CONFIG_ACPI_BUTTON=m
++CONFIG_ACPI_VIDEO=m
++CONFIG_ACPI_HOTKEY=m
++CONFIG_ACPI_FAN=m
++CONFIG_ACPI_DOCK=m
++CONFIG_ACPI_PROCESSOR=m
++CONFIG_ACPI_HOTPLUG_CPU=y
++CONFIG_ACPI_THERMAL=m
++CONFIG_ACPI_ASUS=m
++CONFIG_ACPI_IBM=m
++CONFIG_ACPI_TOSHIBA=m
++# CONFIG_ACPI_CUSTOM_DSDT is not set
++CONFIG_ACPI_BLACKLIST_YEAR=0
++# CONFIG_ACPI_DEBUG is not set
++CONFIG_ACPI_EC=y
++CONFIG_ACPI_POWER=y
++CONFIG_ACPI_SYSTEM=y
++CONFIG_ACPI_CONTAINER=m
++CONFIG_ACPI_PV_SLEEP=y
++
++#
++# CPU Frequency scaling
++#
++# CONFIG_CPU_FREQ is not set
++
++#
++# Bus options (PCI etc.)
++#
++CONFIG_PCI=y
++CONFIG_PCI_DIRECT=y
++CONFIG_PCI_MMCONFIG=y
++CONFIG_XEN_PCIDEV_FRONTEND=y
++# CONFIG_XEN_PCIDEV_FE_DEBUG is not set
++# CONFIG_PCIEPORTBUS is not set
++CONFIG_PCI_MSI=y
++# CONFIG_PCI_DEBUG is not set
++
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++# CONFIG_PCCARD is not set
++
++#
++# PCI Hotplug Support
++#
++# CONFIG_HOTPLUG_PCI is not set
++
++#
++# Executable file formats / Emulations
++#
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++CONFIG_IA32_EMULATION=y
++CONFIG_IA32_AOUT=y
++CONFIG_COMPAT=y
++CONFIG_SYSVIPC_COMPAT=y
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++# CONFIG_NETDEBUG is not set
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++# CONFIG_IP_MULTICAST is not set
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++# CONFIG_IP_PNP_BOOTP is not set
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++# CONFIG_INET_DIAG is not set
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_BIC=y
++
++#
++# IP: Virtual Server Configuration
++#
++# CONFIG_IP_VS is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_BRIDGE_NETFILTER=y
++
++#
++# Core Netfilter Configuration
++#
++# CONFIG_NETFILTER_NETLINK is not set
++# CONFIG_NETFILTER_XTABLES is not set
++
++#
++# IP: Netfilter Configuration
++#
++CONFIG_IP_NF_CONNTRACK=m
++CONFIG_IP_NF_CT_ACCT=y
++# CONFIG_IP_NF_CONNTRACK_MARK is not set
++# CONFIG_IP_NF_CONNTRACK_EVENTS is not set
++# CONFIG_IP_NF_CT_PROTO_SCTP is not set
++CONFIG_IP_NF_FTP=m
++# CONFIG_IP_NF_IRC is not set
++# CONFIG_IP_NF_NETBIOS_NS is not set
++# CONFIG_IP_NF_TFTP is not set
++# CONFIG_IP_NF_AMANDA is not set
++# CONFIG_IP_NF_PPTP is not set
++# CONFIG_IP_NF_H323 is not set
++# CONFIG_IP_NF_SIP is not set
++# CONFIG_IP_NF_QUEUE is not set
++
++#
++# Bridge: Netfilter Configuration
++#
++# CONFIG_BRIDGE_NF_EBTABLES is not set
++
++#
++# DCCP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_DCCP is not set
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_SCTP is not set
++
++#
++# TIPC Configuration (EXPERIMENTAL)
++#
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++CONFIG_BRIDGE=y
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++CONFIG_LLC=y
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++
++#
++# QoS and/or fair queueing
++#
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_IEEE80211 is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++# CONFIG_STANDALONE is not set
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_DEBUG_DRIVER is not set
++CONFIG_SYS_HYPERVISOR=y
++
++#
++# Connector - unified userspace <-> kernelspace linker
++#
++# CONFIG_CONNECTOR is not set
++
++#
++# Memory Technology Devices (MTD)
++#
++# CONFIG_MTD is not set
++
++#
++# Parallel port support
++#
++# CONFIG_PARPORT is not set
++
++#
++# Plug and Play support
++#
++CONFIG_PNP=y
++CONFIG_PNP_DEBUG=y
++
++#
++# Protocols
++#
++CONFIG_PNPACPI=y
++
++#
++# Block devices
++#
++CONFIG_BLK_DEV_FD=y
++# CONFIG_BLK_CPQ_DA is not set
++CONFIG_BLK_CPQ_CISS_DA=y
++# CONFIG_CISS_SCSI_TAPE is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=4096
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++CONFIG_BLK_DEV_INITRD=y
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++CONFIG_IDE=y
++CONFIG_BLK_DEV_IDE=y
++
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++# CONFIG_BLK_DEV_HD_IDE is not set
++CONFIG_BLK_DEV_IDEDISK=y
++# CONFIG_IDEDISK_MULTI_MODE is not set
++CONFIG_BLK_DEV_IDECD=y
++# CONFIG_BLK_DEV_IDETAPE is not set
++# CONFIG_BLK_DEV_IDEFLOPPY is not set
++# CONFIG_BLK_DEV_IDESCSI is not set
++# CONFIG_IDE_TASK_IOCTL is not set
++
++#
++# IDE chipset support/bugfixes
++#
++CONFIG_IDE_GENERIC=y
++# CONFIG_BLK_DEV_CMD640 is not set
++# CONFIG_BLK_DEV_IDEPNP is not set
++CONFIG_BLK_DEV_IDEPCI=y
++# CONFIG_IDEPCI_SHARE_IRQ is not set
++# CONFIG_BLK_DEV_OFFBOARD is not set
++CONFIG_BLK_DEV_GENERIC=y
++# CONFIG_BLK_DEV_OPTI621 is not set
++# CONFIG_BLK_DEV_RZ1000 is not set
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
++CONFIG_IDEDMA_PCI_AUTO=y
++# CONFIG_IDEDMA_ONLYDISK is not set
++# CONFIG_BLK_DEV_AEC62XX is not set
++# CONFIG_BLK_DEV_ALI15X3 is not set
++# CONFIG_BLK_DEV_AMD74XX is not set
++# CONFIG_BLK_DEV_ATIIXP is not set
++# CONFIG_BLK_DEV_CMD64X is not set
++# CONFIG_BLK_DEV_TRIFLEX is not set
++# CONFIG_BLK_DEV_CY82C693 is not set
++# CONFIG_BLK_DEV_CS5520 is not set
++# CONFIG_BLK_DEV_CS5530 is not set
++# CONFIG_BLK_DEV_HPT34X is not set
++# CONFIG_BLK_DEV_HPT366 is not set
++# CONFIG_BLK_DEV_SC1200 is not set
++CONFIG_BLK_DEV_PIIX=y
++# CONFIG_BLK_DEV_IT821X is not set
++# CONFIG_BLK_DEV_NS87415 is not set
++# CONFIG_BLK_DEV_PDC202XX_OLD is not set
++# CONFIG_BLK_DEV_PDC202XX_NEW is not set
++CONFIG_BLK_DEV_SVWKS=y
++# CONFIG_BLK_DEV_SIIMAGE is not set
++# CONFIG_BLK_DEV_SIS5513 is not set
++# CONFIG_BLK_DEV_SLC90E66 is not set
++# CONFIG_BLK_DEV_TRM290 is not set
++# CONFIG_BLK_DEV_VIA82CXXX is not set
++# CONFIG_IDE_ARM is not set
++CONFIG_BLK_DEV_IDEDMA=y
++# CONFIG_IDEDMA_IVB is not set
++CONFIG_IDEDMA_AUTO=y
++# CONFIG_BLK_DEV_HD is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++# CONFIG_BLK_DEV_SR is not set
++# CONFIG_CHR_DEV_SG is not set
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++# CONFIG_SCSI_MULTI_LUN is not set
++# CONFIG_SCSI_CONSTANTS is not set
++# CONFIG_SCSI_LOGGING is not set
++
++#
++# SCSI Transport Attributes
++#
++CONFIG_SCSI_SPI_ATTRS=y
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++# CONFIG_SCSI_SAS_ATTRS is not set
++
++#
++# SCSI low-level drivers
++#
++# CONFIG_ISCSI_TCP is not set
++CONFIG_BLK_DEV_3W_XXXX_RAID=y
++# CONFIG_SCSI_3W_9XXX is not set
++# CONFIG_SCSI_ACARD is not set
++CONFIG_SCSI_AACRAID=y
++CONFIG_SCSI_AIC7XXX=y
++CONFIG_AIC7XXX_CMDS_PER_DEVICE=32
++CONFIG_AIC7XXX_RESET_DELAY_MS=15000
++CONFIG_AIC7XXX_DEBUG_ENABLE=y
++CONFIG_AIC7XXX_DEBUG_MASK=0
++CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
++# CONFIG_SCSI_AIC7XXX_OLD is not set
++CONFIG_SCSI_AIC79XX=y
++CONFIG_AIC79XX_CMDS_PER_DEVICE=32
++CONFIG_AIC79XX_RESET_DELAY_MS=15000
++# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
++CONFIG_AIC79XX_DEBUG_ENABLE=y
++CONFIG_AIC79XX_DEBUG_MASK=0
++CONFIG_AIC79XX_REG_PRETTY_PRINT=y
++CONFIG_MEGARAID_NEWGEN=y
++# CONFIG_MEGARAID_MM is not set
++# CONFIG_MEGARAID_LEGACY is not set
++# CONFIG_MEGARAID_SAS is not set
++CONFIG_SCSI_SATA=y
++CONFIG_SCSI_SATA_AHCI=y
++# CONFIG_SCSI_SATA_SVW is not set
++CONFIG_SCSI_ATA_PIIX=y
++# CONFIG_SCSI_SATA_MV is not set
++# CONFIG_SCSI_SATA_NV is not set
++# CONFIG_SCSI_PDC_ADMA is not set
++# CONFIG_SCSI_HPTIOP is not set
++# CONFIG_SCSI_SATA_QSTOR is not set
++CONFIG_SCSI_SATA_PROMISE=y
++CONFIG_SCSI_SATA_SX4=y
++CONFIG_SCSI_SATA_SIL=y
++CONFIG_SCSI_SATA_SIL24=y
++# CONFIG_SCSI_SATA_SIS is not set
++# CONFIG_SCSI_SATA_ULI is not set
++# CONFIG_SCSI_SATA_VIA is not set
++# CONFIG_SCSI_SATA_VITESSE is not set
++CONFIG_SCSI_SATA_INTEL_COMBINED=y
++# CONFIG_SCSI_BUSLOGIC is not set
++# CONFIG_SCSI_DMX3191D is not set
++# CONFIG_SCSI_EATA is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++# CONFIG_SCSI_GDTH is not set
++# CONFIG_SCSI_IPS is not set
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++# CONFIG_SCSI_SYM53C8XX_2 is not set
++# CONFIG_SCSI_IPR is not set
++# CONFIG_SCSI_QLOGIC_1280 is not set
++# CONFIG_SCSI_QLA_FC is not set
++# CONFIG_SCSI_LPFC is not set
++# CONFIG_SCSI_DC395x is not set
++# CONFIG_SCSI_DC390T is not set
++# CONFIG_SCSI_DEBUG is not set
++
++#
++# Multi-device support (RAID and LVM)
++#
++CONFIG_MD=y
++CONFIG_BLK_DEV_MD=y
++# CONFIG_MD_LINEAR is not set
++CONFIG_MD_RAID0=y
++CONFIG_MD_RAID1=y
++# CONFIG_MD_RAID10 is not set
++# CONFIG_MD_RAID456 is not set
++# CONFIG_MD_MULTIPATH is not set
++# CONFIG_MD_FAULTY is not set
++CONFIG_BLK_DEV_DM=y
++# CONFIG_DM_CRYPT is not set
++CONFIG_DM_SNAPSHOT=y
++CONFIG_DM_MIRROR=y
++# CONFIG_DM_ZERO is not set
++# CONFIG_DM_MULTIPATH is not set
++
++#
++# Fusion MPT device support
++#
++CONFIG_FUSION=y
++CONFIG_FUSION_SPI=y
++# CONFIG_FUSION_FC is not set
++# CONFIG_FUSION_SAS is not set
++CONFIG_FUSION_MAX_SGE=128
++# CONFIG_FUSION_CTL is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_IEEE1394 is not set
++
++#
++# I2O device support
++#
++# CONFIG_I2O is not set
++
++#
++# Network device support
++#
++CONFIG_NETDEVICES=y
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_EQUALIZER is not set
++CONFIG_TUN=y
++# CONFIG_NET_SB1000 is not set
++
++#
++# ARCnet devices
++#
++# CONFIG_ARCNET is not set
++
++#
++# PHY device support
++#
++# CONFIG_PHYLIB is not set
++
++#
++# Ethernet (10 or 100Mbit)
++#
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++CONFIG_NET_VENDOR_3COM=y
++CONFIG_VORTEX=y
++# CONFIG_TYPHOON is not set
++
++#
++# Tulip family network device support
++#
++CONFIG_NET_TULIP=y
++# CONFIG_DE2104X is not set
++CONFIG_TULIP=y
++# CONFIG_TULIP_MWI is not set
++# CONFIG_TULIP_MMIO is not set
++# CONFIG_TULIP_NAPI is not set
++# CONFIG_DE4X5 is not set
++# CONFIG_WINBOND_840 is not set
++# CONFIG_DM9102 is not set
++# CONFIG_ULI526X is not set
++# CONFIG_HP100 is not set
++CONFIG_NET_PCI=y
++CONFIG_PCNET32=y
++# CONFIG_AMD8111_ETH is not set
++# CONFIG_ADAPTEC_STARFIRE is not set
++# CONFIG_B44 is not set
++# CONFIG_FORCEDETH is not set
++# CONFIG_DGRS is not set
++# CONFIG_EEPRO100 is not set
++CONFIG_E100=y
++# CONFIG_FEALNX is not set
++# CONFIG_NATSEMI is not set
++CONFIG_NE2K_PCI=y
++# CONFIG_8139CP is not set
++CONFIG_8139TOO=y
++CONFIG_8139TOO_PIO=y
++# CONFIG_8139TOO_TUNE_TWISTER is not set
++# CONFIG_8139TOO_8129 is not set
++# CONFIG_8139_OLD_RX_RESET is not set
++# CONFIG_SIS900 is not set
++# CONFIG_EPIC100 is not set
++# CONFIG_SUNDANCE is not set
++CONFIG_VIA_RHINE=y
++# CONFIG_VIA_RHINE_MMIO is not set
++# CONFIG_VIA_RHINE_NAPI is not set
++
++#
++# Ethernet (1000 Mbit)
++#
++CONFIG_ACENIC=y
++# CONFIG_ACENIC_OMIT_TIGON_I is not set
++# CONFIG_DL2K is not set
++CONFIG_E1000=y
++# CONFIG_E1000_NAPI is not set
++# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++CONFIG_SK98LIN=y
++# CONFIG_VIA_VELOCITY is not set
++CONFIG_TIGON3=y
++CONFIG_BNX2=y
++
++#
++# Ethernet (10000 Mbit)
++#
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++# CONFIG_SFC is not set
++
++#
++# Token Ring devices
++#
++# CONFIG_TR is not set
++
++#
++# Wireless LAN (non-hamradio)
++#
++# CONFIG_NET_RADIO is not set
++
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_NET_FC is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++
++#
++# ISDN subsystem
++#
++# CONFIG_ISDN is not set
++
++#
++# Telephony Support
++#
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_TSDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ATKBD=y
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++# CONFIG_MOUSE_SERIAL is not set
++# CONFIG_MOUSE_VSXXXAA is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_SERPORT=y
++# CONFIG_SERIO_CT82C710 is not set
++# CONFIG_SERIO_PCIPS2 is not set
++CONFIG_SERIO_LIBPS2=y
++# CONFIG_SERIO_RAW is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++CONFIG_VT_HW_CONSOLE_BINDING=y
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_JSM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++
++#
++# IPMI
++#
++# CONFIG_IPMI_HANDLER is not set
++
++#
++# Watchdog Cards
++#
++# CONFIG_WATCHDOG is not set
++# CONFIG_HW_RANDOM is not set
++# CONFIG_NVRAM is not set
++# CONFIG_RTC is not set
++# CONFIG_GEN_RTC is not set
++# CONFIG_DTLK is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++
++#
++# Ftape, the floppy tape device driver
++#
++CONFIG_AGP=m
++# CONFIG_AGP_AMD64 is not set
++CONFIG_AGP_INTEL=m
++CONFIG_AGP_SIS=m
++CONFIG_AGP_VIA=m
++CONFIG_DRM=m
++CONFIG_DRM_TDFX=m
++CONFIG_DRM_R128=m
++CONFIG_DRM_RADEON=m
++CONFIG_DRM_I810=m
++CONFIG_DRM_I830=m
++CONFIG_DRM_I915=m
++CONFIG_DRM_MGA=m
++CONFIG_DRM_SIS=m
++# CONFIG_DRM_VIA is not set
++# CONFIG_DRM_SAVAGE is not set
++# CONFIG_MWAVE is not set
++# CONFIG_PC8736x_GPIO is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_HPET is not set
++# CONFIG_HANGCHECK_TIMER is not set
++
++#
++# TPM devices
++#
++# CONFIG_TCG_TPM is not set
++# CONFIG_TELCLOCK is not set
++
++#
++# I2C support
++#
++# CONFIG_I2C is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++
++#
++# Dallas's 1-wire bus
++#
++
++#
++# Hardware Monitoring support
++#
++# CONFIG_HWMON is not set
++# CONFIG_HWMON_VID is not set
++
++#
++# Misc devices
++#
++# CONFIG_IBM_ASM is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++
++#
++# Digital Video Broadcasting Devices
++#
++# CONFIG_DVB is not set
++# CONFIG_USB_DABUSB is not set
++
++#
++# Graphics support
++#
++CONFIG_FIRMWARE_EDID=y
++# CONFIG_FB is not set
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++# CONFIG_VGACON_SOFT_SCROLLBACK is not set
++CONFIG_DUMMY_CONSOLE=y
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++
++#
++# USB support
++#
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++CONFIG_USB=y
++# CONFIG_USB_DEBUG is not set
++
++#
++# Miscellaneous USB options
++#
++# CONFIG_USB_DEVICEFS is not set
++# CONFIG_USB_BANDWIDTH is not set
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_SUSPEND is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++# CONFIG_USB_EHCI_HCD is not set
++# CONFIG_USB_ISP116X_HCD is not set
++CONFIG_USB_OHCI_HCD=y
++# CONFIG_USB_OHCI_BIG_ENDIAN is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_UHCI_HCD=y
++# CONFIG_USB_SL811_HCD is not set
++
++#
++# USB Device Class drivers
++#
++# CONFIG_USB_ACM is not set
++# CONFIG_USB_PRINTER is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# may also be needed; see USB_STORAGE Help for more information
++#
++# CONFIG_USB_STORAGE is not set
++# CONFIG_USB_LIBUSUAL is not set
++
++#
++# USB Input Devices
++#
++CONFIG_USB_HID=y
++CONFIG_USB_HIDINPUT=y
++# CONFIG_USB_HIDINPUT_POWERBOOK is not set
++# CONFIG_HID_FF is not set
++# CONFIG_USB_HIDDEV is not set
++# CONFIG_USB_AIPTEK is not set
++# CONFIG_USB_WACOM is not set
++# CONFIG_USB_ACECAD is not set
++# CONFIG_USB_KBTAB is not set
++# CONFIG_USB_POWERMATE is not set
++# CONFIG_USB_TOUCHSCREEN is not set
++# CONFIG_USB_YEALINK is not set
++# CONFIG_USB_XPAD is not set
++# CONFIG_USB_ATI_REMOTE is not set
++# CONFIG_USB_ATI_REMOTE2 is not set
++# CONFIG_USB_KEYSPAN_REMOTE is not set
++# CONFIG_USB_APPLETOUCH is not set
++
++#
++# USB Imaging devices
++#
++# CONFIG_USB_MDC800 is not set
++# CONFIG_USB_MICROTEK is not set
++
++#
++# USB Network Adapters
++#
++# CONFIG_USB_CATC is not set
++# CONFIG_USB_KAWETH is not set
++# CONFIG_USB_PEGASUS is not set
++# CONFIG_USB_RTL8150 is not set
++# CONFIG_USB_USBNET is not set
++CONFIG_USB_MON=y
++
++#
++# USB port drivers
++#
++
++#
++# USB Serial Converter support
++#
++# CONFIG_USB_SERIAL is not set
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++# CONFIG_USB_AUERSWALD is not set
++# CONFIG_USB_RIO500 is not set
++# CONFIG_USB_LEGOTOWER is not set
++# CONFIG_USB_LCD is not set
++# CONFIG_USB_LED is not set
++# CONFIG_USB_CYPRESS_CY7C63 is not set
++# CONFIG_USB_CYTHERM is not set
++# CONFIG_USB_PHIDGETKIT is not set
++# CONFIG_USB_PHIDGETSERVO is not set
++# CONFIG_USB_IDMOUSE is not set
++# CONFIG_USB_APPLEDISPLAY is not set
++# CONFIG_USB_LD is not set
++
++#
++# USB DSL modem support
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++
++#
++# MMC/SD Card support
++#
++# CONFIG_MMC is not set
++
++#
++# LED devices
++#
++# CONFIG_NEW_LEDS is not set
++
++#
++# LED drivers
++#
++
++#
++# LED Triggers
++#
++
++#
++# InfiniBand support
++#
++# CONFIG_INFINIBAND is not set
++
++#
++# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
++#
++# CONFIG_EDAC is not set
++
++#
++# Real Time Clock
++#
++CONFIG_RTC_LIB=m
++CONFIG_RTC_CLASS=m
++
++#
++# RTC interfaces
++#
++CONFIG_RTC_INTF_SYSFS=m
++CONFIG_RTC_INTF_PROC=m
++CONFIG_RTC_INTF_DEV=m
++# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
++
++#
++# RTC drivers
++#
++# CONFIG_RTC_DRV_DS1553 is not set
++# CONFIG_RTC_DRV_DS1742 is not set
++CONFIG_RTC_DRV_M48T86=m
++CONFIG_RTC_DRV_TEST=m
++# CONFIG_RTC_DRV_V3020 is not set
++
++#
++# DMA Engine support
++#
++# CONFIG_DMA_ENGINE is not set
++
++#
++# DMA Clients
++#
++
++#
++# DMA Devices
++#
++
++#
++# Firmware Drivers
++#
++CONFIG_EDD=y
++# CONFIG_DELL_RBU is not set
++# CONFIG_DCDBAS is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++CONFIG_JBD=y
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=y
++# CONFIG_REISERFS_CHECK is not set
++# CONFIG_REISERFS_PROC_INFO is not set
++# CONFIG_REISERFS_FS_XATTR is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=y
++CONFIG_AUTOFS4_FS=y
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_ZISOFS_FS=y
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=m
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++CONFIG_NFSD=m
++CONFIG_NFSD_V3=y
++# CONFIG_NFSD_V3_ACL is not set
++# CONFIG_NFSD_V4 is not set
++CONFIG_NFSD_TCP=y
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=m
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++# CONFIG_9P_FS is not set
++
++#
++# Partition Types
++#
++# CONFIG_PARTITION_ADVANCED is not set
++CONFIG_MSDOS_PARTITION=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++# CONFIG_NLS_CODEPAGE_737 is not set
++# CONFIG_NLS_CODEPAGE_775 is not set
++# CONFIG_NLS_CODEPAGE_850 is not set
++# CONFIG_NLS_CODEPAGE_852 is not set
++# CONFIG_NLS_CODEPAGE_855 is not set
++# CONFIG_NLS_CODEPAGE_857 is not set
++# CONFIG_NLS_CODEPAGE_860 is not set
++# CONFIG_NLS_CODEPAGE_861 is not set
++# CONFIG_NLS_CODEPAGE_862 is not set
++# CONFIG_NLS_CODEPAGE_863 is not set
++# CONFIG_NLS_CODEPAGE_864 is not set
++# CONFIG_NLS_CODEPAGE_865 is not set
++# CONFIG_NLS_CODEPAGE_866 is not set
++# CONFIG_NLS_CODEPAGE_869 is not set
++# CONFIG_NLS_CODEPAGE_936 is not set
++# CONFIG_NLS_CODEPAGE_950 is not set
++# CONFIG_NLS_CODEPAGE_932 is not set
++# CONFIG_NLS_CODEPAGE_949 is not set
++# CONFIG_NLS_CODEPAGE_874 is not set
++# CONFIG_NLS_ISO8859_8 is not set
++# CONFIG_NLS_CODEPAGE_1250 is not set
++# CONFIG_NLS_CODEPAGE_1251 is not set
++# CONFIG_NLS_ASCII is not set
++CONFIG_NLS_ISO8859_1=y
++# CONFIG_NLS_ISO8859_2 is not set
++# CONFIG_NLS_ISO8859_3 is not set
++# CONFIG_NLS_ISO8859_4 is not set
++# CONFIG_NLS_ISO8859_5 is not set
++# CONFIG_NLS_ISO8859_6 is not set
++# CONFIG_NLS_ISO8859_7 is not set
++# CONFIG_NLS_ISO8859_9 is not set
++# CONFIG_NLS_ISO8859_13 is not set
++# CONFIG_NLS_ISO8859_14 is not set
++# CONFIG_NLS_ISO8859_15 is not set
++# CONFIG_NLS_KOI8_R is not set
++# CONFIG_NLS_KOI8_U is not set
++# CONFIG_NLS_UTF8 is not set
++
++#
++# Instrumentation Support
++#
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
++
++#
++# Kernel hacking
++#
++CONFIG_TRACE_IRQFLAGS_SUPPORT=y
++# CONFIG_PRINTK_TIME is not set
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_UNUSED_SYMBOLS=y
++CONFIG_DEBUG_KERNEL=y
++CONFIG_LOG_BUF_SHIFT=14
++CONFIG_DETECT_SOFTLOCKUP=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_RWSEMS is not set
++# CONFIG_DEBUG_LOCK_ALLOC is not set
++# CONFIG_PROVE_LOCKING is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++CONFIG_DEBUG_INFO=y
++# CONFIG_DEBUG_FS is not set
++# CONFIG_DEBUG_VM is not set
++CONFIG_FRAME_POINTER=y
++# CONFIG_UNWIND_INFO is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_RCU_TORTURE_TEST is not set
++# CONFIG_DEBUG_RODATA is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_HMAC=y
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=m
++CONFIG_CRYPTO_SHA1=m
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++CONFIG_CRYPTO_DES=m
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_AES_X86_64 is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++CONFIG_CRYPTO_CRC32C=m
++# CONFIG_CRYPTO_TEST is not set
++
++#
++# Hardware crypto devices
++#
++CONFIG_XEN=y
++CONFIG_XEN_INTERFACE_VERSION=0x00030207
++
++#
++# XEN
++#
++CONFIG_XEN_PRIVILEGED_GUEST=y
++# CONFIG_XEN_UNPRIVILEGED_GUEST is not set
++CONFIG_XEN_PRIVCMD=y
++CONFIG_XEN_XENBUS_DEV=y
++CONFIG_XEN_BACKEND=y
++CONFIG_XEN_BLKDEV_BACKEND=y
++CONFIG_XEN_BLKDEV_TAP=y
++CONFIG_XEN_NETDEV_BACKEND=y
++# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_BACKEND is not set
++# CONFIG_XEN_NETDEV_LOOPBACK is not set
++CONFIG_XEN_PCIDEV_BACKEND=y
++# CONFIG_XEN_PCIDEV_BACKEND_VPCI is not set
++CONFIG_XEN_PCIDEV_BACKEND_PASS=y
++# CONFIG_XEN_PCIDEV_BACKEND_SLOT is not set
++# CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER is not set
++# CONFIG_XEN_PCIDEV_BE_DEBUG is not set
++# CONFIG_XEN_TPMDEV_BACKEND is not set
++# CONFIG_XEN_SCSI_BACKEND is not set
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++CONFIG_XEN_SCSI_FRONTEND=m
++CONFIG_XEN_GRANT_DEV=y
++# CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND is not set
++CONFIG_XEN_SCRUB_PAGES=y
++CONFIG_XEN_DISABLE_SERIAL=y
++CONFIG_XEN_SYSFS=y
++CONFIG_XEN_COMPAT_030002_AND_LATER=y
++# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
++# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
++# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
++CONFIG_XEN_COMPAT=0x030002
++CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
++CONFIG_NO_IDLE_HZ=y
++CONFIG_XEN_SMPBOOT=y
++CONFIG_XEN_BALLOON=y
++CONFIG_XEN_DEVMEM=y
++
++#
++# Library routines
++#
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++CONFIG_CRC32=y
++CONFIG_LIBCRC32C=y
++CONFIG_ZLIB_INFLATE=y
++CONFIG_PLIST=y
+diff -r d894e36cfc30 -r 0aa021803deb buildconfigs/linux-defconfig_xenU_ia64
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/buildconfigs/linux-defconfig_xenU_ia64	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1511 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.18.8
++# Tue Feb 19 11:20:14 2008
++#
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++# CONFIG_POSIX_MQUEUE is not set
++CONFIG_BSD_PROCESS_ACCT=y
++# CONFIG_BSD_PROCESS_ACCT_V3 is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++# CONFIG_CPUSETS is not set
++# CONFIG_RELAY is not set
++CONFIG_INITRAMFS_SOURCE=""
++CONFIG_CC_OPTIMIZE_FOR_SIZE=y
++# CONFIG_EMBEDDED is not set
++CONFIG_SYSCTL=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_SLAB=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_SLOB is not set
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++# CONFIG_MODULE_UNLOAD is not set
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++# CONFIG_KMOD is not set
++CONFIG_STOP_MACHINE=y
++
++#
++# Block layer
++#
++# CONFIG_BLK_DEV_IO_TRACE is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Processor type and features
++#
++CONFIG_IA64=y
++CONFIG_64BIT=y
++CONFIG_MMU=y
++CONFIG_SWIOTLB=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_TIME_INTERPOLATION=y
++CONFIG_DMI=y
++CONFIG_EFI=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_XEN=y
++CONFIG_XEN_IA64_EXPOSE_P2M=y
++CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_DMA_IS_DMA32=y
++CONFIG_AUDIT_ARCH=y
++# CONFIG_IA64_GENERIC is not set
++# CONFIG_IA64_DIG is not set
++# CONFIG_IA64_HP_ZX1 is not set
++# CONFIG_IA64_HP_ZX1_SWIOTLB is not set
++# CONFIG_IA64_SGI_SN2 is not set
++# CONFIG_IA64_HP_SIM is not set
++CONFIG_IA64_XEN=y
++# CONFIG_ITANIUM is not set
++CONFIG_MCKINLEY=y
++# CONFIG_IA64_PAGE_SIZE_4KB is not set
++# CONFIG_IA64_PAGE_SIZE_8KB is not set
++CONFIG_IA64_PAGE_SIZE_16KB=y
++# CONFIG_IA64_PAGE_SIZE_64KB is not set
++CONFIG_PGTABLE_3=y
++# CONFIG_PGTABLE_4 is not set
++CONFIG_HZ_100=y
++# CONFIG_HZ_250 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=100
++CONFIG_IA64_L1_CACHE_SHIFT=7
++# CONFIG_IA64_CYCLONE is not set
++CONFIG_IOSAPIC=y
++CONFIG_FORCE_MAX_ZONEORDER=11
++CONFIG_SMP=y
++CONFIG_NR_CPUS=16
++CONFIG_HOTPLUG_CPU=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++# CONFIG_SCHED_SMT is not set
++# CONFIG_PERMIT_BSP_REMOVE is not set
++# CONFIG_PREEMPT is not set
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_RESOURCES_64BIT=y
++CONFIG_ARCH_SELECT_MEMORY_MODEL=y
++CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_SPARSEMEM_ENABLE=y
++CONFIG_VIRTUAL_MEM_MAP=y
++CONFIG_HOLES_IN_ZONE=y
++# CONFIG_IA32_SUPPORT is not set
++CONFIG_IA64_MCA_RECOVERY=y
++CONFIG_PERFMON=y
++CONFIG_IA64_PALINFO=y
++# CONFIG_KEXEC is not set
++# CONFIG_CRASH_DUMP is not set
++
++#
++# Firmware Drivers
++#
++CONFIG_EFI_VARS=y
++CONFIG_EFI_PCDP=y
++CONFIG_BINFMT_ELF=y
++CONFIG_BINFMT_MISC=y
++
++#
++# Power management and ACPI
++#
++CONFIG_PM=y
++CONFIG_PM_LEGACY=y
++# CONFIG_PM_DEBUG is not set
++
++#
++# ACPI (Advanced Configuration and Power Interface) Support
++#
++CONFIG_ACPI=y
++CONFIG_ACPI_BUTTON=y
++CONFIG_ACPI_FAN=y
++# CONFIG_ACPI_DOCK is not set
++CONFIG_ACPI_PROCESSOR=y
++CONFIG_ACPI_HOTPLUG_CPU=y
++CONFIG_ACPI_THERMAL=y
++CONFIG_ACPI_BLACKLIST_YEAR=0
++# CONFIG_ACPI_DEBUG is not set
++CONFIG_ACPI_EC=y
++CONFIG_ACPI_POWER=y
++CONFIG_ACPI_SYSTEM=y
++CONFIG_ACPI_CONTAINER=y
++
++#
++# CPU Frequency scaling
++#
++# CONFIG_CPU_FREQ is not set
++
++#
++# Bus options (PCI, PCMCIA)
++#
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_XEN_PCIDEV_FRONTEND=y
++# CONFIG_XEN_PCIDEV_FE_DEBUG is not set
++# CONFIG_PCIEPORTBUS is not set
++# CONFIG_PCI_DEBUG is not set
++
++#
++# PCI Hotplug Support
++#
++CONFIG_HOTPLUG_PCI=y
++# CONFIG_HOTPLUG_PCI_FAKE is not set
++CONFIG_HOTPLUG_PCI_ACPI=y
++# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set
++# CONFIG_HOTPLUG_PCI_CPCI is not set
++# CONFIG_HOTPLUG_PCI_SHPC is not set
++
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++# CONFIG_PCCARD is not set
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++# CONFIG_NETDEBUG is not set
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++# CONFIG_IP_PNP_BOOTP is not set
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_IP_MROUTE is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_BIC=y
++
++#
++# IP: Virtual Server Configuration
++#
++# CONFIG_IP_VS is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++
++#
++# Core Netfilter Configuration
++#
++# CONFIG_NETFILTER_NETLINK is not set
++# CONFIG_NF_CONNTRACK is not set
++# CONFIG_NETFILTER_XTABLES is not set
++
++#
++# IP: Netfilter Configuration
++#
++# CONFIG_IP_NF_CONNTRACK is not set
++# CONFIG_IP_NF_QUEUE is not set
++
++#
++# DCCP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_DCCP is not set
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_SCTP is not set
++
++#
++# TIPC Configuration (EXPERIMENTAL)
++#
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++
++#
++# QoS and/or fair queueing
++#
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_IEEE80211 is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_DEBUG_DRIVER is not set
++CONFIG_SYS_HYPERVISOR=y
++
++#
++# Connector - unified userspace <-> kernelspace linker
++#
++# CONFIG_CONNECTOR is not set
++
++#
++# Memory Technology Devices (MTD)
++#
++# CONFIG_MTD is not set
++
++#
++# Parallel port support
++#
++# CONFIG_PARPORT is not set
++
++#
++# Plug and Play support
++#
++# CONFIG_PNP is not set
++
++#
++# Block devices
++#
++# CONFIG_BLK_CPQ_DA is not set
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=4096
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++CONFIG_BLK_DEV_INITRD=y
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++CONFIG_CHR_DEV_ST=y
++CONFIG_CHR_DEV_OSST=y
++CONFIG_BLK_DEV_SR=y
++CONFIG_BLK_DEV_SR_VENDOR=y
++CONFIG_CHR_DEV_SG=y
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++
++#
++# SCSI Transport Attributes
++#
++CONFIG_SCSI_SPI_ATTRS=y
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++CONFIG_SCSI_SAS_ATTRS=y
++
++#
++# SCSI low-level drivers
++#
++# CONFIG_ISCSI_TCP is not set
++# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
++# CONFIG_SCSI_3W_9XXX is not set
++# CONFIG_SCSI_ACARD is not set
++# CONFIG_SCSI_AACRAID is not set
++# CONFIG_SCSI_AIC7XXX is not set
++# CONFIG_SCSI_AIC7XXX_OLD is not set
++# CONFIG_SCSI_AIC79XX is not set
++# CONFIG_MEGARAID_NEWGEN is not set
++# CONFIG_MEGARAID_LEGACY is not set
++# CONFIG_MEGARAID_SAS is not set
++# CONFIG_SCSI_SATA is not set
++# CONFIG_SCSI_HPTIOP is not set
++# CONFIG_SCSI_DMX3191D is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++# CONFIG_SCSI_IPS is not set
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++CONFIG_SCSI_SYM53C8XX_2=y
++CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
++CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
++CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
++CONFIG_SCSI_SYM53C8XX_MMIO=y
++# CONFIG_SCSI_IPR is not set
++CONFIG_SCSI_QLOGIC_1280=y
++# CONFIG_SCSI_QLA_FC is not set
++# CONFIG_SCSI_LPFC is not set
++# CONFIG_SCSI_DC395x is not set
++# CONFIG_SCSI_DC390T is not set
++# CONFIG_SCSI_DEBUG is not set
++
++#
++# Multi-device support (RAID and LVM)
++#
++# CONFIG_MD is not set
++
++#
++# Fusion MPT device support
++#
++CONFIG_FUSION=y
++CONFIG_FUSION_SPI=y
++# CONFIG_FUSION_FC is not set
++CONFIG_FUSION_SAS=y
++CONFIG_FUSION_MAX_SGE=128
++# CONFIG_FUSION_CTL is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_IEEE1394 is not set
++
++#
++# I2O device support
++#
++# CONFIG_I2O is not set
++
++#
++# Network device support
++#
++CONFIG_NETDEVICES=y
++CONFIG_DUMMY=y
++# CONFIG_BONDING is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++
++#
++# ARCnet devices
++#
++# CONFIG_ARCNET is not set
++
++#
++# PHY device support
++#
++# CONFIG_PHYLIB is not set
++
++#
++# Ethernet (10 or 100Mbit)
++#
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
++
++#
++# Tulip family network device support
++#
++CONFIG_NET_TULIP=y
++# CONFIG_DE2104X is not set
++CONFIG_TULIP=y
++CONFIG_TULIP_MWI=y
++CONFIG_TULIP_MMIO=y
++CONFIG_TULIP_NAPI=y
++CONFIG_TULIP_NAPI_HW_MITIGATION=y
++# CONFIG_DE4X5 is not set
++# CONFIG_WINBOND_840 is not set
++# CONFIG_DM9102 is not set
++# CONFIG_ULI526X is not set
++# CONFIG_HP100 is not set
++CONFIG_NET_PCI=y
++# CONFIG_PCNET32 is not set
++# CONFIG_AMD8111_ETH is not set
++# CONFIG_ADAPTEC_STARFIRE is not set
++# CONFIG_B44 is not set
++# CONFIG_FORCEDETH is not set
++# CONFIG_DGRS is not set
++# CONFIG_EEPRO100 is not set
++CONFIG_E100=y
++# CONFIG_FEALNX is not set
++# CONFIG_NATSEMI is not set
++# CONFIG_NE2K_PCI is not set
++# CONFIG_8139CP is not set
++# CONFIG_8139TOO is not set
++# CONFIG_SIS900 is not set
++# CONFIG_EPIC100 is not set
++# CONFIG_SUNDANCE is not set
++# CONFIG_VIA_RHINE is not set
++
++#
++# Ethernet (1000 Mbit)
++#
++# CONFIG_ACENIC is not set
++# CONFIG_DL2K is not set
++CONFIG_E1000=y
++# CONFIG_E1000_NAPI is not set
++# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++# CONFIG_SK98LIN is not set
++# CONFIG_VIA_VELOCITY is not set
++CONFIG_TIGON3=y
++# CONFIG_BNX2 is not set
++
++#
++# Ethernet (10000 Mbit)
++#
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++# CONFIG_SFC is not set
++
++#
++# Token Ring devices
++#
++# CONFIG_TR is not set
++
++#
++# Wireless LAN (non-hamradio)
++#
++# CONFIG_NET_RADIO is not set
++
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_NET_FC is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++
++#
++# ISDN subsystem
++#
++# CONFIG_ISDN is not set
++
++#
++# Telephony Support
++#
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++CONFIG_INPUT_JOYDEV=y
++# CONFIG_INPUT_TSDEV is not set
++CONFIG_INPUT_EVDEV=y
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++# CONFIG_INPUT_KEYBOARD is not set
++# CONFIG_INPUT_MOUSE is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++# CONFIG_SERIO_I8042 is not set
++# CONFIG_SERIO_SERPORT is not set
++# CONFIG_SERIO_PCIPS2 is not set
++# CONFIG_SERIO_RAW is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++# CONFIG_VT_HW_CONSOLE_BINDING is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_PCI=y
++CONFIG_SERIAL_8250_NR_UARTS=8
++CONFIG_SERIAL_8250_RUNTIME_UARTS=4
++CONFIG_SERIAL_8250_EXTENDED=y
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++# CONFIG_SERIAL_8250_DETECT_IRQ is not set
++# CONFIG_SERIAL_8250_RSA is not set
++
++#
++# Non-8250 serial port support
++#
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_JSM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++
++#
++# IPMI
++#
++# CONFIG_IPMI_HANDLER is not set
++
++#
++# Watchdog Cards
++#
++# CONFIG_WATCHDOG is not set
++# CONFIG_HW_RANDOM is not set
++CONFIG_EFI_RTC=y
++# CONFIG_DTLK is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++
++#
++# Ftape, the floppy tape device driver
++#
++CONFIG_AGP=y
++# CONFIG_AGP_SIS is not set
++# CONFIG_AGP_VIA is not set
++CONFIG_DRM=y
++# CONFIG_DRM_TDFX is not set
++# CONFIG_DRM_R128 is not set
++CONFIG_DRM_RADEON=y
++# CONFIG_DRM_MGA is not set
++# CONFIG_DRM_SIS is not set
++# CONFIG_DRM_VIA is not set
++# CONFIG_DRM_SAVAGE is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_HPET is not set
++# CONFIG_HANGCHECK_TIMER is not set
++
++#
++# TPM devices
++#
++# CONFIG_TCG_TPM is not set
++# CONFIG_TELCLOCK is not set
++
++#
++# I2C support
++#
++CONFIG_I2C=y
++CONFIG_I2C_CHARDEV=y
++
++#
++# I2C Algorithms
++#
++CONFIG_I2C_ALGOBIT=y
++CONFIG_I2C_ALGOPCF=y
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++# CONFIG_I2C_AMD8111 is not set
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_I810 is not set
++# CONFIG_I2C_PIIX4 is not set
++# CONFIG_I2C_NFORCE2 is not set
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_PROSAVAGE is not set
++# CONFIG_I2C_SAVAGE4 is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++# CONFIG_I2C_STUB is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++# CONFIG_I2C_VOODOO3 is not set
++# CONFIG_I2C_PCA_ISA is not set
++
++#
++# Miscellaneous I2C Chip support
++#
++# CONFIG_SENSORS_DS1337 is not set
++# CONFIG_SENSORS_DS1374 is not set
++# CONFIG_SENSORS_EEPROM is not set
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCA9539 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_MAX6875 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++
++#
++# Dallas's 1-wire bus
++#
++
++#
++# Hardware Monitoring support
++#
++CONFIG_HWMON=y
++# CONFIG_HWMON_VID is not set
++# CONFIG_SENSORS_ABITUGURU is not set
++# CONFIG_SENSORS_ADM1021 is not set
++# CONFIG_SENSORS_ADM1025 is not set
++# CONFIG_SENSORS_ADM1026 is not set
++# CONFIG_SENSORS_ADM1031 is not set
++# CONFIG_SENSORS_ADM9240 is not set
++# CONFIG_SENSORS_ASB100 is not set
++# CONFIG_SENSORS_ATXP1 is not set
++# CONFIG_SENSORS_DS1621 is not set
++# CONFIG_SENSORS_F71805F is not set
++# CONFIG_SENSORS_FSCHER is not set
++# CONFIG_SENSORS_FSCPOS is not set
++# CONFIG_SENSORS_GL518SM is not set
++# CONFIG_SENSORS_GL520SM is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_LM63 is not set
++# CONFIG_SENSORS_LM75 is not set
++# CONFIG_SENSORS_LM77 is not set
++# CONFIG_SENSORS_LM78 is not set
++# CONFIG_SENSORS_LM80 is not set
++# CONFIG_SENSORS_LM83 is not set
++# CONFIG_SENSORS_LM85 is not set
++# CONFIG_SENSORS_LM87 is not set
++# CONFIG_SENSORS_LM90 is not set
++# CONFIG_SENSORS_LM92 is not set
++# CONFIG_SENSORS_MAX1619 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_SIS5595 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_SMSC47M192 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_VIA686A is not set
++# CONFIG_SENSORS_VT8231 is not set
++# CONFIG_SENSORS_W83781D is not set
++# CONFIG_SENSORS_W83791D is not set
++# CONFIG_SENSORS_W83792D is not set
++# CONFIG_SENSORS_W83L785TS is not set
++# CONFIG_SENSORS_W83627HF is not set
++# CONFIG_SENSORS_W83627EHF is not set
++# CONFIG_HWMON_DEBUG_CHIP is not set
++
++#
++# Misc devices
++#
++
++#
++# Multimedia devices
++#
++CONFIG_VIDEO_DEV=y
++CONFIG_VIDEO_V4L1=y
++CONFIG_VIDEO_V4L1_COMPAT=y
++CONFIG_VIDEO_V4L2=y
++
++#
++# Video Capture Adapters
++#
++
++#
++# Video Capture Adapters
++#
++# CONFIG_VIDEO_ADV_DEBUG is not set
++# CONFIG_VIDEO_VIVI is not set
++# CONFIG_VIDEO_BT848 is not set
++# CONFIG_VIDEO_CPIA is not set
++# CONFIG_VIDEO_CPIA2 is not set
++# CONFIG_VIDEO_SAA5246A is not set
++# CONFIG_VIDEO_SAA5249 is not set
++# CONFIG_TUNER_3036 is not set
++# CONFIG_VIDEO_STRADIS is not set
++# CONFIG_VIDEO_ZORAN is not set
++# CONFIG_VIDEO_SAA7134 is not set
++# CONFIG_VIDEO_MXB is not set
++# CONFIG_VIDEO_DPC is not set
++# CONFIG_VIDEO_HEXIUM_ORION is not set
++# CONFIG_VIDEO_HEXIUM_GEMINI is not set
++# CONFIG_VIDEO_CX88 is not set
++
++#
++# Encoders and Decoders
++#
++# CONFIG_VIDEO_MSP3400 is not set
++# CONFIG_VIDEO_CS53L32A is not set
++# CONFIG_VIDEO_TLV320AIC23B is not set
++# CONFIG_VIDEO_WM8775 is not set
++# CONFIG_VIDEO_WM8739 is not set
++# CONFIG_VIDEO_CX2341X is not set
++# CONFIG_VIDEO_CX25840 is not set
++# CONFIG_VIDEO_SAA711X is not set
++# CONFIG_VIDEO_SAA7127 is not set
++# CONFIG_VIDEO_UPD64031A is not set
++# CONFIG_VIDEO_UPD64083 is not set
++
++#
++# V4L USB devices
++#
++# CONFIG_VIDEO_PVRUSB2 is not set
++# CONFIG_VIDEO_EM28XX is not set
++# CONFIG_USB_VICAM is not set
++# CONFIG_USB_IBMCAM is not set
++# CONFIG_USB_KONICAWC is not set
++# CONFIG_USB_QUICKCAM_MESSENGER is not set
++# CONFIG_USB_ET61X251 is not set
++# CONFIG_VIDEO_OVCAMCHIP is not set
++# CONFIG_USB_W9968CF is not set
++# CONFIG_USB_OV511 is not set
++# CONFIG_USB_SE401 is not set
++# CONFIG_USB_SN9C102 is not set
++# CONFIG_USB_STV680 is not set
++# CONFIG_USB_ZC0301 is not set
++# CONFIG_USB_PWC is not set
++
++#
++# Radio Adapters
++#
++# CONFIG_RADIO_GEMTEK_PCI is not set
++# CONFIG_RADIO_MAXIRADIO is not set
++# CONFIG_RADIO_MAESTRO is not set
++# CONFIG_USB_DSBR is not set
++
++#
++# Digital Video Broadcasting Devices
++#
++# CONFIG_DVB is not set
++# CONFIG_USB_DABUSB is not set
++
++#
++# Graphics support
++#
++CONFIG_FIRMWARE_EDID=y
++CONFIG_FB=y
++CONFIG_FB_CFB_FILLRECT=y
++CONFIG_FB_CFB_COPYAREA=y
++CONFIG_FB_CFB_IMAGEBLIT=y
++# CONFIG_FB_MACMODES is not set
++# CONFIG_FB_BACKLIGHT is not set
++CONFIG_FB_MODE_HELPERS=y
++# CONFIG_FB_TILEBLITTING is not set
++# CONFIG_FB_CIRRUS is not set
++# CONFIG_FB_PM2 is not set
++# CONFIG_FB_CYBER2000 is not set
++# CONFIG_FB_ASILIANT is not set
++# CONFIG_FB_IMSTT is not set
++# CONFIG_FB_S1D13XXX is not set
++# CONFIG_FB_NVIDIA is not set
++# CONFIG_FB_RIVA is not set
++# CONFIG_FB_MATROX is not set
++# CONFIG_FB_RADEON is not set
++# CONFIG_FB_ATY128 is not set
++# CONFIG_FB_ATY is not set
++# CONFIG_FB_SAVAGE is not set
++# CONFIG_FB_SIS is not set
++# CONFIG_FB_NEOMAGIC is not set
++# CONFIG_FB_KYRO is not set
++# CONFIG_FB_3DFX is not set
++# CONFIG_FB_VOODOO1 is not set
++# CONFIG_FB_TRIDENT is not set
++# CONFIG_FB_VIRTUAL is not set
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++# CONFIG_VGACON_SOFT_SCROLLBACK is not set
++CONFIG_DUMMY_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
++# CONFIG_FONTS is not set
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++
++#
++# Logo configuration
++#
++CONFIG_LOGO=y
++# CONFIG_LOGO_LINUX_MONO is not set
++# CONFIG_LOGO_LINUX_VGA16 is not set
++CONFIG_LOGO_LINUX_CLUT224=y
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Sound
++#
++CONFIG_SOUND=y
++
++#
++# Advanced Linux Sound Architecture
++#
++CONFIG_SND=y
++CONFIG_SND_TIMER=y
++CONFIG_SND_PCM=y
++CONFIG_SND_HWDEP=y
++CONFIG_SND_RAWMIDI=y
++CONFIG_SND_SEQUENCER=y
++# CONFIG_SND_SEQ_DUMMY is not set
++CONFIG_SND_OSSEMUL=y
++CONFIG_SND_MIXER_OSS=y
++CONFIG_SND_PCM_OSS=y
++CONFIG_SND_PCM_OSS_PLUGINS=y
++CONFIG_SND_SEQUENCER_OSS=y
++# CONFIG_SND_DYNAMIC_MINORS is not set
++CONFIG_SND_SUPPORT_OLD_API=y
++CONFIG_SND_VERBOSE_PROCFS=y
++# CONFIG_SND_VERBOSE_PRINTK is not set
++# CONFIG_SND_DEBUG is not set
++
++#
++# Generic devices
++#
++CONFIG_SND_MPU401_UART=y
++CONFIG_SND_OPL3_LIB=y
++CONFIG_SND_AC97_CODEC=y
++CONFIG_SND_AC97_BUS=y
++# CONFIG_SND_DUMMY is not set
++# CONFIG_SND_VIRMIDI is not set
++# CONFIG_SND_MTPAV is not set
++# CONFIG_SND_SERIAL_U16550 is not set
++# CONFIG_SND_MPU401 is not set
++
++#
++# PCI devices
++#
++# CONFIG_SND_AD1889 is not set
++# CONFIG_SND_ALS300 is not set
++# CONFIG_SND_ALI5451 is not set
++# CONFIG_SND_ATIIXP is not set
++# CONFIG_SND_ATIIXP_MODEM is not set
++# CONFIG_SND_AU8810 is not set
++# CONFIG_SND_AU8820 is not set
++# CONFIG_SND_AU8830 is not set
++# CONFIG_SND_AZT3328 is not set
++# CONFIG_SND_BT87X is not set
++# CONFIG_SND_CA0106 is not set
++# CONFIG_SND_CMIPCI is not set
++# CONFIG_SND_CS4281 is not set
++# CONFIG_SND_CS46XX is not set
++# CONFIG_SND_EMU10K1 is not set
++# CONFIG_SND_EMU10K1X is not set
++# CONFIG_SND_ENS1370 is not set
++# CONFIG_SND_ENS1371 is not set
++# CONFIG_SND_ES1938 is not set
++# CONFIG_SND_ES1968 is not set
++CONFIG_SND_FM801=y
++# CONFIG_SND_FM801_TEA575X_BOOL is not set
++# CONFIG_SND_HDA_INTEL is not set
++# CONFIG_SND_HDSP is not set
++# CONFIG_SND_HDSPM is not set
++# CONFIG_SND_ICE1712 is not set
++# CONFIG_SND_ICE1724 is not set
++# CONFIG_SND_INTEL8X0 is not set
++# CONFIG_SND_INTEL8X0M is not set
++# CONFIG_SND_KORG1212 is not set
++# CONFIG_SND_MAESTRO3 is not set
++# CONFIG_SND_MIXART is not set
++# CONFIG_SND_NM256 is not set
++# CONFIG_SND_PCXHR is not set
++# CONFIG_SND_RME32 is not set
++# CONFIG_SND_RME96 is not set
++# CONFIG_SND_RME9652 is not set
++# CONFIG_SND_SONICVIBES is not set
++# CONFIG_SND_TRIDENT is not set
++# CONFIG_SND_VIA82XX is not set
++# CONFIG_SND_VIA82XX_MODEM is not set
++# CONFIG_SND_VX222 is not set
++# CONFIG_SND_YMFPCI is not set
++
++#
++# USB devices
++#
++# CONFIG_SND_USB_AUDIO is not set
++
++#
++# Open Sound System
++#
++# CONFIG_SOUND_PRIME is not set
++
++#
++# USB support
++#
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++CONFIG_USB=y
++# CONFIG_USB_DEBUG is not set
++
++#
++# Miscellaneous USB options
++#
++# CONFIG_USB_DEVICEFS is not set
++CONFIG_USB_BANDWIDTH=y
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_SUSPEND is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++CONFIG_USB_EHCI_HCD=y
++# CONFIG_USB_EHCI_SPLIT_ISO is not set
++# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
++# CONFIG_USB_EHCI_TT_NEWSCHED is not set
++# CONFIG_USB_ISP116X_HCD is not set
++CONFIG_USB_OHCI_HCD=y
++# CONFIG_USB_OHCI_BIG_ENDIAN is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_UHCI_HCD=y
++# CONFIG_USB_SL811_HCD is not set
++
++#
++# USB Device Class drivers
++#
++# CONFIG_USB_ACM is not set
++# CONFIG_USB_PRINTER is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# may also be needed; see USB_STORAGE Help for more information
++#
++CONFIG_USB_STORAGE=y
++# CONFIG_USB_STORAGE_DEBUG is not set
++# CONFIG_USB_STORAGE_DATAFAB is not set
++# CONFIG_USB_STORAGE_FREECOM is not set
++# CONFIG_USB_STORAGE_DPCM is not set
++# CONFIG_USB_STORAGE_USBAT is not set
++# CONFIG_USB_STORAGE_SDDR09 is not set
++# CONFIG_USB_STORAGE_SDDR55 is not set
++# CONFIG_USB_STORAGE_JUMPSHOT is not set
++# CONFIG_USB_STORAGE_ALAUDA is not set
++# CONFIG_USB_LIBUSUAL is not set
++
++#
++# USB Input Devices
++#
++CONFIG_USB_HID=y
++CONFIG_USB_HIDINPUT=y
++# CONFIG_USB_HIDINPUT_POWERBOOK is not set
++# CONFIG_HID_FF is not set
++CONFIG_USB_HIDDEV=y
++# CONFIG_USB_AIPTEK is not set
++# CONFIG_USB_WACOM is not set
++# CONFIG_USB_ACECAD is not set
++# CONFIG_USB_KBTAB is not set
++# CONFIG_USB_POWERMATE is not set
++# CONFIG_USB_TOUCHSCREEN is not set
++# CONFIG_USB_YEALINK is not set
++# CONFIG_USB_XPAD is not set
++# CONFIG_USB_ATI_REMOTE is not set
++# CONFIG_USB_ATI_REMOTE2 is not set
++# CONFIG_USB_KEYSPAN_REMOTE is not set
++# CONFIG_USB_APPLETOUCH is not set
++
++#
++# USB Imaging devices
++#
++# CONFIG_USB_MDC800 is not set
++# CONFIG_USB_MICROTEK is not set
++
++#
++# USB Network Adapters
++#
++# CONFIG_USB_CATC is not set
++# CONFIG_USB_KAWETH is not set
++# CONFIG_USB_PEGASUS is not set
++# CONFIG_USB_RTL8150 is not set
++# CONFIG_USB_USBNET is not set
++CONFIG_USB_MON=y
++
++#
++# USB port drivers
++#
++
++#
++# USB Serial Converter support
++#
++# CONFIG_USB_SERIAL is not set
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++# CONFIG_USB_AUERSWALD is not set
++# CONFIG_USB_RIO500 is not set
++# CONFIG_USB_LEGOTOWER is not set
++# CONFIG_USB_LCD is not set
++# CONFIG_USB_LED is not set
++# CONFIG_USB_CYPRESS_CY7C63 is not set
++# CONFIG_USB_CYTHERM is not set
++# CONFIG_USB_PHIDGETKIT is not set
++# CONFIG_USB_PHIDGETSERVO is not set
++# CONFIG_USB_IDMOUSE is not set
++# CONFIG_USB_APPLEDISPLAY is not set
++# CONFIG_USB_SISUSBVGA is not set
++# CONFIG_USB_LD is not set
++
++#
++# USB DSL modem support
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++
++#
++# MMC/SD Card support
++#
++# CONFIG_MMC is not set
++
++#
++# LED devices
++#
++# CONFIG_NEW_LEDS is not set
++
++#
++# LED drivers
++#
++
++#
++# LED Triggers
++#
++
++#
++# InfiniBand support
++#
++# CONFIG_INFINIBAND is not set
++
++#
++# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
++#
++
++#
++# Real Time Clock
++#
++# CONFIG_RTC_CLASS is not set
++
++#
++# DMA Engine support
++#
++# CONFIG_DMA_ENGINE is not set
++
++#
++# DMA Clients
++#
++
++#
++# DMA Devices
++#
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++# CONFIG_EXT2_FS_POSIX_ACL is not set
++# CONFIG_EXT2_FS_SECURITY is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++CONFIG_JBD=y
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=y
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++CONFIG_JOLIET=y
++# CONFIG_ZISOFS is not set
++CONFIG_UDF_FS=y
++CONFIG_UDF_NLS=y
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=y
++CONFIG_MSDOS_FS=y
++CONFIG_VFAT_FS=y
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++# CONFIG_CRAMFS is not set
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++CONFIG_NFS_V4=y
++# CONFIG_NFS_DIRECTIO is not set
++CONFIG_NFSD=y
++CONFIG_NFSD_V3=y
++# CONFIG_NFSD_V3_ACL is not set
++# CONFIG_NFSD_V4 is not set
++# CONFIG_NFSD_TCP is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++CONFIG_SUNRPC_GSS=y
++CONFIG_RPCSEC_GSS_KRB5=y
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++# CONFIG_9P_FS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++# CONFIG_MAC_PARTITION is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_BSD_DISKLABEL is not set
++# CONFIG_MINIX_SUBPARTITION is not set
++# CONFIG_SOLARIS_X86_PARTITION is not set
++# CONFIG_UNIXWARE_DISKLABEL is not set
++# CONFIG_LDM_PARTITION is not set
++# CONFIG_SGI_PARTITION is not set
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++CONFIG_EFI_PARTITION=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++CONFIG_NLS_CODEPAGE_737=y
++CONFIG_NLS_CODEPAGE_775=y
++CONFIG_NLS_CODEPAGE_850=y
++CONFIG_NLS_CODEPAGE_852=y
++CONFIG_NLS_CODEPAGE_855=y
++CONFIG_NLS_CODEPAGE_857=y
++CONFIG_NLS_CODEPAGE_860=y
++CONFIG_NLS_CODEPAGE_861=y
++CONFIG_NLS_CODEPAGE_862=y
++CONFIG_NLS_CODEPAGE_863=y
++CONFIG_NLS_CODEPAGE_864=y
++CONFIG_NLS_CODEPAGE_865=y
++CONFIG_NLS_CODEPAGE_866=y
++CONFIG_NLS_CODEPAGE_869=y
++CONFIG_NLS_CODEPAGE_936=y
++CONFIG_NLS_CODEPAGE_950=y
++CONFIG_NLS_CODEPAGE_932=y
++CONFIG_NLS_CODEPAGE_949=y
++CONFIG_NLS_CODEPAGE_874=y
++CONFIG_NLS_ISO8859_8=y
++# CONFIG_NLS_CODEPAGE_1250 is not set
++CONFIG_NLS_CODEPAGE_1251=y
++# CONFIG_NLS_ASCII is not set
++CONFIG_NLS_ISO8859_1=y
++CONFIG_NLS_ISO8859_2=y
++CONFIG_NLS_ISO8859_3=y
++CONFIG_NLS_ISO8859_4=y
++CONFIG_NLS_ISO8859_5=y
++CONFIG_NLS_ISO8859_6=y
++CONFIG_NLS_ISO8859_7=y
++CONFIG_NLS_ISO8859_9=y
++CONFIG_NLS_ISO8859_13=y
++CONFIG_NLS_ISO8859_14=y
++CONFIG_NLS_ISO8859_15=y
++CONFIG_NLS_KOI8_R=y
++CONFIG_NLS_KOI8_U=y
++CONFIG_NLS_UTF8=y
++
++#
++# Library routines
++#
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++CONFIG_CRC32=y
++# CONFIG_LIBCRC32C is not set
++CONFIG_PLIST=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_GENERIC_PENDING_IRQ=y
++CONFIG_IRQ_PER_CPU=y
++
++#
++# Instrumentation Support
++#
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_UNUSED_SYMBOLS=y
++CONFIG_DEBUG_KERNEL=y
++CONFIG_LOG_BUF_SHIFT=17
++CONFIG_DETECT_SOFTLOCKUP=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++CONFIG_DEBUG_MUTEXES=y
++# CONFIG_DEBUG_RWSEMS is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_DEBUG_VM is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_RCU_TORTURE_TEST is not set
++CONFIG_IA64_GRANULE_16MB=y
++# CONFIG_IA64_GRANULE_64MB is not set
++CONFIG_IA64_PRINT_HAZARDS=y
++# CONFIG_DISABLE_VHPT is not set
++# CONFIG_IA64_DEBUG_CMPXCHG is not set
++# CONFIG_IA64_DEBUG_IRQ is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_CRC32C is not set
++# CONFIG_CRYPTO_TEST is not set
++
++#
++# Hardware crypto devices
++#
++# CONFIG_XEN_SMPBOOT is not set
++# CONFIG_XEN_DEVMEM is not set
++CONFIG_XEN_INTERFACE_VERSION=0x00030207
++
++#
++# XEN
++#
++# CONFIG_XEN_PRIVILEGED_GUEST is not set
++CONFIG_XEN_UNPRIVILEGED_GUEST=y
++CONFIG_XEN_PRIVCMD=y
++CONFIG_XEN_XENBUS_DEV=y
++# CONFIG_XEN_BACKEND is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL is not set
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++CONFIG_XEN_SCSI_FRONTEND=m
++# CONFIG_XEN_GRANT_DEV is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND is not set
++CONFIG_XEN_FRAMEBUFFER=y
++CONFIG_XEN_KEYBOARD=y
++# CONFIG_XEN_SCRUB_PAGES is not set
++# CONFIG_XEN_DISABLE_SERIAL is not set
++CONFIG_XEN_SYSFS=y
++CONFIG_XEN_COMPAT_030002_AND_LATER=y
++# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
++# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
++# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
++CONFIG_XEN_COMPAT=0x030002
++CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
++CONFIG_NO_IDLE_HZ=y
++CONFIG_XEN_BALLOON=y
++CONFIG_XEN_XENCOMM=y
+diff -r d894e36cfc30 -r 0aa021803deb buildconfigs/linux-defconfig_xenU_x86_32
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/buildconfigs/linux-defconfig_xenU_x86_32	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,953 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.18.8
++# Tue Oct 16 09:31:29 2007
++#
++CONFIG_X86_32=y
++CONFIG_LOCKDEP_SUPPORT=y
++CONFIG_STACKTRACE_SUPPORT=y
++CONFIG_SEMAPHORE_SLEEPERS=y
++CONFIG_X86=y
++CONFIG_MMU=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_DMI=y
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++# CONFIG_LOCALVERSION_AUTO is not set
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++# CONFIG_CPUSETS is not set
++# CONFIG_RELAY is not set
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++# CONFIG_EMBEDDED is not set
++CONFIG_UID16=y
++CONFIG_SYSCTL=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_SLAB=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_SLOB is not set
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++CONFIG_STOP_MACHINE=y
++
++#
++# Block layer
++#
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++CONFIG_LSF=y
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Processor type and features
++#
++CONFIG_SMP=y
++# CONFIG_X86_PC is not set
++CONFIG_X86_XEN=y
++# CONFIG_X86_ELAN is not set
++# CONFIG_X86_VOYAGER is not set
++# CONFIG_X86_NUMAQ is not set
++# CONFIG_X86_SUMMIT is not set
++# CONFIG_X86_BIGSMP is not set
++# CONFIG_X86_VISWS is not set
++# CONFIG_X86_GENERICARCH is not set
++# CONFIG_X86_ES7000 is not set
++# CONFIG_M386 is not set
++# CONFIG_M486 is not set
++# CONFIG_M586 is not set
++# CONFIG_M586TSC is not set
++# CONFIG_M586MMX is not set
++CONFIG_M686=y
++# CONFIG_MPENTIUMII is not set
++# CONFIG_MPENTIUMIII is not set
++# CONFIG_MPENTIUMM is not set
++# CONFIG_MPENTIUM4 is not set
++# CONFIG_MK6 is not set
++# CONFIG_MK7 is not set
++# CONFIG_MK8 is not set
++# CONFIG_MCRUSOE is not set
++# CONFIG_MEFFICEON is not set
++# CONFIG_MWINCHIPC6 is not set
++# CONFIG_MWINCHIP2 is not set
++# CONFIG_MWINCHIP3D is not set
++# CONFIG_MGEODEGX1 is not set
++# CONFIG_MGEODE_LX is not set
++# CONFIG_MCYRIXIII is not set
++# CONFIG_MVIAC3_2 is not set
++# CONFIG_X86_GENERIC is not set
++CONFIG_X86_CMPXCHG=y
++CONFIG_X86_XADD=y
++CONFIG_X86_L1_CACHE_SHIFT=5
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_X86_PPRO_FENCE=y
++CONFIG_X86_WP_WORKS_OK=y
++CONFIG_X86_INVLPG=y
++CONFIG_X86_BSWAP=y
++CONFIG_X86_POPAD_OK=y
++CONFIG_X86_CMPXCHG64=y
++CONFIG_X86_GOOD_APIC=y
++CONFIG_X86_USE_PPRO_CHECKSUM=y
++CONFIG_NR_CPUS=8
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_PREEMPT_BKL=y
++CONFIG_VM86=y
++# CONFIG_TOSHIBA is not set
++# CONFIG_I8K is not set
++# CONFIG_X86_REBOOTFIXUPS is not set
++# CONFIG_X86_MSR is not set
++CONFIG_X86_CPUID=y
++CONFIG_SWIOTLB=y
++
++#
++# Firmware Drivers
++#
++# CONFIG_EDD is not set
++# CONFIG_DELL_RBU is not set
++# CONFIG_DCDBAS is not set
++# CONFIG_NOHIGHMEM is not set
++CONFIG_HIGHMEM4G=y
++# CONFIG_HIGHMEM64G is not set
++CONFIG_PAGE_OFFSET=0xC0000000
++CONFIG_HIGHMEM=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_RESOURCES_64BIT=y
++# CONFIG_HIGHPTE is not set
++# CONFIG_REGPARM is not set
++CONFIG_SECCOMP=y
++CONFIG_HZ_100=y
++# CONFIG_HZ_250 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=100
++# CONFIG_CRASH_DUMP is not set
++CONFIG_PHYSICAL_START=0x100000
++CONFIG_HOTPLUG_CPU=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++
++#
++# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
++#
++# CONFIG_PCI is not set
++CONFIG_ISA_DMA_API=y
++# CONFIG_SCx200 is not set
++
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++# CONFIG_PCCARD is not set
++
++#
++# PCI Hotplug Support
++#
++
++#
++# Executable file formats
++#
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_AOUT is not set
++# CONFIG_BINFMT_MISC is not set
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++# CONFIG_NETDEBUG is not set
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++# CONFIG_IP_MULTICAST is not set
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++# CONFIG_IP_PNP_DHCP is not set
++# CONFIG_IP_PNP_BOOTP is not set
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++# CONFIG_INET_DIAG is not set
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_BIC=y
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++
++#
++# DCCP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_DCCP is not set
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_SCTP is not set
++
++#
++# TIPC Configuration (EXPERIMENTAL)
++#
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++
++#
++# QoS and/or fair queueing
++#
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_IEEE80211 is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_DEBUG_DRIVER is not set
++CONFIG_SYS_HYPERVISOR=y
++
++#
++# Connector - unified userspace <-> kernelspace linker
++#
++# CONFIG_CONNECTOR is not set
++
++#
++# Memory Technology Devices (MTD)
++#
++# CONFIG_MTD is not set
++
++#
++# Parallel port support
++#
++# CONFIG_PARPORT is not set
++
++#
++# Plug and Play support
++#
++
++#
++# Block devices
++#
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=m
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++CONFIG_BLK_DEV_NBD=m
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=4096
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++CONFIG_BLK_DEV_INITRD=y
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=m
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=m
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++# CONFIG_BLK_DEV_SR is not set
++# CONFIG_CHR_DEV_SG is not set
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++# CONFIG_SCSI_MULTI_LUN is not set
++# CONFIG_SCSI_CONSTANTS is not set
++# CONFIG_SCSI_LOGGING is not set
++
++#
++# SCSI Transport Attributes
++#
++# CONFIG_SCSI_SPI_ATTRS is not set
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++# CONFIG_SCSI_SAS_ATTRS is not set
++
++#
++# SCSI low-level drivers
++#
++# CONFIG_ISCSI_TCP is not set
++# CONFIG_SCSI_SATA is not set
++# CONFIG_SCSI_DEBUG is not set
++
++#
++# Multi-device support (RAID and LVM)
++#
++# CONFIG_MD is not set
++
++#
++# Fusion MPT device support
++#
++# CONFIG_FUSION is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++
++#
++# I2O device support
++#
++
++#
++# Network device support
++#
++CONFIG_NETDEVICES=y
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++
++#
++# PHY device support
++#
++
++#
++# Ethernet (10 or 100Mbit)
++#
++# CONFIG_NET_ETHERNET is not set
++
++#
++# Ethernet (1000 Mbit)
++#
++
++#
++# Ethernet (10000 Mbit)
++#
++
++#
++# Token Ring devices
++#
++
++#
++# Wireless LAN (non-hamradio)
++#
++# CONFIG_NET_RADIO is not set
++
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++
++#
++# ISDN subsystem
++#
++# CONFIG_ISDN is not set
++
++#
++# Telephony Support
++#
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_TSDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++# CONFIG_KEYBOARD_ATKBD is not set
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++# CONFIG_INPUT_MOUSE is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_SERPORT=y
++# CONFIG_SERIO_CT82C710 is not set
++# CONFIG_SERIO_RAW is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++# CONFIG_VT_HW_CONSOLE_BINDING is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++
++#
++# Non-8250 serial port support
++#
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++
++#
++# IPMI
++#
++# CONFIG_IPMI_HANDLER is not set
++
++#
++# Watchdog Cards
++#
++# CONFIG_WATCHDOG is not set
++CONFIG_HW_RANDOM=y
++CONFIG_HW_RANDOM_VIA=y
++# CONFIG_NVRAM is not set
++# CONFIG_RTC is not set
++# CONFIG_GEN_RTC is not set
++# CONFIG_DTLK is not set
++# CONFIG_R3964 is not set
++
++#
++# Ftape, the floppy tape device driver
++#
++# CONFIG_MWAVE is not set
++# CONFIG_PC8736x_GPIO is not set
++# CONFIG_NSC_GPIO is not set
++# CONFIG_CS5535_GPIO is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_HANGCHECK_TIMER is not set
++
++#
++# TPM devices
++#
++# CONFIG_TCG_TPM is not set
++# CONFIG_TELCLOCK is not set
++
++#
++# I2C support
++#
++# CONFIG_I2C is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++
++#
++# Dallas's 1-wire bus
++#
++
++#
++# Hardware Monitoring support
++#
++# CONFIG_HWMON is not set
++# CONFIG_HWMON_VID is not set
++
++#
++# Misc devices
++#
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++
++#
++# Digital Video Broadcasting Devices
++#
++# CONFIG_DVB is not set
++
++#
++# Graphics support
++#
++CONFIG_FIRMWARE_EDID=y
++# CONFIG_FB is not set
 +
-+ifdef CONFIG_XEN
-+AFLAGS_vsyscall-int80.o = -m32 -Wa,-32 -Iarch/i386/kernel
-+CFLAGS_syscall32-xen.o += -DUSE_INT80
-+AFLAGS_syscall32_syscall-xen.o += -DUSE_INT80
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++# CONFIG_VGACON_SOFT_SCROLLBACK is not set
++CONFIG_DUMMY_CONSOLE=y
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
 +
-+$(obj)/syscall32_syscall-xen.o: \
-+	$(foreach F,int80 sysenter syscall,$(obj)/vsyscall-$F.so)
++#
++# Sound
++#
++# CONFIG_SOUND is not set
 +
-+targets := $(foreach F,int80 sysenter syscall,vsyscall-$F.o vsyscall-$F.so)
++#
++# USB support
++#
++# CONFIG_USB_ARCH_HAS_HCD is not set
++# CONFIG_USB_ARCH_HAS_OHCI is not set
++# CONFIG_USB_ARCH_HAS_EHCI is not set
 +
-+include $(srctree)/scripts/Makefile.xen
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
 +
-+obj-y := $(call cherrypickxen, $(obj-y))
-+endif
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/ia32/syscall32_syscall-xen.S tmp-linux-2.6-xen.patch/arch/x86_64/ia32/syscall32_syscall-xen.S
---- pristine-linux-2.6.18.2/arch/x86_64/ia32/syscall32_syscall-xen.S	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/ia32/syscall32_syscall-xen.S	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,28 @@
-+/* 32bit VDSOs mapped into user space. */
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
 +
-+	.section ".init.data","aw"
++#
++# MMC/SD Card support
++#
++# CONFIG_MMC is not set
 +
-+#ifdef USE_INT80
++#
++# LED devices
++#
++# CONFIG_NEW_LEDS is not set
 +
-+	.globl syscall32_int80
-+	.globl syscall32_int80_end
++#
++# LED drivers
++#
 +
-+syscall32_int80:
-+	.incbin "arch/x86_64/ia32/vsyscall-int80.so"
-+syscall32_int80_end:
++#
++# LED Triggers
++#
 +
-+#endif
++#
++# InfiniBand support
++#
 +
-+	.globl syscall32_syscall
-+	.globl syscall32_syscall_end
++#
++# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
++#
++# CONFIG_EDAC is not set
 +
-+syscall32_syscall:
-+	.incbin "arch/x86_64/ia32/vsyscall-syscall.so"
-+syscall32_syscall_end:
++#
++# Real Time Clock
++#
++# CONFIG_RTC_CLASS is not set
 +
-+	.globl syscall32_sysenter
-+	.globl syscall32_sysenter_end
++#
++# DMA Engine support
++#
++# CONFIG_DMA_ENGINE is not set
 +
-+syscall32_sysenter:
-+	.incbin "arch/x86_64/ia32/vsyscall-sysenter.so"
-+syscall32_sysenter_end:
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/ia32/syscall32-xen.c tmp-linux-2.6-xen.patch/arch/x86_64/ia32/syscall32-xen.c
---- pristine-linux-2.6.18.2/arch/x86_64/ia32/syscall32-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/ia32/syscall32-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,128 @@
-+/* Copyright 2002,2003 Andi Kleen, SuSE Labs */
++#
++# DMA Clients
++#
 +
-+/* vsyscall handling for 32bit processes. Map a stub page into it 
-+   on demand because 32bit cannot reach the kernel's fixmaps */
++#
++# DMA Devices
++#
 +
-+#include <linux/mm.h>
-+#include <linux/string.h>
-+#include <linux/kernel.h>
-+#include <linux/gfp.h>
-+#include <linux/init.h>
-+#include <linux/stringify.h>
-+#include <linux/security.h>
-+#include <asm/proto.h>
-+#include <asm/tlbflush.h>
-+#include <asm/ia32_unistd.h>
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++CONFIG_JBD=y
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=y
++# CONFIG_REISERFS_CHECK is not set
++# CONFIG_REISERFS_PROC_INFO is not set
++# CONFIG_REISERFS_FS_XATTR is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=y
++CONFIG_AUTOFS4_FS=y
++# CONFIG_FUSE_FS is not set
 +
-+#ifdef USE_INT80
-+extern unsigned char syscall32_int80[], syscall32_int80_end[];
-+#endif
-+extern unsigned char syscall32_syscall[], syscall32_syscall_end[];
-+extern unsigned char syscall32_sysenter[], syscall32_sysenter_end[];
-+extern int sysctl_vsyscall32;
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_ZISOFS_FS=y
++# CONFIG_UDF_FS is not set
 +
-+char *syscall32_page; 
-+#ifndef USE_INT80
-+static int use_sysenter = -1;
-+#endif
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=m
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
 +
-+static struct page *
-+syscall32_nopage(struct vm_area_struct *vma, unsigned long adr, int *type)
-+{
-+	struct page *p = virt_to_page(adr - vma->vm_start + syscall32_page);
-+	get_page(p);
-+	return p;
-+}
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++# CONFIG_CONFIGFS_FS is not set
 +
-+/* Prevent VMA merging */
-+static void syscall32_vma_close(struct vm_area_struct *vma)
-+{
-+}
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
 +
-+static struct vm_operations_struct syscall32_vm_ops = {
-+	.close = syscall32_vma_close,
-+	.nopage = syscall32_nopage,
-+};
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++# CONFIG_9P_FS is not set
 +
-+struct linux_binprm;
++#
++# Partition Types
++#
++# CONFIG_PARTITION_ADVANCED is not set
++CONFIG_MSDOS_PARTITION=y
 +
-+/* Setup a VMA at program startup for the vsyscall page */
-+int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
-+{
-+	int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
-+	struct vm_area_struct *vma;
-+	struct mm_struct *mm = current->mm;
-+	int ret;
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++# CONFIG_NLS_CODEPAGE_737 is not set
++# CONFIG_NLS_CODEPAGE_775 is not set
++# CONFIG_NLS_CODEPAGE_850 is not set
++# CONFIG_NLS_CODEPAGE_852 is not set
++# CONFIG_NLS_CODEPAGE_855 is not set
++# CONFIG_NLS_CODEPAGE_857 is not set
++# CONFIG_NLS_CODEPAGE_860 is not set
++# CONFIG_NLS_CODEPAGE_861 is not set
++# CONFIG_NLS_CODEPAGE_862 is not set
++# CONFIG_NLS_CODEPAGE_863 is not set
++# CONFIG_NLS_CODEPAGE_864 is not set
++# CONFIG_NLS_CODEPAGE_865 is not set
++# CONFIG_NLS_CODEPAGE_866 is not set
++# CONFIG_NLS_CODEPAGE_869 is not set
++# CONFIG_NLS_CODEPAGE_936 is not set
++# CONFIG_NLS_CODEPAGE_950 is not set
++# CONFIG_NLS_CODEPAGE_932 is not set
++# CONFIG_NLS_CODEPAGE_949 is not set
++# CONFIG_NLS_CODEPAGE_874 is not set
++# CONFIG_NLS_ISO8859_8 is not set
++# CONFIG_NLS_CODEPAGE_1250 is not set
++# CONFIG_NLS_CODEPAGE_1251 is not set
++# CONFIG_NLS_ASCII is not set
++CONFIG_NLS_ISO8859_1=y
++# CONFIG_NLS_ISO8859_2 is not set
++# CONFIG_NLS_ISO8859_3 is not set
++# CONFIG_NLS_ISO8859_4 is not set
++# CONFIG_NLS_ISO8859_5 is not set
++# CONFIG_NLS_ISO8859_6 is not set
++# CONFIG_NLS_ISO8859_7 is not set
++# CONFIG_NLS_ISO8859_9 is not set
++# CONFIG_NLS_ISO8859_13 is not set
++# CONFIG_NLS_ISO8859_14 is not set
++# CONFIG_NLS_ISO8859_15 is not set
++# CONFIG_NLS_KOI8_R is not set
++# CONFIG_NLS_KOI8_U is not set
++# CONFIG_NLS_UTF8 is not set
 +
-+	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-+	if (!vma)
-+		return -ENOMEM;
++#
++# Instrumentation Support
++#
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
 +
-+	memset(vma, 0, sizeof(struct vm_area_struct));
-+	/* Could randomize here */
-+	vma->vm_start = VSYSCALL32_BASE;
-+	vma->vm_end = VSYSCALL32_END;
-+	/* MAYWRITE to allow gdb to COW and set breakpoints */
-+	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
-+	vma->vm_flags |= mm->def_flags;
-+	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
-+	vma->vm_ops = &syscall32_vm_ops;
-+	vma->vm_mm = mm;
++#
++# Kernel hacking
++#
++CONFIG_TRACE_IRQFLAGS_SUPPORT=y
++# CONFIG_PRINTK_TIME is not set
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_UNUSED_SYMBOLS=y
++CONFIG_DEBUG_KERNEL=y
++CONFIG_LOG_BUF_SHIFT=14
++CONFIG_DETECT_SOFTLOCKUP=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_RWSEMS is not set
++# CONFIG_DEBUG_LOCK_ALLOC is not set
++# CONFIG_PROVE_LOCKING is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_HIGHMEM is not set
++CONFIG_DEBUG_BUGVERBOSE=y
++CONFIG_DEBUG_INFO=y
++# CONFIG_DEBUG_FS is not set
++# CONFIG_DEBUG_VM is not set
++CONFIG_FRAME_POINTER=y
++# CONFIG_UNWIND_INFO is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_RCU_TORTURE_TEST is not set
++CONFIG_EARLY_PRINTK=y
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_DEBUG_RODATA is not set
++# CONFIG_4KSTACKS is not set
 +
-+	down_write(&mm->mmap_sem);
-+	if ((ret = insert_vm_struct(mm, vma))) {
-+		up_write(&mm->mmap_sem);
-+		kmem_cache_free(vm_area_cachep, vma);
-+		return ret;
-+	}
-+	mm->total_vm += npages;
-+	up_write(&mm->mmap_sem);
-+	return 0;
-+}
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
 +
-+static int __init init_syscall32(void)
-+{ 
-+	syscall32_page = (void *)get_zeroed_page(GFP_KERNEL); 
-+	if (!syscall32_page) 
-+		panic("Cannot allocate syscall32 page"); 
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=m
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++# CONFIG_CRYPTO_DES is not set
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_AES_586 is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++CONFIG_CRYPTO_CRC32C=m
++# CONFIG_CRYPTO_TEST is not set
 +
-+#ifdef USE_INT80
-+	/*
-+	 * At this point we use int 0x80.
-+	 */
-+	memcpy(syscall32_page, syscall32_int80,
-+	       syscall32_int80_end - syscall32_int80);
-+#else
-+ 	if (use_sysenter > 0) {
-+ 		memcpy(syscall32_page, syscall32_sysenter,
-+ 		       syscall32_sysenter_end - syscall32_sysenter);
-+ 	} else {
-+  		memcpy(syscall32_page, syscall32_syscall,
-+  		       syscall32_syscall_end - syscall32_syscall);
-+  	}	
-+#endif
-+	return 0;
-+} 
++#
++# Hardware crypto devices
++#
++# CONFIG_CRYPTO_DEV_PADLOCK is not set
++CONFIG_XEN=y
++CONFIG_XEN_INTERFACE_VERSION=0x00030207
 +
-+/*
-+ * This must be done early in case we have an initrd containing 32-bit
-+ * binaries (e.g., hotplug). This could be pushed upstream to arch/x86_64.
-+ */	
-+core_initcall(init_syscall32); 
++#
++# XEN
++#
++# CONFIG_XEN_PRIVILEGED_GUEST is not set
++CONFIG_XEN_UNPRIVILEGED_GUEST=y
++CONFIG_XEN_PRIVCMD=y
++CONFIG_XEN_XENBUS_DEV=y
++# CONFIG_XEN_BACKEND is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL is not set
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++CONFIG_XEN_SCSI_FRONTEND=m
++# CONFIG_XEN_GRANT_DEV is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND is not set
++CONFIG_XEN_SCRUB_PAGES=y
++CONFIG_XEN_DISABLE_SERIAL=y
++CONFIG_XEN_SYSFS=y
++CONFIG_XEN_COMPAT_030002_AND_LATER=y
++# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
++# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
++# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
++CONFIG_XEN_COMPAT=0x030002
++CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
++CONFIG_NO_IDLE_HZ=y
++CONFIG_XEN_SMPBOOT=y
++CONFIG_XEN_BALLOON=y
++CONFIG_XEN_DEVMEM=y
 +
-+/* May not be __init: called during resume */
-+void syscall32_cpu_init(void)
-+{
-+#ifndef USE_INT80
-+	if (use_sysenter < 0)
-+ 		use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);
++#
++# Library routines
++#
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC32 is not set
++CONFIG_LIBCRC32C=m
++CONFIG_ZLIB_INFLATE=y
++CONFIG_PLIST=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_GENERIC_PENDING_IRQ=y
++CONFIG_X86_SMP=y
++CONFIG_X86_BIOS_REBOOT=y
++CONFIG_X86_TRAMPOLINE=y
++CONFIG_X86_NO_TSS=y
++CONFIG_X86_NO_IDT=y
++CONFIG_KTIME_SCALAR=y
+diff -r d894e36cfc30 -r 0aa021803deb buildconfigs/linux-defconfig_xenU_x86_64
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/buildconfigs/linux-defconfig_xenU_x86_64	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,906 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.18.8
++# Mon Feb 18 10:42:42 2008
++#
++CONFIG_X86_64=y
++CONFIG_64BIT=y
++CONFIG_X86=y
++CONFIG_LOCKDEP_SUPPORT=y
++CONFIG_STACKTRACE_SUPPORT=y
++CONFIG_SEMAPHORE_SLEEPERS=y
++CONFIG_MMU=y
++CONFIG_RWSEM_GENERIC_SPINLOCK=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_X86_CMPXCHG=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_DMI=y
++CONFIG_AUDIT_ARCH=y
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
 +
-+	/* Load these always in case some future AMD CPU supports
-+	   SYSENTER from compat mode too. */
-+	checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
-+	checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
-+	checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
 +
-+	wrmsrl(MSR_CSTAR, ia32_cstar_target);
-+#endif
-+}
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/ia32/vsyscall-int80.S tmp-linux-2.6-xen.patch/arch/x86_64/ia32/vsyscall-int80.S
---- pristine-linux-2.6.18.2/arch/x86_64/ia32/vsyscall-int80.S	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/ia32/vsyscall-int80.S	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,58 @@
-+/*
-+ * Code for the vsyscall page.  This version uses the old int $0x80 method.
-+ *
-+ * NOTE:
-+ * 1) __kernel_vsyscall _must_ be first in this page.
-+ * 2) there are alignment constraints on this stub, see vsyscall-sigreturn.S
-+ *    for details.
-+ */
-+#include <asm/ia32_unistd.h>
-+#include <asm/asm-offsets.h>
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++# CONFIG_LOCALVERSION_AUTO is not set
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++# CONFIG_CPUSETS is not set
++# CONFIG_RELAY is not set
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++# CONFIG_EMBEDDED is not set
++CONFIG_UID16=y
++CONFIG_SYSCTL=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_SLAB=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_SLOB is not set
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++CONFIG_STOP_MACHINE=y
++
++#
++# Block layer
++#
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++CONFIG_LSF=y
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Processor type and features
++#
++CONFIG_X86_PC=y
++# CONFIG_X86_VSMP is not set
++# CONFIG_MK8 is not set
++# CONFIG_MPSC is not set
++CONFIG_GENERIC_CPU=y
++CONFIG_X86_64_XEN=y
++CONFIG_X86_NO_TSS=y
++CONFIG_X86_NO_IDT=y
++CONFIG_X86_L1_CACHE_BYTES=128
++CONFIG_X86_L1_CACHE_SHIFT=7
++CONFIG_X86_INTERNODE_CACHE_BYTES=128
++CONFIG_X86_GOOD_APIC=y
++# CONFIG_MICROCODE is not set
++# CONFIG_X86_MSR is not set
++CONFIG_X86_CPUID=y
++CONFIG_X86_XEN_GENAPIC=y
++CONFIG_SMP=y
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_PREEMPT_BKL=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_RESOURCES_64BIT=y
++CONFIG_NR_CPUS=8
++CONFIG_HOTPLUG_CPU=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_SWIOTLB=y
++# CONFIG_CRASH_DUMP is not set
++CONFIG_PHYSICAL_START=0x200000
++CONFIG_SECCOMP=y
++CONFIG_HZ_100=y
++# CONFIG_HZ_250 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=100
++# CONFIG_REORDER is not set
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_ISA_DMA_API=y
++CONFIG_GENERIC_PENDING_IRQ=y
++
++#
++# Bus options (PCI etc.)
++#
++# CONFIG_PCI is not set
++
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++# CONFIG_PCCARD is not set
++
++#
++# PCI Hotplug Support
++#
++
++#
++# Executable file formats / Emulations
++#
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++CONFIG_IA32_EMULATION=y
++CONFIG_IA32_AOUT=y
++CONFIG_COMPAT=y
++CONFIG_SYSVIPC_COMPAT=y
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++# CONFIG_NETDEBUG is not set
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++# CONFIG_IP_MULTICAST is not set
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++# CONFIG_IP_PNP_DHCP is not set
++# CONFIG_IP_PNP_BOOTP is not set
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++# CONFIG_INET_DIAG is not set
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_BIC=y
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++
++#
++# DCCP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_DCCP is not set
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_SCTP is not set
++
++#
++# TIPC Configuration (EXPERIMENTAL)
++#
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++
++#
++# QoS and/or fair queueing
++#
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_IEEE80211 is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_DEBUG_DRIVER is not set
++CONFIG_SYS_HYPERVISOR=y
++
++#
++# Connector - unified userspace <-> kernelspace linker
++#
++# CONFIG_CONNECTOR is not set
++
++#
++# Memory Technology Devices (MTD)
++#
++# CONFIG_MTD is not set
++
++#
++# Parallel port support
++#
++# CONFIG_PARPORT is not set
++
++#
++# Plug and Play support
++#
++
++#
++# Block devices
++#
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=m
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++CONFIG_BLK_DEV_NBD=m
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=4096
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++CONFIG_BLK_DEV_INITRD=y
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=m
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=m
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++# CONFIG_BLK_DEV_SR is not set
++# CONFIG_CHR_DEV_SG is not set
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++# CONFIG_SCSI_MULTI_LUN is not set
++# CONFIG_SCSI_CONSTANTS is not set
++# CONFIG_SCSI_LOGGING is not set
++
++#
++# SCSI Transport Attributes
++#
++# CONFIG_SCSI_SPI_ATTRS is not set
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++# CONFIG_SCSI_SAS_ATTRS is not set
++
++#
++# SCSI low-level drivers
++#
++# CONFIG_ISCSI_TCP is not set
++# CONFIG_SCSI_SATA is not set
++# CONFIG_SCSI_DEBUG is not set
++
++#
++# Multi-device support (RAID and LVM)
++#
++# CONFIG_MD is not set
++
++#
++# Fusion MPT device support
++#
++# CONFIG_FUSION is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++
++#
++# I2O device support
++#
++
++#
++# Network device support
++#
++CONFIG_NETDEVICES=y
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++
++#
++# PHY device support
++#
++
++#
++# Ethernet (10 or 100Mbit)
++#
++# CONFIG_NET_ETHERNET is not set
++
++#
++# Ethernet (1000 Mbit)
++#
++
++#
++# Ethernet (10000 Mbit)
++#
++
++#
++# Token Ring devices
++#
++
++#
++# Wireless LAN (non-hamradio)
++#
++# CONFIG_NET_RADIO is not set
++
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++
++#
++# ISDN subsystem
++#
++# CONFIG_ISDN is not set
++
++#
++# Telephony Support
++#
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_TSDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ATKBD=y
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++# CONFIG_INPUT_MOUSE is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_SERPORT=y
++# CONFIG_SERIO_CT82C710 is not set
++CONFIG_SERIO_LIBPS2=y
++# CONFIG_SERIO_RAW is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++# CONFIG_VT_HW_CONSOLE_BINDING is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++
++#
++# Non-8250 serial port support
++#
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++
++#
++# IPMI
++#
++# CONFIG_IPMI_HANDLER is not set
++
++#
++# Watchdog Cards
++#
++# CONFIG_WATCHDOG is not set
++CONFIG_HW_RANDOM=y
++# CONFIG_NVRAM is not set
++# CONFIG_RTC is not set
++# CONFIG_GEN_RTC is not set
++# CONFIG_DTLK is not set
++# CONFIG_R3964 is not set
++
++#
++# Ftape, the floppy tape device driver
++#
++# CONFIG_MWAVE is not set
++# CONFIG_PC8736x_GPIO is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_HANGCHECK_TIMER is not set
++
++#
++# TPM devices
++#
++# CONFIG_TCG_TPM is not set
++# CONFIG_TELCLOCK is not set
++
++#
++# I2C support
++#
++# CONFIG_I2C is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++
++#
++# Dallas's 1-wire bus
++#
++
++#
++# Hardware Monitoring support
++#
++# CONFIG_HWMON is not set
++# CONFIG_HWMON_VID is not set
++
++#
++# Misc devices
++#
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++
++#
++# Digital Video Broadcasting Devices
++#
++# CONFIG_DVB is not set
++
++#
++# Graphics support
++#
++CONFIG_FIRMWARE_EDID=y
++# CONFIG_FB is not set
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++# CONFIG_VGACON_SOFT_SCROLLBACK is not set
++CONFIG_DUMMY_CONSOLE=y
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++
++#
++# USB support
++#
++# CONFIG_USB_ARCH_HAS_HCD is not set
++# CONFIG_USB_ARCH_HAS_OHCI is not set
++# CONFIG_USB_ARCH_HAS_EHCI is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++
++#
++# MMC/SD Card support
++#
++# CONFIG_MMC is not set
++
++#
++# LED devices
++#
++# CONFIG_NEW_LEDS is not set
++
++#
++# LED drivers
++#
++
++#
++# LED Triggers
++#
++
++#
++# InfiniBand support
++#
++
++#
++# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
++#
++# CONFIG_EDAC is not set
++
++#
++# Real Time Clock
++#
++# CONFIG_RTC_CLASS is not set
++
++#
++# DMA Engine support
++#
++# CONFIG_DMA_ENGINE is not set
++
++#
++# DMA Clients
++#
++
++#
++# DMA Devices
++#
++
++#
++# Firmware Drivers
++#
++# CONFIG_EDD is not set
++# CONFIG_DELL_RBU is not set
++# CONFIG_DCDBAS is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++CONFIG_JBD=y
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=y
++# CONFIG_REISERFS_CHECK is not set
++# CONFIG_REISERFS_PROC_INFO is not set
++# CONFIG_REISERFS_FS_XATTR is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=y
++CONFIG_AUTOFS4_FS=y
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_ZISOFS_FS=y
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=m
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++# CONFIG_9P_FS is not set
++
++#
++# Partition Types
++#
++# CONFIG_PARTITION_ADVANCED is not set
++CONFIG_MSDOS_PARTITION=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++# CONFIG_NLS_CODEPAGE_737 is not set
++# CONFIG_NLS_CODEPAGE_775 is not set
++# CONFIG_NLS_CODEPAGE_850 is not set
++# CONFIG_NLS_CODEPAGE_852 is not set
++# CONFIG_NLS_CODEPAGE_855 is not set
++# CONFIG_NLS_CODEPAGE_857 is not set
++# CONFIG_NLS_CODEPAGE_860 is not set
++# CONFIG_NLS_CODEPAGE_861 is not set
++# CONFIG_NLS_CODEPAGE_862 is not set
++# CONFIG_NLS_CODEPAGE_863 is not set
++# CONFIG_NLS_CODEPAGE_864 is not set
++# CONFIG_NLS_CODEPAGE_865 is not set
++# CONFIG_NLS_CODEPAGE_866 is not set
++# CONFIG_NLS_CODEPAGE_869 is not set
++# CONFIG_NLS_CODEPAGE_936 is not set
++# CONFIG_NLS_CODEPAGE_950 is not set
++# CONFIG_NLS_CODEPAGE_932 is not set
++# CONFIG_NLS_CODEPAGE_949 is not set
++# CONFIG_NLS_CODEPAGE_874 is not set
++# CONFIG_NLS_ISO8859_8 is not set
++# CONFIG_NLS_CODEPAGE_1250 is not set
++# CONFIG_NLS_CODEPAGE_1251 is not set
++# CONFIG_NLS_ASCII is not set
++CONFIG_NLS_ISO8859_1=y
++# CONFIG_NLS_ISO8859_2 is not set
++# CONFIG_NLS_ISO8859_3 is not set
++# CONFIG_NLS_ISO8859_4 is not set
++# CONFIG_NLS_ISO8859_5 is not set
++# CONFIG_NLS_ISO8859_6 is not set
++# CONFIG_NLS_ISO8859_7 is not set
++# CONFIG_NLS_ISO8859_9 is not set
++# CONFIG_NLS_ISO8859_13 is not set
++# CONFIG_NLS_ISO8859_14 is not set
++# CONFIG_NLS_ISO8859_15 is not set
++# CONFIG_NLS_KOI8_R is not set
++# CONFIG_NLS_KOI8_U is not set
++# CONFIG_NLS_UTF8 is not set
++
++#
++# Instrumentation Support
++#
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
++
++#
++# Kernel hacking
++#
++CONFIG_TRACE_IRQFLAGS_SUPPORT=y
++# CONFIG_PRINTK_TIME is not set
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_UNUSED_SYMBOLS=y
++CONFIG_DEBUG_KERNEL=y
++CONFIG_LOG_BUF_SHIFT=14
++CONFIG_DETECT_SOFTLOCKUP=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_RWSEMS is not set
++# CONFIG_DEBUG_LOCK_ALLOC is not set
++# CONFIG_PROVE_LOCKING is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++CONFIG_DEBUG_INFO=y
++# CONFIG_DEBUG_FS is not set
++# CONFIG_DEBUG_VM is not set
++CONFIG_FRAME_POINTER=y
++# CONFIG_UNWIND_INFO is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_RCU_TORTURE_TEST is not set
++# CONFIG_DEBUG_RODATA is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=m
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++# CONFIG_CRYPTO_DES is not set
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_AES_X86_64 is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++CONFIG_CRYPTO_CRC32C=m
++# CONFIG_CRYPTO_TEST is not set
++
++#
++# Hardware crypto devices
++#
++CONFIG_XEN=y
++CONFIG_XEN_INTERFACE_VERSION=0x00030207
++
++#
++# XEN
++#
++# CONFIG_XEN_PRIVILEGED_GUEST is not set
++CONFIG_XEN_UNPRIVILEGED_GUEST=y
++CONFIG_XEN_PRIVCMD=y
++CONFIG_XEN_XENBUS_DEV=y
++# CONFIG_XEN_BACKEND is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL is not set
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++CONFIG_XEN_SCSI_FRONTEND=m
++CONFIG_XEN_GRANT_DEV=y
++# CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND is not set
++CONFIG_XEN_SCRUB_PAGES=y
++CONFIG_XEN_DISABLE_SERIAL=y
++CONFIG_XEN_SYSFS=y
++CONFIG_XEN_COMPAT_030002_AND_LATER=y
++# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
++# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
++# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
++CONFIG_XEN_COMPAT=0x030002
++CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
++CONFIG_NO_IDLE_HZ=y
++CONFIG_XEN_SMPBOOT=y
++CONFIG_XEN_BALLOON=y
++CONFIG_XEN_DEVMEM=y
++
++#
++# Library routines
++#
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC32 is not set
++CONFIG_LIBCRC32C=m
++CONFIG_ZLIB_INFLATE=y
++CONFIG_PLIST=y
+diff -r d894e36cfc30 -r 0aa021803deb buildconfigs/linux-defconfig_xen_ia64
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/buildconfigs/linux-defconfig_xen_ia64	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1703 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.18.8
++# Tue Feb 19 11:20:00 2008
++#
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_BSD_PROCESS_ACCT=y
++# CONFIG_BSD_PROCESS_ACCT_V3 is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_AUDIT is not set
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++# CONFIG_CPUSETS is not set
++# CONFIG_RELAY is not set
++CONFIG_INITRAMFS_SOURCE=""
++CONFIG_CC_OPTIMIZE_FOR_SIZE=y
++# CONFIG_EMBEDDED is not set
++CONFIG_SYSCTL=y
++CONFIG_KALLSYMS=y
++CONFIG_KALLSYMS_ALL=y
++CONFIG_KALLSYMS_EXTRA_PASS=y
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_SLAB=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_SLOB is not set
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++CONFIG_KMOD=y
++CONFIG_STOP_MACHINE=y
++
++#
++# Block layer
++#
++# CONFIG_BLK_DEV_IO_TRACE is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Processor type and features
++#
++CONFIG_IA64=y
++CONFIG_64BIT=y
++CONFIG_MMU=y
++CONFIG_SWIOTLB=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_TIME_INTERPOLATION=y
++CONFIG_DMI=y
++CONFIG_EFI=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_XEN=y
++CONFIG_XEN_IA64_EXPOSE_P2M=y
++CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_DMA_IS_DMA32=y
++CONFIG_AUDIT_ARCH=y
++CONFIG_IA64_GENERIC=y
++# CONFIG_IA64_DIG is not set
++# CONFIG_IA64_HP_ZX1 is not set
++# CONFIG_IA64_HP_ZX1_SWIOTLB is not set
++# CONFIG_IA64_SGI_SN2 is not set
++# CONFIG_IA64_HP_SIM is not set
++# CONFIG_IA64_XEN is not set
++# CONFIG_ITANIUM is not set
++CONFIG_MCKINLEY=y
++# CONFIG_IA64_PAGE_SIZE_4KB is not set
++# CONFIG_IA64_PAGE_SIZE_8KB is not set
++CONFIG_IA64_PAGE_SIZE_16KB=y
++# CONFIG_IA64_PAGE_SIZE_64KB is not set
++CONFIG_PGTABLE_3=y
++# CONFIG_PGTABLE_4 is not set
++CONFIG_HZ_100=y
++# CONFIG_HZ_250 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=100
++CONFIG_IA64_L1_CACHE_SHIFT=7
++CONFIG_IA64_CYCLONE=y
++CONFIG_IOSAPIC=y
++# CONFIG_IA64_SGI_SN_XP is not set
++CONFIG_FORCE_MAX_ZONEORDER=11
++CONFIG_SMP=y
++CONFIG_NR_CPUS=16
++CONFIG_HOTPLUG_CPU=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++# CONFIG_SCHED_SMT is not set
++# CONFIG_PERMIT_BSP_REMOVE is not set
++# CONFIG_PREEMPT is not set
++CONFIG_SELECT_MEMORY_MODEL=y
++# CONFIG_FLATMEM_MANUAL is not set
++CONFIG_DISCONTIGMEM_MANUAL=y
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_DISCONTIGMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++CONFIG_NEED_MULTIPLE_NODES=y
++# CONFIG_SPARSEMEM_STATIC is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_MIGRATION is not set
++CONFIG_RESOURCES_64BIT=y
++CONFIG_ARCH_SELECT_MEMORY_MODEL=y
++CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_SPARSEMEM_ENABLE=y
++CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
++CONFIG_NUMA=y
++CONFIG_NODES_SHIFT=10
++CONFIG_VIRTUAL_MEM_MAP=y
++CONFIG_HOLES_IN_ZONE=y
++CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
++CONFIG_HAVE_ARCH_NODEDATA_EXTENSION=y
++# CONFIG_IA32_SUPPORT is not set
++# CONFIG_IA64_MCA_RECOVERY is not set
++CONFIG_PERFMON=y
++CONFIG_IA64_PALINFO=y
++CONFIG_SGI_SN=y
++
++#
++# SN Devices
++#
++# CONFIG_SGI_IOC3 is not set
++CONFIG_KEXEC=y
++
++#
++# Firmware Drivers
++#
++CONFIG_EFI_VARS=y
++CONFIG_EFI_PCDP=y
++CONFIG_BINFMT_ELF=y
++CONFIG_BINFMT_MISC=y
++
++#
++# Power management and ACPI
++#
++CONFIG_PM=y
++CONFIG_PM_LEGACY=y
++# CONFIG_PM_DEBUG is not set
++
++#
++# ACPI (Advanced Configuration and Power Interface) Support
++#
++CONFIG_ACPI=y
++CONFIG_ACPI_BUTTON=y
++CONFIG_ACPI_FAN=y
++# CONFIG_ACPI_DOCK is not set
++CONFIG_ACPI_PROCESSOR=y
++CONFIG_ACPI_HOTPLUG_CPU=y
++CONFIG_ACPI_THERMAL=y
++CONFIG_ACPI_NUMA=y
++CONFIG_ACPI_BLACKLIST_YEAR=0
++# CONFIG_ACPI_DEBUG is not set
++CONFIG_ACPI_EC=y
++CONFIG_ACPI_POWER=y
++CONFIG_ACPI_SYSTEM=y
++CONFIG_ACPI_CONTAINER=y
++
++#
++# CPU Frequency scaling
++#
++# CONFIG_CPU_FREQ is not set
++
++#
++# Bus options (PCI, PCMCIA)
++#
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_XEN_PCIDEV_FRONTEND=y
++# CONFIG_XEN_PCIDEV_FE_DEBUG is not set
++# CONFIG_PCIEPORTBUS is not set
++# CONFIG_PCI_DEBUG is not set
 +
-+	.code32
-+	.text
-+	.section .text.vsyscall,"ax"
-+	.globl __kernel_vsyscall
-+	.type __kernel_vsyscall,@function
-+__kernel_vsyscall:
-+.LSTART_vsyscall:
-+	int $0x80
-+	ret
-+.LEND_vsyscall:
-+	.size __kernel_vsyscall,.-.LSTART_vsyscall
-+	.previous
++#
++# PCI Hotplug Support
++#
++CONFIG_HOTPLUG_PCI=y
++# CONFIG_HOTPLUG_PCI_FAKE is not set
++CONFIG_HOTPLUG_PCI_ACPI=y
++# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set
++# CONFIG_HOTPLUG_PCI_CPCI is not set
++# CONFIG_HOTPLUG_PCI_SHPC is not set
++# CONFIG_HOTPLUG_PCI_SGI is not set
 +
-+	.section .eh_frame,"a", at progbits
-+.LSTARTFRAME:
-+	.long .LENDCIE-.LSTARTCIE
-+.LSTARTCIE:
-+	.long 0			/* CIE ID */
-+	.byte 1			/* Version number */
-+	.string "zR"		/* NUL-terminated augmentation string */
-+	.uleb128 1		/* Code alignment factor */
-+	.sleb128 -4		/* Data alignment factor */
-+	.byte 8			/* Return address register column */
-+	.uleb128 1		/* Augmentation value length */
-+	.byte 0x1b		/* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
-+	.byte 0x0c		/* DW_CFA_def_cfa */
-+	.uleb128 4
-+	.uleb128 4
-+	.byte 0x88		/* DW_CFA_offset, column 0x8 */
-+	.uleb128 1
-+	.align 4
-+.LENDCIE:
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++# CONFIG_PCCARD is not set
 +
-+	.long .LENDFDE1-.LSTARTFDE1	/* Length FDE */
-+.LSTARTFDE1:
-+	.long .LSTARTFDE1-.LSTARTFRAME	/* CIE pointer */
-+	.long .LSTART_vsyscall-.	/* PC-relative start address */
-+	.long .LEND_vsyscall-.LSTART_vsyscall
-+	.uleb128 0			/* Augmentation length */
-+	.align 4
-+.LENDFDE1:
-+		
-+/*
-+ * Get the common code for the sigreturn entry points.
-+ */
-+#define SYSCALL_ENTER_KERNEL    int $0x80
-+#include "vsyscall-sigreturn.S"
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/ia32/vsyscall-sigreturn.S tmp-linux-2.6-xen.patch/arch/x86_64/ia32/vsyscall-sigreturn.S
---- pristine-linux-2.6.18.2/arch/x86_64/ia32/vsyscall-sigreturn.S	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/ia32/vsyscall-sigreturn.S	2007-07-30 16:35:11.000000000 +0200
-@@ -139,5 +139,5 @@ __kernel_rt_sigreturn:
- 	.align 4
- .LENDFDE3:
- 
--#include "../../i386/kernel/vsyscall-note.S"
-+#include <vsyscall-note.S>
- 
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/Kconfig tmp-linux-2.6-xen.patch/arch/x86_64/Kconfig
---- pristine-linux-2.6.18.2/arch/x86_64/Kconfig	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/Kconfig	2007-07-30 16:35:11.000000000 +0200
-@@ -135,6 +135,22 @@ config GENERIC_CPU
- 
- endchoice
- 
-+config X86_64_XEN
-+	bool "Enable Xen compatible kernel"
-+	select SWIOTLB
-+	help
-+	  This option will compile a kernel compatible with Xen hypervisor
++#
++# Networking
++#
++CONFIG_NET=y
 +
-+config X86_NO_TSS
-+	bool
-+	depends on X86_64_XEN
-+	default y
++#
++# Networking options
++#
++# CONFIG_NETDEBUG is not set
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++# CONFIG_IP_PNP_BOOTP is not set
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_IP_MROUTE is not set
++CONFIG_ARPD=y
++CONFIG_SYN_COOKIES=y
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_BIC=y
 +
-+config X86_NO_IDT
-+	bool
-+	depends on X86_64_XEN
-+	default y
++#
++# IP: Virtual Server Configuration
++#
++# CONFIG_IP_VS is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_BRIDGE_NETFILTER=y
 +
- #
- # Define implied options from the CPU selection here
- #
-@@ -155,6 +171,7 @@ config X86_INTERNODE_CACHE_BYTES
- 
- config X86_TSC
- 	bool
-+	depends on !X86_64_XEN
- 	default y
- 
- config X86_GOOD_APIC
-@@ -197,7 +214,7 @@ config X86_CPUID
- 
- config X86_HT
- 	bool
--	depends on SMP && !MK8
-+	depends on SMP && !MK8 && !X86_64_XEN
- 	default y
- 
- config MATH_EMULATION
-@@ -211,14 +228,22 @@ config EISA
- 
- config X86_IO_APIC
- 	bool
-+	depends !XEN_UNPRIVILEGED_GUEST
- 	default y
- 
-+config X86_XEN_GENAPIC
-+	bool
-+	depends X86_64_XEN
-+	default XEN_PRIVILEGED_GUEST || SMP
++#
++# Core Netfilter Configuration
++#
++# CONFIG_NETFILTER_NETLINK is not set
++# CONFIG_NF_CONNTRACK is not set
++# CONFIG_NETFILTER_XTABLES is not set
 +
- config X86_LOCAL_APIC
- 	bool
-+	depends !XEN_UNPRIVILEGED_GUEST
- 	default y
- 
- config MTRR
- 	bool "MTRR (Memory Type Range Register) support"
-+	depends on !XEN_UNPRIVILEGED_GUEST
- 	---help---
- 	  On Intel P6 family processors (Pentium Pro, Pentium II and later)
- 	  the Memory Type Range Registers (MTRRs) may be used to control
-@@ -259,7 +284,7 @@ config SMP
- 
- config SCHED_SMT
- 	bool "SMT (Hyperthreading) scheduler support"
--	depends on SMP
-+	depends on SMP && !X86_64_XEN
- 	default n
- 	help
- 	  SMT scheduler support improves the CPU scheduler's decision making
-@@ -269,7 +294,7 @@ config SCHED_SMT
- 
- config SCHED_MC
- 	bool "Multi-core scheduler support"
--	depends on SMP
-+	depends on SMP && !X86_64_XEN
- 	default y
- 	help
- 	  Multi-core scheduler support improves the CPU scheduler's decision
-@@ -280,7 +305,7 @@ source "kernel/Kconfig.preempt"
- 
- config NUMA
-        bool "Non Uniform Memory Access (NUMA) Support"
--       depends on SMP
-+       depends on SMP && !X86_64_XEN
-        help
- 	 Enable NUMA (Non Uniform Memory Access) support. The kernel 
- 	 will try to allocate memory used by a CPU on the local memory 
-@@ -341,7 +366,7 @@ config ARCH_DISCONTIGMEM_DEFAULT
- 
- config ARCH_SPARSEMEM_ENABLE
- 	def_bool y
--	depends on (NUMA || EXPERIMENTAL)
-+	depends on (NUMA || EXPERIMENTAL) && !X86_64_XEN
- 
- config ARCH_MEMORY_PROBE
- 	def_bool y
-@@ -365,6 +390,7 @@ config NR_CPUS
- 	int "Maximum number of CPUs (2-256)"
- 	range 2 255
- 	depends on SMP
-+	default "16" if X86_64_XEN
- 	default "8"
- 	help
- 	  This allows you to specify the maximum number of CPUs which this
-@@ -387,6 +413,7 @@ config ARCH_ENABLE_MEMORY_HOTPLUG
- 
- config HPET_TIMER
- 	bool
-+	depends on !X86_64_XEN
- 	default y
- 	help
- 	  Use the IA-PC HPET (High Precision Event Timer) to manage
-@@ -407,7 +434,7 @@ config IOMMU
- 	default y
- 	select SWIOTLB
- 	select AGP
--	depends on PCI
-+	depends on PCI && !X86_64_XEN
- 	help
- 	  Support for full DMA access of devices with 32bit memory access only
- 	  on systems with more than 3GB. This is usually needed for USB,
-@@ -423,7 +450,7 @@ config CALGARY_IOMMU
- 	bool "IBM Calgary IOMMU support"
- 	default y
- 	select SWIOTLB
--	depends on PCI && EXPERIMENTAL
-+	depends on PCI && !X86_64_XEN && EXPERIMENTAL
- 	help
- 	  Support for hardware IOMMUs in IBM's xSeries x366 and x460
- 	  systems. Needed to run systems with more than 3GB of memory
-@@ -444,6 +471,7 @@ config SWIOTLB
- 
- config X86_MCE
- 	bool "Machine check support" if EMBEDDED
-+	depends on !X86_64_XEN
- 	default y
- 	help
- 	   Include a machine check error handler to report hardware errors.
-@@ -469,7 +497,7 @@ config X86_MCE_AMD
- 
- config KEXEC
- 	bool "kexec system call (EXPERIMENTAL)"
--	depends on EXPERIMENTAL
-+	depends on EXPERIMENTAL && !XEN_UNPRIVILEGED_GUEST
- 	help
- 	  kexec is a system call that implements the ability to shutdown your
- 	  current kernel, and to start another kernel.  It is like a reboot
-@@ -564,8 +592,11 @@ config GENERIC_PENDING_IRQ
- 	default y
- 
- menu "Power management options"
-+	depends on !XEN_UNPRIVILEGED_GUEST
- 
-+if !X86_64_XEN
- source kernel/power/Kconfig
-+endif
- 
- source "drivers/acpi/Kconfig"
- 
-@@ -588,6 +619,21 @@ config PCI_MMCONFIG
- 	bool "Support mmconfig PCI config space access"
- 	depends on PCI && ACPI
- 
-+config XEN_PCIDEV_FRONTEND
-+	bool "Xen PCI Frontend"
-+	depends on PCI && X86_64_XEN
-+	default y
-+	help
-+	  The PCI device frontend driver allows the kernel to import arbitrary
-+	  PCI devices from a PCI backend to support PCI driver domains.
++#
++# IP: Netfilter Configuration
++#
++# CONFIG_IP_NF_CONNTRACK is not set
++# CONFIG_IP_NF_QUEUE is not set
 +
-+config XEN_PCIDEV_FE_DEBUG
-+	bool "Xen PCI Frontend Debugging"
-+	depends on XEN_PCIDEV_FRONTEND
-+	default n
-+	help
-+	  Enables some debug statements within the PCI Frontend.
++#
++# Bridge: Netfilter Configuration
++#
++# CONFIG_BRIDGE_NF_EBTABLES is not set
 +
- source "drivers/pci/pcie/Kconfig"
- 
- source "drivers/pci/Kconfig"
-@@ -658,4 +704,6 @@ source "security/Kconfig"
- 
- source "crypto/Kconfig"
- 
-+source "drivers/xen/Kconfig"
++#
++# DCCP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_DCCP is not set
 +
- source "lib/Kconfig"
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/acpi/Makefile tmp-linux-2.6-xen.patch/arch/x86_64/kernel/acpi/Makefile
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/acpi/Makefile	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/acpi/Makefile	2007-07-30 16:35:11.000000000 +0200
-@@ -7,3 +7,4 @@ obj-y			+= processor.o
- processor-y		:= ../../../i386/kernel/acpi/processor.o ../../../i386/kernel/acpi/cstate.o
- endif
- 
-+boot-$(CONFIG_XEN)		:= ../../../i386/kernel/acpi/boot-xen.o
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/apic-xen.c tmp-linux-2.6-xen.patch/arch/x86_64/kernel/apic-xen.c
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/apic-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/apic-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,197 @@
-+/*
-+ *	Local APIC handling, local APIC timers
-+ *
-+ *	(c) 1999, 2000 Ingo Molnar <mingo at redhat.com>
-+ *
-+ *	Fixes
-+ *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
-+ *					thanks to Eric Gilmore
-+ *					and Rolf G. Tews
-+ *					for testing these extensively.
-+ *	Maciej W. Rozycki	:	Various updates and fixes.
-+ *	Mikael Pettersson	:	Power Management for UP-APIC.
-+ *	Pavel Machek and
-+ *	Mikael Pettersson	:	PM converted to driver model.
-+ */
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_SCTP is not set
 +
-+#include <linux/init.h>
++#
++# TIPC Configuration (EXPERIMENTAL)
++#
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++CONFIG_BRIDGE=y
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++CONFIG_LLC=y
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
 +
-+#include <linux/mm.h>
-+#include <linux/delay.h>
-+#include <linux/bootmem.h>
-+#include <linux/smp_lock.h>
-+#include <linux/interrupt.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/sysdev.h>
-+#include <linux/module.h>
++#
++# QoS and/or fair queueing
++#
++# CONFIG_NET_SCHED is not set
 +
-+#include <asm/atomic.h>
-+#include <asm/smp.h>
-+#include <asm/mtrr.h>
-+#include <asm/mpspec.h>
-+#include <asm/desc.h>
-+#include <asm/arch_hooks.h>
-+#include <asm/hpet.h>
-+#include <asm/idle.h>
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_IEEE80211 is not set
 +
-+int apic_verbosity;
++#
++# Device Drivers
++#
 +
-+/*
-+ * 'what should we do if we get a hw irq event on an illegal vector'.
-+ * each architecture has to answer this themselves.
-+ */
-+void ack_bad_irq(unsigned int irq)
-+{
-+	printk("unexpected IRQ trap at vector %02x\n", irq);
-+	/*
-+	 * Currently unexpected vectors happen only on SMP and APIC.
-+	 * We _must_ ack these because every local APIC has only N
-+	 * irq slots per priority level, and a 'hanging, unacked' IRQ
-+	 * holds up an irq slot - in excessive cases (when multiple
-+	 * unexpected vectors occur) that might lock up the APIC
-+	 * completely.
-+  	 * But don't ack when the APIC is disabled. -AK
-+	 */
-+	if (!disable_apic)
-+		ack_APIC_irq();
-+}
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=y
++# CONFIG_DEBUG_DRIVER is not set
++CONFIG_SYS_HYPERVISOR=y
 +
-+int setup_profiling_timer(unsigned int multiplier)
-+{
-+	return -EINVAL;
-+}
++#
++# Connector - unified userspace <-> kernelspace linker
++#
++# CONFIG_CONNECTOR is not set
 +
-+void smp_local_timer_interrupt(struct pt_regs *regs)
-+{
-+	profile_tick(CPU_PROFILING, regs);
-+#ifndef CONFIG_XEN
-+#ifdef CONFIG_SMP
-+		update_process_times(user_mode(regs));
-+#endif
-+#endif
-+	/*
-+	 * We take the 'long' return path, and there every subsystem
-+	 * grabs the appropriate locks (kernel lock/ irq lock).
-+	 *
-+	 * we might want to decouple profiling from the 'long path',
-+	 * and do the profiling totally in assembly.
-+	 *
-+	 * Currently this isn't too much of an issue (performance wise),
-+	 * we can take more than 100K local irqs per second on a 100 MHz P5.
-+	 */
-+}
++#
++# Memory Technology Devices (MTD)
++#
++# CONFIG_MTD is not set
 +
-+/*
-+ * Local APIC timer interrupt. This is the most natural way for doing
-+ * local interrupts, but local timer interrupts can be emulated by
-+ * broadcast interrupts too. [in case the hw doesn't support APIC timers]
-+ *
-+ * [ if a single-CPU system runs an SMP kernel then we call the local
-+ *   interrupt as well. Thus we cannot inline the local irq ... ]
-+ */
-+void smp_apic_timer_interrupt(struct pt_regs *regs)
-+{
-+	/*
-+	 * the NMI deadlock-detector uses this.
-+	 */
-+	add_pda(apic_timer_irqs, 1);
++#
++# Parallel port support
++#
++# CONFIG_PARPORT is not set
 +
-+	/*
-+	 * NOTE! We'd better ACK the irq immediately,
-+	 * because timer handling can be slow.
-+	 */
-+	ack_APIC_irq();
-+	/*
-+	 * update_process_times() expects us to have done irq_enter().
-+	 * Besides, if we don't timer interrupts ignore the global
-+	 * interrupt lock, which is the WrongThing (tm) to do.
-+	 */
-+	exit_idle();
-+	irq_enter();
-+	smp_local_timer_interrupt(regs);
-+	irq_exit();
-+}
++#
++# Plug and Play support
++#
++# CONFIG_PNP is not set
 +
-+/*
-+ * This interrupt should _never_ happen with our APIC/SMP architecture
-+ */
-+asmlinkage void smp_spurious_interrupt(void)
-+{
-+	unsigned int v;
-+	exit_idle();
-+	irq_enter();
-+	/*
-+	 * Check if this really is a spurious interrupt and ACK it
-+	 * if it is a vectored one.  Just in case...
-+	 * Spurious interrupts should not be ACKed.
-+	 */
-+	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
-+	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
-+		ack_APIC_irq();
++#
++# Block devices
++#
++# CONFIG_BLK_CPQ_DA is not set
++CONFIG_BLK_CPQ_CISS_DA=y
++# CONFIG_CISS_SCSI_TAPE is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_CRYPTOLOOP=y
++CONFIG_BLK_DEV_NBD=m
++# CONFIG_BLK_DEV_SX8 is not set
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=4096
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++CONFIG_BLK_DEV_INITRD=y
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
 +
-+#if 0
-+	static unsigned long last_warning; 
-+	static unsigned long skipped; 
++#
++# ATA/ATAPI/MFM/RLL support
++#
++CONFIG_IDE=y
++CONFIG_IDE_MAX_HWIFS=4
++CONFIG_BLK_DEV_IDE=y
 +
-+	/* see sw-dev-man vol 3, chapter 7.4.13.5 */
-+	if (time_before(last_warning+30*HZ,jiffies)) { 
-+		printk(KERN_INFO "spurious APIC interrupt on CPU#%d, %ld skipped.\n",
-+		       smp_processor_id(), skipped);
-+		last_warning = jiffies; 
-+		skipped = 0;
-+	} else { 
-+		skipped++; 
-+	} 
-+#endif 
-+	irq_exit();
-+}
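
The ISR lookup in smp_spurious_interrupt() above is terse: the local APIC
exposes its 256-bit in-service register as eight 32-bit words spaced 16
bytes apart, so the word covering vector v sits at APIC_ISR + (v / 32) *
0x10. A hypothetical helper spelling out the same arithmetic:

    /* (v & ~0x1f) >> 1 == (v / 32) * 0x10: clearing the low five bits
     * rounds down to a multiple of 32, and halving turns that into the
     * 0x10-byte register stride. */
    static unsigned int isr_word_offset(unsigned int vector)
    {
        return (vector / 32) * 0x10;
    }

    /* e.g. vector 0xff: word APIC_ISR + 0x70, bit 1u << (0xff & 0x1f) */
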
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++CONFIG_BLK_DEV_IDEDISK=y
++# CONFIG_IDEDISK_MULTI_MODE is not set
++CONFIG_BLK_DEV_IDECD=y
++# CONFIG_BLK_DEV_IDETAPE is not set
++CONFIG_BLK_DEV_IDEFLOPPY=y
++CONFIG_BLK_DEV_IDESCSI=y
++# CONFIG_IDE_TASK_IOCTL is not set
 +
-+/*
-+ * This interrupt should never happen with our APIC/SMP architecture
-+ */
++#
++# IDE chipset support/bugfixes
++#
++# CONFIG_IDE_GENERIC is not set
++CONFIG_BLK_DEV_IDEPCI=y
++# CONFIG_IDEPCI_SHARE_IRQ is not set
++# CONFIG_BLK_DEV_OFFBOARD is not set
++CONFIG_BLK_DEV_GENERIC=y
++# CONFIG_BLK_DEV_OPTI621 is not set
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
++CONFIG_IDEDMA_PCI_AUTO=y
++# CONFIG_IDEDMA_ONLYDISK is not set
++# CONFIG_BLK_DEV_AEC62XX is not set
++# CONFIG_BLK_DEV_ALI15X3 is not set
++# CONFIG_BLK_DEV_AMD74XX is not set
++CONFIG_BLK_DEV_CMD64X=y
++# CONFIG_BLK_DEV_TRIFLEX is not set
++# CONFIG_BLK_DEV_CY82C693 is not set
++# CONFIG_BLK_DEV_CS5520 is not set
++# CONFIG_BLK_DEV_CS5530 is not set
++# CONFIG_BLK_DEV_HPT34X is not set
++# CONFIG_BLK_DEV_HPT366 is not set
++# CONFIG_BLK_DEV_SC1200 is not set
++CONFIG_BLK_DEV_PIIX=y
++# CONFIG_BLK_DEV_IT821X is not set
++# CONFIG_BLK_DEV_NS87415 is not set
++# CONFIG_BLK_DEV_PDC202XX_OLD is not set
++# CONFIG_BLK_DEV_PDC202XX_NEW is not set
++# CONFIG_BLK_DEV_SVWKS is not set
++# CONFIG_BLK_DEV_SIIMAGE is not set
++# CONFIG_BLK_DEV_SLC90E66 is not set
++# CONFIG_BLK_DEV_TRM290 is not set
++# CONFIG_BLK_DEV_VIA82CXXX is not set
++# CONFIG_IDE_ARM is not set
++CONFIG_BLK_DEV_IDEDMA=y
++# CONFIG_IDEDMA_IVB is not set
++CONFIG_IDEDMA_AUTO=y
++# CONFIG_BLK_DEV_HD is not set
 +
-+asmlinkage void smp_error_interrupt(void)
-+{
-+	unsigned int v, v1;
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++CONFIG_SCSI_PROC_FS=y
 +
-+	exit_idle();
-+	irq_enter();
-+	/* First tickle the hardware, only then report what went on. -- REW */
-+	v = apic_read(APIC_ESR);
-+	apic_write(APIC_ESR, 0);
-+	v1 = apic_read(APIC_ESR);
-+	ack_APIC_irq();
-+	atomic_inc(&irq_err_count);
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++CONFIG_CHR_DEV_ST=y
++CONFIG_CHR_DEV_OSST=y
++CONFIG_BLK_DEV_SR=y
++CONFIG_BLK_DEV_SR_VENDOR=y
++CONFIG_CHR_DEV_SG=y
++# CONFIG_CHR_DEV_SCH is not set
 +
-+	/* Here is what the APIC error bits mean:
-+	   0: Send CS error
-+	   1: Receive CS error
-+	   2: Send accept error
-+	   3: Receive accept error
-+	   4: Reserved
-+	   5: Send illegal vector
-+	   6: Received illegal vector
-+	   7: Illegal register address
-+	*/
-+	printk (KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
-+	        smp_processor_id(), v , v1);
-+	irq_exit();
-+}
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
 +
-+int disable_apic;
++#
++# SCSI Transport Attributes
++#
++CONFIG_SCSI_SPI_ATTRS=y
++CONFIG_SCSI_FC_ATTRS=y
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++CONFIG_SCSI_SAS_ATTRS=y
 +
-+/*
-+ * This initializes the IO-APIC and APIC hardware if this is
-+ * a UP kernel.
-+ */
-+int __init APIC_init_uniprocessor (void)
-+{
-+#ifdef CONFIG_X86_IO_APIC
-+	if (smp_found_config)
-+		if (!skip_ioapic_setup && nr_ioapics)
-+			setup_IO_APIC();
-+#endif
++#
++# SCSI low-level drivers
++#
++# CONFIG_ISCSI_TCP is not set
++# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
++# CONFIG_SCSI_3W_9XXX is not set
++# CONFIG_SCSI_ACARD is not set
++# CONFIG_SCSI_AACRAID is not set
++# CONFIG_SCSI_AIC7XXX is not set
++# CONFIG_SCSI_AIC7XXX_OLD is not set
++# CONFIG_SCSI_AIC79XX is not set
++# CONFIG_MEGARAID_NEWGEN is not set
++# CONFIG_MEGARAID_LEGACY is not set
++# CONFIG_MEGARAID_SAS is not set
++# CONFIG_SCSI_SATA is not set
++# CONFIG_SCSI_HPTIOP is not set
++# CONFIG_SCSI_DMX3191D is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++# CONFIG_SCSI_IPS is not set
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++CONFIG_SCSI_SYM53C8XX_2=y
++CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
++CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
++CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
++CONFIG_SCSI_SYM53C8XX_MMIO=y
++# CONFIG_SCSI_IPR is not set
++CONFIG_SCSI_QLOGIC_1280=y
++# CONFIG_SCSI_QLA_FC is not set
++# CONFIG_SCSI_LPFC is not set
++# CONFIG_SCSI_DC395x is not set
++# CONFIG_SCSI_DC390T is not set
++# CONFIG_SCSI_DEBUG is not set
 +
-+	return 1;
-+}
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/asm-offsets.c tmp-linux-2.6-xen.patch/arch/x86_64/kernel/asm-offsets.c
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/asm-offsets.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/asm-offsets.c	2007-07-30 16:35:11.000000000 +0200
-@@ -67,8 +67,10 @@ int main(void)
- 	DEFINE(pbe_address, offsetof(struct pbe, address));
- 	DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
- 	DEFINE(pbe_next, offsetof(struct pbe, next));
-+#ifndef CONFIG_X86_NO_TSS
- 	BLANK();
- 	DEFINE(TSS_ist, offsetof(struct tss_struct, ist));
-+#endif
- 	BLANK();
- 	DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
- 	return 0;
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/crash.c tmp-linux-2.6-xen.patch/arch/x86_64/kernel/crash.c
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/crash.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/crash.c	2007-07-30 16:35:11.000000000 +0200
-@@ -92,6 +92,7 @@ static void crash_save_self(struct pt_re
- 	crash_save_this_cpu(regs, cpu);
- }
- 
-+#ifndef CONFIG_XEN
- #ifdef CONFIG_SMP
- static atomic_t waiting_for_crash_ipi;
- 
-@@ -156,6 +157,7 @@ static void nmi_shootdown_cpus(void)
- 	/* There are no cpus to shootdown */
- }
- #endif
-+#endif /* CONFIG_XEN */
- 
- void machine_crash_shutdown(struct pt_regs *regs)
- {
-@@ -173,6 +175,8 @@ void machine_crash_shutdown(struct pt_re
- 
- 	/* Make a note of crashing cpu. Will be used in NMI callback.*/
- 	crashing_cpu = smp_processor_id();
++#
++# Multi-device support (RAID and LVM)
++#
++CONFIG_MD=y
++# CONFIG_BLK_DEV_MD is not set
++CONFIG_BLK_DEV_DM=y
++CONFIG_DM_CRYPT=m
++CONFIG_DM_SNAPSHOT=y
++CONFIG_DM_MIRROR=m
++CONFIG_DM_ZERO=m
++CONFIG_DM_MULTIPATH=m
++CONFIG_DM_MULTIPATH_EMC=m
 +
-+#ifndef CONFIG_XEN
- 	nmi_shootdown_cpus();
- 
- 	if(cpu_has_apic)
-@@ -181,6 +185,6 @@ void machine_crash_shutdown(struct pt_re
- #if defined(CONFIG_X86_IO_APIC)
- 	disable_IO_APIC();
- #endif
--
-+#endif /* CONFIG_XEN */
- 	crash_save_self(regs);
- }
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/e820-xen.c tmp-linux-2.6-xen.patch/arch/x86_64/kernel/e820-xen.c
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/e820-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/e820-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,774 @@
-+/* 
-+ * Handle the memory map.
-+ * The functions here do the job until bootmem takes over.
-+ *
-+ *  Getting sanitize_e820_map() in sync with i386 version by applying change:
-+ *  -  Provisions for empty E820 memory regions (reported by certain BIOSes).
-+ *     Alex Achenbach <xela at slit.de>, December 2002.
-+ *  Venkatesh Pallipadi <venkatesh.pallipadi at intel.com>
-+ *
-+ */
-+#include <linux/kernel.h>
-+#include <linux/types.h>
-+#include <linux/init.h>
-+#include <linux/bootmem.h>
-+#include <linux/ioport.h>
-+#include <linux/string.h>
-+#include <linux/kexec.h>
-+#include <linux/module.h>
++#
++# Fusion MPT device support
++#
++CONFIG_FUSION=y
++CONFIG_FUSION_SPI=y
++# CONFIG_FUSION_FC is not set
++CONFIG_FUSION_SAS=y
++CONFIG_FUSION_MAX_SGE=128
++# CONFIG_FUSION_CTL is not set
 +
-+#include <asm/pgtable.h>
-+#include <asm/page.h>
-+#include <asm/e820.h>
-+#include <asm/proto.h>
-+#include <asm/bootsetup.h>
-+#include <asm/sections.h>
-+#include <xen/interface/memory.h>
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_IEEE1394 is not set
 +
-+/* 
-+ * PFN of last memory page.
-+ */
-+unsigned long end_pfn; 
-+EXPORT_SYMBOL(end_pfn);
++#
++# I2O device support
++#
++# CONFIG_I2O is not set
 +
-+/* 
-+ * end_pfn only includes RAM, while end_pfn_map includes all e820 entries.
-+ * The direct mapping extends to end_pfn_map, so that we can directly access
-+ * apertures, ACPI and other tables without having to play with fixmaps.
-+ */ 
-+unsigned long end_pfn_map; 
++#
++# Network device support
++#
++CONFIG_NETDEVICES=y
++CONFIG_DUMMY=y
++# CONFIG_BONDING is not set
++# CONFIG_EQUALIZER is not set
++CONFIG_TUN=y
 +
-+/* 
-+ * Last pfn which the user wants to use.
-+ */
-+unsigned long end_user_pfn = MAXMEM>>PAGE_SHIFT;  
++#
++# ARCnet devices
++#
++CONFIG_ARCNET=y
++# CONFIG_ARCNET_1201 is not set
++# CONFIG_ARCNET_1051 is not set
++# CONFIG_ARCNET_RAW is not set
++# CONFIG_ARCNET_CAP is not set
++# CONFIG_ARCNET_COM90xx is not set
++# CONFIG_ARCNET_COM90xxIO is not set
++# CONFIG_ARCNET_RIM_I is not set
++# CONFIG_ARCNET_COM20020 is not set
 +
-+extern struct resource code_resource, data_resource;
++#
++# PHY device support
++#
++# CONFIG_PHYLIB is not set
 +
-+/* Check for some hardcoded bad areas that early boot is not allowed to touch */ 
-+static inline int bad_addr(unsigned long *addrp, unsigned long size)
-+{ 
-+	unsigned long addr = *addrp, last = addr + size; 
++#
++# Ethernet (10 or 100Mbit)
++#
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
 +
-+#ifndef CONFIG_XEN
-+	/* various gunk below that needed for SMP startup */
-+	if (addr < 0x8000) { 
-+		*addrp = 0x8000;
-+		return 1; 
-+	}
++#
++# Tulip family network device support
++#
++CONFIG_NET_TULIP=y
++# CONFIG_DE2104X is not set
++CONFIG_TULIP=y
++CONFIG_TULIP_MWI=y
++CONFIG_TULIP_MMIO=y
++CONFIG_TULIP_NAPI=y
++CONFIG_TULIP_NAPI_HW_MITIGATION=y
++# CONFIG_DE4X5 is not set
++# CONFIG_WINBOND_840 is not set
++# CONFIG_DM9102 is not set
++# CONFIG_ULI526X is not set
++# CONFIG_HP100 is not set
++CONFIG_NET_PCI=y
++# CONFIG_PCNET32 is not set
++# CONFIG_AMD8111_ETH is not set
++# CONFIG_ADAPTEC_STARFIRE is not set
++# CONFIG_B44 is not set
++# CONFIG_FORCEDETH is not set
++# CONFIG_DGRS is not set
++CONFIG_EEPRO100=y
++CONFIG_E100=y
++# CONFIG_FEALNX is not set
++# CONFIG_NATSEMI is not set
++# CONFIG_NE2K_PCI is not set
++# CONFIG_8139CP is not set
++# CONFIG_8139TOO is not set
++# CONFIG_SIS900 is not set
++# CONFIG_EPIC100 is not set
++# CONFIG_SUNDANCE is not set
++# CONFIG_VIA_RHINE is not set
 +
-+	/* direct mapping tables of the kernel */
-+	if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) { 
-+		*addrp = table_end << PAGE_SHIFT; 
-+		return 1;
-+	} 
++#
++# Ethernet (1000 Mbit)
++#
++# CONFIG_ACENIC is not set
++# CONFIG_DL2K is not set
++CONFIG_E1000=y
++# CONFIG_E1000_NAPI is not set
++# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++# CONFIG_SK98LIN is not set
++# CONFIG_VIA_VELOCITY is not set
++CONFIG_TIGON3=y
++# CONFIG_BNX2 is not set
 +
-+	/* initrd */ 
-+#ifdef CONFIG_BLK_DEV_INITRD
-+	if (LOADER_TYPE && INITRD_START && last >= INITRD_START && 
-+	    addr < INITRD_START+INITRD_SIZE) { 
-+		*addrp = INITRD_START + INITRD_SIZE; 
-+		return 1;
-+	} 
-+#endif
-+	/* kernel code + 640k memory hole (later should not be needed, but 
-+	   be paranoid for now) */
-+	if (last >= 640*1024 && addr < 1024*1024) {
-+		*addrp = 1024*1024;
-+		return 1;
-+	}
-+	if (last >= __pa_symbol(&_text) && last < __pa_symbol(&_end)) {
-+		*addrp = __pa_symbol(&_end);
-+		return 1;
-+	}
++#
++# Ethernet (10000 Mbit)
++#
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++# CONFIG_SFC is not set
 +
-+	if (last >= ebda_addr && addr < ebda_addr + ebda_size) {
-+		*addrp = ebda_addr + ebda_size;
-+		return 1;
-+	}
++#
++# Token Ring devices
++#
++# CONFIG_TR is not set
 +
-+	/* XXX ramdisk image here? */ 
-+#else
-+	if (last < (table_end<<PAGE_SHIFT)) {
-+		*addrp = table_end << PAGE_SHIFT;
-+		return 1;
-+	}
-+#endif
-+	return 0;
-+} 
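
Note the in/out convention of bad_addr(): on a collision it advances
*addrp past the offending range and returns 1, so callers are expected to
retry until the candidate comes back clean. The loop in find_e820_area()
further down is the intended usage, sketched here with hypothetical
bounds:

    /* nudge addr forward past reserved ranges until it settles */
    while (bad_addr(&addr, size) && addr + size <= region_end)
        ;
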
++#
++# Wireless LAN (non-hamradio)
++#
++# CONFIG_NET_RADIO is not set
 +
-+#ifndef CONFIG_XEN
-+/*
-+ * This function checks if any part of the range <start,end> is mapped
-+ * with type.
-+ */
-+int __meminit
-+e820_any_mapped(unsigned long start, unsigned long end, unsigned type)
-+{ 
-+	int i;
-+	for (i = 0; i < e820.nr_map; i++) { 
-+		struct e820entry *ei = &e820.map[i]; 
-+		if (type && ei->type != type) 
-+			continue;
-+		if (ei->addr >= end || ei->addr + ei->size <= start)
-+			continue; 
-+		return 1; 
-+	} 
-+	return 0;
-+}
-+#endif
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_NET_FC is not set
++# CONFIG_SHAPER is not set
++CONFIG_NETCONSOLE=y
++CONFIG_NETPOLL=y
++# CONFIG_NETPOLL_RX is not set
++# CONFIG_NETPOLL_TRAP is not set
++CONFIG_NET_POLL_CONTROLLER=y
 +
-+/*
-+ * This function checks if the entire range <start,end> is mapped with type.
-+ *
-+ * Note: this function only works correct if the e820 table is sorted and
-+ * not-overlapping, which is the case
-+ */
-+int __init e820_all_mapped(unsigned long start, unsigned long end, unsigned type)
-+{
-+	int i;
++#
++# ISDN subsystem
++#
++CONFIG_ISDN=m
 +
-+#ifndef CONFIG_XEN
-+	for (i = 0; i < e820.nr_map; i++) {
-+		struct e820entry *ei = &e820.map[i];
-+#else
-+	extern struct e820map machine_e820;
++#
++# Old ISDN4Linux
++#
++# CONFIG_ISDN_I4L is not set
 +
-+	if (!is_initial_xendomain())
-+		return 0;
-+	for (i = 0; i < machine_e820.nr_map; i++) {
-+		const struct e820entry *ei = &machine_e820.map[i];
-+#endif
++#
++# CAPI subsystem
++#
++# CONFIG_ISDN_CAPI is not set
 +
-+		if (type && ei->type != type)
-+			continue;
-+		/* is the region (part) in overlap with the current region ?*/
-+		if (ei->addr >= end || ei->addr + ei->size <= start)
-+			continue;
++#
++# Telephony Support
++#
++# CONFIG_PHONE is not set
 +
-+		/* if the region is at the beginning of <start,end> we move
-+		 * start to the end of the region since it's ok until there
-+		 */
-+		if (ei->addr <= start)
-+			start = ei->addr + ei->size;
-+		/* if start is now at or beyond end, we're done, full coverage */
-+		if (start >= end)
-+			return 1; /* we're done */
-+	}
-+	return 0;
-+}
++#
++# Input device support
++#
++CONFIG_INPUT=y
 +
-+/* 
-+ * Find a free area in a specific range. 
-+ */ 
-+unsigned long __init find_e820_area(unsigned long start, unsigned long end, unsigned size) 
-+{ 
-+	int i; 
-+	for (i = 0; i < e820.nr_map; i++) { 
-+		struct e820entry *ei = &e820.map[i]; 
-+		unsigned long addr = ei->addr, last; 
-+		if (ei->type != E820_RAM) 
-+			continue; 
-+		if (addr < start) 
-+			addr = start;
-+		if (addr > ei->addr + ei->size) 
-+			continue; 
-+		while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size)
-+			;
-+		last = addr + size;
-+		if (last > ei->addr + ei->size)
-+			continue;
-+		if (last > end) 
-+			continue;
-+		return addr; 
-+	} 
-+	return -1UL;		
-+} 
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++CONFIG_INPUT_JOYDEV=y
++# CONFIG_INPUT_TSDEV is not set
++CONFIG_INPUT_EVDEV=y
++# CONFIG_INPUT_EVBUG is not set
 +
-+/* 
-+ * Free bootmem based on the e820 table for a node.
-+ */
-+void __init e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end)
-+{
-+	int i;
-+	for (i = 0; i < e820.nr_map; i++) {
-+		struct e820entry *ei = &e820.map[i]; 
-+		unsigned long last, addr;
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ATKBD=y
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++# CONFIG_MOUSE_SERIAL is not set
++# CONFIG_MOUSE_VSXXXAA is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
 +
-+		if (ei->type != E820_RAM || 
-+		    ei->addr+ei->size <= start || 
-+		    ei->addr >= end)
-+			continue;
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++# CONFIG_SERIO_SERPORT is not set
++# CONFIG_SERIO_PCIPS2 is not set
++CONFIG_SERIO_LIBPS2=y
++# CONFIG_SERIO_RAW is not set
++CONFIG_GAMEPORT=y
++# CONFIG_GAMEPORT_NS558 is not set
++# CONFIG_GAMEPORT_L4 is not set
++# CONFIG_GAMEPORT_EMU10K1 is not set
++# CONFIG_GAMEPORT_FM801 is not set
 +
-+		addr = round_up(ei->addr, PAGE_SIZE);
-+		if (addr < start) 
-+			addr = start;
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++# CONFIG_VT_HW_CONSOLE_BINDING is not set
++CONFIG_SERIAL_NONSTANDARD=y
++# CONFIG_COMPUTONE is not set
++# CONFIG_ROCKETPORT is not set
++# CONFIG_CYCLADES is not set
++# CONFIG_DIGIEPCA is not set
++# CONFIG_MOXA_INTELLIO is not set
++# CONFIG_MOXA_SMARTIO is not set
++# CONFIG_ISI is not set
++# CONFIG_SYNCLINKMP is not set
++# CONFIG_SYNCLINK_GT is not set
++# CONFIG_N_HDLC is not set
++# CONFIG_SPECIALIX is not set
++# CONFIG_SX is not set
++# CONFIG_RIO is not set
++# CONFIG_STALDRV is not set
++# CONFIG_SGI_SNSC is not set
++# CONFIG_SGI_TIOCX is not set
 +
-+		last = round_down(ei->addr + ei->size, PAGE_SIZE); 
-+		if (last >= end)
-+			last = end; 
++#
++# Serial drivers
++#
 +
-+		if (last > addr && last-addr >= PAGE_SIZE)
-+			free_bootmem_node(pgdat, addr, last-addr);
-+	}
-+}
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_SGI_L1_CONSOLE is not set
++# CONFIG_SERIAL_JSM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
 +
-+/*
-+ * Find the highest page frame number we have available
-+ */
-+unsigned long __init e820_end_of_ram(void)
-+{
-+	int i;
-+	unsigned long end_pfn = 0;
-+	
-+	for (i = 0; i < e820.nr_map; i++) {
-+		struct e820entry *ei = &e820.map[i]; 
-+		unsigned long start, end;
++#
++# IPMI
++#
++# CONFIG_IPMI_HANDLER is not set
 +
-+		start = round_up(ei->addr, PAGE_SIZE); 
-+		end = round_down(ei->addr + ei->size, PAGE_SIZE); 
-+		if (start >= end)
-+			continue;
-+		if (ei->type == E820_RAM) { 
-+		if (end > end_pfn<<PAGE_SHIFT)
-+			end_pfn = end>>PAGE_SHIFT;
-+		} else { 
-+			if (end > end_pfn_map<<PAGE_SHIFT) 
-+				end_pfn_map = end>>PAGE_SHIFT;
-+		} 
-+	}
++#
++# Watchdog Cards
++#
++# CONFIG_WATCHDOG is not set
++# CONFIG_HW_RANDOM is not set
++CONFIG_EFI_RTC=y
++# CONFIG_DTLK is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
 +
-+	if (end_pfn > end_pfn_map) 
-+		end_pfn_map = end_pfn;
-+	if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
-+		end_pfn_map = MAXMEM>>PAGE_SHIFT;
-+	if (end_pfn > end_user_pfn)
-+		end_pfn = end_user_pfn;
-+	if (end_pfn > end_pfn_map) 
-+		end_pfn = end_pfn_map; 
++#
++# Ftape, the floppy tape device driver
++#
++CONFIG_AGP=y
++# CONFIG_AGP_SIS is not set
++# CONFIG_AGP_VIA is not set
++CONFIG_AGP_I460=y
++# CONFIG_AGP_HP_ZX1 is not set
++# CONFIG_AGP_SGI_TIOCA is not set
++CONFIG_DRM=y
++# CONFIG_DRM_TDFX is not set
++# CONFIG_DRM_R128 is not set
++# CONFIG_DRM_RADEON is not set
++# CONFIG_DRM_MGA is not set
++# CONFIG_DRM_SIS is not set
++# CONFIG_DRM_VIA is not set
++# CONFIG_DRM_SAVAGE is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_HPET is not set
++# CONFIG_HANGCHECK_TIMER is not set
++# CONFIG_MMTIMER is not set
 +
-+	return end_pfn;	
-+}
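
The clamping order in e820_end_of_ram() matters: end_pfn_map is capped at
MAXMEM, then end_pfn is capped first by the user's mem= limit and finally
by end_pfn_map. With hypothetical numbers (4 KiB pages):

    /* 1 GiB machine booted with mem=512M:
     *   scan:         end_pfn = end_pfn_map = 0x40000
     *   end_user_pfn: 0x20000  ->  end_pfn = 0x20000
     * so only the first 512 MiB is treated as usable RAM, while the
     * direct mapping still covers the full e820 range. */
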
++#
++# TPM devices
++#
++# CONFIG_TCG_TPM is not set
++# CONFIG_TELCLOCK is not set
 +
-+/* 
-+ * Compute how much memory is missing in a range.
-+ * Unlike the other functions in this file the arguments are in page numbers.
-+ */
-+unsigned long __init
-+e820_hole_size(unsigned long start_pfn, unsigned long end_pfn)
-+{
-+	unsigned long ram = 0;
-+	unsigned long start = start_pfn << PAGE_SHIFT;
-+	unsigned long end = end_pfn << PAGE_SHIFT;
-+	int i;
-+	for (i = 0; i < e820.nr_map; i++) {
-+		struct e820entry *ei = &e820.map[i];
-+		unsigned long last, addr;
++#
++# I2C support
++#
++CONFIG_I2C=y
++CONFIG_I2C_CHARDEV=y
 +
-+		if (ei->type != E820_RAM ||
-+		    ei->addr+ei->size <= start ||
-+		    ei->addr >= end)
-+			continue;
++#
++# I2C Algorithms
++#
++CONFIG_I2C_ALGOBIT=y
++CONFIG_I2C_ALGOPCF=y
++# CONFIG_I2C_ALGOPCA is not set
 +
-+		addr = round_up(ei->addr, PAGE_SIZE);
-+		if (addr < start)
-+			addr = start;
++#
++# I2C Hardware Bus support
++#
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++# CONFIG_I2C_AMD8111 is not set
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_I810 is not set
++# CONFIG_I2C_PIIX4 is not set
++# CONFIG_I2C_NFORCE2 is not set
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_PROSAVAGE is not set
++# CONFIG_I2C_SAVAGE4 is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++# CONFIG_I2C_STUB is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++# CONFIG_I2C_VOODOO3 is not set
++# CONFIG_I2C_PCA_ISA is not set
 +
-+		last = round_down(ei->addr + ei->size, PAGE_SIZE);
-+		if (last >= end)
-+			last = end;
++#
++# Miscellaneous I2C Chip support
++#
++# CONFIG_SENSORS_DS1337 is not set
++# CONFIG_SENSORS_DS1374 is not set
++# CONFIG_SENSORS_EEPROM is not set
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCA9539 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_MAX6875 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
 +
-+		if (last > addr)
-+			ram += last - addr;
-+	}
-+	return ((end - start) - ram) >> PAGE_SHIFT;
-+}
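
A quick worked example of e820_hole_size(), with a hypothetical map --
note that, unlike its neighbours, it takes page numbers, not addresses:

    /* one E820_RAM entry [0, 0x80000); e820_hole_size(0, 256) scans
     * bytes [0, 0x100000):
     *   ram = 0x80000
     *   return (0x100000 - 0x80000) >> PAGE_SHIFT = 128
     * i.e. 128 of the 256 requested 4 KiB pages are missing. */
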
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
 +
-+/*
-+ * Mark e820 reserved areas as busy for the resource manager.
-+ */
-+void __init e820_reserve_resources(struct e820entry *e820, int nr_map)
-+{
-+	int i;
-+	for (i = 0; i < nr_map; i++) {
-+		struct resource *res;
-+		res = alloc_bootmem_low(sizeof(struct resource));
-+		switch (e820[i].type) {
-+		case E820_RAM:	res->name = "System RAM"; break;
-+		case E820_ACPI:	res->name = "ACPI Tables"; break;
-+		case E820_NVS:	res->name = "ACPI Non-volatile Storage"; break;
-+		default:	res->name = "reserved";
-+		}
-+		res->start = e820[i].addr;
-+		res->end = res->start + e820[i].size - 1;
-+		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-+		request_resource(&iomem_resource, res);
-+		if (e820[i].type == E820_RAM) {
-+			/*
-+			 *  We don't know which RAM region contains kernel data,
-+			 *  so we try it repeatedly and let the resource manager
-+			 *  test it.
-+			 */
-+#ifndef CONFIG_XEN
-+			request_resource(res, &code_resource);
-+			request_resource(res, &data_resource);
-+#endif
-+#ifdef CONFIG_KEXEC
-+			if (crashk_res.start != crashk_res.end)
-+				request_resource(res, &crashk_res);
-+#ifdef CONFIG_XEN
-+			xen_machine_kexec_register_resources(res);
-+#endif
-+#endif
-+		}
-+	}
-+}
++#
++# Dallas's 1-wire bus
++#
 +
-+/* 
-+ * Add a memory region to the kernel e820 map.
-+ */ 
-+void __init add_memory_region(unsigned long start, unsigned long size, int type)
-+{
-+	int x = e820.nr_map;
++#
++# Hardware Monitoring support
++#
++CONFIG_HWMON=y
++# CONFIG_HWMON_VID is not set
++# CONFIG_SENSORS_ABITUGURU is not set
++# CONFIG_SENSORS_ADM1021 is not set
++# CONFIG_SENSORS_ADM1025 is not set
++# CONFIG_SENSORS_ADM1026 is not set
++# CONFIG_SENSORS_ADM1031 is not set
++# CONFIG_SENSORS_ADM9240 is not set
++# CONFIG_SENSORS_ASB100 is not set
++# CONFIG_SENSORS_ATXP1 is not set
++# CONFIG_SENSORS_DS1621 is not set
++# CONFIG_SENSORS_F71805F is not set
++# CONFIG_SENSORS_FSCHER is not set
++# CONFIG_SENSORS_FSCPOS is not set
++# CONFIG_SENSORS_GL518SM is not set
++# CONFIG_SENSORS_GL520SM is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_LM63 is not set
++# CONFIG_SENSORS_LM75 is not set
++# CONFIG_SENSORS_LM77 is not set
++# CONFIG_SENSORS_LM78 is not set
++# CONFIG_SENSORS_LM80 is not set
++# CONFIG_SENSORS_LM83 is not set
++# CONFIG_SENSORS_LM85 is not set
++# CONFIG_SENSORS_LM87 is not set
++# CONFIG_SENSORS_LM90 is not set
++# CONFIG_SENSORS_LM92 is not set
++# CONFIG_SENSORS_MAX1619 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_SIS5595 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_SMSC47M192 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_VIA686A is not set
++# CONFIG_SENSORS_VT8231 is not set
++# CONFIG_SENSORS_W83781D is not set
++# CONFIG_SENSORS_W83791D is not set
++# CONFIG_SENSORS_W83792D is not set
++# CONFIG_SENSORS_W83L785TS is not set
++# CONFIG_SENSORS_W83627HF is not set
++# CONFIG_SENSORS_W83627EHF is not set
++# CONFIG_HWMON_DEBUG_CHIP is not set
 +
-+	if (x == E820MAX) {
-+		printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
-+		return;
-+	}
++#
++# Misc devices
++#
 +
-+	e820.map[x].addr = start;
-+	e820.map[x].size = size;
-+	e820.map[x].type = type;
-+	e820.nr_map++;
-+}
++#
++# Multimedia devices
++#
++CONFIG_VIDEO_DEV=y
++CONFIG_VIDEO_V4L1=y
++CONFIG_VIDEO_V4L1_COMPAT=y
++CONFIG_VIDEO_V4L2=y
 +
-+void __init e820_print_map(char *who)
-+{
-+	int i;
++#
++# Video Capture Adapters
++#
 +
-+	for (i = 0; i < e820.nr_map; i++) {
-+		printk(" %s: %016Lx - %016Lx ", who,
-+			(unsigned long long) e820.map[i].addr,
-+			(unsigned long long) (e820.map[i].addr + e820.map[i].size));
-+		switch (e820.map[i].type) {
-+		case E820_RAM:	printk("(usable)\n");
-+				break;
-+		case E820_RESERVED:
-+				printk("(reserved)\n");
-+				break;
-+		case E820_ACPI:
-+				printk("(ACPI data)\n");
-+				break;
-+		case E820_NVS:
-+				printk("(ACPI NVS)\n");
-+				break;
-+		default:	printk("type %u\n", e820.map[i].type);
-+				break;
-+		}
-+	}
-+}
++#
++# Video Capture Adapters
++#
++# CONFIG_VIDEO_ADV_DEBUG is not set
++# CONFIG_VIDEO_VIVI is not set
++# CONFIG_VIDEO_BT848 is not set
++# CONFIG_VIDEO_CPIA is not set
++# CONFIG_VIDEO_CPIA2 is not set
++# CONFIG_VIDEO_SAA5246A is not set
++# CONFIG_VIDEO_SAA5249 is not set
++# CONFIG_TUNER_3036 is not set
++# CONFIG_VIDEO_STRADIS is not set
++# CONFIG_VIDEO_ZORAN is not set
++# CONFIG_VIDEO_SAA7134 is not set
++# CONFIG_VIDEO_MXB is not set
++# CONFIG_VIDEO_DPC is not set
++# CONFIG_VIDEO_HEXIUM_ORION is not set
++# CONFIG_VIDEO_HEXIUM_GEMINI is not set
++# CONFIG_VIDEO_CX88 is not set
 +
-+/*
-+ * Sanitize the BIOS e820 map.
-+ *
-+ * Some e820 responses include overlapping entries.  The following 
-+ * replaces the original e820 map with a new one, removing overlaps.
-+ *
-+ */
-+static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
-+{
-+	struct change_member {
-+		struct e820entry *pbios; /* pointer to original bios entry */
-+		unsigned long long addr; /* address for this change point */
-+	};
-+	static struct change_member change_point_list[2*E820MAX] __initdata;
-+	static struct change_member *change_point[2*E820MAX] __initdata;
-+	static struct e820entry *overlap_list[E820MAX] __initdata;
-+	static struct e820entry new_bios[E820MAX] __initdata;
-+	struct change_member *change_tmp;
-+	unsigned long current_type, last_type;
-+	unsigned long long last_addr;
-+	int chgidx, still_changing;
-+	int overlap_entries;
-+	int new_bios_entry;
-+	int old_nr, new_nr, chg_nr;
-+	int i;
++#
++# Encoders and Decoders
++#
++# CONFIG_VIDEO_MSP3400 is not set
++# CONFIG_VIDEO_CS53L32A is not set
++# CONFIG_VIDEO_TLV320AIC23B is not set
++# CONFIG_VIDEO_WM8775 is not set
++# CONFIG_VIDEO_WM8739 is not set
++# CONFIG_VIDEO_CX2341X is not set
++# CONFIG_VIDEO_CX25840 is not set
++# CONFIG_VIDEO_SAA711X is not set
++# CONFIG_VIDEO_SAA7127 is not set
++# CONFIG_VIDEO_UPD64031A is not set
++# CONFIG_VIDEO_UPD64083 is not set
 +
-+	/*
-+		Visually we're performing the following (1,2,3,4 = memory types)...
++#
++# V4L USB devices
++#
++# CONFIG_VIDEO_PVRUSB2 is not set
++# CONFIG_VIDEO_EM28XX is not set
++# CONFIG_USB_VICAM is not set
++# CONFIG_USB_IBMCAM is not set
++# CONFIG_USB_KONICAWC is not set
++# CONFIG_USB_QUICKCAM_MESSENGER is not set
++# CONFIG_USB_ET61X251 is not set
++# CONFIG_VIDEO_OVCAMCHIP is not set
++# CONFIG_USB_W9968CF is not set
++# CONFIG_USB_OV511 is not set
++# CONFIG_USB_SE401 is not set
++# CONFIG_USB_SN9C102 is not set
++# CONFIG_USB_STV680 is not set
++# CONFIG_USB_ZC0301 is not set
++# CONFIG_USB_PWC is not set
 +
-+		Sample memory map (w/overlaps):
-+		   ____22__________________
-+		   ______________________4_
-+		   ____1111________________
-+		   _44_____________________
-+		   11111111________________
-+		   ____________________33__
-+		   ___________44___________
-+		   __________33333_________
-+		   ______________22________
-+		   ___________________2222_
-+		   _________111111111______
-+		   _____________________11_
-+		   _________________4______
++#
++# Radio Adapters
++#
++# CONFIG_RADIO_GEMTEK_PCI is not set
++# CONFIG_RADIO_MAXIRADIO is not set
++# CONFIG_RADIO_MAESTRO is not set
++# CONFIG_USB_DSBR is not set
 +
-+		Sanitized equivalent (no overlap):
-+		   1_______________________
-+		   _44_____________________
-+		   ___1____________________
-+		   ____22__________________
-+		   ______11________________
-+		   _________1______________
-+		   __________3_____________
-+		   ___________44___________
-+		   _____________33_________
-+		   _______________2________
-+		   ________________1_______
-+		   _________________4______
-+		   ___________________2____
-+		   ____________________33__
-+		   ______________________4_
-+	*/
++#
++# Digital Video Broadcasting Devices
++#
++# CONFIG_DVB is not set
++# CONFIG_USB_DABUSB is not set
 +
-+	/* if there's only one memory region, don't bother */
-+	if (*pnr_map < 2)
-+		return -1;
++#
++# Graphics support
++#
++CONFIG_FIRMWARE_EDID=y
++CONFIG_FB=y
++CONFIG_FB_CFB_FILLRECT=y
++CONFIG_FB_CFB_COPYAREA=y
++CONFIG_FB_CFB_IMAGEBLIT=y
++# CONFIG_FB_MACMODES is not set
++# CONFIG_FB_BACKLIGHT is not set
++CONFIG_FB_MODE_HELPERS=y
++# CONFIG_FB_TILEBLITTING is not set
++# CONFIG_FB_CIRRUS is not set
++# CONFIG_FB_PM2 is not set
++# CONFIG_FB_CYBER2000 is not set
++# CONFIG_FB_ASILIANT is not set
++# CONFIG_FB_IMSTT is not set
++# CONFIG_FB_S1D13XXX is not set
++# CONFIG_FB_NVIDIA is not set
++# CONFIG_FB_RIVA is not set
++# CONFIG_FB_MATROX is not set
++# CONFIG_FB_RADEON is not set
++# CONFIG_FB_ATY128 is not set
++# CONFIG_FB_ATY is not set
++# CONFIG_FB_SAVAGE is not set
++# CONFIG_FB_SIS is not set
++# CONFIG_FB_NEOMAGIC is not set
++# CONFIG_FB_KYRO is not set
++# CONFIG_FB_3DFX is not set
++# CONFIG_FB_VOODOO1 is not set
++# CONFIG_FB_TRIDENT is not set
++# CONFIG_FB_VIRTUAL is not set
 +
-+	old_nr = *pnr_map;
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++# CONFIG_VGACON_SOFT_SCROLLBACK is not set
++CONFIG_DUMMY_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
++# CONFIG_FONTS is not set
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
 +
-+	/* bail out if we find any unreasonable addresses in bios map */
-+	for (i=0; i<old_nr; i++)
-+		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
-+			return -1;
++#
++# Logo configuration
++#
++CONFIG_LOGO=y
++# CONFIG_LOGO_LINUX_MONO is not set
++# CONFIG_LOGO_LINUX_VGA16 is not set
++CONFIG_LOGO_LINUX_CLUT224=y
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
 +
-+	/* create pointers for initial change-point information (for sorting) */
-+	for (i=0; i < 2*old_nr; i++)
-+		change_point[i] = &change_point_list[i];
++#
++# Sound
++#
++CONFIG_SOUND=y
 +
-+	/* record all known change-points (starting and ending addresses),
-+	   omitting those that are for empty memory regions */
-+	chgidx = 0;
-+	for (i=0; i < old_nr; i++)	{
-+		if (biosmap[i].size != 0) {
-+			change_point[chgidx]->addr = biosmap[i].addr;
-+			change_point[chgidx++]->pbios = &biosmap[i];
-+			change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
-+			change_point[chgidx++]->pbios = &biosmap[i];
-+		}
-+	}
-+	chg_nr = chgidx;
++#
++# Advanced Linux Sound Architecture
++#
++CONFIG_SND=y
++CONFIG_SND_TIMER=y
++CONFIG_SND_PCM=y
++CONFIG_SND_HWDEP=y
++CONFIG_SND_RAWMIDI=y
++CONFIG_SND_SEQUENCER=y
++CONFIG_SND_SEQ_DUMMY=y
++CONFIG_SND_OSSEMUL=y
++CONFIG_SND_MIXER_OSS=y
++CONFIG_SND_PCM_OSS=y
++CONFIG_SND_PCM_OSS_PLUGINS=y
++CONFIG_SND_SEQUENCER_OSS=y
++# CONFIG_SND_DYNAMIC_MINORS is not set
++CONFIG_SND_SUPPORT_OLD_API=y
++CONFIG_SND_VERBOSE_PROCFS=y
++# CONFIG_SND_VERBOSE_PRINTK is not set
++# CONFIG_SND_DEBUG is not set
 +
-+	/* sort change-point list by memory addresses (low -> high) */
-+	still_changing = 1;
-+	while (still_changing)	{
-+		still_changing = 0;
-+		for (i=1; i < chg_nr; i++)  {
-+			/* if <current_addr> > <last_addr>, swap */
-+			/* or, if current=<start_addr> & last=<end_addr>, swap */
-+			if ((change_point[i]->addr < change_point[i-1]->addr) ||
-+				((change_point[i]->addr == change_point[i-1]->addr) &&
-+				 (change_point[i]->addr == change_point[i]->pbios->addr) &&
-+				 (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
-+			   )
-+			{
-+				change_tmp = change_point[i];
-+				change_point[i] = change_point[i-1];
-+				change_point[i-1] = change_tmp;
-+				still_changing=1;
-+			}
-+		}
-+	}
++#
++# Generic devices
++#
++CONFIG_SND_MPU401_UART=y
++CONFIG_SND_OPL3_LIB=y
++CONFIG_SND_AC97_CODEC=y
++CONFIG_SND_AC97_BUS=y
++CONFIG_SND_DUMMY=y
++CONFIG_SND_VIRMIDI=y
++# CONFIG_SND_MTPAV is not set
++CONFIG_SND_SERIAL_U16550=y
++CONFIG_SND_MPU401=y
++
++#
++# PCI devices
++#
++# CONFIG_SND_AD1889 is not set
++# CONFIG_SND_ALS300 is not set
++# CONFIG_SND_ALI5451 is not set
++CONFIG_SND_ATIIXP=y
++# CONFIG_SND_ATIIXP_MODEM is not set
++# CONFIG_SND_AU8810 is not set
++# CONFIG_SND_AU8820 is not set
++# CONFIG_SND_AU8830 is not set
++# CONFIG_SND_AZT3328 is not set
++# CONFIG_SND_BT87X is not set
++# CONFIG_SND_CA0106 is not set
++# CONFIG_SND_CMIPCI is not set
++# CONFIG_SND_CS4281 is not set
++# CONFIG_SND_CS46XX is not set
++# CONFIG_SND_DARLA20 is not set
++# CONFIG_SND_GINA20 is not set
++# CONFIG_SND_LAYLA20 is not set
++# CONFIG_SND_DARLA24 is not set
++# CONFIG_SND_GINA24 is not set
++# CONFIG_SND_LAYLA24 is not set
++# CONFIG_SND_MONA is not set
++# CONFIG_SND_MIA is not set
++# CONFIG_SND_ECHO3G is not set
++# CONFIG_SND_INDIGO is not set
++# CONFIG_SND_INDIGOIO is not set
++# CONFIG_SND_INDIGODJ is not set
++# CONFIG_SND_EMU10K1 is not set
++# CONFIG_SND_EMU10K1X is not set
++# CONFIG_SND_ENS1370 is not set
++# CONFIG_SND_ENS1371 is not set
++# CONFIG_SND_ES1938 is not set
++# CONFIG_SND_ES1968 is not set
++CONFIG_SND_FM801=y
++# CONFIG_SND_FM801_TEA575X_BOOL is not set
++# CONFIG_SND_HDA_INTEL is not set
++# CONFIG_SND_HDSP is not set
++# CONFIG_SND_HDSPM is not set
++# CONFIG_SND_ICE1712 is not set
++# CONFIG_SND_ICE1724 is not set
++# CONFIG_SND_INTEL8X0 is not set
++# CONFIG_SND_INTEL8X0M is not set
++# CONFIG_SND_KORG1212 is not set
++# CONFIG_SND_MAESTRO3 is not set
++# CONFIG_SND_MIXART is not set
++# CONFIG_SND_NM256 is not set
++# CONFIG_SND_PCXHR is not set
++# CONFIG_SND_RIPTIDE is not set
++# CONFIG_SND_RME32 is not set
++# CONFIG_SND_RME96 is not set
++# CONFIG_SND_RME9652 is not set
++# CONFIG_SND_SONICVIBES is not set
++# CONFIG_SND_TRIDENT is not set
++# CONFIG_SND_VIA82XX is not set
++# CONFIG_SND_VIA82XX_MODEM is not set
++# CONFIG_SND_VX222 is not set
++# CONFIG_SND_YMFPCI is not set
++
++#
++# USB devices
++#
++# CONFIG_SND_USB_AUDIO is not set
 +
-+	/* create a new bios memory map, removing overlaps */
-+	overlap_entries=0;	 /* number of entries in the overlap table */
-+	new_bios_entry=0;	 /* index for creating new bios map entries */
-+	last_type = 0;		 /* start with undefined memory type */
-+	last_addr = 0;		 /* start with 0 as last starting address */
-+	/* loop through change-points, determining affect on the new bios map */
-+	for (chgidx=0; chgidx < chg_nr; chgidx++)
-+	{
-+		/* keep track of all overlapping bios entries */
-+		if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
-+		{
-+			/* add map entry to overlap list (> 1 entry implies an overlap) */
-+			overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
-+		}
-+		else
-+		{
-+			/* remove entry from list (order independent, so swap with last) */
-+			for (i=0; i<overlap_entries; i++)
-+			{
-+				if (overlap_list[i] == change_point[chgidx]->pbios)
-+					overlap_list[i] = overlap_list[overlap_entries-1];
-+			}
-+			overlap_entries--;
-+		}
-+		/* if there are overlapping entries, decide which "type" to use */
-+		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
-+		current_type = 0;
-+		for (i=0; i<overlap_entries; i++)
-+			if (overlap_list[i]->type > current_type)
-+				current_type = overlap_list[i]->type;
-+		/* continue building up new bios map based on this information */
-+		if (current_type != last_type)	{
-+			if (last_type != 0)	 {
-+				new_bios[new_bios_entry].size =
-+					change_point[chgidx]->addr - last_addr;
-+				/* move forward only if the new size was non-zero */
-+				if (new_bios[new_bios_entry].size != 0)
-+					if (++new_bios_entry >= E820MAX)
-+						break; 	/* no more space left for new bios entries */
-+			}
-+			if (current_type != 0)	{
-+				new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
-+				new_bios[new_bios_entry].type = current_type;
-+				last_addr=change_point[chgidx]->addr;
-+			}
-+			last_type = current_type;
-+		}
-+	}
-+	new_nr = new_bios_entry;   /* retain count for new bios entries */
++#
++# Open Sound System
++#
++CONFIG_SOUND_PRIME=y
++# CONFIG_OSS_OBSOLETE_DRIVER is not set
++# CONFIG_SOUND_BT878 is not set
++# CONFIG_SOUND_ES1371 is not set
++# CONFIG_SOUND_ICH is not set
++# CONFIG_SOUND_TRIDENT is not set
++# CONFIG_SOUND_MSNDCLAS is not set
++# CONFIG_SOUND_MSNDPIN is not set
++# CONFIG_SOUND_VIA82CXXX is not set
++# CONFIG_SOUND_TVMIXER is not set
 +
-+	/* copy new bios mapping into original location */
-+	memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
-+	*pnr_map = new_nr;
++#
++# USB support
++#
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++CONFIG_USB=y
++# CONFIG_USB_DEBUG is not set
 +
-+	return 0;
-+}
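
The change-point machinery above is dense, but the underlying idea is a
sweep: every region contributes a start and an end event, and between two
consecutive events the highest-numbered type present wins. A stand-alone
toy (plain user-space C, not kernel code) that reproduces the effect by
brute force over the elementary intervals:

    #include <stdio.h>
    #include <stdlib.h>

    struct region { unsigned long start, end, type; };

    static int cmp_addr(const void *a, const void *b)
    {
        unsigned long x = *(const unsigned long *)a;
        unsigned long y = *(const unsigned long *)b;
        return (x > y) - (x < y);
    }

    int main(void)
    {
        /* overlapping input, as a buggy BIOS might report it:
         * [0,8) type 1 (usable), [4,12) type 2 (reserved) */
        struct region in[] = { { 0, 8, 1 }, { 4, 12, 2 } };
        unsigned long pts[4];
        int n = 2, i, j;

        for (i = 0; i < n; i++) {
            pts[2 * i] = in[i].start;
            pts[2 * i + 1] = in[i].end;
        }
        qsort(pts, 2 * n, sizeof(pts[0]), cmp_addr);

        /* between consecutive change points the covering set is
         * constant, and the highest type wins -- same rule as above */
        for (i = 0; i < 2 * n - 1; i++) {
            unsigned long lo = pts[i], hi = pts[i + 1], best = 0;
            if (lo == hi)
                continue;
            for (j = 0; j < n; j++)
                if (in[j].start <= lo && hi <= in[j].end &&
                    in[j].type > best)
                    best = in[j].type;
            if (best)
                printf("[%lu,%lu) type %lu\n", lo, hi, best);
        }
        return 0;
    }

This prints [0,4) type 1, [4,8) type 2, [8,12) type 2; the kernel version
additionally merges adjacent runs of equal type (here [4,12)) and does the
whole job in place without the quadratic rescan.
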
++#
++# Miscellaneous USB options
++#
++CONFIG_USB_DEVICEFS=y
++CONFIG_USB_BANDWIDTH=y
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_SUSPEND is not set
++# CONFIG_USB_OTG is not set
 +
-+/*
-+ * Copy the BIOS e820 map into a safe place.
-+ *
-+ * Sanity-check it while we're at it..
-+ *
-+ * If we're lucky and live on a modern system, the setup code
-+ * will have given us a memory map that we can use to properly
-+ * set up memory.  If we aren't, we'll fake a memory map.
-+ *
-+ * We check to see that the memory map contains at least 2 elements
-+ * before we'll use it, because the detection code in setup.S may
-+ * not be perfect and most every PC known to man has two memory
-+ * regions: one from 0 to 640k, and one from 1mb up.  (The IBM
-+ * thinkpad 560x, for example, does not cooperate with the memory
-+ * detection code.)
-+ */
-+static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
-+{
-+#ifndef CONFIG_XEN
-+	/* Only one memory region (or negative)? Ignore it */
-+	if (nr_map < 2)
-+		return -1;
-+#else
-+	BUG_ON(nr_map < 1);
-+#endif
++#
++# USB Host Controller Drivers
++#
++CONFIG_USB_EHCI_HCD=y
++# CONFIG_USB_EHCI_SPLIT_ISO is not set
++# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
++# CONFIG_USB_EHCI_TT_NEWSCHED is not set
++# CONFIG_USB_ISP116X_HCD is not set
++CONFIG_USB_OHCI_HCD=y
++# CONFIG_USB_OHCI_BIG_ENDIAN is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_UHCI_HCD=y
++# CONFIG_USB_SL811_HCD is not set
 +
-+	do {
-+		unsigned long start = biosmap->addr;
-+		unsigned long size = biosmap->size;
-+		unsigned long end = start + size;
-+		unsigned long type = biosmap->type;
++#
++# USB Device Class drivers
++#
++# CONFIG_USB_ACM is not set
++# CONFIG_USB_PRINTER is not set
 +
-+		/* Overflow in 64 bits? Ignore the memory map. */
-+		if (start > end)
-+			return -1;
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
 +
-+#ifndef CONFIG_XEN
-+		/*
-+		 * Some BIOSes claim RAM in the 640k - 1M region.
-+		 * Not right. Fix it up.
-+		 * 
-+		 * This should be removed on Hammer which is supposed to not
-+		 * have non e820 covered ISA mappings there, but I had some strange
-+		 * problems so it stays for now.  -AK
-+		 */
-+		if (type == E820_RAM) {
-+			if (start < 0x100000ULL && end > 0xA0000ULL) {
-+				if (start < 0xA0000ULL)
-+					add_memory_region(start, 0xA0000ULL-start, type);
-+				if (end <= 0x100000ULL)
-+					continue;
-+				start = 0x100000ULL;
-+				size = end - start;
-+			}
-+		}
-+#endif
++#
++# may also be needed; see USB_STORAGE Help for more information
++#
++CONFIG_USB_STORAGE=y
++# CONFIG_USB_STORAGE_DEBUG is not set
++# CONFIG_USB_STORAGE_DATAFAB is not set
++# CONFIG_USB_STORAGE_FREECOM is not set
++# CONFIG_USB_STORAGE_ISD200 is not set
++# CONFIG_USB_STORAGE_DPCM is not set
++# CONFIG_USB_STORAGE_USBAT is not set
++# CONFIG_USB_STORAGE_SDDR09 is not set
++# CONFIG_USB_STORAGE_SDDR55 is not set
++# CONFIG_USB_STORAGE_JUMPSHOT is not set
++# CONFIG_USB_STORAGE_ALAUDA is not set
++# CONFIG_USB_LIBUSUAL is not set
 +
-+		add_memory_region(start, size, type);
-+	} while (biosmap++,--nr_map);
-+	return 0;
-+}
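
The 640k-1M clamp in copy_e820_map() is easiest to see with a worked
example (hypothetical map): a single BIOS entry E820_RAM [0, 0x200000) is
split into [0, 0xA0000) plus [0x100000, 0x200000), dropping the legacy ISA
hole at [0xA0000, 0x100000). Under CONFIG_XEN the clamp is skipped, since
the pseudo-physical map a guest sees has no such hole.
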
++#
++# USB Input Devices
++#
++CONFIG_USB_HID=y
++CONFIG_USB_HIDINPUT=y
++# CONFIG_USB_HIDINPUT_POWERBOOK is not set
++# CONFIG_HID_FF is not set
++CONFIG_USB_HIDDEV=y
++# CONFIG_USB_AIPTEK is not set
++# CONFIG_USB_WACOM is not set
++# CONFIG_USB_ACECAD is not set
++# CONFIG_USB_KBTAB is not set
++# CONFIG_USB_POWERMATE is not set
++# CONFIG_USB_TOUCHSCREEN is not set
++# CONFIG_USB_YEALINK is not set
++# CONFIG_USB_XPAD is not set
++# CONFIG_USB_ATI_REMOTE is not set
++# CONFIG_USB_ATI_REMOTE2 is not set
++# CONFIG_USB_KEYSPAN_REMOTE is not set
++# CONFIG_USB_APPLETOUCH is not set
 +
-+#ifndef CONFIG_XEN
-+void __init setup_memory_region(void)
-+{
-+	char *who = "BIOS-e820";
++#
++# USB Imaging devices
++#
++# CONFIG_USB_MDC800 is not set
++# CONFIG_USB_MICROTEK is not set
 +
-+	/*
-+	 * Try to copy the BIOS-supplied E820-map.
-+	 *
-+	 * Otherwise fake a memory map; one section from 0k->640k,
-+	 * the next section from 1mb->appropriate_mem_k
-+	 */
-+	sanitize_e820_map(E820_MAP, &E820_MAP_NR);
-+	if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
-+		unsigned long mem_size;
++#
++# USB Network Adapters
++#
++# CONFIG_USB_CATC is not set
++# CONFIG_USB_KAWETH is not set
++# CONFIG_USB_PEGASUS is not set
++# CONFIG_USB_RTL8150 is not set
++# CONFIG_USB_USBNET is not set
++CONFIG_USB_MON=y
 +
-+		/* compare results from other methods and take the greater */
-+		if (ALT_MEM_K < EXT_MEM_K) {
-+			mem_size = EXT_MEM_K;
-+			who = "BIOS-88";
-+		} else {
-+			mem_size = ALT_MEM_K;
-+			who = "BIOS-e801";
-+		}
++#
++# USB port drivers
++#
 +
-+		e820.nr_map = 0;
-+		add_memory_region(0, LOWMEMSIZE(), E820_RAM);
-+		add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
-+  	}
-+	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
-+	e820_print_map(who);
-+}
++#
++# USB Serial Converter support
++#
++# CONFIG_USB_SERIAL is not set
 +
-+#else  /* CONFIG_XEN */
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++# CONFIG_USB_AUERSWALD is not set
++# CONFIG_USB_RIO500 is not set
++# CONFIG_USB_LEGOTOWER is not set
++# CONFIG_USB_LCD is not set
++# CONFIG_USB_LED is not set
++# CONFIG_USB_CYPRESS_CY7C63 is not set
++# CONFIG_USB_CYTHERM is not set
++# CONFIG_USB_PHIDGETKIT is not set
++# CONFIG_USB_PHIDGETSERVO is not set
++# CONFIG_USB_IDMOUSE is not set
++# CONFIG_USB_APPLEDISPLAY is not set
++# CONFIG_USB_SISUSBVGA is not set
++# CONFIG_USB_LD is not set
++# CONFIG_USB_TEST is not set
 +
-+void __init setup_memory_region(void)
-+{
-+	int rc;
-+	struct xen_memory_map memmap;
-+	/*
-+	 * This is rather large for a stack variable but this early in
-+	 * the boot process we know we have plenty slack space.
-+	 */
-+	struct e820entry map[E820MAX];
++#
++# USB DSL modem support
++#
 +
-+	memmap.nr_entries = E820MAX;
-+	set_xen_guest_handle(memmap.buffer, map);
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
 +
-+	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
-+	if ( rc == -ENOSYS ) {
-+		memmap.nr_entries = 1;
-+		map[0].addr = 0ULL;
-+		map[0].size = xen_start_info->nr_pages << PAGE_SHIFT;
-+		/* 8MB slack (to balance backend allocations). */
-+		map[0].size += 8 << 20;
-+		map[0].type = E820_RAM;
-+		rc = 0;
-+	}
-+	BUG_ON(rc);
++#
++# MMC/SD Card support
++#
++# CONFIG_MMC is not set
 +
-+	sanitize_e820_map(map, (char *)&memmap.nr_entries);
++#
++# LED devices
++#
++# CONFIG_NEW_LEDS is not set
 +
-+	BUG_ON(copy_e820_map(map, (char)memmap.nr_entries) < 0);
++#
++# LED drivers
++#
 +
-+	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
-+	e820_print_map("Xen");
-+}
-+#endif
++#
++# LED Triggers
++#
 +
-+void __init parse_memopt(char *p, char **from) 
-+{ 
-+	int i;
-+	unsigned long current_end;
-+	unsigned long end;
++#
++# InfiniBand support
++#
++# CONFIG_INFINIBAND is not set
 +
-+	end_user_pfn = memparse(p, from);
-+	end_user_pfn >>= PAGE_SHIFT;	
++#
++# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
++#
 +
-+	end = end_user_pfn<<PAGE_SHIFT;
-+	i = e820.nr_map-1;
-+	current_end = e820.map[i].addr + e820.map[i].size;
++#
++# Real Time Clock
++#
++# CONFIG_RTC_CLASS is not set
 +
-+	if (current_end < end) {
-+		/*
-+                 * The e820 map ends before our requested size so
-+                 * extend the final entry to the requested address.
-+                 */
-+		if (e820.map[i].type == E820_RAM)
-+			e820.map[i].size = end - e820.map[i].addr;
-+		else
-+			add_memory_region(current_end, end - current_end, E820_RAM);
-+	}
-+} 
++#
++# DMA Engine support
++#
++# CONFIG_DMA_ENGINE is not set
 +
-+void __init parse_memmapopt(char *p, char **from)
-+{
-+	unsigned long long start_at, mem_size;
++#
++# DMA Clients
++#
 +
-+	mem_size = memparse(p, from);
-+	p = *from;
-+	if (*p == '@') {
-+		start_at = memparse(p+1, from);
-+		add_memory_region(start_at, mem_size, E820_RAM);
-+	} else if (*p == '#') {
-+		start_at = memparse(p+1, from);
-+		add_memory_region(start_at, mem_size, E820_ACPI);
-+	} else if (*p == '$') {
-+		start_at = memparse(p+1, from);
-+		add_memory_region(start_at, mem_size, E820_RESERVED);
-+	} else {
-+		end_user_pfn = (mem_size >> PAGE_SHIFT);
-+	}
-+	p = *from;
-+}
++#
++# DMA Devices
++#
 +
-+unsigned long pci_mem_start = 0xaeedbabe;
-+EXPORT_SYMBOL(pci_mem_start);
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++CONFIG_EXT2_FS_POSIX_ACL=y
++CONFIG_EXT2_FS_SECURITY=y
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++CONFIG_EXT3_FS_POSIX_ACL=y
++CONFIG_EXT3_FS_SECURITY=y
++CONFIG_JBD=y
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=y
++# CONFIG_REISERFS_CHECK is not set
++# CONFIG_REISERFS_PROC_INFO is not set
++CONFIG_REISERFS_FS_XATTR=y
++CONFIG_REISERFS_FS_POSIX_ACL=y
++CONFIG_REISERFS_FS_SECURITY=y
++# CONFIG_JFS_FS is not set
++CONFIG_FS_POSIX_ACL=y
++CONFIG_XFS_FS=y
++# CONFIG_XFS_QUOTA is not set
++# CONFIG_XFS_SECURITY is not set
++# CONFIG_XFS_POSIX_ACL is not set
++# CONFIG_XFS_RT is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=y
++CONFIG_AUTOFS4_FS=y
++# CONFIG_FUSE_FS is not set
 +
-+/*
-+ * Search for the biggest gap in the low 32 bits of the e820
-+ * memory space.  We pass this space to PCI to assign MMIO resources
-+ * for hotplug or unconfigured devices in.
-+ * Hopefully the BIOS let enough space left.
-+ */
-+__init void e820_setup_gap(struct e820entry *e820, int nr_map)
-+{
-+	unsigned long gapstart, gapsize, round;
-+	unsigned long last;
-+	int i;
-+	int found = 0;
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++CONFIG_JOLIET=y
++# CONFIG_ZISOFS is not set
++CONFIG_UDF_FS=y
++CONFIG_UDF_NLS=y
 +
-+	last = 0x100000000ull;
-+	gapstart = 0x10000000;
-+	gapsize = 0x400000;
-+	i = nr_map;
-+	while (--i >= 0) {
-+		unsigned long long start = e820[i].addr;
-+		unsigned long long end = start + e820[i].size;
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=y
++CONFIG_MSDOS_FS=y
++CONFIG_VFAT_FS=y
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
 +
-+		/*
-+		 * Since "last" is at most 4GB, we know we'll
-+		 * fit in 32 bits if this condition is true
-+		 */
-+		if (last > end) {
-+			unsigned long gap = last - end;
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_IOMEM_MACHINE=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++# CONFIG_CONFIGFS_FS is not set
 +
-+			if (gap > gapsize) {
-+				gapsize = gap;
-+				gapstart = end;
-+				found = 1;
-+			}
-+		}
-+		if (start < last)
-+			last = start;
-+	}
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++# CONFIG_CRAMFS is not set
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
 +
-+	if (!found) {
-+		gapstart = (end_pfn << PAGE_SHIFT) + 1024*1024;
-+		printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit address range\n"
-+		       KERN_ERR "PCI: Unassigned devices with 32bit resource registers may break!\n");
-+	}
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++CONFIG_NFS_V4=y
++CONFIG_NFS_DIRECTIO=y
++CONFIG_NFSD=y
++CONFIG_NFSD_V3=y
++# CONFIG_NFSD_V3_ACL is not set
++CONFIG_NFSD_V4=y
++CONFIG_NFSD_TCP=y
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++CONFIG_SUNRPC_GSS=y
++CONFIG_RPCSEC_GSS_KRB5=y
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++CONFIG_SMB_FS=y
++CONFIG_SMB_NLS_DEFAULT=y
++CONFIG_SMB_NLS_REMOTE="cp437"
++CONFIG_CIFS=y
++# CONFIG_CIFS_STATS is not set
++# CONFIG_CIFS_WEAK_PW_HASH is not set
++# CONFIG_CIFS_XATTR is not set
++# CONFIG_CIFS_DEBUG2 is not set
++# CONFIG_CIFS_EXPERIMENTAL is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++# CONFIG_9P_FS is not set
 +
-+	/*
-+	 * See how much we want to round up: start off with
-+	 * rounding to the next 1MB area.
-+	 */
-+	round = 0x100000;
-+	while ((gapsize >> 4) > round)
-+		round += round;
-+	/* Fun with two's complement */
-+	pci_mem_start = (gapstart + round) & -round;
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++# CONFIG_MAC_PARTITION is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_BSD_DISKLABEL is not set
++# CONFIG_MINIX_SUBPARTITION is not set
++# CONFIG_SOLARIS_X86_PARTITION is not set
++# CONFIG_UNIXWARE_DISKLABEL is not set
++# CONFIG_LDM_PARTITION is not set
++CONFIG_SGI_PARTITION=y
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++CONFIG_EFI_PARTITION=y
 +
-+	printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
-+		pci_mem_start, gapstart, gapsize);
-+}
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/early_printk-xen.c tmp-linux-2.6-xen.patch/arch/x86_64/kernel/early_printk-xen.c
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/early_printk-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/early_printk-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,302 @@
-+#include <linux/console.h>
-+#include <linux/kernel.h>
-+#include <linux/init.h>
-+#include <linux/string.h>
-+#include <linux/screen_info.h>
-+#include <asm/io.h>
-+#include <asm/processor.h>
-+#include <asm/fcntl.h>
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++CONFIG_NLS_CODEPAGE_737=y
++CONFIG_NLS_CODEPAGE_775=y
++CONFIG_NLS_CODEPAGE_850=y
++CONFIG_NLS_CODEPAGE_852=y
++CONFIG_NLS_CODEPAGE_855=y
++CONFIG_NLS_CODEPAGE_857=y
++CONFIG_NLS_CODEPAGE_860=y
++CONFIG_NLS_CODEPAGE_861=y
++CONFIG_NLS_CODEPAGE_862=y
++CONFIG_NLS_CODEPAGE_863=y
++CONFIG_NLS_CODEPAGE_864=y
++CONFIG_NLS_CODEPAGE_865=y
++CONFIG_NLS_CODEPAGE_866=y
++CONFIG_NLS_CODEPAGE_869=y
++CONFIG_NLS_CODEPAGE_936=y
++CONFIG_NLS_CODEPAGE_950=y
++CONFIG_NLS_CODEPAGE_932=y
++CONFIG_NLS_CODEPAGE_949=y
++CONFIG_NLS_CODEPAGE_874=y
++CONFIG_NLS_ISO8859_8=y
++# CONFIG_NLS_CODEPAGE_1250 is not set
++CONFIG_NLS_CODEPAGE_1251=y
++# CONFIG_NLS_ASCII is not set
++CONFIG_NLS_ISO8859_1=y
++CONFIG_NLS_ISO8859_2=y
++CONFIG_NLS_ISO8859_3=y
++CONFIG_NLS_ISO8859_4=y
++CONFIG_NLS_ISO8859_5=y
++CONFIG_NLS_ISO8859_6=y
++CONFIG_NLS_ISO8859_7=y
++CONFIG_NLS_ISO8859_9=y
++CONFIG_NLS_ISO8859_13=y
++CONFIG_NLS_ISO8859_14=y
++CONFIG_NLS_ISO8859_15=y
++CONFIG_NLS_KOI8_R=y
++CONFIG_NLS_KOI8_U=y
++CONFIG_NLS_UTF8=y
 +
-+/* Simple VGA output */
++#
++# Library routines
++#
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++CONFIG_CRC32=y
++# CONFIG_LIBCRC32C is not set
++CONFIG_PLIST=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_GENERIC_PENDING_IRQ=y
++CONFIG_IRQ_PER_CPU=y
 +
-+#ifdef __i386__
-+#include <asm/setup.h>
-+#define VGABASE		(__ISA_IO_base + 0xb8000)
-+#else
-+#include <asm/bootsetup.h>
-+#define VGABASE		((void __iomem *)0xffffffff800b8000UL)
-+#endif
++#
++# HP Simulator drivers
++#
++# CONFIG_HP_SIMETH is not set
++# CONFIG_HP_SIMSERIAL is not set
++# CONFIG_HP_SIMSCSI is not set
 +
-+#ifndef CONFIG_XEN
-+static int max_ypos = 25, max_xpos = 80;
-+static int current_ypos = 25, current_xpos = 0; 
++#
++# Instrumentation Support
++#
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
 +
-+static void early_vga_write(struct console *con, const char *str, unsigned n)
-+{
-+	char c;
-+	int  i, k, j;
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_UNUSED_SYMBOLS=y
++CONFIG_DEBUG_KERNEL=y
++CONFIG_LOG_BUF_SHIFT=20
++CONFIG_DETECT_SOFTLOCKUP=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++CONFIG_DEBUG_MUTEXES=y
++# CONFIG_DEBUG_RWSEMS is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_DEBUG_VM is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_RCU_TORTURE_TEST is not set
++CONFIG_IA64_GRANULE_16MB=y
++# CONFIG_IA64_GRANULE_64MB is not set
++CONFIG_IA64_PRINT_HAZARDS=y
++# CONFIG_DISABLE_VHPT is not set
++# CONFIG_IA64_DEBUG_CMPXCHG is not set
++# CONFIG_IA64_DEBUG_IRQ is not set
 +
-+	while ((c = *str++) != '\0' && n-- > 0) {
-+		if (current_ypos >= max_ypos) {
-+			/* scroll 1 line up */
-+			for (k = 1, j = 0; k < max_ypos; k++, j++) {
-+				for (i = 0; i < max_xpos; i++) {
-+					writew(readw(VGABASE+2*(max_xpos*k+i)),
-+					       VGABASE + 2*(max_xpos*j + i));
-+				}
-+			}
-+			for (i = 0; i < max_xpos; i++)
-+				writew(0x720, VGABASE + 2*(max_xpos*j + i));
-+			current_ypos = max_ypos-1;
-+		}
-+		if (c == '\n') {
-+			current_xpos = 0;
-+			current_ypos++;
-+		} else if (c != '\r')  {
-+			writew(((0x7 << 8) | (unsigned short) c),
-+			       VGABASE + 2*(max_xpos*current_ypos +
-+						current_xpos++));
-+			if (current_xpos >= max_xpos) {
-+				current_xpos = 0;
-+				current_ypos++;
-+			}
-+		}
-+	}
-+}
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
 +
-+static struct console early_vga_console = {
-+	.name =		"earlyvga",
-+	.write =	early_vga_write,
-+	.flags =	CON_PRINTBUFFER,
-+	.index =	-1,
-+};
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_CRC32C is not set
++# CONFIG_CRYPTO_TEST is not set
 +
-+/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */
++#
++# Hardware crypto devices
++#
++# CONFIG_XEN_SMPBOOT is not set
++# CONFIG_XEN_DEVMEM is not set
++CONFIG_XEN_INTERFACE_VERSION=0x00030207
 +
-+static int early_serial_base = 0x3f8;  /* ttyS0 */
++#
++# XEN
++#
++CONFIG_XEN_PRIVILEGED_GUEST=y
++# CONFIG_XEN_UNPRIVILEGED_GUEST is not set
++CONFIG_XEN_PRIVCMD=y
++CONFIG_XEN_XENBUS_DEV=y
++CONFIG_XEN_BACKEND=y
++CONFIG_XEN_BLKDEV_BACKEND=y
++CONFIG_XEN_BLKDEV_TAP=y
++CONFIG_XEN_NETDEV_BACKEND=y
++# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_BACKEND is not set
++# CONFIG_XEN_NETDEV_LOOPBACK is not set
++CONFIG_XEN_PCIDEV_BACKEND=y
++# CONFIG_XEN_PCIDEV_BACKEND_VPCI is not set
++# CONFIG_XEN_PCIDEV_BACKEND_PASS is not set
++# CONFIG_XEN_PCIDEV_BACKEND_SLOT is not set
++CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER=y
++# CONFIG_XEN_PCIDEV_BE_DEBUG is not set
++CONFIG_XEN_TPMDEV_BACKEND=m
++CONFIG_XEN_SCSI_BACKEND=m
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++CONFIG_XEN_SCSI_FRONTEND=m
++CONFIG_XEN_GRANT_DEV=y
++# CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND is not set
++CONFIG_XEN_FRAMEBUFFER=y
++CONFIG_XEN_KEYBOARD=y
++# CONFIG_XEN_SCRUB_PAGES is not set
++CONFIG_XEN_DISABLE_SERIAL=y
++CONFIG_XEN_SYSFS=y
++CONFIG_XEN_COMPAT_030002_AND_LATER=y
++# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
++# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
++# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
++CONFIG_XEN_COMPAT=0x030002
++CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
++CONFIG_NO_IDLE_HZ=y
++CONFIG_XEN_BALLOON=y
++CONFIG_XEN_XENCOMM=y
+diff -r d894e36cfc30 -r 0aa021803deb buildconfigs/linux-defconfig_xen_x86_32
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/buildconfigs/linux-defconfig_xen_x86_32	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,3326 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.18.8
++# Mon Oct 15 14:37:47 2007
++#
++CONFIG_X86_32=y
++CONFIG_LOCKDEP_SUPPORT=y
++CONFIG_STACKTRACE_SUPPORT=y
++CONFIG_SEMAPHORE_SLEEPERS=y
++CONFIG_X86=y
++CONFIG_MMU=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_DMI=y
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
 +
-+#define XMTRDY          0x20
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
 +
-+#define DLAB		0x80
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++# CONFIG_LOCALVERSION_AUTO is not set
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_BSD_PROCESS_ACCT=y
++CONFIG_BSD_PROCESS_ACCT_V3=y
++CONFIG_TASKSTATS=y
++CONFIG_TASK_DELAY_ACCT=y
++CONFIG_AUDIT=y
++CONFIG_AUDITSYSCALL=y
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++CONFIG_CPUSETS=y
++# CONFIG_RELAY is not set
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++# CONFIG_EMBEDDED is not set
++CONFIG_UID16=y
++CONFIG_SYSCTL=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_SLAB=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_SLOB is not set
 +
-+#define TXR             0       /*  Transmit register (WRITE) */
-+#define RXR             0       /*  Receive register  (READ)  */
-+#define IER             1       /*  Interrupt Enable          */
-+#define IIR             2       /*  Interrupt ID              */
-+#define FCR             2       /*  FIFO control              */
-+#define LCR             3       /*  Line control              */
-+#define MCR             4       /*  Modem control             */
-+#define LSR             5       /*  Line Status               */
-+#define MSR             6       /*  Modem Status              */
-+#define DLL             0       /*  Divisor Latch Low         */
-+#define DLH             1       /*  Divisor latch High        */
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++CONFIG_MODULE_FORCE_UNLOAD=y
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++CONFIG_KMOD=y
++CONFIG_STOP_MACHINE=y
 +
-+static int early_serial_putc(unsigned char ch)
-+{
-+	unsigned timeout = 0xffff;
-+	while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
-+		cpu_relax();
-+	outb(ch, early_serial_base + TXR);
-+	return timeout ? 0 : -1;
-+}
++#
++# Block layer
++#
++CONFIG_LBD=y
++# CONFIG_BLK_DEV_IO_TRACE is not set
++CONFIG_LSF=y
 +
-+static void early_serial_write(struct console *con, const char *s, unsigned n)
-+{
-+	while (*s && n-- > 0) {
-+		early_serial_putc(*s);
-+		if (*s == '\n')
-+			early_serial_putc('\r');
-+		s++;
-+	}
-+}
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++# CONFIG_DEFAULT_AS is not set
++# CONFIG_DEFAULT_DEADLINE is not set
++CONFIG_DEFAULT_CFQ=y
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="cfq"
 +
-+#define DEFAULT_BAUD 9600
++#
++# Processor type and features
++#
++CONFIG_SMP=y
++# CONFIG_X86_PC is not set
++CONFIG_X86_XEN=y
++# CONFIG_X86_ELAN is not set
++# CONFIG_X86_VOYAGER is not set
++# CONFIG_X86_NUMAQ is not set
++# CONFIG_X86_SUMMIT is not set
++# CONFIG_X86_BIGSMP is not set
++# CONFIG_X86_VISWS is not set
++# CONFIG_X86_GENERICARCH is not set
++# CONFIG_X86_ES7000 is not set
++# CONFIG_M386 is not set
++# CONFIG_M486 is not set
++# CONFIG_M586 is not set
++# CONFIG_M586TSC is not set
++# CONFIG_M586MMX is not set
++CONFIG_M686=y
++# CONFIG_MPENTIUMII is not set
++# CONFIG_MPENTIUMIII is not set
++# CONFIG_MPENTIUMM is not set
++# CONFIG_MPENTIUM4 is not set
++# CONFIG_MK6 is not set
++# CONFIG_MK7 is not set
++# CONFIG_MK8 is not set
++# CONFIG_MCRUSOE is not set
++# CONFIG_MEFFICEON is not set
++# CONFIG_MWINCHIPC6 is not set
++# CONFIG_MWINCHIP2 is not set
++# CONFIG_MWINCHIP3D is not set
++# CONFIG_MGEODEGX1 is not set
++# CONFIG_MGEODE_LX is not set
++# CONFIG_MCYRIXIII is not set
++# CONFIG_MVIAC3_2 is not set
++CONFIG_X86_GENERIC=y
++CONFIG_X86_CMPXCHG=y
++CONFIG_X86_XADD=y
++CONFIG_X86_L1_CACHE_SHIFT=7
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_X86_PPRO_FENCE=y
++CONFIG_X86_WP_WORKS_OK=y
++CONFIG_X86_INVLPG=y
++CONFIG_X86_BSWAP=y
++CONFIG_X86_POPAD_OK=y
++CONFIG_X86_CMPXCHG64=y
++CONFIG_X86_GOOD_APIC=y
++CONFIG_X86_INTEL_USERCOPY=y
++CONFIG_X86_USE_PPRO_CHECKSUM=y
++CONFIG_NR_CPUS=32
++# CONFIG_PREEMPT_NONE is not set
++CONFIG_PREEMPT_VOLUNTARY=y
++# CONFIG_PREEMPT is not set
++CONFIG_PREEMPT_BKL=y
++CONFIG_X86_LOCAL_APIC=y
++CONFIG_X86_IO_APIC=y
++CONFIG_VM86=y
++# CONFIG_TOSHIBA is not set
++# CONFIG_I8K is not set
++# CONFIG_X86_REBOOTFIXUPS is not set
++CONFIG_MICROCODE=y
++CONFIG_X86_MSR=y
++CONFIG_X86_CPUID=m
++CONFIG_SWIOTLB=y
 +
-+static __init void early_serial_init(char *s)
-+{
-+	unsigned char c;
-+	unsigned divisor;
-+	unsigned baud = DEFAULT_BAUD;
-+	char *e;
++#
++# Firmware Drivers
++#
++CONFIG_EDD=y
++CONFIG_DELL_RBU=m
++CONFIG_DCDBAS=m
++# CONFIG_NOHIGHMEM is not set
++CONFIG_HIGHMEM4G=y
++# CONFIG_HIGHMEM64G is not set
++CONFIG_PAGE_OFFSET=0xC0000000
++CONFIG_HIGHMEM=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_RESOURCES_64BIT=y
++# CONFIG_HIGHPTE is not set
++CONFIG_MTRR=y
++CONFIG_REGPARM=y
++CONFIG_SECCOMP=y
++CONFIG_HZ_100=y
++# CONFIG_HZ_250 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=100
++CONFIG_KEXEC=y
++# CONFIG_CRASH_DUMP is not set
++CONFIG_PHYSICAL_START=0x100000
++CONFIG_HOTPLUG_CPU=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
 +
-+	if (*s == ',')
-+		++s;
++#
++# Power management options (ACPI, APM)
++#
++CONFIG_PM=y
++# CONFIG_PM_LEGACY is not set
++CONFIG_PM_DEBUG=y
++# CONFIG_PM_TRACE is not set
++# CONFIG_SOFTWARE_SUSPEND is not set
++CONFIG_SUSPEND_SMP=y
 +
-+	if (*s) {
-+		unsigned port;
-+		if (!strncmp(s,"0x",2)) {
-+			early_serial_base = simple_strtoul(s, &e, 16);
-+		} else {
-+			static int bases[] = { 0x3f8, 0x2f8 };
++#
++# ACPI (Advanced Configuration and Power Interface) Support
++#
++CONFIG_ACPI=y
++CONFIG_ACPI_SLEEP=y
++CONFIG_ACPI_SLEEP_PROC_FS=y
++# CONFIG_ACPI_SLEEP_PROC_SLEEP is not set
++CONFIG_ACPI_AC=m
++CONFIG_ACPI_BATTERY=m
++CONFIG_ACPI_BUTTON=m
++CONFIG_ACPI_VIDEO=m
++CONFIG_ACPI_HOTKEY=m
++CONFIG_ACPI_FAN=m
++CONFIG_ACPI_DOCK=m
++CONFIG_ACPI_PROCESSOR=m
++CONFIG_ACPI_HOTPLUG_CPU=y
++CONFIG_ACPI_THERMAL=m
++CONFIG_ACPI_ASUS=m
++CONFIG_ACPI_IBM=m
++CONFIG_ACPI_TOSHIBA=m
++CONFIG_ACPI_BLACKLIST_YEAR=0
++# CONFIG_ACPI_DEBUG is not set
++CONFIG_ACPI_EC=y
++CONFIG_ACPI_POWER=y
++CONFIG_ACPI_SYSTEM=y
++CONFIG_ACPI_CONTAINER=m
++CONFIG_ACPI_SBS=m
++CONFIG_ACPI_PV_SLEEP=y
 +
-+			if (!strncmp(s,"ttyS",4))
-+				s += 4;
-+			port = simple_strtoul(s, &e, 10);
-+			if (port > 1 || s == e)
-+				port = 0;
-+			early_serial_base = bases[port];
-+		}
-+		s += strcspn(s, ",");
-+		if (*s == ',')
-+			s++;
-+	}
++#
++# CPU Frequency scaling
++#
++# CONFIG_CPU_FREQ is not set
 +
-+	outb(0x3, early_serial_base + LCR);	/* 8n1 */
-+	outb(0, early_serial_base + IER);	/* no interrupt */
-+	outb(0, early_serial_base + FCR);	/* no fifo */
-+	outb(0x3, early_serial_base + MCR);	/* DTR + RTS */
++#
++# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
++#
++CONFIG_PCI=y
++# CONFIG_PCI_GOBIOS is not set
++# CONFIG_PCI_GOMMCONFIG is not set
++# CONFIG_PCI_GODIRECT is not set
++# CONFIG_PCI_GOXEN_FE is not set
++CONFIG_PCI_GOANY=y
++CONFIG_PCI_DIRECT=y
++CONFIG_PCI_MMCONFIG=y
++CONFIG_XEN_PCIDEV_FRONTEND=y
++# CONFIG_XEN_PCIDEV_FE_DEBUG is not set
++# CONFIG_PCIEPORTBUS is not set
++CONFIG_PCI_MSI=y
++# CONFIG_PCI_DEBUG is not set
++CONFIG_ISA_DMA_API=y
++CONFIG_SCx200=m
++CONFIG_K8_NB=y
 +
-+	if (*s) {
-+		baud = simple_strtoul(s, &e, 0);
-+		if (baud == 0 || s == e)
-+			baud = DEFAULT_BAUD;
-+	}
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++CONFIG_PCCARD=m
++# CONFIG_PCMCIA_DEBUG is not set
++CONFIG_PCMCIA=m
++CONFIG_PCMCIA_LOAD_CIS=y
++CONFIG_PCMCIA_IOCTL=y
++CONFIG_CARDBUS=y
 +
-+	divisor = 115200 / baud;
-+	c = inb(early_serial_base + LCR);
-+	outb(c | DLAB, early_serial_base + LCR);
-+	outb(divisor & 0xff, early_serial_base + DLL);
-+	outb((divisor >> 8) & 0xff, early_serial_base + DLH);
-+	outb(c & ~DLAB, early_serial_base + LCR);
-+}
++#
++# PC-card bridges
++#
++CONFIG_YENTA=m
++CONFIG_YENTA_O2=y
++CONFIG_YENTA_RICOH=y
++CONFIG_YENTA_TI=y
++CONFIG_YENTA_ENE_TUNE=y
++CONFIG_YENTA_TOSHIBA=y
++CONFIG_PD6729=m
++CONFIG_I82092=m
++CONFIG_PCCARD_NONSTATIC=m
 +
-+#else /* CONFIG_XEN */
++#
++# PCI Hotplug Support
++#
++CONFIG_HOTPLUG_PCI=m
++CONFIG_HOTPLUG_PCI_FAKE=m
++CONFIG_HOTPLUG_PCI_ACPI=m
++CONFIG_HOTPLUG_PCI_ACPI_IBM=m
++CONFIG_HOTPLUG_PCI_CPCI=y
++CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m
++CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m
++CONFIG_HOTPLUG_PCI_SHPC=m
++# CONFIG_HOTPLUG_PCI_SHPC_POLL_EVENT_MODE is not set
 +
-+static void
-+early_serial_write(struct console *con, const char *s, unsigned count)
-+{
-+	int n;
++#
++# Executable file formats
++#
++CONFIG_BINFMT_ELF=y
++CONFIG_BINFMT_AOUT=m
++CONFIG_BINFMT_MISC=m
 +
-+	while (count > 0) {
-+		n = HYPERVISOR_console_io(CONSOLEIO_write, count, (char *)s);
-+		if (n <= 0)
-+			break;
-+		count -= n;
-+		s += n;
-+	}
-+} 
++#
++# Networking
++#
++CONFIG_NET=y
 +
-+static __init void early_serial_init(char *s)
-+{
-+}
++#
++# Networking options
++#
++# CONFIG_NETDEBUG is not set
++CONFIG_PACKET=y
++CONFIG_PACKET_MMAP=y
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++CONFIG_XFRM_USER=m
++CONFIG_NET_KEY=m
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++CONFIG_IP_ADVANCED_ROUTER=y
++CONFIG_ASK_IP_FIB_HASH=y
++# CONFIG_IP_FIB_TRIE is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_MULTIPLE_TABLES=y
++CONFIG_IP_ROUTE_FWMARK=y
++CONFIG_IP_ROUTE_MULTIPATH=y
++# CONFIG_IP_ROUTE_MULTIPATH_CACHED is not set
++CONFIG_IP_ROUTE_VERBOSE=y
++# CONFIG_IP_PNP is not set
++CONFIG_NET_IPIP=m
++CONFIG_NET_IPGRE=m
++CONFIG_NET_IPGRE_BROADCAST=y
++CONFIG_IP_MROUTE=y
++CONFIG_IP_PIMSM_V1=y
++CONFIG_IP_PIMSM_V2=y
++# CONFIG_ARPD is not set
++CONFIG_SYN_COOKIES=y
++CONFIG_INET_AH=m
++CONFIG_INET_ESP=m
++CONFIG_INET_IPCOMP=m
++CONFIG_INET_XFRM_TUNNEL=m
++CONFIG_INET_TUNNEL=m
++CONFIG_INET_XFRM_MODE_TRANSPORT=m
++CONFIG_INET_XFRM_MODE_TUNNEL=m
++CONFIG_INET_DIAG=m
++CONFIG_INET_TCP_DIAG=m
++CONFIG_TCP_CONG_ADVANCED=y
 +
-+/*
-+ * No early VGA console on Xen, as we do not have convenient ISA-space
-+ * mappings. Someone should fix this for domain 0. For now, use fake serial.
-+ */
-+#define early_vga_console early_serial_console
++#
++# TCP congestion control
++#
++CONFIG_TCP_CONG_BIC=m
++CONFIG_TCP_CONG_CUBIC=m
++CONFIG_TCP_CONG_WESTWOOD=m
++CONFIG_TCP_CONG_HTCP=m
++CONFIG_TCP_CONG_HSTCP=m
++CONFIG_TCP_CONG_HYBLA=m
++CONFIG_TCP_CONG_VEGAS=m
++CONFIG_TCP_CONG_SCALABLE=m
++CONFIG_TCP_CONG_LP=m
++CONFIG_TCP_CONG_VENO=m
 +
-+#endif
++#
++# IP: Virtual Server Configuration
++#
++CONFIG_IP_VS=m
++# CONFIG_IP_VS_DEBUG is not set
++CONFIG_IP_VS_TAB_BITS=12
 +
-+static struct console early_serial_console = {
-+	.name =		"earlyser",
-+	.write =	early_serial_write,
-+	.flags =	CON_PRINTBUFFER,
-+	.index =	-1,
-+};
++#
++# IPVS transport protocol load balancing support
++#
++CONFIG_IP_VS_PROTO_TCP=y
++CONFIG_IP_VS_PROTO_UDP=y
++CONFIG_IP_VS_PROTO_ESP=y
++CONFIG_IP_VS_PROTO_AH=y
 +
-+/* Console interface to a host file on AMD's SimNow! */
++#
++# IPVS scheduler
++#
++CONFIG_IP_VS_RR=m
++CONFIG_IP_VS_WRR=m
++CONFIG_IP_VS_LC=m
++CONFIG_IP_VS_WLC=m
++CONFIG_IP_VS_LBLC=m
++CONFIG_IP_VS_LBLCR=m
++CONFIG_IP_VS_DH=m
++CONFIG_IP_VS_SH=m
++CONFIG_IP_VS_SED=m
++CONFIG_IP_VS_NQ=m
 +
-+static int simnow_fd;
++#
++# IPVS application helper
++#
++CONFIG_IP_VS_FTP=m
++CONFIG_IPV6=m
++CONFIG_IPV6_PRIVACY=y
++# CONFIG_IPV6_ROUTER_PREF is not set
++CONFIG_INET6_AH=m
++CONFIG_INET6_ESP=m
++CONFIG_INET6_IPCOMP=m
++CONFIG_INET6_XFRM_TUNNEL=m
++CONFIG_INET6_TUNNEL=m
++CONFIG_INET6_XFRM_MODE_TRANSPORT=m
++CONFIG_INET6_XFRM_MODE_TUNNEL=m
++CONFIG_IPV6_TUNNEL=m
++CONFIG_NETWORK_SECMARK=y
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_BRIDGE_NETFILTER=y
 +
-+enum {
-+	MAGIC1 = 0xBACCD00A,
-+	MAGIC2 = 0xCA110000,
-+	XOPEN = 5,
-+	XWRITE = 4,
-+};
++#
++# Core Netfilter Configuration
++#
++CONFIG_NETFILTER_NETLINK=m
++CONFIG_NETFILTER_NETLINK_QUEUE=m
++CONFIG_NETFILTER_NETLINK_LOG=m
++CONFIG_NETFILTER_XTABLES=m
++CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
++CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
++CONFIG_NETFILTER_XT_TARGET_MARK=m
++CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
++CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
++CONFIG_NETFILTER_XT_TARGET_SECMARK=m
++# CONFIG_NETFILTER_XT_TARGET_CONNSECMARK is not set
++CONFIG_NETFILTER_XT_MATCH_COMMENT=m
++CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
++CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
++CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
++CONFIG_NETFILTER_XT_MATCH_DCCP=m
++CONFIG_NETFILTER_XT_MATCH_ESP=m
++CONFIG_NETFILTER_XT_MATCH_HELPER=m
++CONFIG_NETFILTER_XT_MATCH_LENGTH=m
++CONFIG_NETFILTER_XT_MATCH_LIMIT=m
++CONFIG_NETFILTER_XT_MATCH_MAC=m
++CONFIG_NETFILTER_XT_MATCH_MARK=m
++CONFIG_NETFILTER_XT_MATCH_POLICY=m
++CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
++CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
++CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
++CONFIG_NETFILTER_XT_MATCH_QUOTA=m
++CONFIG_NETFILTER_XT_MATCH_REALM=m
++CONFIG_NETFILTER_XT_MATCH_SCTP=m
++CONFIG_NETFILTER_XT_MATCH_STATE=m
++CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
++CONFIG_NETFILTER_XT_MATCH_STRING=m
++CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 +
-+static noinline long simnow(long cmd, long a, long b, long c)
-+{
-+	long ret;
-+	asm volatile("cpuid" :
-+		     "=a" (ret) :
-+		     "b" (a), "c" (b), "d" (c), "0" (MAGIC1), "D" (cmd + MAGIC2));
-+	return ret;
-+}
++#
++# IP: Netfilter Configuration
++#
++CONFIG_IP_NF_CONNTRACK=m
++CONFIG_IP_NF_CT_ACCT=y
++CONFIG_IP_NF_CONNTRACK_MARK=y
++CONFIG_IP_NF_CONNTRACK_SECMARK=y
++CONFIG_IP_NF_CONNTRACK_EVENTS=y
++CONFIG_IP_NF_CONNTRACK_NETLINK=m
++CONFIG_IP_NF_CT_PROTO_SCTP=m
++CONFIG_IP_NF_FTP=m
++CONFIG_IP_NF_IRC=m
++CONFIG_IP_NF_NETBIOS_NS=m
++CONFIG_IP_NF_TFTP=m
++CONFIG_IP_NF_AMANDA=m
++CONFIG_IP_NF_PPTP=m
++CONFIG_IP_NF_H323=m
++CONFIG_IP_NF_SIP=m
++CONFIG_IP_NF_QUEUE=m
++CONFIG_IP_NF_IPTABLES=m
++CONFIG_IP_NF_MATCH_IPRANGE=m
++CONFIG_IP_NF_MATCH_TOS=m
++CONFIG_IP_NF_MATCH_RECENT=m
++CONFIG_IP_NF_MATCH_ECN=m
++CONFIG_IP_NF_MATCH_DSCP=m
++CONFIG_IP_NF_MATCH_AH=m
++CONFIG_IP_NF_MATCH_TTL=m
++CONFIG_IP_NF_MATCH_OWNER=m
++CONFIG_IP_NF_MATCH_ADDRTYPE=m
++CONFIG_IP_NF_MATCH_HASHLIMIT=m
++CONFIG_IP_NF_FILTER=m
++CONFIG_IP_NF_TARGET_REJECT=m
++CONFIG_IP_NF_TARGET_LOG=m
++CONFIG_IP_NF_TARGET_ULOG=m
++CONFIG_IP_NF_TARGET_TCPMSS=m
++CONFIG_IP_NF_NAT=m
++CONFIG_IP_NF_NAT_NEEDED=y
++CONFIG_IP_NF_TARGET_MASQUERADE=m
++CONFIG_IP_NF_TARGET_REDIRECT=m
++CONFIG_IP_NF_TARGET_NETMAP=m
++CONFIG_IP_NF_TARGET_SAME=m
++CONFIG_IP_NF_NAT_SNMP_BASIC=m
++CONFIG_IP_NF_NAT_IRC=m
++CONFIG_IP_NF_NAT_FTP=m
++CONFIG_IP_NF_NAT_TFTP=m
++CONFIG_IP_NF_NAT_AMANDA=m
++CONFIG_IP_NF_NAT_PPTP=m
++CONFIG_IP_NF_NAT_H323=m
++CONFIG_IP_NF_NAT_SIP=m
++CONFIG_IP_NF_MANGLE=m
++CONFIG_IP_NF_TARGET_TOS=m
++CONFIG_IP_NF_TARGET_ECN=m
++CONFIG_IP_NF_TARGET_DSCP=m
++CONFIG_IP_NF_TARGET_TTL=m
++CONFIG_IP_NF_TARGET_CLUSTERIP=m
++CONFIG_IP_NF_RAW=m
++CONFIG_IP_NF_ARPTABLES=m
++CONFIG_IP_NF_ARPFILTER=m
++CONFIG_IP_NF_ARP_MANGLE=m
 +
-+void __init simnow_init(char *str)
-+{
-+	char *fn = "klog";
-+	if (*str == '=')
-+		fn = ++str;
-+	/* error ignored */
-+	simnow_fd = simnow(XOPEN, (unsigned long)fn, O_WRONLY|O_APPEND|O_CREAT, 0644);
-+}
++#
++# IPv6: Netfilter Configuration (EXPERIMENTAL)
++#
++CONFIG_IP6_NF_QUEUE=m
++CONFIG_IP6_NF_IPTABLES=m
++CONFIG_IP6_NF_MATCH_RT=m
++CONFIG_IP6_NF_MATCH_OPTS=m
++CONFIG_IP6_NF_MATCH_FRAG=m
++CONFIG_IP6_NF_MATCH_HL=m
++CONFIG_IP6_NF_MATCH_OWNER=m
++CONFIG_IP6_NF_MATCH_IPV6HEADER=m
++CONFIG_IP6_NF_MATCH_AH=m
++CONFIG_IP6_NF_MATCH_EUI64=m
++CONFIG_IP6_NF_FILTER=m
++CONFIG_IP6_NF_TARGET_LOG=m
++CONFIG_IP6_NF_TARGET_REJECT=m
++CONFIG_IP6_NF_MANGLE=m
++CONFIG_IP6_NF_TARGET_HL=m
++CONFIG_IP6_NF_RAW=m
 +
-+static void simnow_write(struct console *con, const char *s, unsigned n)
-+{
-+	simnow(XWRITE, simnow_fd, (unsigned long)s, n);
-+}
++#
++# DECnet: Netfilter Configuration
++#
++CONFIG_DECNET_NF_GRABULATOR=m
 +
-+static struct console simnow_console = {
-+	.name =		"simnow",
-+	.write =	simnow_write,
-+	.flags =	CON_PRINTBUFFER,
-+	.index =	-1,
-+};
++#
++# Bridge: Netfilter Configuration
++#
++CONFIG_BRIDGE_NF_EBTABLES=m
++CONFIG_BRIDGE_EBT_BROUTE=m
++CONFIG_BRIDGE_EBT_T_FILTER=m
++CONFIG_BRIDGE_EBT_T_NAT=m
++CONFIG_BRIDGE_EBT_802_3=m
++CONFIG_BRIDGE_EBT_AMONG=m
++CONFIG_BRIDGE_EBT_ARP=m
++CONFIG_BRIDGE_EBT_IP=m
++CONFIG_BRIDGE_EBT_LIMIT=m
++CONFIG_BRIDGE_EBT_MARK=m
++CONFIG_BRIDGE_EBT_PKTTYPE=m
++CONFIG_BRIDGE_EBT_STP=m
++CONFIG_BRIDGE_EBT_VLAN=m
++CONFIG_BRIDGE_EBT_ARPREPLY=m
++CONFIG_BRIDGE_EBT_DNAT=m
++CONFIG_BRIDGE_EBT_MARK_T=m
++CONFIG_BRIDGE_EBT_REDIRECT=m
++CONFIG_BRIDGE_EBT_SNAT=m
++CONFIG_BRIDGE_EBT_LOG=m
++CONFIG_BRIDGE_EBT_ULOG=m
 +
-+/* Direct interface for emergencies */
-+struct console *early_console = &early_vga_console;
-+static int early_console_initialized = 0;
++#
++# DCCP Configuration (EXPERIMENTAL)
++#
++CONFIG_IP_DCCP=m
++CONFIG_INET_DCCP_DIAG=m
++CONFIG_IP_DCCP_ACKVEC=y
 +
-+void early_printk(const char *fmt, ...)
-+{
-+	char buf[512];
-+	int n;
-+	va_list ap;
++#
++# DCCP CCIDs Configuration (EXPERIMENTAL)
++#
++CONFIG_IP_DCCP_CCID2=m
++CONFIG_IP_DCCP_CCID3=m
++CONFIG_IP_DCCP_TFRC_LIB=m
 +
-+	va_start(ap,fmt);
-+	n = vscnprintf(buf,512,fmt,ap);
-+	early_console->write(early_console,buf,n);
-+	va_end(ap);
-+}
++#
++# DCCP Kernel Hacking
++#
++# CONFIG_IP_DCCP_DEBUG is not set
 +
-+static int __initdata keep_early;
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++CONFIG_IP_SCTP=m
++# CONFIG_SCTP_DBG_MSG is not set
++# CONFIG_SCTP_DBG_OBJCNT is not set
++# CONFIG_SCTP_HMAC_NONE is not set
++# CONFIG_SCTP_HMAC_SHA1 is not set
++CONFIG_SCTP_HMAC_MD5=y
 +
-+int __init setup_early_printk(char *opt)
-+{
-+	char *space;
-+	char buf[256];
++#
++# TIPC Configuration (EXPERIMENTAL)
++#
++# CONFIG_TIPC is not set
++CONFIG_ATM=m
++CONFIG_ATM_CLIP=m
++CONFIG_ATM_CLIP_NO_ICMP=y
++CONFIG_ATM_LANE=m
++CONFIG_ATM_MPOA=m
++CONFIG_ATM_BR2684=m
++# CONFIG_ATM_BR2684_IPFILTER is not set
++CONFIG_BRIDGE=m
++CONFIG_VLAN_8021Q=m
++CONFIG_DECNET=m
++CONFIG_DECNET_ROUTER=y
++CONFIG_DECNET_ROUTE_FWMARK=y
++CONFIG_LLC=y
++CONFIG_LLC2=m
++CONFIG_IPX=m
++# CONFIG_IPX_INTERN is not set
++CONFIG_ATALK=m
++CONFIG_DEV_APPLETALK=m
++CONFIG_IPDDP=m
++CONFIG_IPDDP_ENCAP=y
++CONFIG_IPDDP_DECAP=y
++CONFIG_X25=m
++CONFIG_LAPB=m
++CONFIG_ECONET=m
++CONFIG_ECONET_AUNUDP=y
++CONFIG_ECONET_NATIVE=y
++CONFIG_WAN_ROUTER=m
 +
-+	if (early_console_initialized)
-+		return 1;
++#
++# QoS and/or fair queueing
++#
++CONFIG_NET_SCHED=y
++CONFIG_NET_SCH_CLK_JIFFIES=y
++# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
++# CONFIG_NET_SCH_CLK_CPU is not set
 +
-+	strlcpy(buf,opt,sizeof(buf));
-+	space = strchr(buf, ' ');
-+	if (space)
-+		*space = 0;
++#
++# Queueing/Scheduling
++#
++CONFIG_NET_SCH_CBQ=m
++CONFIG_NET_SCH_HTB=m
++CONFIG_NET_SCH_HFSC=m
++CONFIG_NET_SCH_ATM=m
++CONFIG_NET_SCH_PRIO=m
++CONFIG_NET_SCH_RED=m
++CONFIG_NET_SCH_SFQ=m
++CONFIG_NET_SCH_TEQL=m
++CONFIG_NET_SCH_TBF=m
++CONFIG_NET_SCH_GRED=m
++CONFIG_NET_SCH_DSMARK=m
++CONFIG_NET_SCH_NETEM=m
++CONFIG_NET_SCH_INGRESS=m
 +
-+	if (strstr(buf,"keep"))
-+		keep_early = 1;
++#
++# Classification
++#
++CONFIG_NET_CLS=y
++CONFIG_NET_CLS_BASIC=m
++CONFIG_NET_CLS_TCINDEX=m
++CONFIG_NET_CLS_ROUTE4=m
++CONFIG_NET_CLS_ROUTE=y
++CONFIG_NET_CLS_FW=m
++CONFIG_NET_CLS_U32=m
++CONFIG_CLS_U32_PERF=y
++CONFIG_CLS_U32_MARK=y
++CONFIG_NET_CLS_RSVP=m
++CONFIG_NET_CLS_RSVP6=m
++CONFIG_NET_EMATCH=y
++CONFIG_NET_EMATCH_STACK=32
++CONFIG_NET_EMATCH_CMP=m
++CONFIG_NET_EMATCH_NBYTE=m
++CONFIG_NET_EMATCH_U32=m
++CONFIG_NET_EMATCH_META=m
++CONFIG_NET_EMATCH_TEXT=m
++# CONFIG_NET_CLS_ACT is not set
++CONFIG_NET_CLS_POLICE=y
++# CONFIG_NET_CLS_IND is not set
++CONFIG_NET_ESTIMATOR=y
 +
-+	if (!strncmp(buf, "serial", 6)) {
-+		early_serial_init(buf + 6);
-+		early_console = &early_serial_console;
-+	} else if (!strncmp(buf, "ttyS", 4)) {
-+		early_serial_init(buf);
-+		early_console = &early_serial_console;
-+	} else if (!strncmp(buf, "vga", 3)
-+#ifndef CONFIG_XEN
-+	           && SCREEN_INFO.orig_video_isVGA == 1) {
-+		max_xpos = SCREEN_INFO.orig_video_cols;
-+		max_ypos = SCREEN_INFO.orig_video_lines;
-+		current_ypos = SCREEN_INFO.orig_y;
-+#else
-+	           || !strncmp(buf, "xen", 3)) {
-+#endif
-+		early_console = &early_vga_console;
-+ 	} else if (!strncmp(buf, "simnow", 6)) {
-+ 		simnow_init(buf + 6);
-+ 		early_console = &simnow_console;
-+ 		keep_early = 1;
-+	}
-+	early_console_initialized = 1;
-+	register_console(early_console);
-+	return 0;
-+}
++#
++# Network testing
++#
++CONFIG_NET_PKTGEN=m
++CONFIG_HAMRADIO=y
 +
-+void __init disable_early_printk(void)
-+{
-+	if (!early_console_initialized || !early_console)
-+		return;
-+	if (!keep_early) {
-+		printk("disabling early console\n");
-+		unregister_console(early_console);
-+		early_console_initialized = 0;
-+	} else {
-+		printk("keeping early console\n");
-+	}
-+}
++#
++# Packet Radio protocols
++#
++CONFIG_AX25=m
++# CONFIG_AX25_DAMA_SLAVE is not set
++CONFIG_NETROM=m
++CONFIG_ROSE=m
 +
-+__setup("earlyprintk=", setup_early_printk);
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/entry-xen.S tmp-linux-2.6-xen.patch/arch/x86_64/kernel/entry-xen.S
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/entry-xen.S	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/entry-xen.S	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,1325 @@
-+/*
-+ *  linux/arch/x86_64/entry.S
-+ *
-+ *  Copyright (C) 1991, 1992  Linus Torvalds
-+ *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
-+ *  Copyright (C) 2000  Pavel Machek <pavel at suse.cz>
-+ * 
-+ *  $Id$
-+ *
-+ *  Jun Nakajima <jun.nakajima at intel.com>
-+ *  Asit Mallick <asit.k.mallick at intel.com>
-+ *      Modified for Xen
-+ */
++#
++# AX.25 network device drivers
++#
++CONFIG_MKISS=m
++CONFIG_6PACK=m
++CONFIG_BPQETHER=m
++CONFIG_BAYCOM_SER_FDX=m
++CONFIG_BAYCOM_SER_HDX=m
++CONFIG_BAYCOM_PAR=m
++CONFIG_BAYCOM_EPP=m
++CONFIG_YAM=m
++CONFIG_IRDA=m
 +
-+/*
-+ * entry.S contains the system-call and fault low-level handling routines.
-+ *
-+ * NOTE: This code handles signal-recognition, which happens every time
-+ * after an interrupt and after each system call.
-+ * 
-+ * Normal syscalls and interrupts don't save a full stack frame, this is 
-+ * only done for syscall tracing, signals or fork/exec et.al.
-+ * 
-+ * A note on terminology:	 
-+ * - top of stack: Architecture defined interrupt frame from SS to RIP 
-+ * at the top of the kernel process stack.	
-+ * - partial stack frame: partially saved registers upto R11.
-+ * - full stack frame: Like partial stack frame, but all register saved. 
-+ *	
-+ * TODO:	 
-+ * - schedule it carefully for the final hardware.
-+ */
++#
++# IrDA protocols
++#
++CONFIG_IRLAN=m
++CONFIG_IRNET=m
++CONFIG_IRCOMM=m
++# CONFIG_IRDA_ULTRA is not set
 +
-+#define ASSEMBLY 1
-+#include <linux/linkage.h>
-+#include <asm/segment.h>
-+#include <asm/smp.h>
-+#include <asm/cache.h>
-+#include <asm/errno.h>
-+#include <asm/dwarf2.h>
-+#include <asm/calling.h>
-+#include <asm/asm-offsets.h>
-+#include <asm/msr.h>
-+#include <asm/unistd.h>
-+#include <asm/thread_info.h>
-+#include <asm/hw_irq.h>
-+#include <asm/page.h>
-+#include <asm/irqflags.h>
-+#include <asm/errno.h>
-+#include <xen/interface/arch-x86_64.h>
-+#include <xen/interface/features.h>
++#
++# IrDA options
++#
++CONFIG_IRDA_CACHE_LAST_LSAP=y
++CONFIG_IRDA_FAST_RR=y
++CONFIG_IRDA_DEBUG=y
 +
-+#include "irq_vectors.h"
++#
++# Infrared-port device drivers
++#
 +
-+#include "xen_entry.S"
-+	
-+	.code64
++#
++# SIR device drivers
++#
++CONFIG_IRTTY_SIR=m
 +
-+#ifndef CONFIG_PREEMPT
-+#define retint_kernel retint_restore_args
-+#endif	
++#
++# Dongle support
++#
++CONFIG_DONGLE=y
++CONFIG_ESI_DONGLE=m
++CONFIG_ACTISYS_DONGLE=m
++CONFIG_TEKRAM_DONGLE=m
++CONFIG_TOIM3232_DONGLE=m
++CONFIG_LITELINK_DONGLE=m
++CONFIG_MA600_DONGLE=m
++CONFIG_GIRBIL_DONGLE=m
++CONFIG_MCP2120_DONGLE=m
++CONFIG_OLD_BELKIN_DONGLE=m
++CONFIG_ACT200L_DONGLE=m
 +
++#
++# Old SIR device drivers
++#
 +
-+.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
-+#ifdef CONFIG_TRACE_IRQFLAGS
-+	bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
-+	jnc  1f
-+	TRACE_IRQS_ON
-+1:
-+#endif
-+.endm
++#
++# Old Serial dongle support
++#
 +
-+NMI_MASK = 0x80000000
-+	
-+/*
-+ * C code is not supposed to know about undefined top of stack. Every time 
-+ * a C function with an pt_regs argument is called from the SYSCALL based 
-+ * fast path FIXUP_TOP_OF_STACK is needed.
-+ * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
-+ * manipulation.
-+ */        	
-+		
-+	/* %rsp:at FRAMEEND */ 
-+	.macro FIXUP_TOP_OF_STACK tmp
-+	movq    $__USER_CS,CS(%rsp)
-+	movq 	$-1,RCX(%rsp)
-+	.endm
++#
++# FIR device drivers
++#
++CONFIG_USB_IRDA=m
++CONFIG_SIGMATEL_FIR=m
++CONFIG_NSC_FIR=m
++CONFIG_WINBOND_FIR=m
++CONFIG_TOSHIBA_FIR=m
++CONFIG_SMC_IRCC_FIR=m
++CONFIG_ALI_FIR=m
++CONFIG_VLSI_FIR=m
++CONFIG_VIA_FIR=m
++CONFIG_MCS_FIR=m
++CONFIG_BT=m
++CONFIG_BT_L2CAP=m
++CONFIG_BT_SCO=m
++CONFIG_BT_RFCOMM=m
++CONFIG_BT_RFCOMM_TTY=y
++CONFIG_BT_BNEP=m
++CONFIG_BT_BNEP_MC_FILTER=y
++CONFIG_BT_BNEP_PROTO_FILTER=y
++CONFIG_BT_CMTP=m
++CONFIG_BT_HIDP=m
 +
-+	.macro RESTORE_TOP_OF_STACK tmp,offset=0
-+	.endm
++#
++# Bluetooth device drivers
++#
++CONFIG_BT_HCIUSB=m
++CONFIG_BT_HCIUSB_SCO=y
++CONFIG_BT_HCIUART=m
++CONFIG_BT_HCIUART_H4=y
++CONFIG_BT_HCIUART_BCSP=y
++CONFIG_BT_HCIBCM203X=m
++CONFIG_BT_HCIBPA10X=m
++CONFIG_BT_HCIBFUSB=m
++CONFIG_BT_HCIDTL1=m
++CONFIG_BT_HCIBT3C=m
++CONFIG_BT_HCIBLUECARD=m
++CONFIG_BT_HCIBTUART=m
++CONFIG_BT_HCIVHCI=m
++CONFIG_IEEE80211=m
++# CONFIG_IEEE80211_DEBUG is not set
++CONFIG_IEEE80211_CRYPT_WEP=m
++CONFIG_IEEE80211_CRYPT_CCMP=m
++CONFIG_IEEE80211_CRYPT_TKIP=m
++CONFIG_IEEE80211_SOFTMAC=m
++# CONFIG_IEEE80211_SOFTMAC_DEBUG is not set
++CONFIG_WIRELESS_EXT=y
 +
-+	.macro FAKE_STACK_FRAME child_rip
-+	/* push in order ss, rsp, eflags, cs, rip */
-+	xorl %eax, %eax
-+	pushq %rax /* ss */
-+	CFI_ADJUST_CFA_OFFSET	8
-+	/*CFI_REL_OFFSET	ss,0*/
-+	pushq %rax /* rsp */
-+	CFI_ADJUST_CFA_OFFSET	8
-+	CFI_REL_OFFSET	rsp,0
-+	pushq $(1<<9) /* eflags - interrupts on */
-+	CFI_ADJUST_CFA_OFFSET	8
-+	/*CFI_REL_OFFSET	rflags,0*/
-+	pushq $__KERNEL_CS /* cs */
-+	CFI_ADJUST_CFA_OFFSET	8
-+	/*CFI_REL_OFFSET	cs,0*/
-+	pushq \child_rip /* rip */
-+	CFI_ADJUST_CFA_OFFSET	8
-+	CFI_REL_OFFSET	rip,0
-+	pushq	%rax /* orig rax */
-+	CFI_ADJUST_CFA_OFFSET	8
-+	.endm
++#
++# Device Drivers
++#
 +
-+	.macro UNFAKE_STACK_FRAME
-+	addq $8*6, %rsp
-+	CFI_ADJUST_CFA_OFFSET	-(6*8)
-+	.endm
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=m
++# CONFIG_DEBUG_DRIVER is not set
++CONFIG_SYS_HYPERVISOR=y
 +
-+	.macro	CFI_DEFAULT_STACK start=1,adj=0
-+	.if \start
-+	CFI_STARTPROC	simple
-+	CFI_DEF_CFA	rsp,SS+8-(\adj*ARGOFFSET)
-+	.else
-+	CFI_DEF_CFA_OFFSET SS+8-(\adj*ARGOFFSET)
-+	.endif
-+	.if \adj == 0
-+	CFI_REL_OFFSET	r15,R15
-+	CFI_REL_OFFSET	r14,R14
-+	CFI_REL_OFFSET	r13,R13
-+	CFI_REL_OFFSET	r12,R12
-+	CFI_REL_OFFSET	rbp,RBP
-+	CFI_REL_OFFSET	rbx,RBX
-+	.endif
-+	CFI_REL_OFFSET	r11,R11
-+	CFI_REL_OFFSET	r10,R10
-+	CFI_REL_OFFSET	r9,R9
-+	CFI_REL_OFFSET	r8,R8
-+	CFI_REL_OFFSET	rax,RAX
-+	CFI_REL_OFFSET	rcx,RCX
-+	CFI_REL_OFFSET	rdx,RDX
-+	CFI_REL_OFFSET	rsi,RSI
-+	CFI_REL_OFFSET	rdi,RDI
-+	CFI_REL_OFFSET	rip,RIP
-+	/*CFI_REL_OFFSET	cs,CS*/
-+	/*CFI_REL_OFFSET	rflags,EFLAGS*/
-+	CFI_REL_OFFSET	rsp,RSP
-+	/*CFI_REL_OFFSET	ss,SS*/
-+	.endm
++#
++# Connector - unified userspace <-> kernelspace linker
++#
++CONFIG_CONNECTOR=m
 +
-+        /*
-+         * Must be consistent with the definition in arch-x86/xen-x86_64.h:
-+         *     struct iret_context {
-+         *        u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
-+         *     };
-+         * with rax, r11, and rcx being taken care of in the hypercall stub.
-+         */
-+	.macro HYPERVISOR_IRET flag
-+	testb $3,1*8(%rsp)
-+	jnz   2f
-+	testl $NMI_MASK,2*8(%rsp)
-+	jnz   2f
++#
++# Memory Technology Devices (MTD)
++#
++CONFIG_MTD=m
++# CONFIG_MTD_DEBUG is not set
++CONFIG_MTD_CONCAT=m
++CONFIG_MTD_PARTITIONS=y
++CONFIG_MTD_REDBOOT_PARTS=m
++CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
++# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
++# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
++# CONFIG_MTD_CMDLINE_PARTS is not set
 +
-+	cmpb  $0,(xen_features+XENFEAT_supervisor_mode_kernel)(%rip)
-+	jne   1f
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_CHAR=m
++CONFIG_MTD_BLOCK=m
++CONFIG_MTD_BLOCK_RO=m
++CONFIG_FTL=m
++CONFIG_NFTL=m
++CONFIG_NFTL_RW=y
++CONFIG_INFTL=m
++CONFIG_RFD_FTL=m
 +
-+	/* Direct iret to kernel space. Correct CS and SS. */
-+	orl   $3,1*8(%rsp)
-+	orl   $3,4*8(%rsp)
-+1:	iretq
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=m
++CONFIG_MTD_JEDECPROBE=m
++CONFIG_MTD_GEN_PROBE=m
++CONFIG_MTD_CFI_ADV_OPTIONS=y
++CONFIG_MTD_CFI_NOSWAP=y
++# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set
++# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set
++# CONFIG_MTD_CFI_GEOMETRY is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++# CONFIG_MTD_OTP is not set
++CONFIG_MTD_CFI_INTELEXT=m
++CONFIG_MTD_CFI_AMDSTD=m
++CONFIG_MTD_CFI_STAA=m
++CONFIG_MTD_CFI_UTIL=m
++CONFIG_MTD_RAM=m
++CONFIG_MTD_ROM=m
++CONFIG_MTD_ABSENT=m
++# CONFIG_MTD_OBSOLETE_CHIPS is not set
 +
-+2:	/* Slow iret via hypervisor. */
-+	andl  $~NMI_MASK, 2*8(%rsp)
-+	pushq $\flag
-+	jmp  hypercall_page + (__HYPERVISOR_iret * 32)
-+	.endm
++#
++# Mapping drivers for chip access
++#
++CONFIG_MTD_COMPLEX_MAPPINGS=y
++CONFIG_MTD_PHYSMAP=m
++CONFIG_MTD_PHYSMAP_START=0x8000000
++CONFIG_MTD_PHYSMAP_LEN=0x4000000
++CONFIG_MTD_PHYSMAP_BANKWIDTH=2
++CONFIG_MTD_PNC2000=m
++CONFIG_MTD_SC520CDP=m
++CONFIG_MTD_NETSC520=m
++CONFIG_MTD_TS5500=m
++CONFIG_MTD_SBC_GXX=m
++CONFIG_MTD_SCx200_DOCFLASH=m
++CONFIG_MTD_AMD76XROM=m
++CONFIG_MTD_ICHXROM=m
++CONFIG_MTD_SCB2_FLASH=m
++CONFIG_MTD_NETtel=m
++CONFIG_MTD_DILNETPC=m
++CONFIG_MTD_DILNETPC_BOOTSIZE=0x80000
++CONFIG_MTD_L440GX=m
++CONFIG_MTD_PCI=m
++CONFIG_MTD_PLATRAM=m
 +
-+/*
-+ * A newly forked process directly context switches into this.
-+ */ 	
-+/* rdi:	prev */	
-+ENTRY(ret_from_fork)
-+	CFI_DEFAULT_STACK
-+	call schedule_tail
-+	GET_THREAD_INFO(%rcx)
-+	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
-+	jnz rff_trace
-+rff_action:	
-+	RESTORE_REST
-+	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
-+	je   int_ret_from_sys_call
-+	testl $_TIF_IA32,threadinfo_flags(%rcx)
-+	jnz  int_ret_from_sys_call
-+	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
-+	jmp ret_from_sys_call
-+rff_trace:
-+	movq %rsp,%rdi
-+	call syscall_trace_leave
-+	GET_THREAD_INFO(%rcx)	
-+	jmp rff_action
-+	CFI_ENDPROC
-+END(ret_from_fork)
++#
++# Self-contained MTD device drivers
++#
++CONFIG_MTD_PMC551=m
++# CONFIG_MTD_PMC551_BUGFIX is not set
++# CONFIG_MTD_PMC551_DEBUG is not set
++CONFIG_MTD_DATAFLASH=m
++CONFIG_MTD_M25P80=m
++CONFIG_MTD_SLRAM=m
++CONFIG_MTD_PHRAM=m
++CONFIG_MTD_MTDRAM=m
++CONFIG_MTDRAM_TOTAL_SIZE=4096
++CONFIG_MTDRAM_ERASE_SIZE=128
++CONFIG_MTD_BLOCK2MTD=m
 +
-+/*
-+ * initial frame state for interrupts and exceptions
-+ */
-+	.macro _frame ref
-+	CFI_STARTPROC simple
-+	CFI_DEF_CFA rsp,SS+8-\ref
-+	/*CFI_REL_OFFSET ss,SS-\ref*/
-+	CFI_REL_OFFSET rsp,RSP-\ref
-+	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
-+	/*CFI_REL_OFFSET cs,CS-\ref*/
-+	CFI_REL_OFFSET rip,RIP-\ref
-+	.endm
++#
++# Disk-On-Chip Device Drivers
++#
++CONFIG_MTD_DOC2000=m
++CONFIG_MTD_DOC2001=m
++CONFIG_MTD_DOC2001PLUS=m
++CONFIG_MTD_DOCPROBE=m
++CONFIG_MTD_DOCECC=m
++# CONFIG_MTD_DOCPROBE_ADVANCED is not set
++CONFIG_MTD_DOCPROBE_ADDRESS=0
 +
-+/*
-+ * System call entry. Upto 6 arguments in registers are supported.
-+ *
-+ * SYSCALL does not save anything on the stack and does not change the
-+ * stack pointer.
-+ */
-+		
-+/*
-+ * Register setup:	
-+ * rax  system call number
-+ * rdi  arg0
-+ * rcx  return address for syscall/sysret, C arg3 
-+ * rsi  arg1
-+ * rdx  arg2	
-+ * r10  arg3 	(--> moved to rcx for C)
-+ * r8   arg4
-+ * r9   arg5
-+ * r11  eflags for syscall/sysret, temporary for C
-+ * r12-r15,rbp,rbx saved by C code, not touched. 		
-+ * 
-+ * Interrupts are off on entry.
-+ * Only called from user space.
-+ *
-+ * XXX	if we had a free scratch register we could save the RSP into the stack frame
-+ *      and report it properly in ps. Unfortunately we haven't.
-+ *
-+ * When user can change the frames always force IRET. That is because
-+ * it deals with uncanonical addresses better. SYSRET has trouble
-+ * with them due to bugs in both AMD and Intel CPUs.
-+ */ 			 		
++#
++# NAND Flash Device Drivers
++#
++CONFIG_MTD_NAND=m
++# CONFIG_MTD_NAND_VERIFY_WRITE is not set
++# CONFIG_MTD_NAND_ECC_SMC is not set
++CONFIG_MTD_NAND_IDS=m
++CONFIG_MTD_NAND_DISKONCHIP=m
++# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
++CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
++CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE=y
++CONFIG_MTD_NAND_NANDSIM=m
 +
-+ENTRY(system_call)
-+	_frame (RIP-0x10)
-+	SAVE_ARGS -8,0
-+	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp) 
-+	/*
-+	 * No need to follow this irqs off/on section - it's straight
-+	 * and short:
-+	 */
-+        XEN_UNBLOCK_EVENTS(%r11)        
-+	GET_THREAD_INFO(%rcx)
-+	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
-+	CFI_REMEMBER_STATE
-+	jnz tracesys
-+	cmpq $__NR_syscall_max,%rax
-+	ja badsys
-+	movq %r10,%rcx
-+	call *sys_call_table(,%rax,8)  # XXX:	 rip relative
-+	movq %rax,RAX-ARGOFFSET(%rsp)
-+/*
-+ * Syscall return path ending with SYSRET (fast path)
-+ * Has incomplete stack frame and undefined top of stack. 
-+ */		
-+	.globl ret_from_sys_call
-+ret_from_sys_call:
-+	movl $_TIF_ALLWORK_MASK,%edi
-+	/* edi:	flagmask */
-+sysret_check:		
-+	GET_THREAD_INFO(%rcx)
-+        XEN_BLOCK_EVENTS(%rsi)        
-+	TRACE_IRQS_OFF
-+	movl threadinfo_flags(%rcx),%edx
-+	andl %edi,%edx
-+	CFI_REMEMBER_STATE
-+	jnz  sysret_careful 
-+	/*
-+	 * sysretq will re-enable interrupts:
-+	 */
-+	TRACE_IRQS_ON
-+        XEN_UNBLOCK_EVENTS(%rsi)                
-+	RESTORE_ARGS 0,8,0
-+        HYPERVISOR_IRET VGCF_IN_SYSCALL
++#
++# OneNAND Flash Device Drivers
++#
++CONFIG_MTD_ONENAND=m
++# CONFIG_MTD_ONENAND_VERIFY_WRITE is not set
++# CONFIG_MTD_ONENAND_OTP is not set
 +
-+	/* Handle reschedules */
-+	/* edx:	work, edi: workmask */	
-+sysret_careful:
-+	CFI_RESTORE_STATE
-+	bt $TIF_NEED_RESCHED,%edx
-+	jnc sysret_signal
-+	TRACE_IRQS_ON
-+	XEN_UNBLOCK_EVENTS(%rsi)
-+	pushq %rdi
-+	CFI_ADJUST_CFA_OFFSET 8
-+	call schedule
-+	popq  %rdi
-+	CFI_ADJUST_CFA_OFFSET -8
-+	jmp sysret_check
++#
++# Parallel port support
++#
++CONFIG_PARPORT=m
++CONFIG_PARPORT_PC=m
++CONFIG_PARPORT_SERIAL=m
++CONFIG_PARPORT_PC_FIFO=y
++CONFIG_PARPORT_PC_SUPERIO=y
++CONFIG_PARPORT_PC_PCMCIA=m
++CONFIG_PARPORT_NOT_PC=y
++# CONFIG_PARPORT_GSC is not set
++CONFIG_PARPORT_AX88796=m
++CONFIG_PARPORT_1284=y
 +
-+	/* Handle a signal */ 
-+sysret_signal:
-+	TRACE_IRQS_ON
-+/*	sti */
-+        XEN_UNBLOCK_EVENTS(%rsi)        
-+	testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
-+	jz    1f
++#
++# Plug and Play support
++#
++CONFIG_PNP=y
++# CONFIG_PNP_DEBUG is not set
 +
-+	/* Really a signal */
-+	/* edx:	work flags (arg3) */
-+	leaq do_notify_resume(%rip),%rax
-+	leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
-+	xorl %esi,%esi # oldset -> arg2
-+	call ptregscall_common
-+1:	movl $_TIF_NEED_RESCHED,%edi
-+	/* Use IRET because user could have changed frame. This
-+	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
-+	XEN_BLOCK_EVENTS(%rsi)
-+	TRACE_IRQS_OFF
-+	jmp int_with_check
-+	
-+badsys:
-+	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
-+	jmp ret_from_sys_call
++#
++# Protocols
++#
++CONFIG_PNPACPI=y
 +
-+	/* Do syscall tracing */
-+tracesys:			 
-+	CFI_RESTORE_STATE
-+	SAVE_REST
-+	movq $-ENOSYS,RAX(%rsp)
-+	FIXUP_TOP_OF_STACK %rdi
-+	movq %rsp,%rdi
-+	call syscall_trace_enter
-+	LOAD_ARGS ARGOFFSET  /* reload args from stack in case ptrace changed it */
-+	RESTORE_REST
-+	cmpq $__NR_syscall_max,%rax
-+	ja  1f
-+	movq %r10,%rcx	/* fixup for C */
-+	call *sys_call_table(,%rax,8)
-+1:	movq %rax,RAX-ARGOFFSET(%rsp)
-+	/* Use IRET because user could have changed frame */
-+	jmp int_ret_from_sys_call
-+	CFI_ENDPROC
-+END(system_call)
-+		
-+/* 
-+ * Syscall return path ending with IRET.
-+ * Has correct top of stack, but partial stack frame.
-+ */ 	
-+ENTRY(int_ret_from_sys_call)
-+	CFI_STARTPROC	simple
-+	CFI_DEF_CFA	rsp,SS+8-ARGOFFSET
-+	/*CFI_REL_OFFSET	ss,SS-ARGOFFSET*/
-+	CFI_REL_OFFSET	rsp,RSP-ARGOFFSET
-+	/*CFI_REL_OFFSET	rflags,EFLAGS-ARGOFFSET*/
-+	/*CFI_REL_OFFSET	cs,CS-ARGOFFSET*/
-+	CFI_REL_OFFSET	rip,RIP-ARGOFFSET
-+	CFI_REL_OFFSET	rdx,RDX-ARGOFFSET
-+	CFI_REL_OFFSET	rcx,RCX-ARGOFFSET
-+	CFI_REL_OFFSET	rax,RAX-ARGOFFSET
-+	CFI_REL_OFFSET	rdi,RDI-ARGOFFSET
-+	CFI_REL_OFFSET	rsi,RSI-ARGOFFSET
-+	CFI_REL_OFFSET	r8,R8-ARGOFFSET
-+	CFI_REL_OFFSET	r9,R9-ARGOFFSET
-+	CFI_REL_OFFSET	r10,R10-ARGOFFSET
-+	CFI_REL_OFFSET	r11,R11-ARGOFFSET
-+        XEN_BLOCK_EVENTS(%rsi)
-+	TRACE_IRQS_OFF
-+	testb $3,CS-ARGOFFSET(%rsp)
-+        jnz 1f
-+        /* Need to set the proper %ss (not NULL) for ring 3 iretq */
-+        movl $__KERNEL_DS,SS-ARGOFFSET(%rsp)
-+        jmp retint_restore_args   # retrun from ring3 kernel
-+1:              
-+	movl $_TIF_ALLWORK_MASK,%edi
-+	/* edi:	mask to check */
-+int_with_check:
-+	GET_THREAD_INFO(%rcx)
-+	movl threadinfo_flags(%rcx),%edx
-+	andl %edi,%edx
-+	jnz   int_careful
-+	andl    $~TS_COMPAT,threadinfo_status(%rcx)
-+	jmp   retint_restore_args
++#
++# Block devices
++#
++CONFIG_BLK_DEV_FD=y
++CONFIG_PARIDE=m
++CONFIG_PARIDE_PARPORT=m
 +
-+	/* Either reschedule or signal or syscall exit tracking needed. */
-+	/* First do a reschedule test. */
-+	/* edx:	work, edi: workmask */
-+int_careful:
-+	bt $TIF_NEED_RESCHED,%edx
-+	jnc  int_very_careful
-+	TRACE_IRQS_ON
-+/*	sti */
-+        XEN_UNBLOCK_EVENTS(%rsi)
-+	pushq %rdi
-+	CFI_ADJUST_CFA_OFFSET 8
-+	call schedule
-+	popq %rdi
-+	CFI_ADJUST_CFA_OFFSET -8
-+	XEN_BLOCK_EVENTS(%rsi)
-+	TRACE_IRQS_OFF
-+	jmp int_with_check
++#
++# Parallel IDE high-level drivers
++#
++CONFIG_PARIDE_PD=m
++CONFIG_PARIDE_PCD=m
++CONFIG_PARIDE_PF=m
++CONFIG_PARIDE_PT=m
++CONFIG_PARIDE_PG=m
 +
-+	/* handle signals and tracing -- both require a full stack frame */
-+int_very_careful:
-+	TRACE_IRQS_ON
-+/*	sti */
-+        XEN_UNBLOCK_EVENTS(%rsi)
-+	SAVE_REST
-+	/* Check for syscall exit trace */	
-+	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
-+	jz int_signal
-+	pushq %rdi
-+	CFI_ADJUST_CFA_OFFSET 8
-+	leaq 8(%rsp),%rdi	# &ptregs -> arg1	
-+	call syscall_trace_leave
-+	popq %rdi
-+	CFI_ADJUST_CFA_OFFSET -8
-+	andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
-+	XEN_BLOCK_EVENTS(%rsi)
-+	TRACE_IRQS_OFF
-+	jmp int_restore_rest
-+	
-+int_signal:
-+	testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
-+	jz 1f
-+	movq %rsp,%rdi		# &ptregs -> arg1
-+	xorl %esi,%esi		# oldset -> arg2
-+	call do_notify_resume
-+1:	movl $_TIF_NEED_RESCHED,%edi	
-+int_restore_rest:
-+	RESTORE_REST
-+	XEN_BLOCK_EVENTS(%rsi)
-+	TRACE_IRQS_OFF
-+	jmp int_with_check
-+	CFI_ENDPROC
-+END(int_ret_from_sys_call)
-+		
-+/* 
-+ * Certain special system calls that need to save a complete full stack frame.
-+ */ 								
-+	
-+	.macro PTREGSCALL label,func,arg
-+	.globl \label
-+\label:
-+	leaq	\func(%rip),%rax
-+	leaq    -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
-+	jmp	ptregscall_common
-+END(\label)
-+	.endm
++#
++# Parallel IDE protocol modules
++#
++CONFIG_PARIDE_ATEN=m
++CONFIG_PARIDE_BPCK=m
++CONFIG_PARIDE_BPCK6=m
++CONFIG_PARIDE_COMM=m
++CONFIG_PARIDE_DSTR=m
++CONFIG_PARIDE_FIT2=m
++CONFIG_PARIDE_FIT3=m
++CONFIG_PARIDE_EPAT=m
++CONFIG_PARIDE_EPATC8=y
++CONFIG_PARIDE_EPIA=m
++CONFIG_PARIDE_FRIQ=m
++CONFIG_PARIDE_FRPW=m
++CONFIG_PARIDE_KBIC=m
++CONFIG_PARIDE_KTTI=m
++CONFIG_PARIDE_ON20=m
++CONFIG_PARIDE_ON26=m
++CONFIG_BLK_CPQ_DA=m
++CONFIG_BLK_CPQ_CISS_DA=m
++CONFIG_CISS_SCSI_TAPE=y
++CONFIG_BLK_DEV_DAC960=m
++CONFIG_BLK_DEV_UMEM=m
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_CRYPTOLOOP=m
++CONFIG_BLK_DEV_NBD=m
++CONFIG_BLK_DEV_SX8=m
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=16384
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_CDROM_PKTCDVD=m
++CONFIG_CDROM_PKTCDVD_BUFFERS=8
++CONFIG_CDROM_PKTCDVD_WCACHE=y
++CONFIG_ATA_OVER_ETH=m
 +
-+	CFI_STARTPROC
++#
++# ATA/ATAPI/MFM/RLL support
++#
++CONFIG_IDE=y
++CONFIG_BLK_DEV_IDE=y
 +
-+	PTREGSCALL stub_clone, sys_clone, %r8
-+	PTREGSCALL stub_fork, sys_fork, %rdi
-+	PTREGSCALL stub_vfork, sys_vfork, %rdi
-+	PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
-+	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
-+	PTREGSCALL stub_iopl, sys_iopl, %rsi
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++# CONFIG_BLK_DEV_HD_IDE is not set
++CONFIG_BLK_DEV_IDEDISK=m
++CONFIG_IDEDISK_MULTI_MODE=y
++CONFIG_BLK_DEV_IDECS=m
++CONFIG_BLK_DEV_IDECD=m
++CONFIG_BLK_DEV_IDETAPE=m
++CONFIG_BLK_DEV_IDEFLOPPY=m
++CONFIG_BLK_DEV_IDESCSI=m
++# CONFIG_IDE_TASK_IOCTL is not set
 +
-+ENTRY(ptregscall_common)
-+	popq %r11
-+	CFI_ADJUST_CFA_OFFSET -8
-+	CFI_REGISTER rip, r11
-+	SAVE_REST
-+	movq %r11, %r15
-+	CFI_REGISTER rip, r15
-+	FIXUP_TOP_OF_STACK %r11
-+	call *%rax
-+	RESTORE_TOP_OF_STACK %r11
-+	movq %r15, %r11
-+	CFI_REGISTER rip, r11
-+	RESTORE_REST
-+	pushq %r11
-+	CFI_ADJUST_CFA_OFFSET 8
-+	CFI_REL_OFFSET rip, 0
-+	ret
-+	CFI_ENDPROC
-+END(ptregscall_common)
-+	
-+ENTRY(stub_execve)
-+	CFI_STARTPROC
-+	popq %r11
-+	CFI_ADJUST_CFA_OFFSET -8
-+	CFI_REGISTER rip, r11
-+	SAVE_REST
-+	FIXUP_TOP_OF_STACK %r11
-+	call sys_execve
-+	RESTORE_TOP_OF_STACK %r11
-+	movq %rax,RAX(%rsp)
-+	RESTORE_REST
-+	jmp int_ret_from_sys_call
-+	CFI_ENDPROC
-+END(stub_execve)
-+	
-+/*
-+ * sigreturn is special because it needs to restore all registers on return.
-+ * This cannot be done with SYSRET, so use the IRET return path instead.
-+ */                
-+ENTRY(stub_rt_sigreturn)
-+	CFI_STARTPROC
-+	addq $8, %rsp
-+	CFI_ADJUST_CFA_OFFSET	-8
-+	SAVE_REST
-+	movq %rsp,%rdi
-+	FIXUP_TOP_OF_STACK %r11
-+	call sys_rt_sigreturn
-+	movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
-+	RESTORE_REST
-+	jmp int_ret_from_sys_call
-+	CFI_ENDPROC
-+END(stub_rt_sigreturn)
++#
++# IDE chipset support/bugfixes
++#
++CONFIG_IDE_GENERIC=m
++CONFIG_BLK_DEV_CMD640=y
++CONFIG_BLK_DEV_CMD640_ENHANCED=y
++CONFIG_BLK_DEV_IDEPNP=y
++CONFIG_BLK_DEV_IDEPCI=y
++CONFIG_IDEPCI_SHARE_IRQ=y
++CONFIG_BLK_DEV_OFFBOARD=y
++CONFIG_BLK_DEV_GENERIC=y
++CONFIG_BLK_DEV_OPTI621=m
++CONFIG_BLK_DEV_RZ1000=m
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
++CONFIG_IDEDMA_PCI_AUTO=y
++# CONFIG_IDEDMA_ONLYDISK is not set
++CONFIG_BLK_DEV_AEC62XX=m
++CONFIG_BLK_DEV_ALI15X3=m
++# CONFIG_WDC_ALI15X3 is not set
++CONFIG_BLK_DEV_AMD74XX=m
++CONFIG_BLK_DEV_ATIIXP=m
++CONFIG_BLK_DEV_CMD64X=m
++CONFIG_BLK_DEV_TRIFLEX=m
++CONFIG_BLK_DEV_CY82C693=m
++CONFIG_BLK_DEV_CS5520=m
++CONFIG_BLK_DEV_CS5530=m
++CONFIG_BLK_DEV_CS5535=m
++CONFIG_BLK_DEV_HPT34X=m
++CONFIG_HPT34X_AUTODMA=y
++CONFIG_BLK_DEV_HPT366=m
++CONFIG_BLK_DEV_SC1200=m
++CONFIG_BLK_DEV_PIIX=m
++CONFIG_BLK_DEV_IT821X=m
++CONFIG_BLK_DEV_NS87415=m
++CONFIG_BLK_DEV_PDC202XX_OLD=m
++CONFIG_PDC202XX_BURST=y
++CONFIG_BLK_DEV_PDC202XX_NEW=m
++CONFIG_BLK_DEV_SVWKS=m
++CONFIG_BLK_DEV_SIIMAGE=m
++CONFIG_BLK_DEV_SIS5513=m
++CONFIG_BLK_DEV_SLC90E66=m
++CONFIG_BLK_DEV_TRM290=m
++CONFIG_BLK_DEV_VIA82CXXX=m
++# CONFIG_IDE_ARM is not set
++CONFIG_BLK_DEV_IDEDMA=y
++# CONFIG_IDEDMA_IVB is not set
++CONFIG_IDEDMA_AUTO=y
++# CONFIG_BLK_DEV_HD is not set
 +
-+/* initial frame state for interrupts (and exceptions without error code) */
-+#define INTR_FRAME _frame (RIP-0x10); \
-+	CFI_REL_OFFSET rcx,0; \
-+	CFI_REL_OFFSET r11,8
++#
++# SCSI device support
++#
++CONFIG_RAID_ATTRS=m
++CONFIG_SCSI=m
++CONFIG_SCSI_PROC_FS=y
 +
-+/* initial frame state for exceptions with error code (and interrupts with
-+   vector already pushed) */
-+#define XCPT_FRAME _frame (RIP-0x18); \
-+	CFI_REL_OFFSET rcx,0; \
-+	CFI_REL_OFFSET r11,8
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=m
++CONFIG_CHR_DEV_ST=m
++CONFIG_CHR_DEV_OSST=m
++CONFIG_BLK_DEV_SR=m
++# CONFIG_BLK_DEV_SR_VENDOR is not set
++CONFIG_CHR_DEV_SG=m
++CONFIG_CHR_DEV_SCH=m
 +
-+/* 
-+ * Interrupt exit.
-+ *
-+ */ 
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
 +
-+retint_check:
-+	CFI_DEFAULT_STACK adj=1
-+	movl threadinfo_flags(%rcx),%edx
-+	andl %edi,%edx
-+	CFI_REMEMBER_STATE
-+	jnz  retint_careful
-+retint_restore_args:
-+	movl EFLAGS-REST_SKIP(%rsp), %eax
-+	shr $9, %eax			# EAX[0] == IRET_EFLAGS.IF
-+	XEN_GET_VCPU_INFO(%rsi)
-+	andb evtchn_upcall_mask(%rsi),%al
-+	andb $1,%al			# EAX[0] == IRET_EFLAGS.IF & event_mask
-+	jnz restore_all_enable_events	#        != 0 => enable event delivery
-+	XEN_PUT_VCPU_INFO(%rsi)
-+		
-+	RESTORE_ARGS 0,8,0
-+	HYPERVISOR_IRET 0
-+	
-+	/* edi: workmask, edx: work */
-+retint_careful:
-+	CFI_RESTORE_STATE
-+	bt    $TIF_NEED_RESCHED,%edx
-+	jnc   retint_signal
-+	TRACE_IRQS_ON
-+	XEN_UNBLOCK_EVENTS(%rsi)
-+/*	sti */        
-+	pushq %rdi
-+	CFI_ADJUST_CFA_OFFSET	8
-+	call  schedule
-+	popq %rdi		
-+	CFI_ADJUST_CFA_OFFSET	-8
-+	GET_THREAD_INFO(%rcx)
-+	XEN_BLOCK_EVENTS(%rsi)		
-+/*	cli */
-+	TRACE_IRQS_OFF
-+	jmp retint_check
-+	
-+retint_signal:
-+	testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
-+	jz    retint_restore_args
-+	TRACE_IRQS_ON
-+        XEN_UNBLOCK_EVENTS(%rsi)
-+	SAVE_REST
-+	movq $-1,ORIG_RAX(%rsp) 			
-+	xorl %esi,%esi		# oldset
-+	movq %rsp,%rdi		# &pt_regs
-+	call do_notify_resume
-+	RESTORE_REST
-+        XEN_BLOCK_EVENTS(%rsi)		
-+	TRACE_IRQS_OFF
-+	movl $_TIF_NEED_RESCHED,%edi
-+	GET_THREAD_INFO(%rcx)
-+	jmp retint_check
++#
++# SCSI Transport Attributes
++#
++CONFIG_SCSI_SPI_ATTRS=m
++CONFIG_SCSI_FC_ATTRS=m
++CONFIG_SCSI_ISCSI_ATTRS=m
++CONFIG_SCSI_SAS_ATTRS=m
 +
-+#ifdef CONFIG_PREEMPT
-+	/* Returning to kernel space. Check if we need preemption */
-+	/* rcx:	 threadinfo. interrupts off. */
-+	.p2align
-+retint_kernel:	
-+	cmpl $0,threadinfo_preempt_count(%rcx)
-+	jnz  retint_restore_args
-+	bt  $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
-+	jnc  retint_restore_args
-+	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
-+	jnc  retint_restore_args
-+	call preempt_schedule_irq
-+	jmp retint_kernel       /* check again */
-+#endif	
++#
++# SCSI low-level drivers
++#
++CONFIG_ISCSI_TCP=m
++CONFIG_BLK_DEV_3W_XXXX_RAID=m
++CONFIG_SCSI_3W_9XXX=m
++CONFIG_SCSI_ACARD=m
++CONFIG_SCSI_AACRAID=m
++CONFIG_SCSI_AIC7XXX=m
++CONFIG_AIC7XXX_CMDS_PER_DEVICE=8
++CONFIG_AIC7XXX_RESET_DELAY_MS=15000
++CONFIG_AIC7XXX_DEBUG_ENABLE=y
++CONFIG_AIC7XXX_DEBUG_MASK=0
++CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
++CONFIG_SCSI_AIC7XXX_OLD=m
++CONFIG_SCSI_AIC79XX=m
++CONFIG_AIC79XX_CMDS_PER_DEVICE=32
++CONFIG_AIC79XX_RESET_DELAY_MS=15000
++CONFIG_AIC79XX_ENABLE_RD_STRM=y
++CONFIG_AIC79XX_DEBUG_ENABLE=y
++CONFIG_AIC79XX_DEBUG_MASK=0
++CONFIG_AIC79XX_REG_PRETTY_PRINT=y
++CONFIG_SCSI_DPT_I2O=m
++CONFIG_SCSI_ADVANSYS=m
++CONFIG_MEGARAID_NEWGEN=y
++CONFIG_MEGARAID_MM=m
++CONFIG_MEGARAID_MAILBOX=m
++CONFIG_MEGARAID_LEGACY=m
++CONFIG_MEGARAID_SAS=m
++CONFIG_SCSI_SATA=m
++CONFIG_SCSI_SATA_AHCI=m
++CONFIG_SCSI_SATA_SVW=m
++CONFIG_SCSI_ATA_PIIX=m
++CONFIG_SCSI_SATA_MV=m
++CONFIG_SCSI_SATA_NV=m
++CONFIG_SCSI_PDC_ADMA=m
++CONFIG_SCSI_HPTIOP=m
++CONFIG_SCSI_SATA_QSTOR=m
++CONFIG_SCSI_SATA_PROMISE=m
++CONFIG_SCSI_SATA_SX4=m
++CONFIG_SCSI_SATA_SIL=m
++CONFIG_SCSI_SATA_SIL24=m
++CONFIG_SCSI_SATA_SIS=m
++CONFIG_SCSI_SATA_ULI=m
++CONFIG_SCSI_SATA_VIA=m
++CONFIG_SCSI_SATA_VITESSE=m
++CONFIG_SCSI_SATA_INTEL_COMBINED=y
++CONFIG_SCSI_BUSLOGIC=m
++# CONFIG_SCSI_OMIT_FLASHPOINT is not set
++CONFIG_SCSI_DMX3191D=m
++CONFIG_SCSI_EATA=m
++CONFIG_SCSI_EATA_TAGGED_QUEUE=y
++CONFIG_SCSI_EATA_LINKED_COMMANDS=y
++CONFIG_SCSI_EATA_MAX_TAGS=16
++CONFIG_SCSI_FUTURE_DOMAIN=m
++CONFIG_SCSI_GDTH=m
++CONFIG_SCSI_IPS=m
++CONFIG_SCSI_INITIO=m
++CONFIG_SCSI_INIA100=m
++CONFIG_SCSI_PPA=m
++CONFIG_SCSI_IMM=m
++# CONFIG_SCSI_IZIP_EPP16 is not set
++# CONFIG_SCSI_IZIP_SLOW_CTR is not set
++CONFIG_SCSI_SYM53C8XX_2=m
++CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
++CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
++CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
++CONFIG_SCSI_SYM53C8XX_MMIO=y
++CONFIG_SCSI_IPR=m
++CONFIG_SCSI_IPR_TRACE=y
++CONFIG_SCSI_IPR_DUMP=y
++CONFIG_SCSI_QLOGIC_1280=m
++CONFIG_SCSI_QLA_FC=m
++CONFIG_SCSI_LPFC=m
++CONFIG_SCSI_DC395x=m
++CONFIG_SCSI_DC390T=m
++CONFIG_SCSI_NSP32=m
++CONFIG_SCSI_DEBUG=m
 +
-+	CFI_ENDPROC
-+END(retint_check)
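The event test in retint_restore_args condenses to a two-bit AND. A C sketch, assuming the vcpu_info fields from the Xen public headers; restore_all_enable_events exists further below, hypervisor_iret is a hypothetical stand-in for the RESTORE_ARGS/HYPERVISOR_IRET pair:

	/* Deliver events on the way out only when the frame being restored
	 * had EFLAGS.IF (bit 9) set while upcalls are currently masked;
	 * otherwise iret directly. */
	if (((regs->eflags >> 9) & 1) & vcpu->evtchn_upcall_mask)
		restore_all_enable_events();	/* unmask, re-check pending */
	else
		hypervisor_iret();		/* hypothetical helper */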
-+	
-+#ifndef CONFIG_XEN
-+/*
-+ * APIC interrupts.
-+ */		
-+	.macro apicinterrupt num,func
-+	INTR_FRAME
-+	pushq $~(\num)
-+	CFI_ADJUST_CFA_OFFSET 8
-+	interrupt \func
-+	jmp error_entry
-+	CFI_ENDPROC
-+	.endm
++#
++# PCMCIA SCSI adapter support
++#
++CONFIG_PCMCIA_AHA152X=m
++CONFIG_PCMCIA_FDOMAIN=m
++CONFIG_PCMCIA_NINJA_SCSI=m
++CONFIG_PCMCIA_QLOGIC=m
++CONFIG_PCMCIA_SYM53C500=m
 +
-+ENTRY(thermal_interrupt)
-+	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
-+END(thermal_interrupt)
++#
++# Multi-device support (RAID and LVM)
++#
++CONFIG_MD=y
++CONFIG_BLK_DEV_MD=y
++CONFIG_MD_LINEAR=m
++CONFIG_MD_RAID0=m
++CONFIG_MD_RAID1=m
++CONFIG_MD_RAID10=m
++CONFIG_MD_RAID456=m
++CONFIG_MD_RAID5_RESHAPE=y
++CONFIG_MD_MULTIPATH=m
++CONFIG_MD_FAULTY=m
++CONFIG_BLK_DEV_DM=m
++CONFIG_DM_CRYPT=m
++CONFIG_DM_SNAPSHOT=m
++CONFIG_DM_MIRROR=m
++CONFIG_DM_ZERO=m
++CONFIG_DM_MULTIPATH=m
++CONFIG_DM_MULTIPATH_EMC=m
 +
-+ENTRY(threshold_interrupt)
-+	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
-+END(threshold_interrupt)
++#
++# Fusion MPT device support
++#
++CONFIG_FUSION=y
++CONFIG_FUSION_SPI=m
++CONFIG_FUSION_FC=m
++CONFIG_FUSION_SAS=m
++CONFIG_FUSION_MAX_SGE=128
++CONFIG_FUSION_CTL=m
++CONFIG_FUSION_LAN=m
 +
-+#ifdef CONFIG_SMP	
-+ENTRY(reschedule_interrupt)
-+	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
-+END(reschedule_interrupt)
++#
++# IEEE 1394 (FireWire) support
++#
++CONFIG_IEEE1394=m
 +
-+	.macro INVALIDATE_ENTRY num
-+ENTRY(invalidate_interrupt\num)
-+	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt	
-+END(invalidate_interrupt\num)
-+	.endm
++#
++# Subsystem Options
++#
++# CONFIG_IEEE1394_VERBOSEDEBUG is not set
++# CONFIG_IEEE1394_OUI_DB is not set
++CONFIG_IEEE1394_EXTRA_CONFIG_ROMS=y
++CONFIG_IEEE1394_CONFIG_ROM_IP1394=y
++CONFIG_IEEE1394_EXPORT_FULL_API=y
 +
-+	INVALIDATE_ENTRY 0
-+	INVALIDATE_ENTRY 1
-+	INVALIDATE_ENTRY 2
-+	INVALIDATE_ENTRY 3
-+	INVALIDATE_ENTRY 4
-+	INVALIDATE_ENTRY 5
-+	INVALIDATE_ENTRY 6
-+	INVALIDATE_ENTRY 7
++#
++# Device Drivers
++#
++CONFIG_IEEE1394_PCILYNX=m
++CONFIG_IEEE1394_OHCI1394=m
 +
-+ENTRY(call_function_interrupt)
-+	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
-+END(call_function_interrupt)
-+#endif
++#
++# Protocol Drivers
++#
++CONFIG_IEEE1394_VIDEO1394=m
++CONFIG_IEEE1394_SBP2=m
++# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
++CONFIG_IEEE1394_ETH1394=m
++CONFIG_IEEE1394_DV1394=m
++CONFIG_IEEE1394_RAWIO=m
 +
-+#ifdef CONFIG_X86_LOCAL_APIC	
-+ENTRY(apic_timer_interrupt)
-+	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
-+END(apic_timer_interrupt)
++#
++# I2O device support
++#
++CONFIG_I2O=m
++CONFIG_I2O_LCT_NOTIFY_ON_CHANGES=y
++CONFIG_I2O_EXT_ADAPTEC=y
++CONFIG_I2O_EXT_ADAPTEC_DMA64=y
++CONFIG_I2O_CONFIG=m
++CONFIG_I2O_CONFIG_OLD_IOCTL=y
++CONFIG_I2O_BUS=m
++CONFIG_I2O_BLOCK=m
++CONFIG_I2O_SCSI=m
++CONFIG_I2O_PROC=m
 +
-+ENTRY(error_interrupt)
-+	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
-+END(error_interrupt)
++#
++# Network device support
++#
++CONFIG_NETDEVICES=y
++CONFIG_DUMMY=m
++CONFIG_BONDING=m
++CONFIG_EQUALIZER=m
++CONFIG_TUN=m
++CONFIG_NET_SB1000=m
 +
-+ENTRY(spurious_interrupt)
-+	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
-+END(spurious_interrupt)
-+#endif
-+#endif /* !CONFIG_XEN */
-+				
-+/*
-+ * Exception entry points.
-+ */ 		
-+	.macro zeroentry sym
-+	INTR_FRAME
-+        movq (%rsp),%rcx
-+	CFI_RESTORE rcx
-+        movq 8(%rsp),%r11
-+	CFI_RESTORE r11
-+        addq $0x10,%rsp /* skip rcx and r11 */
-+	CFI_ADJUST_CFA_OFFSET -0x10
-+	pushq $0	/* push error code/oldrax */ 
-+	CFI_ADJUST_CFA_OFFSET 8
-+	pushq %rax	/* push real oldrax to the rdi slot */ 
-+	CFI_ADJUST_CFA_OFFSET 8
-+	CFI_REL_OFFSET rax,0
-+	leaq  \sym(%rip),%rax
-+	jmp error_entry
-+	CFI_ENDPROC
-+	.endm	
++#
++# ARCnet devices
++#
++CONFIG_ARCNET=m
++CONFIG_ARCNET_1201=m
++CONFIG_ARCNET_1051=m
++CONFIG_ARCNET_RAW=m
++CONFIG_ARCNET_CAP=m
++CONFIG_ARCNET_COM90xx=m
++CONFIG_ARCNET_COM90xxIO=m
++CONFIG_ARCNET_RIM_I=m
++# CONFIG_ARCNET_COM20020 is not set
 +
-+	.macro errorentry sym
-+	XCPT_FRAME
-+        movq (%rsp),%rcx
-+	CFI_RESTORE rcx
-+        movq 8(%rsp),%r11
-+	CFI_RESTORE r11
-+        addq $0x10,%rsp /* rsp points to the error code */
-+	CFI_ADJUST_CFA_OFFSET -0x10
-+	pushq %rax
-+	CFI_ADJUST_CFA_OFFSET 8
-+	CFI_REL_OFFSET rax,0
-+	leaq  \sym(%rip),%rax
-+	jmp error_entry
-+	CFI_ENDPROC
-+	.endm
++#
++# PHY device support
++#
++CONFIG_PHYLIB=m
 +
-+#if 0 /* not XEN */
-+	/* error code is on the stack already */
-+	/* handle NMI like exceptions that can happen everywhere */
-+	.macro paranoidentry sym, ist=0, irqtrace=1
-+        movq (%rsp),%rcx
-+        movq 8(%rsp),%r11
-+        addq $0x10,%rsp /* skip rcx and r11 */        
-+	SAVE_ALL
-+	cld
-+#if 0 /* not XEN */
-+	movl $1,%ebx
-+	movl  $MSR_GS_BASE,%ecx
-+	rdmsr
-+	testl %edx,%edx
-+	js    1f
-+	swapgs
-+	xorl  %ebx,%ebx
-+1:
-+#endif
-+	.if \ist
-+	movq	%gs:pda_data_offset, %rbp
-+	.endif
-+	movq %rsp,%rdi
-+	movq ORIG_RAX(%rsp),%rsi
-+	movq $-1,ORIG_RAX(%rsp)
-+	.if \ist
-+	subq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
-+	.endif
-+	call \sym
-+	.if \ist
-+	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
-+	.endif
-+/*	cli */
-+	XEN_BLOCK_EVENTS(%rsi)		
-+	.if \irqtrace
-+	TRACE_IRQS_OFF
-+	.endif
-+	.endm
++#
++# MII PHY device drivers
++#
++CONFIG_MARVELL_PHY=m
++CONFIG_DAVICOM_PHY=m
++CONFIG_QSEMI_PHY=m
++CONFIG_LXT_PHY=m
++CONFIG_CICADA_PHY=m
++CONFIG_VITESSE_PHY=m
++CONFIG_SMSC_PHY=m
++CONFIG_FIXED_PHY=m
++# CONFIG_FIXED_MII_10_FDX is not set
++# CONFIG_FIXED_MII_100_FDX is not set
 +
-+	/*
-+ 	 * "Paranoid" exit path from exception stack.
-+  	 * Paranoid because this is used by NMIs and cannot take
-+	 * any kernel state for granted.
-+	 * We don't do kernel preemption checks here, because only
-+	 * NMI should be common and it does not enable IRQs and
-+	 * cannot get reschedule ticks.
-+	 *
-+	 * "trace" is 0 for the NMI handler only, because irq-tracing
-+	 * is fundamentally NMI-unsafe. (we cannot change the soft and
-+	 * hard flags at once, atomically)
-+	 */
-+	.macro paranoidexit trace=1
-+	/* ebx:	no swapgs flag */
-+paranoid_exit\trace:
-+	testl %ebx,%ebx				/* swapgs needed? */
-+	jnz paranoid_restore\trace
-+	testl $3,CS(%rsp)
-+	jnz   paranoid_userspace\trace
-+paranoid_swapgs\trace:
-+	TRACE_IRQS_IRETQ 0
-+	swapgs
-+paranoid_restore\trace:
-+	RESTORE_ALL 8
-+	iretq
-+paranoid_userspace\trace:
-+	GET_THREAD_INFO(%rcx)
-+	movl threadinfo_flags(%rcx),%ebx
-+	andl $_TIF_WORK_MASK,%ebx
-+	jz paranoid_swapgs\trace
-+	movq %rsp,%rdi			/* &pt_regs */
-+	call sync_regs
-+	movq %rax,%rsp			/* switch stack for scheduling */
-+	testl $_TIF_NEED_RESCHED,%ebx
-+	jnz paranoid_schedule\trace
-+	movl %ebx,%edx			/* arg3: thread flags */
-+	.if \trace
-+	TRACE_IRQS_ON
-+	.endif
-+	sti
-+	xorl %esi,%esi 			/* arg2: oldset */
-+	movq %rsp,%rdi 			/* arg1: &pt_regs */
-+	call do_notify_resume
-+	cli
-+	.if \trace
-+	TRACE_IRQS_OFF
-+	.endif
-+	jmp paranoid_userspace\trace
-+paranoid_schedule\trace:
-+	.if \trace
-+	TRACE_IRQS_ON
-+	.endif
-+	sti
-+	call schedule
-+	cli
-+	.if \trace
-+	TRACE_IRQS_OFF
-+	.endif
-+	jmp paranoid_userspace\trace
-+	CFI_ENDPROC
-+	.endm
-+#endif
++#
++# Ethernet (10 or 100Mbit)
++#
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=m
++CONFIG_HAPPYMEAL=m
++CONFIG_SUNGEM=m
++CONFIG_CASSINI=m
++CONFIG_NET_VENDOR_3COM=y
++CONFIG_VORTEX=m
++CONFIG_TYPHOON=m
++
++#
++# Tulip family network device support
++#
++CONFIG_NET_TULIP=y
++CONFIG_DE2104X=m
++CONFIG_TULIP=m
++# CONFIG_TULIP_MWI is not set
++# CONFIG_TULIP_MMIO is not set
++CONFIG_TULIP_NAPI=y
++CONFIG_TULIP_NAPI_HW_MITIGATION=y
++CONFIG_DE4X5=m
++CONFIG_WINBOND_840=m
++CONFIG_DM9102=m
++CONFIG_ULI526X=m
++CONFIG_PCMCIA_XIRCOM=m
++CONFIG_HP100=m
++CONFIG_NET_PCI=y
++CONFIG_PCNET32=m
++CONFIG_AMD8111_ETH=m
++# CONFIG_AMD8111E_NAPI is not set
++CONFIG_ADAPTEC_STARFIRE=m
++CONFIG_ADAPTEC_STARFIRE_NAPI=y
++CONFIG_B44=m
++CONFIG_FORCEDETH=m
++CONFIG_DGRS=m
++CONFIG_EEPRO100=m
++CONFIG_E100=m
++CONFIG_FEALNX=m
++CONFIG_NATSEMI=m
++CONFIG_NE2K_PCI=m
++CONFIG_8139CP=m
++CONFIG_8139TOO=m
++# CONFIG_8139TOO_PIO is not set
++# CONFIG_8139TOO_TUNE_TWISTER is not set
++CONFIG_8139TOO_8129=y
++# CONFIG_8139_OLD_RX_RESET is not set
++CONFIG_SIS900=m
++CONFIG_EPIC100=m
++CONFIG_SUNDANCE=m
++# CONFIG_SUNDANCE_MMIO is not set
++CONFIG_TLAN=m
++CONFIG_VIA_RHINE=m
++# CONFIG_VIA_RHINE_MMIO is not set
++# CONFIG_VIA_RHINE_NAPI is not set
++CONFIG_NET_POCKET=y
++CONFIG_ATP=m
++CONFIG_DE600=m
++CONFIG_DE620=m
++
++#
++# Ethernet (1000 Mbit)
++#
++CONFIG_ACENIC=m
++# CONFIG_ACENIC_OMIT_TIGON_I is not set
++CONFIG_DL2K=m
++CONFIG_E1000=m
++CONFIG_E1000_NAPI=y
++# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
++CONFIG_NS83820=m
++CONFIG_HAMACHI=m
++CONFIG_YELLOWFIN=m
++CONFIG_R8169=m
++# CONFIG_R8169_NAPI is not set
++CONFIG_R8169_VLAN=y
++CONFIG_SIS190=m
++CONFIG_SKGE=m
++CONFIG_SKY2=m
++CONFIG_SK98LIN=m
++CONFIG_VIA_VELOCITY=m
++CONFIG_TIGON3=m
++CONFIG_BNX2=m
++
++#
++# Ethernet (10000 Mbit)
++#
++CONFIG_CHELSIO_T1=m
++CONFIG_IXGB=m
++CONFIG_IXGB_NAPI=y
++CONFIG_S2IO=m
++CONFIG_S2IO_NAPI=y
++CONFIG_MYRI10GE=m
++# CONFIG_SFC is not set
++
++#
++# Token Ring devices
++#
++CONFIG_TR=y
++CONFIG_IBMOL=m
++CONFIG_IBMLS=m
++CONFIG_3C359=m
++CONFIG_TMS380TR=m
++CONFIG_TMSPCI=m
++CONFIG_ABYSS=m
++
++#
++# Wireless LAN (non-hamradio)
++#
++CONFIG_NET_RADIO=y
++CONFIG_NET_WIRELESS_RTNETLINK=y
 +
-+/*
-+ * Exception entry point. This expects an error code/orig_rax on the stack
-+ * and the exception handler in %rax.	
-+ */ 		  				
-+ENTRY(error_entry)
-+	_frame RDI
-+	CFI_REL_OFFSET rax,0
-+	/* rdi slot contains rax, oldrax contains error code */
-+	cld	
-+	subq  $14*8,%rsp
-+	CFI_ADJUST_CFA_OFFSET	(14*8)
-+	movq %rsi,13*8(%rsp)
-+	CFI_REL_OFFSET	rsi,RSI
-+	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
-+	CFI_REGISTER	rax,rsi
-+	movq %rdx,12*8(%rsp)
-+	CFI_REL_OFFSET	rdx,RDX
-+	movq %rcx,11*8(%rsp)
-+	CFI_REL_OFFSET	rcx,RCX
-+	movq %rsi,10*8(%rsp)	/* store rax */ 
-+	CFI_REL_OFFSET	rax,RAX
-+	movq %r8, 9*8(%rsp)
-+	CFI_REL_OFFSET	r8,R8
-+	movq %r9, 8*8(%rsp)
-+	CFI_REL_OFFSET	r9,R9
-+	movq %r10,7*8(%rsp)
-+	CFI_REL_OFFSET	r10,R10
-+	movq %r11,6*8(%rsp)
-+	CFI_REL_OFFSET	r11,R11
-+	movq %rbx,5*8(%rsp) 
-+	CFI_REL_OFFSET	rbx,RBX
-+	movq %rbp,4*8(%rsp) 
-+	CFI_REL_OFFSET	rbp,RBP
-+	movq %r12,3*8(%rsp) 
-+	CFI_REL_OFFSET	r12,R12
-+	movq %r13,2*8(%rsp) 
-+	CFI_REL_OFFSET	r13,R13
-+	movq %r14,1*8(%rsp) 
-+	CFI_REL_OFFSET	r14,R14
-+	movq %r15,(%rsp) 
-+	CFI_REL_OFFSET	r15,R15
-+#if 0        
-+	cmpl $__KERNEL_CS,CS(%rsp)
-+	CFI_REMEMBER_STATE
-+	je  error_kernelspace
-+#endif        
-+error_call_handler:
-+	movq %rdi, RDI(%rsp)            
-+	CFI_REL_OFFSET	rdi,RDI
-+	movq %rsp,%rdi
-+	movq ORIG_RAX(%rsp),%rsi	# get error code 
-+	movq $-1,ORIG_RAX(%rsp)
-+	call *%rax
-+error_exit:		
-+	RESTORE_REST
-+/*	cli */
-+	XEN_BLOCK_EVENTS(%rsi)		
-+	TRACE_IRQS_OFF
-+	GET_THREAD_INFO(%rcx)	
-+	testb $3,CS-ARGOFFSET(%rsp)
-+	jz retint_kernel
-+	movl  threadinfo_flags(%rcx),%edx
-+	movl  $_TIF_WORK_MASK,%edi	
-+	andl  %edi,%edx
-+	jnz   retint_careful
-+	/*
-+	 * The iret might restore flags:
-+	 */
-+	TRACE_IRQS_IRETQ
-+	jmp   retint_restore_args
++#
++# Obsolete Wireless cards support (pre-802.11)
++#
++CONFIG_STRIP=m
++CONFIG_PCMCIA_WAVELAN=m
++CONFIG_PCMCIA_NETWAVE=m
 +
-+#if 0
-+         /*
-+         * We need to re-write the logic here because we don't do iretq
-+         * to return to user mode. It's still possible that we get trap/fault
-+         * in the kernel (when accessing buffers pointed to by system calls, 
-+         * for example).
-+         *
-+         */           
-+	CFI_RESTORE_STATE
-+error_kernelspace:
-+	incl %ebx
-+       /* There are two places in the kernel that can potentially fault with
-+          usergs. Handle them here. The exception handlers after
-+	   iret run with kernel gs again, so don't set the user space flag.
-+	   B stepping K8s sometimes report a truncated RIP for IRET
-+	   exceptions returning to compat mode. Check for these here too. */
-+	leaq iret_label(%rip),%rbp
-+	cmpq %rbp,RIP(%rsp) 
-+	je   error_swapgs
-+	movl %ebp,%ebp	/* zero extend */
-+	cmpq %rbp,RIP(%rsp) 
-+	je   error_swapgs
-+	cmpq $gs_change,RIP(%rsp)
-+        je   error_swapgs
-+	jmp  error_sti
-+#endif
-+	CFI_ENDPROC
-+END(error_entry)
-+	
-+ENTRY(hypervisor_callback)
-+	zeroentry do_hypervisor_callback
-+END(hypervisor_callback)
-+        
-+/*
-+ * Copied from arch/xen/i386/kernel/entry.S
-+ */               
-+# A note on the "critical region" in our callback handler.
-+# We want to avoid stacking callback handlers due to events occurring
-+# during handling of the last event. To do this, we keep events disabled
-+# until we've done all processing. HOWEVER, we must enable events before
-+# popping the stack frame (can't be done atomically) and so it would still
-+# be possible to get enough handler activations to overflow the stack.
-+# Although unlikely, bugs of that kind are hard to track down, so we'd
-+# like to avoid the possibility.
-+# So, on entry to the handler we detect whether we interrupted an
-+# existing activation in its critical region -- if so, we pop the current
-+# activation and restart the handler using the previous one.
-+ENTRY(do_hypervisor_callback)   # do_hypervisor_callback(struct pt_regs *)
-+	CFI_STARTPROC
-+# Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will
-+# see the correct pointer to the pt_regs
-+	movq %rdi, %rsp            # we don't return, adjust the stack frame
-+	CFI_ENDPROC
-+	CFI_DEFAULT_STACK
-+11:	incl %gs:pda_irqcount
-+	movq %rsp,%rbp
-+	CFI_DEF_CFA_REGISTER rbp
-+	cmovzq %gs:pda_irqstackptr,%rsp
-+	pushq %rbp			# backlink for old unwinder
-+	call evtchn_do_upcall
-+	popq %rsp
-+	CFI_DEF_CFA_REGISTER rsp
-+	decl %gs:pda_irqcount
-+	jmp  error_exit
-+	CFI_ENDPROC
-+END(do_hypervisor_callback)
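Together with the scrit/ecrit region further below, the handler amounts to a drain loop. A C rendering, sketched under the same vcpu_info assumptions:

	do {
		vcpu->evtchn_upcall_mask = 1;	/* events blocked while draining */
		evtchn_do_upcall(regs);
		vcpu->evtchn_upcall_mask = 0;	/* the critical region begins */
	} while (vcpu->evtchn_upcall_pending);	/* more arrived? loop, don't stack */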
++#
++# Wireless 802.11 Frequency Hopping cards support
++#
++CONFIG_PCMCIA_RAYCS=m
 +
-+#ifdef CONFIG_X86_LOCAL_APIC
-+KPROBE_ENTRY(nmi)
-+	zeroentry do_nmi_callback
-+ENTRY(do_nmi_callback)
-+	CFI_STARTPROC
-+        addq $8, %rsp
-+	CFI_ENDPROC
-+	CFI_DEFAULT_STACK
-+        call do_nmi
-+        orl  $NMI_MASK,EFLAGS(%rsp)
-+        RESTORE_REST
-+        XEN_BLOCK_EVENTS(%rsi)
-+	TRACE_IRQS_OFF
-+        GET_THREAD_INFO(%rcx)
-+        jmp  retint_restore_args
-+	CFI_ENDPROC
-+	.previous .text
-+END(nmi)
-+#endif
++#
++# Wireless 802.11b ISA/PCI cards support
++#
++CONFIG_IPW2100=m
++CONFIG_IPW2100_MONITOR=y
++# CONFIG_IPW2100_DEBUG is not set
++CONFIG_IPW2200=m
++CONFIG_IPW2200_MONITOR=y
++CONFIG_IPW2200_RADIOTAP=y
++CONFIG_IPW2200_PROMISCUOUS=y
++CONFIG_IPW2200_QOS=y
++# CONFIG_IPW2200_DEBUG is not set
++CONFIG_AIRO=m
++CONFIG_HERMES=m
++CONFIG_PLX_HERMES=m
++CONFIG_TMD_HERMES=m
++CONFIG_NORTEL_HERMES=m
++CONFIG_PCI_HERMES=m
++CONFIG_ATMEL=m
++CONFIG_PCI_ATMEL=m
 +
-+        ALIGN
-+restore_all_enable_events:  
-+	CFI_DEFAULT_STACK adj=1
-+	TRACE_IRQS_ON
-+	XEN_UNBLOCK_EVENTS(%rsi)        # %rsi is already set up...
++#
++# Wireless 802.11b Pcmcia/Cardbus cards support
++#
++CONFIG_PCMCIA_HERMES=m
++CONFIG_PCMCIA_SPECTRUM=m
++CONFIG_AIRO_CS=m
++CONFIG_PCMCIA_ATMEL=m
++CONFIG_PCMCIA_WL3501=m
 +
-+scrit:	/**** START OF CRITICAL REGION ****/
-+	XEN_TEST_PENDING(%rsi)
-+	CFI_REMEMBER_STATE
-+	jnz  14f			# process more events if necessary...
-+	XEN_PUT_VCPU_INFO(%rsi)
-+        RESTORE_ARGS 0,8,0
-+        HYPERVISOR_IRET 0
-+        
-+	CFI_RESTORE_STATE
-+14:	XEN_LOCKED_BLOCK_EVENTS(%rsi)
-+	XEN_PUT_VCPU_INFO(%rsi)
-+	SAVE_REST
-+        movq %rsp,%rdi                  # set the argument again
-+	jmp  11b
-+	CFI_ENDPROC
-+ecrit:  /**** END OF CRITICAL REGION ****/
-+# At this point, unlike on x86-32, we don't do the fixup; skipping it keeps
-+# the code simpler, and the stack frame is more complex on x86-64 anyway.
-+# When the kernel is interrupted in the critical section it will simply do
-+# an IRET, and everything is restored at that point, i.e. execution resumes
-+# at the next instruction of the interrupted context.
++#
++# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
++#
++CONFIG_PRISM54=m
++CONFIG_USB_ZD1201=m
++CONFIG_HOSTAP=m
++CONFIG_HOSTAP_FIRMWARE=y
++CONFIG_HOSTAP_FIRMWARE_NVRAM=y
++CONFIG_HOSTAP_PLX=m
++CONFIG_HOSTAP_PCI=m
++CONFIG_HOSTAP_CS=m
++CONFIG_BCM43XX=m
++CONFIG_BCM43XX_DEBUG=y
++CONFIG_BCM43XX_DMA=y
++CONFIG_BCM43XX_PIO=y
++CONFIG_BCM43XX_DMA_AND_PIO_MODE=y
++# CONFIG_BCM43XX_DMA_MODE is not set
++# CONFIG_BCM43XX_PIO_MODE is not set
++CONFIG_ZD1211RW=m
++# CONFIG_ZD1211RW_DEBUG is not set
++CONFIG_NET_WIRELESS=y
 +
-+# Hypervisor uses this for application faults while it executes.
-+# We get here for two reasons:
-+#  1. Fault while reloading DS, ES, FS or GS
-+#  2. Fault while executing IRET
-+# Category 1 we do not need to fix up as Xen has already reloaded all segment
-+# registers that could be reloaded and zeroed the others.
-+# Category 2 we fix up by killing the current process. We cannot use the
-+# normal Linux return path in this case because if we use the IRET hypercall
-+# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
-+# We distinguish between categories by comparing each saved segment register
-+# with its current contents: any discrepancy means we are in category 1.
-+ENTRY(failsafe_callback)
-+	_frame (RIP-0x30)
-+	CFI_REL_OFFSET rcx, 0
-+	CFI_REL_OFFSET r11, 8
-+	movw %ds,%cx
-+	cmpw %cx,0x10(%rsp)
-+	CFI_REMEMBER_STATE
-+	jne 1f
-+	movw %es,%cx
-+	cmpw %cx,0x18(%rsp)
-+	jne 1f
-+	movw %fs,%cx
-+	cmpw %cx,0x20(%rsp)
-+	jne 1f
-+	movw %gs,%cx
-+	cmpw %cx,0x28(%rsp)
-+	jne 1f
-+	/* All segments match their saved values => Category 2 (Bad IRET). */
-+	movq (%rsp),%rcx
-+	CFI_RESTORE rcx
-+	movq 8(%rsp),%r11
-+	CFI_RESTORE r11
-+	addq $0x30,%rsp
-+	CFI_ADJUST_CFA_OFFSET -0x30
-+	movq $11,%rdi	/* SIGSEGV */
-+	jmp do_exit			
-+	CFI_RESTORE_STATE
-+1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
-+	movq (%rsp),%rcx
-+	CFI_RESTORE rcx
-+	movq 8(%rsp),%r11
-+	CFI_RESTORE r11
-+	addq $0x30,%rsp
-+	CFI_ADJUST_CFA_OFFSET -0x30
-+	pushq $0
-+	CFI_ADJUST_CFA_OFFSET 8
-+	SAVE_ALL
-+	jmp error_exit
-+	CFI_ENDPROC
-+#if 0	      
-+        .section __ex_table,"a"
-+        .align 8
-+        .quad gs_change,bad_gs
-+        .previous
-+        .section .fixup,"ax"
-+	/* running with kernelgs */
-+bad_gs: 
-+/*	swapgs		*/	/* switch back to user gs */
-+	xorl %eax,%eax
-+        movl %eax,%gs
-+        jmp  2b
-+        .previous       
-+#endif
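The selector comparison at the top of failsafe_callback, in rough C; the frame layout is as above, and read_ds()/retry_iret() and friends are invented for illustration:

	/* Category 1: a live selector differs from what Xen saved, so the
	 * fault was a segment reload => retry the IRET.  Category 2: all
	 * match, so the IRET itself faulted => kill the process. */
	if (frame->ds != read_ds() || frame->es != read_es() ||
	    frame->fs != read_fs() || frame->gs != read_gs())
		retry_iret(frame);
	else
		do_exit(SIGSEGV);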
-+	
-+/*
-+ * Create a kernel thread.
-+ *
-+ * C extern interface:
-+ *	extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
-+ *
-+ * asm input arguments:
-+ *	rdi: fn, rsi: arg, rdx: flags
-+ */
-+ENTRY(kernel_thread)
-+	CFI_STARTPROC
-+	FAKE_STACK_FRAME $child_rip
-+	SAVE_ALL
++#
++# PCMCIA network device support
++#
++CONFIG_NET_PCMCIA=y
++CONFIG_PCMCIA_3C589=m
++CONFIG_PCMCIA_3C574=m
++CONFIG_PCMCIA_FMVJ18X=m
++CONFIG_PCMCIA_PCNET=m
++CONFIG_PCMCIA_NMCLAN=m
++CONFIG_PCMCIA_SMC91C92=m
++CONFIG_PCMCIA_XIRC2PS=m
++CONFIG_PCMCIA_AXNET=m
++CONFIG_PCMCIA_IBMTR=m
 +
-+	# rdi: flags, rsi: usp, rdx: will be &pt_regs
-+	movq %rdx,%rdi
-+	orq  kernel_thread_flags(%rip),%rdi
-+	movq $-1, %rsi
-+	movq %rsp, %rdx
++#
++# Wan interfaces
++#
++CONFIG_WAN=y
++CONFIG_DSCC4=m
++CONFIG_DSCC4_PCISYNC=y
++CONFIG_DSCC4_PCI_RST=y
++CONFIG_LANMEDIA=m
++CONFIG_HDLC=m
++CONFIG_HDLC_RAW=y
++CONFIG_HDLC_RAW_ETH=y
++CONFIG_HDLC_CISCO=y
++CONFIG_HDLC_FR=y
++CONFIG_HDLC_PPP=y
++CONFIG_HDLC_X25=y
++CONFIG_PCI200SYN=m
++CONFIG_WANXL=m
++CONFIG_PC300=m
++CONFIG_PC300_MLPPP=y
++CONFIG_FARSYNC=m
++CONFIG_DLCI=m
++CONFIG_DLCI_COUNT=24
++CONFIG_DLCI_MAX=8
++CONFIG_WAN_ROUTER_DRIVERS=y
++CONFIG_CYCLADES_SYNC=m
++CONFIG_CYCLOMX_X25=y
++CONFIG_LAPBETHER=m
++CONFIG_X25_ASY=m
++CONFIG_SBNI=m
++# CONFIG_SBNI_MULTILINE is not set
 +
-+	xorl %r8d,%r8d
-+	xorl %r9d,%r9d
-+	
-+	# clone now
-+	call do_fork
-+	movq %rax,RAX(%rsp)
-+	xorl %edi,%edi
++#
++# ATM drivers
++#
++CONFIG_ATM_DUMMY=m
++CONFIG_ATM_TCP=m
++CONFIG_ATM_LANAI=m
++CONFIG_ATM_ENI=m
++# CONFIG_ATM_ENI_DEBUG is not set
++# CONFIG_ATM_ENI_TUNE_BURST is not set
++CONFIG_ATM_FIRESTREAM=m
++CONFIG_ATM_ZATM=m
++# CONFIG_ATM_ZATM_DEBUG is not set
++CONFIG_ATM_NICSTAR=m
++CONFIG_ATM_NICSTAR_USE_SUNI=y
++CONFIG_ATM_NICSTAR_USE_IDT77105=y
++CONFIG_ATM_IDT77252=m
++# CONFIG_ATM_IDT77252_DEBUG is not set
++CONFIG_ATM_IDT77252_RCV_ALL=y
++CONFIG_ATM_IDT77252_USE_SUNI=y
++CONFIG_ATM_AMBASSADOR=m
++# CONFIG_ATM_AMBASSADOR_DEBUG is not set
++CONFIG_ATM_HORIZON=m
++# CONFIG_ATM_HORIZON_DEBUG is not set
++CONFIG_ATM_IA=m
++# CONFIG_ATM_IA_DEBUG is not set
++CONFIG_ATM_FORE200E_MAYBE=m
++CONFIG_ATM_FORE200E_PCA=y
++CONFIG_ATM_FORE200E_PCA_DEFAULT_FW=y
++CONFIG_ATM_FORE200E_USE_TASKLET=y
++CONFIG_ATM_FORE200E_TX_RETRY=16
++CONFIG_ATM_FORE200E_DEBUG=0
++CONFIG_ATM_FORE200E=m
++CONFIG_ATM_HE=m
++CONFIG_ATM_HE_USE_SUNI=y
++CONFIG_FDDI=y
++# CONFIG_DEFXX is not set
++CONFIG_SKFP=m
++CONFIG_HIPPI=y
++CONFIG_ROADRUNNER=m
++CONFIG_ROADRUNNER_LARGE_RINGS=y
++CONFIG_PLIP=m
++CONFIG_PPP=m
++CONFIG_PPP_MULTILINK=y
++CONFIG_PPP_FILTER=y
++CONFIG_PPP_ASYNC=m
++CONFIG_PPP_SYNC_TTY=m
++CONFIG_PPP_DEFLATE=m
++CONFIG_PPP_BSDCOMP=m
++CONFIG_PPP_MPPE=m
++CONFIG_PPPOE=m
++CONFIG_PPPOATM=m
++CONFIG_SLIP=m
++CONFIG_SLIP_COMPRESSED=y
++CONFIG_SLIP_SMART=y
++CONFIG_SLIP_MODE_SLIP6=y
++CONFIG_NET_FC=y
++CONFIG_SHAPER=m
++CONFIG_NETCONSOLE=m
++CONFIG_NETPOLL=y
++CONFIG_NETPOLL_RX=y
++CONFIG_NETPOLL_TRAP=y
++CONFIG_NET_POLL_CONTROLLER=y
 +
-+	/*
-+	 * It isn't worth checking for a reschedule here, so internally
-+	 * to the x86_64 port you can rely on kernel_thread() not
-+	 * rescheduling the child before returning; this avoids the need
-+	 * for hacks, for example to fork off the per-CPU idle tasks.
-+         * [Hopefully no generic code relies on the reschedule -AK]	
-+	 */
-+	RESTORE_ALL
-+	UNFAKE_STACK_FRAME
-+	ret
-+	CFI_ENDPROC
-+ENDPROC(kernel_thread)
-+	
-+child_rip:
-+	pushq $0		# fake return address
-+	CFI_STARTPROC
-+	/*
-+	 * Here we are in the child and the registers are set as they were
-+	 * at kernel_thread() invocation in the parent.
-+	 */
-+	movq %rdi, %rax
-+	movq %rsi, %rdi
-+	call *%rax
-+	# exit
-+	xorl %edi, %edi
-+	call do_exit
-+	CFI_ENDPROC
-+ENDPROC(child_rip)
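Given the extern documented above, a caller looks like the following; my_worker and ctx are hypothetical, shown only for the calling convention:

	static int my_worker(void *arg)		/* runs in the child */
	{
		struct my_ctx *ctx = arg;
		/* ... do work; child_rip calls do_exit() when fn returns ... */
		return 0;
	}

	pid = kernel_thread(my_worker, ctx, CLONE_FS | CLONE_FILES | SIGCHLD);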
++#
++# ISDN subsystem
++#
++CONFIG_ISDN=m
 +
-+/*
-+ * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
-+ *
-+ * C extern interface:
-+ *	 extern long execve(char *name, char **argv, char **envp)
-+ *
-+ * asm input arguments:
-+ *	rdi: name, rsi: argv, rdx: envp
-+ *
-+ * We want to fall back into:
-+ *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs regs)
-+ *
-+ * do_sys_execve asm fallback arguments:
-+ *	rdi: name, rsi: argv, rdx: envp, fake frame on the stack
-+ */
-+ENTRY(execve)
-+	CFI_STARTPROC
-+	FAKE_STACK_FRAME $0
-+	SAVE_ALL	
-+	call sys_execve
-+	movq %rax, RAX(%rsp)	
-+	RESTORE_REST
-+	testq %rax,%rax
-+	jne 1f
-+        jmp int_ret_from_sys_call
-+1:      RESTORE_ARGS
-+	UNFAKE_STACK_FRAME
-+	ret
-+	CFI_ENDPROC
-+ENDPROC(execve)
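Usage follows the C interface in the comment; early init code, for instance, replaces the current kernel thread's context this way (sketch, argv_init/envp_init stand for whatever the caller supplies):

	error = execve("/sbin/init", argv_init, envp_init);	/* returns only on failure */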
++#
++# Old ISDN4Linux
++#
++CONFIG_ISDN_I4L=m
++CONFIG_ISDN_PPP=y
++CONFIG_ISDN_PPP_VJ=y
++CONFIG_ISDN_MPP=y
++CONFIG_IPPP_FILTER=y
++CONFIG_ISDN_PPP_BSDCOMP=m
++CONFIG_ISDN_AUDIO=y
++CONFIG_ISDN_TTY_FAX=y
++CONFIG_ISDN_X25=y
 +
-+KPROBE_ENTRY(page_fault)
-+	errorentry do_page_fault
-+END(page_fault)
-+	.previous .text
++#
++# ISDN feature submodules
++#
++CONFIG_ISDN_DIVERSION=m
 +
-+ENTRY(coprocessor_error)
-+	zeroentry do_coprocessor_error
-+END(coprocessor_error)
++#
++# ISDN4Linux hardware drivers
++#
 +
-+ENTRY(simd_coprocessor_error)
-+	zeroentry do_simd_coprocessor_error	
-+END(simd_coprocessor_error)
++#
++# Passive cards
++#
++CONFIG_ISDN_DRV_HISAX=m
 +
-+ENTRY(device_not_available)
-+	zeroentry math_state_restore
-+END(device_not_available)
++#
++# D-channel protocol features
++#
++CONFIG_HISAX_EURO=y
++CONFIG_DE_AOC=y
++# CONFIG_HISAX_NO_SENDCOMPLETE is not set
++# CONFIG_HISAX_NO_LLC is not set
++# CONFIG_HISAX_NO_KEYPAD is not set
++CONFIG_HISAX_1TR6=y
++CONFIG_HISAX_NI1=y
++CONFIG_HISAX_MAX_CARDS=8
 +
-+	/* runs on exception stack */
-+KPROBE_ENTRY(debug)
-+/* 	INTR_FRAME
-+	pushq $0
-+	CFI_ADJUST_CFA_OFFSET 8	*/
-+	zeroentry do_debug
-+/*	paranoidexit
-+	CFI_ENDPROC */
-+END(debug)
-+	.previous .text
++#
++# HiSax supported cards
++#
++CONFIG_HISAX_16_3=y
++CONFIG_HISAX_TELESPCI=y
++CONFIG_HISAX_S0BOX=y
++CONFIG_HISAX_FRITZPCI=y
++CONFIG_HISAX_AVM_A1_PCMCIA=y
++CONFIG_HISAX_ELSA=y
++CONFIG_HISAX_DIEHLDIVA=y
++CONFIG_HISAX_SEDLBAUER=y
++CONFIG_HISAX_NETJET=y
++CONFIG_HISAX_NETJET_U=y
++CONFIG_HISAX_NICCY=y
++CONFIG_HISAX_BKM_A4T=y
++CONFIG_HISAX_SCT_QUADRO=y
++CONFIG_HISAX_GAZEL=y
++CONFIG_HISAX_HFC_PCI=y
++CONFIG_HISAX_W6692=y
++CONFIG_HISAX_HFC_SX=y
++CONFIG_HISAX_ENTERNOW_PCI=y
++# CONFIG_HISAX_DEBUG is not set
 +
-+#if 0
-+	/* runs on exception stack */	
-+KPROBE_ENTRY(nmi)
-+	INTR_FRAME
-+	pushq $-1
-+	CFI_ADJUST_CFA_OFFSET 8
-+	paranoidentry do_nmi, 0, 0
-+#ifdef CONFIG_TRACE_IRQFLAGS
-+	paranoidexit 0
-+#else
-+	jmp paranoid_exit1
-+ 	CFI_ENDPROC
-+#endif
-+END(nmi)
-+	.previous .text
-+#endif        
++#
++# HiSax PCMCIA card service modules
++#
++CONFIG_HISAX_SEDLBAUER_CS=m
++CONFIG_HISAX_ELSA_CS=m
++CONFIG_HISAX_AVM_A1_CS=m
++CONFIG_HISAX_TELES_CS=m
 +
-+KPROBE_ENTRY(int3)
-+/* 	INTR_FRAME
-+ 	pushq $0
-+ 	CFI_ADJUST_CFA_OFFSET 8 */
-+ 	zeroentry do_int3
-+/* 	jmp paranoid_exit1
-+ 	CFI_ENDPROC */
-+END(int3)
-+	.previous .text
++#
++# HiSax sub driver modules
++#
++CONFIG_HISAX_ST5481=m
++CONFIG_HISAX_HFCUSB=m
++CONFIG_HISAX_HFC4S8S=m
++CONFIG_HISAX_FRITZ_PCIPNP=m
++CONFIG_HISAX_HDLC=y
 +
-+ENTRY(overflow)
-+	zeroentry do_overflow
-+END(overflow)
++#
++# Active cards
++#
 +
-+ENTRY(bounds)
-+	zeroentry do_bounds
-+END(bounds)
++#
++# Siemens Gigaset
++#
++CONFIG_ISDN_DRV_GIGASET=m
++CONFIG_GIGASET_BASE=m
++CONFIG_GIGASET_M105=m
++# CONFIG_GIGASET_DEBUG is not set
++# CONFIG_GIGASET_UNDOCREQ is not set
 +
-+ENTRY(invalid_op)
-+	zeroentry do_invalid_op	
-+END(invalid_op)
++#
++# CAPI subsystem
++#
++CONFIG_ISDN_CAPI=m
++CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y
++CONFIG_ISDN_CAPI_MIDDLEWARE=y
++CONFIG_ISDN_CAPI_CAPI20=m
++CONFIG_ISDN_CAPI_CAPIFS_BOOL=y
++CONFIG_ISDN_CAPI_CAPIFS=m
++CONFIG_ISDN_CAPI_CAPIDRV=m
 +
-+ENTRY(coprocessor_segment_overrun)
-+	zeroentry do_coprocessor_segment_overrun
-+END(coprocessor_segment_overrun)
++#
++# CAPI hardware drivers
++#
 +
-+ENTRY(reserved)
-+	zeroentry do_reserved
-+END(reserved)
++#
++# Active AVM cards
++#
++CONFIG_CAPI_AVM=y
++CONFIG_ISDN_DRV_AVMB1_B1PCI=m
++CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
++CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
++CONFIG_ISDN_DRV_AVMB1_AVM_CS=m
++CONFIG_ISDN_DRV_AVMB1_T1PCI=m
++CONFIG_ISDN_DRV_AVMB1_C4=m
 +
-+#if 0
-+	/* runs on exception stack */
-+ENTRY(double_fault)
-+	XCPT_FRAME
-+	paranoidentry do_double_fault
-+	jmp paranoid_exit1
-+	CFI_ENDPROC
-+END(double_fault)
-+#endif
++#
++# Active Eicon DIVA Server cards
++#
++CONFIG_CAPI_EICON=y
++CONFIG_ISDN_DIVAS=m
++CONFIG_ISDN_DIVAS_BRIPCI=y
++CONFIG_ISDN_DIVAS_PRIPCI=y
++CONFIG_ISDN_DIVAS_DIVACAPI=m
++CONFIG_ISDN_DIVAS_USERIDI=m
++CONFIG_ISDN_DIVAS_MAINT=m
 +
-+ENTRY(invalid_TSS)
-+	errorentry do_invalid_TSS
-+END(invalid_TSS)
++#
++# Telephony Support
++#
++CONFIG_PHONE=m
++CONFIG_PHONE_IXJ=m
++CONFIG_PHONE_IXJ_PCMCIA=m
 +
-+ENTRY(segment_not_present)
-+	errorentry do_segment_not_present
-+END(segment_not_present)
++#
++# Input device support
++#
++CONFIG_INPUT=y
 +
-+	/* runs on exception stack */
-+ENTRY(stack_segment)
-+/*	XCPT_FRAME
-+	paranoidentry do_stack_segment */
-+	errorentry do_stack_segment
-+/*	jmp paranoid_exit1
-+	CFI_ENDPROC */
-+END(stack_segment)
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++CONFIG_INPUT_JOYDEV=m
++CONFIG_INPUT_TSDEV=m
++CONFIG_INPUT_TSDEV_SCREEN_X=240
++CONFIG_INPUT_TSDEV_SCREEN_Y=320
++CONFIG_INPUT_EVDEV=m
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ATKBD=y
++CONFIG_KEYBOARD_SUNKBD=m
++CONFIG_KEYBOARD_LKKBD=m
++CONFIG_KEYBOARD_XTKBD=m
++CONFIG_KEYBOARD_NEWTON=m
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++CONFIG_MOUSE_SERIAL=m
++CONFIG_MOUSE_VSXXXAA=m
++CONFIG_INPUT_JOYSTICK=y
++CONFIG_JOYSTICK_ANALOG=m
++CONFIG_JOYSTICK_A3D=m
++CONFIG_JOYSTICK_ADI=m
++CONFIG_JOYSTICK_COBRA=m
++CONFIG_JOYSTICK_GF2K=m
++CONFIG_JOYSTICK_GRIP=m
++CONFIG_JOYSTICK_GRIP_MP=m
++CONFIG_JOYSTICK_GUILLEMOT=m
++CONFIG_JOYSTICK_INTERACT=m
++CONFIG_JOYSTICK_SIDEWINDER=m
++CONFIG_JOYSTICK_TMDC=m
++CONFIG_JOYSTICK_IFORCE=m
++CONFIG_JOYSTICK_IFORCE_USB=y
++CONFIG_JOYSTICK_IFORCE_232=y
++CONFIG_JOYSTICK_WARRIOR=m
++CONFIG_JOYSTICK_MAGELLAN=m
++CONFIG_JOYSTICK_SPACEORB=m
++CONFIG_JOYSTICK_SPACEBALL=m
++CONFIG_JOYSTICK_STINGER=m
++CONFIG_JOYSTICK_TWIDJOY=m
++CONFIG_JOYSTICK_DB9=m
++CONFIG_JOYSTICK_GAMECON=m
++CONFIG_JOYSTICK_TURBOGRAFX=m
++CONFIG_JOYSTICK_JOYDUMP=m
++CONFIG_INPUT_TOUCHSCREEN=y
++CONFIG_TOUCHSCREEN_ADS7846=m
++CONFIG_TOUCHSCREEN_GUNZE=m
++CONFIG_TOUCHSCREEN_ELO=m
++CONFIG_TOUCHSCREEN_MTOUCH=m
++CONFIG_TOUCHSCREEN_MK712=m
++CONFIG_INPUT_MISC=y
++CONFIG_INPUT_PCSPKR=m
++CONFIG_INPUT_WISTRON_BTNS=m
++CONFIG_INPUT_UINPUT=m
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_SERPORT=m
++CONFIG_SERIO_CT82C710=m
++CONFIG_SERIO_PARKBD=m
++CONFIG_SERIO_PCIPS2=m
++CONFIG_SERIO_LIBPS2=y
++CONFIG_SERIO_RAW=m
++CONFIG_GAMEPORT=m
++CONFIG_GAMEPORT_NS558=m
++CONFIG_GAMEPORT_L4=m
++CONFIG_GAMEPORT_EMU10K1=m
++CONFIG_GAMEPORT_FM801=m
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++CONFIG_VT_HW_CONSOLE_BINDING=y
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=m
++CONFIG_SERIAL_8250_PCI=m
++CONFIG_SERIAL_8250_PNP=m
++# CONFIG_SERIAL_8250_CS is not set
++CONFIG_SERIAL_8250_NR_UARTS=4
++CONFIG_SERIAL_8250_RUNTIME_UARTS=4
++# CONFIG_SERIAL_8250_EXTENDED is not set
++
++#
++# Non-8250 serial port support
++#
++CONFIG_SERIAL_CORE=m
++CONFIG_SERIAL_JSM=m
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++CONFIG_PRINTER=m
++# CONFIG_LP_CONSOLE is not set
++CONFIG_PPDEV=m
++CONFIG_TIPAR=m
++
++#
++# IPMI
++#
++CONFIG_IPMI_HANDLER=m
++# CONFIG_IPMI_PANIC_EVENT is not set
++CONFIG_IPMI_DEVICE_INTERFACE=m
++CONFIG_IPMI_SI=m
++CONFIG_IPMI_WATCHDOG=m
++CONFIG_IPMI_POWEROFF=m
++
++#
++# Watchdog Cards
++#
++CONFIG_WATCHDOG=y
++# CONFIG_WATCHDOG_NOWAYOUT is not set
++
++#
++# Watchdog Device Drivers
++#
++CONFIG_SOFT_WATCHDOG=m
++CONFIG_ACQUIRE_WDT=m
++CONFIG_ADVANTECH_WDT=m
++CONFIG_ALIM1535_WDT=m
++CONFIG_ALIM7101_WDT=m
++CONFIG_SC520_WDT=m
++CONFIG_EUROTECH_WDT=m
++CONFIG_IB700_WDT=m
++CONFIG_IBMASR=m
++CONFIG_WAFER_WDT=m
++CONFIG_I6300ESB_WDT=m
++CONFIG_I8XX_TCO=m
++CONFIG_SC1200_WDT=m
++CONFIG_SCx200_WDT=m
++CONFIG_60XX_WDT=m
++CONFIG_SBC8360_WDT=m
++CONFIG_CPU5_WDT=m
++CONFIG_W83627HF_WDT=m
++CONFIG_W83877F_WDT=m
++CONFIG_W83977F_WDT=m
++CONFIG_MACHZ_WDT=m
++CONFIG_SBC_EPX_C3_WATCHDOG=m
++
++#
++# PCI-based Watchdog Cards
++#
++CONFIG_PCIPCWATCHDOG=m
++CONFIG_WDTPCI=m
++CONFIG_WDT_501_PCI=y
++
++#
++# USB-based Watchdog Cards
++#
++CONFIG_USBPCWATCHDOG=m
++CONFIG_HW_RANDOM=y
++CONFIG_HW_RANDOM_INTEL=m
++CONFIG_HW_RANDOM_AMD=m
++CONFIG_HW_RANDOM_GEODE=m
++CONFIG_HW_RANDOM_VIA=m
++CONFIG_NVRAM=m
++CONFIG_RTC=m
++CONFIG_GEN_RTC=m
++CONFIG_GEN_RTC_X=y
++CONFIG_DTLK=m
++CONFIG_R3964=m
++CONFIG_APPLICOM=m
++CONFIG_SONYPI=m
++
++#
++# Ftape, the floppy tape device driver
++#
++CONFIG_AGP=m
++CONFIG_AGP_ALI=m
++CONFIG_AGP_ATI=m
++CONFIG_AGP_AMD=m
++CONFIG_AGP_AMD64=m
++CONFIG_AGP_INTEL=m
++CONFIG_AGP_NVIDIA=m
++CONFIG_AGP_SIS=m
++CONFIG_AGP_SWORKS=m
++CONFIG_AGP_VIA=m
++CONFIG_AGP_EFFICEON=m
++CONFIG_DRM=m
++CONFIG_DRM_TDFX=m
++CONFIG_DRM_R128=m
++CONFIG_DRM_RADEON=m
++CONFIG_DRM_I810=m
++CONFIG_DRM_I830=m
++CONFIG_DRM_I915=m
++CONFIG_DRM_MGA=m
++CONFIG_DRM_SIS=m
++CONFIG_DRM_VIA=m
++CONFIG_DRM_SAVAGE=m
++
++#
++# PCMCIA character devices
++#
++CONFIG_SYNCLINK_CS=m
++CONFIG_CARDMAN_4000=m
++CONFIG_CARDMAN_4040=m
++CONFIG_MWAVE=m
++CONFIG_SCx200_GPIO=m
++CONFIG_PC8736x_GPIO=m
++CONFIG_NSC_GPIO=m
++CONFIG_CS5535_GPIO=m
++CONFIG_RAW_DRIVER=m
++CONFIG_MAX_RAW_DEVS=256
++# CONFIG_HPET is not set
++CONFIG_HANGCHECK_TIMER=m
++
++#
++# TPM devices
++#
++CONFIG_TCG_TPM=m
++CONFIG_TCG_TIS=m
++CONFIG_TCG_NSC=m
++CONFIG_TCG_ATMEL=m
++CONFIG_TCG_INFINEON=m
++CONFIG_TCG_XEN=m
++CONFIG_TELCLOCK=m
++
++#
++# I2C support
++#
++CONFIG_I2C=m
++CONFIG_I2C_CHARDEV=m
++
++#
++# I2C Algorithms
++#
++CONFIG_I2C_ALGOBIT=m
++CONFIG_I2C_ALGOPCF=m
++CONFIG_I2C_ALGOPCA=m
 +
-+KPROBE_ENTRY(general_protection)
-+	errorentry do_general_protection
-+END(general_protection)
-+	.previous .text
++#
++# I2C Hardware Bus support
++#
++CONFIG_I2C_ALI1535=m
++CONFIG_I2C_ALI1563=m
++CONFIG_I2C_ALI15X3=m
++CONFIG_I2C_AMD756=m
++CONFIG_I2C_AMD756_S4882=m
++CONFIG_I2C_AMD8111=m
++CONFIG_I2C_I801=m
++CONFIG_I2C_I810=m
++CONFIG_I2C_PIIX4=m
++CONFIG_I2C_ISA=m
++CONFIG_I2C_NFORCE2=m
++CONFIG_I2C_OCORES=m
++CONFIG_I2C_PARPORT=m
++CONFIG_I2C_PARPORT_LIGHT=m
++CONFIG_I2C_PROSAVAGE=m
++CONFIG_I2C_SAVAGE4=m
++CONFIG_SCx200_I2C=m
++CONFIG_SCx200_I2C_SCL=12
++CONFIG_SCx200_I2C_SDA=13
++CONFIG_SCx200_ACB=m
++CONFIG_I2C_SIS5595=m
++CONFIG_I2C_SIS630=m
++CONFIG_I2C_SIS96X=m
++CONFIG_I2C_STUB=m
++CONFIG_I2C_VIA=m
++CONFIG_I2C_VIAPRO=m
++CONFIG_I2C_VOODOO3=m
++CONFIG_I2C_PCA_ISA=m
 +
-+ENTRY(alignment_check)
-+	errorentry do_alignment_check
-+END(alignment_check)
++#
++# Miscellaneous I2C Chip support
++#
++CONFIG_SENSORS_DS1337=m
++CONFIG_SENSORS_DS1374=m
++CONFIG_SENSORS_EEPROM=m
++CONFIG_SENSORS_PCF8574=m
++CONFIG_SENSORS_PCA9539=m
++CONFIG_SENSORS_PCF8591=m
++CONFIG_SENSORS_MAX6875=m
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
 +
-+ENTRY(divide_error)
-+	zeroentry do_divide_error
-+END(divide_error)
++#
++# SPI support
++#
++CONFIG_SPI=y
++# CONFIG_SPI_DEBUG is not set
++CONFIG_SPI_MASTER=y
 +
-+ENTRY(spurious_interrupt_bug)
-+	zeroentry do_spurious_interrupt_bug
-+END(spurious_interrupt_bug)
++#
++# SPI Master Controller Drivers
++#
++CONFIG_SPI_BITBANG=m
++CONFIG_SPI_BUTTERFLY=m
 +
-+#ifdef CONFIG_X86_MCE
-+	/* runs on exception stack */
-+ENTRY(machine_check)
-+	INTR_FRAME
-+	pushq $0
-+	CFI_ADJUST_CFA_OFFSET 8	
-+	paranoidentry do_machine_check
-+	jmp paranoid_exit1
-+	CFI_ENDPROC
-+END(machine_check)
-+#endif
++#
++# SPI Protocol Masters
++#
 +
-+/* Call softirq on interrupt stack. Interrupts are off. */
-+ENTRY(call_softirq)
-+	CFI_STARTPROC
-+	push %rbp
-+	CFI_ADJUST_CFA_OFFSET	8
-+	CFI_REL_OFFSET rbp,0
-+	mov  %rsp,%rbp
-+	CFI_DEF_CFA_REGISTER rbp
-+	incl %gs:pda_irqcount
-+	cmove %gs:pda_irqstackptr,%rsp
-+	push  %rbp			# backlink for old unwinder
-+	call __do_softirq
-+	leaveq
-+	CFI_DEF_CFA_REGISTER	rsp
-+	CFI_ADJUST_CFA_OFFSET   -8
-+	decl %gs:pda_irqcount
-+	ret
-+	CFI_ENDPROC
-+ENDPROC(call_softirq)
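The cmove above keys off the incl: pda_irqcount idles at -1, so it reaches zero, and ZF selects the stack switch, only on the outermost entry. In C-like terms, a sketch:

	if (++pda->irqcount == 0)		/* outermost entry only */
		rsp = pda->irqstackptr;		/* conceptually; done via cmove */
	__do_softirq();
	pda->irqcount--;			/* leaveq already restored rsp */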
++#
++# Dallas's 1-wire bus
++#
++CONFIG_W1=m
++CONFIG_W1_CON=y
 +
-+#ifdef CONFIG_STACK_UNWIND
-+ENTRY(arch_unwind_init_running)
-+	CFI_STARTPROC
-+	movq	%r15, R15(%rdi)
-+	movq	%r14, R14(%rdi)
-+	xchgq	%rsi, %rdx
-+	movq	%r13, R13(%rdi)
-+	movq	%r12, R12(%rdi)
-+	xorl	%eax, %eax
-+	movq	%rbp, RBP(%rdi)
-+	movq	%rbx, RBX(%rdi)
-+	movq	(%rsp), %rcx
-+	movq	%rax, R11(%rdi)
-+	movq	%rax, R10(%rdi)
-+	movq	%rax, R9(%rdi)
-+	movq	%rax, R8(%rdi)
-+	movq	%rax, RAX(%rdi)
-+	movq	%rax, RCX(%rdi)
-+	movq	%rax, RDX(%rdi)
-+	movq	%rax, RSI(%rdi)
-+	movq	%rax, RDI(%rdi)
-+	movq	%rax, ORIG_RAX(%rdi)
-+	movq	%rcx, RIP(%rdi)
-+	leaq	8(%rsp), %rcx
-+	movq	$__KERNEL_CS, CS(%rdi)
-+	movq	%rax, EFLAGS(%rdi)
-+	movq	%rcx, RSP(%rdi)
-+	movq	$__KERNEL_DS, SS(%rdi)
-+	jmpq	*%rdx
-+	CFI_ENDPROC
-+ENDPROC(arch_unwind_init_running)
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/genapic_xen.c tmp-linux-2.6-xen.patch/arch/x86_64/kernel/genapic_xen.c
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/genapic_xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/genapic_xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,161 @@
-+/*
-+ * Copyright 2004 James Cleverdon, IBM.
-+ * Subject to the GNU Public License, v.2
-+ *
-+ * Xen APIC subarch code.  Maximum 8 CPUs, logical delivery.
-+ *
-+ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
-+ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
-+ * James Cleverdon.
-+ *
-+ * Hacked to pieces for Xen by Chris Wright.
-+ */
-+#include <linux/threads.h>
-+#include <linux/cpumask.h>
-+#include <linux/string.h>
-+#include <linux/kernel.h>
-+#include <linux/ctype.h>
-+#include <linux/init.h>
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+#include <asm/smp.h>
-+#include <asm/ipi.h>
-+#else
-+#include <asm/apic.h>
-+#include <asm/apicdef.h>
-+#include <asm/genapic.h>
-+#endif
-+#include <xen/evtchn.h>
++#
++# 1-wire Bus Masters
++#
++CONFIG_W1_MASTER_MATROX=m
++CONFIG_W1_MASTER_DS2490=m
++CONFIG_W1_MASTER_DS2482=m
 +
-+DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
++#
++# 1-wire Slaves
++#
++CONFIG_W1_SLAVE_THERM=m
++CONFIG_W1_SLAVE_SMEM=m
++CONFIG_W1_SLAVE_DS2433=m
++# CONFIG_W1_SLAVE_DS2433_CRC is not set
 +
-+static inline void __send_IPI_one(unsigned int cpu, int vector)
-+{
-+	int irq = per_cpu(ipi_to_irq, cpu)[vector];
-+	BUG_ON(irq < 0);
-+	notify_remote_via_irq(irq);
-+}
++#
++# Hardware Monitoring support
++#
++CONFIG_HWMON=m
++CONFIG_HWMON_VID=m
++CONFIG_SENSORS_ABITUGURU=m
++CONFIG_SENSORS_ADM1021=m
++CONFIG_SENSORS_ADM1025=m
++CONFIG_SENSORS_ADM1026=m
++CONFIG_SENSORS_ADM1031=m
++CONFIG_SENSORS_ADM9240=m
++CONFIG_SENSORS_ASB100=m
++CONFIG_SENSORS_ATXP1=m
++CONFIG_SENSORS_DS1621=m
++CONFIG_SENSORS_F71805F=m
++CONFIG_SENSORS_FSCHER=m
++CONFIG_SENSORS_FSCPOS=m
++CONFIG_SENSORS_GL518SM=m
++CONFIG_SENSORS_GL520SM=m
++CONFIG_SENSORS_IT87=m
++CONFIG_SENSORS_LM63=m
++CONFIG_SENSORS_LM70=m
++CONFIG_SENSORS_LM75=m
++CONFIG_SENSORS_LM77=m
++CONFIG_SENSORS_LM78=m
++CONFIG_SENSORS_LM80=m
++CONFIG_SENSORS_LM83=m
++CONFIG_SENSORS_LM85=m
++CONFIG_SENSORS_LM87=m
++CONFIG_SENSORS_LM90=m
++CONFIG_SENSORS_LM92=m
++CONFIG_SENSORS_MAX1619=m
++CONFIG_SENSORS_PC87360=m
++CONFIG_SENSORS_SIS5595=m
++CONFIG_SENSORS_SMSC47M1=m
++CONFIG_SENSORS_SMSC47M192=m
++CONFIG_SENSORS_SMSC47B397=m
++CONFIG_SENSORS_VIA686A=m
++CONFIG_SENSORS_VT8231=m
++CONFIG_SENSORS_W83781D=m
++CONFIG_SENSORS_W83791D=m
++CONFIG_SENSORS_W83792D=m
++CONFIG_SENSORS_W83L785TS=m
++CONFIG_SENSORS_W83627HF=m
++CONFIG_SENSORS_W83627EHF=m
++CONFIG_SENSORS_HDAPS=m
++# CONFIG_HWMON_DEBUG_CHIP is not set
 +
-+void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
-+{
-+	int cpu;
++#
++# Misc devices
++#
++CONFIG_IBM_ASM=m
 +
-+	switch (shortcut) {
-+	case APIC_DEST_SELF:
-+		__send_IPI_one(smp_processor_id(), vector);
-+		break;
-+	case APIC_DEST_ALLBUT:
-+		for (cpu = 0; cpu < NR_CPUS; ++cpu) {
-+			if (cpu == smp_processor_id())
-+				continue;
-+			if (cpu_isset(cpu, cpu_online_map)) {
-+				__send_IPI_one(cpu, vector);
-+			}
-+		}
-+		break;
-+	case APIC_DEST_ALLINC:
-+		for (cpu = 0; cpu < NR_CPUS; ++cpu) {
-+			if (cpu_isset(cpu, cpu_online_map)) {
-+				__send_IPI_one(cpu, vector);
-+			}
-+		}
-+		break;
-+	default:
-+		printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
-+		       vector);
-+		break;
-+	}
-+}
++#
++# Multimedia devices
++#
++CONFIG_VIDEO_DEV=m
++CONFIG_VIDEO_V4L1=y
++CONFIG_VIDEO_V4L1_COMPAT=y
++CONFIG_VIDEO_V4L2=y
 +
-+static cpumask_t xen_target_cpus(void)
-+{
-+	return cpu_online_map;
-+}
++#
++# Video Capture Adapters
++#
 +
-+/*
-+ * Set up the logical destination ID.
-+ * Does nothing; it is not called at present.
-+ */
-+static void xen_init_apic_ldr(void)
-+{
-+	Dprintk("%s\n", __FUNCTION__);
-+	return;
-+}
++#
++# Video Capture Adapters
++#
++# CONFIG_VIDEO_ADV_DEBUG is not set
++CONFIG_VIDEO_VIVI=m
++CONFIG_VIDEO_BT848=m
++CONFIG_VIDEO_BT848_DVB=y
++CONFIG_VIDEO_SAA6588=m
++CONFIG_VIDEO_BWQCAM=m
++CONFIG_VIDEO_CQCAM=m
++CONFIG_VIDEO_W9966=m
++CONFIG_VIDEO_CPIA=m
++CONFIG_VIDEO_CPIA_PP=m
++CONFIG_VIDEO_CPIA_USB=m
++CONFIG_VIDEO_CPIA2=m
++CONFIG_VIDEO_SAA5246A=m
++CONFIG_VIDEO_SAA5249=m
++CONFIG_TUNER_3036=m
++CONFIG_VIDEO_STRADIS=m
++CONFIG_VIDEO_ZORAN=m
++CONFIG_VIDEO_ZORAN_BUZ=m
++CONFIG_VIDEO_ZORAN_DC10=m
++CONFIG_VIDEO_ZORAN_DC30=m
++CONFIG_VIDEO_ZORAN_LML33=m
++CONFIG_VIDEO_ZORAN_LML33R10=m
++CONFIG_VIDEO_ZORAN_AVS6EYES=m
++CONFIG_VIDEO_MEYE=m
++CONFIG_VIDEO_SAA7134=m
++CONFIG_VIDEO_SAA7134_ALSA=m
++# CONFIG_VIDEO_SAA7134_OSS is not set
++CONFIG_VIDEO_SAA7134_DVB=m
++CONFIG_VIDEO_SAA7134_DVB_ALL_FRONTENDS=y
++CONFIG_VIDEO_MXB=m
++CONFIG_VIDEO_DPC=m
++CONFIG_VIDEO_HEXIUM_ORION=m
++CONFIG_VIDEO_HEXIUM_GEMINI=m
++CONFIG_VIDEO_CX88_VP3054=m
++CONFIG_VIDEO_CX88=m
++CONFIG_VIDEO_CX88_ALSA=m
++CONFIG_VIDEO_CX88_BLACKBIRD=m
++CONFIG_VIDEO_CX88_DVB=m
++CONFIG_VIDEO_CX88_DVB_ALL_FRONTENDS=y
 +
-+static void xen_send_IPI_allbutself(int vector)
-+{
-+	/*
-+	 * If there are no other CPUs in the system then we get an
-+	 * APIC send error if we try to broadcast; thus we have to
-+	 * avoid sending IPIs in this case.
-+	 */
-+	Dprintk("%s\n", __FUNCTION__);
-+	if (num_online_cpus() > 1)
-+		xen_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, APIC_DEST_LOGICAL);
-+}
++#
++# Encoders and Decoders
++#
++CONFIG_VIDEO_MSP3400=m
++CONFIG_VIDEO_CS53L32A=m
++CONFIG_VIDEO_TLV320AIC23B=m
++CONFIG_VIDEO_WM8775=m
++CONFIG_VIDEO_WM8739=m
++CONFIG_VIDEO_CX2341X=m
++CONFIG_VIDEO_CX25840=m
++CONFIG_VIDEO_SAA711X=m
++CONFIG_VIDEO_SAA7127=m
++CONFIG_VIDEO_UPD64031A=m
++CONFIG_VIDEO_UPD64083=m
 +
-+static void xen_send_IPI_all(int vector)
-+{
-+	Dprintk("%s\n", __FUNCTION__);
-+	xen_send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
-+}
++#
++# V4L USB devices
++#
++CONFIG_VIDEO_PVRUSB2=m
++CONFIG_VIDEO_PVRUSB2_24XXX=y
++CONFIG_VIDEO_PVRUSB2_SYSFS=y
++# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
++CONFIG_VIDEO_EM28XX=m
++CONFIG_VIDEO_USBVIDEO=m
++CONFIG_USB_VICAM=m
++CONFIG_USB_IBMCAM=m
++CONFIG_USB_KONICAWC=m
++CONFIG_USB_QUICKCAM_MESSENGER=m
++CONFIG_USB_ET61X251=m
++CONFIG_VIDEO_OVCAMCHIP=m
++CONFIG_USB_W9968CF=m
++CONFIG_USB_OV511=m
++CONFIG_USB_SE401=m
++CONFIG_USB_SN9C102=m
++CONFIG_USB_STV680=m
++CONFIG_USB_ZC0301=m
++CONFIG_USB_PWC=m
++# CONFIG_USB_PWC_DEBUG is not set
 +
-+static void xen_send_IPI_mask(cpumask_t cpumask, int vector)
-+{
-+	unsigned long mask = cpus_addr(cpumask)[0];
-+	unsigned int cpu;
-+	unsigned long flags;
++#
++# Radio Adapters
++#
++CONFIG_RADIO_GEMTEK_PCI=m
++CONFIG_RADIO_MAXIRADIO=m
++CONFIG_RADIO_MAESTRO=m
++CONFIG_USB_DSBR=m
 +
-+	Dprintk("%s\n", __FUNCTION__);
-+	local_irq_save(flags);
-+	WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
++#
++# Digital Video Broadcasting Devices
++#
++CONFIG_DVB=y
++CONFIG_DVB_CORE=m
 +
-+	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
-+		if (cpu_isset(cpu, cpumask)) {
-+			__send_IPI_one(cpu, vector);
-+		}
-+	}
-+	local_irq_restore(flags);
-+}
++#
++# Supported SAA7146 based PCI Adapters
++#
++CONFIG_DVB_AV7110=m
++CONFIG_DVB_AV7110_OSD=y
++CONFIG_DVB_BUDGET=m
++CONFIG_DVB_BUDGET_CI=m
++CONFIG_DVB_BUDGET_AV=m
++CONFIG_DVB_BUDGET_PATCH=m
 +
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+static int xen_apic_id_registered(void)
-+{
-+	/* better be set */
-+	Dprintk("%s\n", __FUNCTION__);
-+	return physid_isset(smp_processor_id(), phys_cpu_present_map);
-+}
-+#endif
++#
++# Supported USB Adapters
++#
++CONFIG_DVB_USB=m
++# CONFIG_DVB_USB_DEBUG is not set
++CONFIG_DVB_USB_A800=m
++CONFIG_DVB_USB_DIBUSB_MB=m
++# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set
++CONFIG_DVB_USB_DIBUSB_MC=m
++CONFIG_DVB_USB_UMT_010=m
++CONFIG_DVB_USB_CXUSB=m
++CONFIG_DVB_USB_DIGITV=m
++CONFIG_DVB_USB_VP7045=m
++CONFIG_DVB_USB_VP702X=m
++CONFIG_DVB_USB_GP8PSK=m
++CONFIG_DVB_USB_NOVA_T_USB2=m
++CONFIG_DVB_USB_DTT200U=m
++CONFIG_DVB_TTUSB_BUDGET=m
++CONFIG_DVB_TTUSB_DEC=m
++CONFIG_DVB_CINERGYT2=m
++# CONFIG_DVB_CINERGYT2_TUNING is not set
 +
-+static unsigned int xen_cpu_mask_to_apicid(cpumask_t cpumask)
-+{
-+	Dprintk("%s\n", __FUNCTION__);
-+	return cpus_addr(cpumask)[0] & APIC_ALL_CPUS;
-+}
++#
++# Supported FlexCopII (B2C2) Adapters
++#
++CONFIG_DVB_B2C2_FLEXCOP=m
++CONFIG_DVB_B2C2_FLEXCOP_PCI=m
++CONFIG_DVB_B2C2_FLEXCOP_USB=m
++# CONFIG_DVB_B2C2_FLEXCOP_DEBUG is not set
 +
-+static unsigned int phys_pkg_id(int index_msb)
-+{
-+	u32 ebx;
++#
++# Supported BT878 Adapters
++#
++CONFIG_DVB_BT8XX=m
 +
-+	Dprintk("%s\n", __FUNCTION__);
-+	ebx = cpuid_ebx(1);
-+	return ((ebx >> 24) & 0xFF) >> index_msb;
-+}
++#
++# Supported Pluto2 Adapters
++#
++CONFIG_DVB_PLUTO2=m
 +
-+struct genapic apic_xen =  {
-+	.name = "xen",
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+	.int_delivery_mode = dest_LowestPrio,
-+#endif
-+	.int_dest_mode = (APIC_DEST_LOGICAL != 0),
-+	.int_delivery_dest = APIC_DEST_LOGICAL | APIC_DM_LOWEST,
-+	.target_cpus = xen_target_cpus,
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+	.apic_id_registered = xen_apic_id_registered,
-+#endif
-+	.init_apic_ldr = xen_init_apic_ldr,
-+	.send_IPI_all = xen_send_IPI_all,
-+	.send_IPI_allbutself = xen_send_IPI_allbutself,
-+	.send_IPI_mask = xen_send_IPI_mask,
-+	.cpu_mask_to_apicid = xen_cpu_mask_to_apicid,
-+	.phys_pkg_id = phys_pkg_id,
-+};
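Generic code reaches these operations through the genapic pointer rather than calling them directly, which is how event-channel notifications stand in for real APIC writes. A typical call site, sketched:

	/* e.g. the reschedule IPI path: */
	genapic->send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);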
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/genapic-xen.c tmp-linux-2.6-xen.patch/arch/x86_64/kernel/genapic-xen.c
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/genapic-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/genapic-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,143 @@
-+/*
-+ * Copyright 2004 James Cleverdon, IBM.
-+ * Subject to the GNU Public License, v.2
-+ *
-+ * Generic APIC sub-arch probe layer.
-+ *
-+ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
-+ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
-+ * James Cleverdon.
-+ */
-+#include <linux/threads.h>
-+#include <linux/cpumask.h>
-+#include <linux/string.h>
-+#include <linux/kernel.h>
-+#include <linux/ctype.h>
-+#include <linux/init.h>
-+#include <linux/module.h>
++#
++# Supported DVB Frontends
++#
 +
-+#include <asm/smp.h>
-+#include <asm/ipi.h>
++#
++# Customise DVB Frontends
++#
 +
-+#if defined(CONFIG_ACPI)
-+#include <acpi/acpi_bus.h>
-+#endif
++#
++# DVB-S (satellite) frontends
++#
++CONFIG_DVB_STV0299=m
++CONFIG_DVB_CX24110=m
++CONFIG_DVB_CX24123=m
++CONFIG_DVB_TDA8083=m
++CONFIG_DVB_MT312=m
++CONFIG_DVB_VES1X93=m
++CONFIG_DVB_S5H1420=m
 +
-+/* which logical CPU number maps to which CPU (physical APIC ID) */
-+u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
-+EXPORT_SYMBOL(x86_cpu_to_apicid);
-+u8 x86_cpu_to_log_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++#
++# DVB-T (terrestrial) frontends
++#
++CONFIG_DVB_SP8870=m
++CONFIG_DVB_SP887X=m
++CONFIG_DVB_CX22700=m
++CONFIG_DVB_CX22702=m
++CONFIG_DVB_L64781=m
++CONFIG_DVB_TDA1004X=m
++CONFIG_DVB_NXT6000=m
++CONFIG_DVB_MT352=m
++CONFIG_DVB_ZL10353=m
++CONFIG_DVB_DIB3000MB=m
++CONFIG_DVB_DIB3000MC=m
 +
-+extern struct genapic apic_cluster;
-+extern struct genapic apic_flat;
-+extern struct genapic apic_physflat;
++#
++# DVB-C (cable) frontends
++#
++CONFIG_DVB_VES1820=m
++CONFIG_DVB_TDA10021=m
++CONFIG_DVB_STV0297=m
 +
-+#ifndef CONFIG_XEN
-+struct genapic *genapic = &apic_flat;
-+#else
-+extern struct genapic apic_xen;
-+struct genapic *genapic = &apic_xen;
-+#endif
++#
++# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
++#
++CONFIG_DVB_NXT200X=m
++CONFIG_DVB_OR51211=m
++CONFIG_DVB_OR51132=m
++CONFIG_DVB_BCM3510=m
++CONFIG_DVB_LGDT330X=m
 +
++#
++# Miscellaneous devices
++#
++CONFIG_DVB_PLL=m
++CONFIG_DVB_LNBP21=m
++CONFIG_DVB_ISL6421=m
++CONFIG_VIDEO_SAA7146=m
++CONFIG_VIDEO_SAA7146_VV=m
++CONFIG_VIDEO_VIDEOBUF=m
++CONFIG_VIDEO_TUNER=m
++CONFIG_VIDEO_BUF=m
++CONFIG_VIDEO_BUF_DVB=m
++CONFIG_VIDEO_BTCX=m
++CONFIG_VIDEO_IR=m
++CONFIG_VIDEO_TVEEPROM=m
++CONFIG_USB_DABUSB=m
 +
-+/*
-+ * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
-+ */
-+void __init clustered_apic_check(void)
-+{
-+#ifndef CONFIG_XEN
-+	long i;
-+	u8 clusters, max_cluster;
-+	u8 id;
-+	u8 cluster_cnt[NUM_APIC_CLUSTERS];
-+	int max_apic = 0;
++#
++# Graphics support
++#
++CONFIG_FIRMWARE_EDID=y
++CONFIG_FB=y
++CONFIG_FB_CFB_FILLRECT=y
++CONFIG_FB_CFB_COPYAREA=y
++CONFIG_FB_CFB_IMAGEBLIT=y
++# CONFIG_FB_MACMODES is not set
++# CONFIG_FB_BACKLIGHT is not set
++CONFIG_FB_MODE_HELPERS=y
++CONFIG_FB_TILEBLITTING=y
++CONFIG_FB_CIRRUS=m
++CONFIG_FB_PM2=m
++CONFIG_FB_PM2_FIFO_DISCONNECT=y
++CONFIG_FB_CYBER2000=m
++CONFIG_FB_ARC=m
++# CONFIG_FB_ASILIANT is not set
++# CONFIG_FB_IMSTT is not set
++CONFIG_FB_VGA16=m
++CONFIG_FB_VESA=y
++CONFIG_FB_HGA=m
++# CONFIG_FB_HGA_ACCEL is not set
++CONFIG_FB_S1D13XXX=m
++CONFIG_FB_NVIDIA=m
++CONFIG_FB_NVIDIA_I2C=y
++CONFIG_FB_RIVA=m
++CONFIG_FB_RIVA_I2C=y
++CONFIG_FB_RIVA_DEBUG=y
++CONFIG_FB_I810=m
++# CONFIG_FB_I810_GTF is not set
++CONFIG_FB_INTEL=m
++# CONFIG_FB_INTEL_DEBUG is not set
++CONFIG_FB_MATROX=m
++CONFIG_FB_MATROX_MILLENIUM=y
++CONFIG_FB_MATROX_MYSTIQUE=y
++CONFIG_FB_MATROX_G=y
++# CONFIG_FB_MATROX_I2C is not set
++CONFIG_FB_MATROX_MULTIHEAD=y
++CONFIG_FB_RADEON=m
++CONFIG_FB_RADEON_I2C=y
++# CONFIG_FB_RADEON_DEBUG is not set
++CONFIG_FB_ATY128=m
++CONFIG_FB_ATY=m
++CONFIG_FB_ATY_CT=y
++CONFIG_FB_ATY_GENERIC_LCD=y
++CONFIG_FB_ATY_GX=y
++CONFIG_FB_SAVAGE=m
++CONFIG_FB_SAVAGE_I2C=y
++CONFIG_FB_SAVAGE_ACCEL=y
++CONFIG_FB_SIS=m
++CONFIG_FB_SIS_300=y
++CONFIG_FB_SIS_315=y
++CONFIG_FB_NEOMAGIC=m
++CONFIG_FB_KYRO=m
++CONFIG_FB_3DFX=m
++# CONFIG_FB_3DFX_ACCEL is not set
++CONFIG_FB_VOODOO1=m
++CONFIG_FB_CYBLA=m
++CONFIG_FB_TRIDENT=m
++# CONFIG_FB_TRIDENT_ACCEL is not set
++CONFIG_FB_GEODE=y
++CONFIG_FB_GEODE_GX=m
++CONFIG_FB_GEODE_GX1=m
++CONFIG_FB_VIRTUAL=m
 +
-+#if defined(CONFIG_ACPI)
-+	/*
-+	 * Some x86_64 machines use physical APIC mode regardless of how many
-+	 * procs/clusters are present (x86_64 ES7000 is an example).
-+	 */
-+	if (acpi_fadt.revision > FADT2_REVISION_ID)
-+		if (acpi_fadt.force_apic_physical_destination_mode) {
-+			genapic = &apic_cluster;
-+			goto print;
-+		}
-+#endif
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++# CONFIG_VGACON_SOFT_SCROLLBACK is not set
++CONFIG_VIDEO_SELECT=y
++CONFIG_DUMMY_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE=m
++# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
++# CONFIG_FONTS is not set
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
 +
-+	memset(cluster_cnt, 0, sizeof(cluster_cnt));
-+	for (i = 0; i < NR_CPUS; i++) {
-+		id = bios_cpu_apicid[i];
-+		if (id == BAD_APICID)
-+			continue;
-+		if (id > max_apic)
-+			max_apic = id;
-+		cluster_cnt[APIC_CLUSTERID(id)]++;
-+	}
++#
++# Logo configuration
++#
++# CONFIG_LOGO is not set
++CONFIG_BACKLIGHT_LCD_SUPPORT=y
++CONFIG_BACKLIGHT_CLASS_DEVICE=m
++CONFIG_BACKLIGHT_DEVICE=y
++CONFIG_LCD_CLASS_DEVICE=m
++CONFIG_LCD_DEVICE=y
 +
-+	/* Don't use clustered mode on AMD platforms. */
-+ 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
-+		genapic = &apic_physflat;
-+#ifndef CONFIG_HOTPLUG_CPU
-+		/* In the CPU hotplug case we cannot use broadcast mode
-+		   because that opens a race when a CPU is removed.
-+		   Stay at physflat mode in this case.
-+		   It is bad to do this unconditionally though. Once
-+		   we have ACPI platform support for CPU hotplug
-+		we should detect hotplug capability from ACPI tables and
-+		   only do this when really needed. -AK */
-+		if (max_apic <= 8)
-+			genapic = &apic_flat;
-+#endif
-+ 		goto print;
-+ 	}
++#
++# Sound
++#
++CONFIG_SOUND=m
 +
-+	clusters = 0;
-+	max_cluster = 0;
++#
++# Advanced Linux Sound Architecture
++#
++CONFIG_SND=m
++CONFIG_SND_TIMER=m
++CONFIG_SND_PCM=m
++CONFIG_SND_HWDEP=m
++CONFIG_SND_RAWMIDI=m
++CONFIG_SND_SEQUENCER=m
++CONFIG_SND_SEQ_DUMMY=m
++CONFIG_SND_OSSEMUL=y
++CONFIG_SND_MIXER_OSS=m
++CONFIG_SND_PCM_OSS=m
++CONFIG_SND_PCM_OSS_PLUGINS=y
++CONFIG_SND_SEQUENCER_OSS=y
++CONFIG_SND_RTCTIMER=m
++CONFIG_SND_SEQ_RTCTIMER_DEFAULT=y
++# CONFIG_SND_DYNAMIC_MINORS is not set
++CONFIG_SND_SUPPORT_OLD_API=y
++CONFIG_SND_VERBOSE_PROCFS=y
++# CONFIG_SND_VERBOSE_PRINTK is not set
++# CONFIG_SND_DEBUG is not set
 +
-+	for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
-+		if (cluster_cnt[i] > 0) {
-+			++clusters;
-+			if (cluster_cnt[i] > max_cluster)
-+				max_cluster = cluster_cnt[i];
-+		}
-+	}
++#
++# Generic devices
++#
++CONFIG_SND_MPU401_UART=m
++CONFIG_SND_OPL3_LIB=m
++CONFIG_SND_VX_LIB=m
++CONFIG_SND_AC97_CODEC=m
++CONFIG_SND_AC97_BUS=m
++CONFIG_SND_DUMMY=m
++CONFIG_SND_VIRMIDI=m
++CONFIG_SND_MTPAV=m
++CONFIG_SND_SERIAL_U16550=m
++CONFIG_SND_MPU401=m
 +
-+	/*
-+	 * If we have clusters <= 1 and CPUs <= 8 in cluster 0, then flat mode,
-+	 * else if max_cluster <= 4 and cluster_cnt[15] == 0, clustered logical
-+	 * else physical mode.
-+	 * (We don't use lowest priority delivery + HW APIC IRQ steering, so
-+	 * can ignore the clustered logical case and go straight to physical.)
-+	 */
-+	if (clusters <= 1 && max_cluster <= 8 && cluster_cnt[0] == max_cluster) {
-+#ifdef CONFIG_HOTPLUG_CPU
-+		/* Don't use APIC shortcuts in CPU hotplug to avoid races */
-+		genapic = &apic_physflat;
-+#else
-+		genapic = &apic_flat;
-+#endif
-+	} else
-+		genapic = &apic_cluster;
++#
++# PCI devices
++#
++CONFIG_SND_AD1889=m
++CONFIG_SND_ALS300=m
++CONFIG_SND_ALS4000=m
++CONFIG_SND_ALI5451=m
++CONFIG_SND_ATIIXP=m
++CONFIG_SND_ATIIXP_MODEM=m
++CONFIG_SND_AU8810=m
++CONFIG_SND_AU8820=m
++CONFIG_SND_AU8830=m
++CONFIG_SND_AZT3328=m
++CONFIG_SND_BT87X=m
++# CONFIG_SND_BT87X_OVERCLOCK is not set
++CONFIG_SND_CA0106=m
++CONFIG_SND_CMIPCI=m
++CONFIG_SND_CS4281=m
++CONFIG_SND_CS46XX=m
++CONFIG_SND_CS46XX_NEW_DSP=y
++CONFIG_SND_CS5535AUDIO=m
++CONFIG_SND_DARLA20=m
++CONFIG_SND_GINA20=m
++CONFIG_SND_LAYLA20=m
++CONFIG_SND_DARLA24=m
++CONFIG_SND_GINA24=m
++CONFIG_SND_LAYLA24=m
++CONFIG_SND_MONA=m
++CONFIG_SND_MIA=m
++CONFIG_SND_ECHO3G=m
++CONFIG_SND_INDIGO=m
++CONFIG_SND_INDIGOIO=m
++CONFIG_SND_INDIGODJ=m
++CONFIG_SND_EMU10K1=m
++CONFIG_SND_EMU10K1X=m
++CONFIG_SND_ENS1370=m
++CONFIG_SND_ENS1371=m
++CONFIG_SND_ES1938=m
++CONFIG_SND_ES1968=m
++CONFIG_SND_FM801=m
++# CONFIG_SND_FM801_TEA575X_BOOL is not set
++CONFIG_SND_HDA_INTEL=m
++CONFIG_SND_HDSP=m
++CONFIG_SND_HDSPM=m
++CONFIG_SND_ICE1712=m
++CONFIG_SND_ICE1724=m
++CONFIG_SND_INTEL8X0=m
++CONFIG_SND_INTEL8X0M=m
++CONFIG_SND_KORG1212=m
++CONFIG_SND_MAESTRO3=m
++CONFIG_SND_MIXART=m
++CONFIG_SND_NM256=m
++CONFIG_SND_PCXHR=m
++CONFIG_SND_RIPTIDE=m
++CONFIG_SND_RME32=m
++CONFIG_SND_RME96=m
++CONFIG_SND_RME9652=m
++CONFIG_SND_SONICVIBES=m
++CONFIG_SND_TRIDENT=m
++CONFIG_SND_VIA82XX=m
++# CONFIG_SND_VIA82XX_MODEM is not set
++CONFIG_SND_VX222=m
++CONFIG_SND_YMFPCI=m
 +
-+print:
-+#else
-+	/* hardcode to xen apic functions */
-+	genapic = &apic_xen;
-+#endif
-+	printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name);
-+}
++#
++# USB devices
++#
++CONFIG_SND_USB_AUDIO=m
++CONFIG_SND_USB_USX2Y=m
 +
-+/* Same for both flat and clustered. */
++#
++# PCMCIA devices
++#
++CONFIG_SND_VXPOCKET=m
++CONFIG_SND_PDAUDIOCF=m
 +
-+#ifdef CONFIG_XEN
-+extern void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest);
-+#endif
++#
++# Open Sound System
++#
++CONFIG_SOUND_PRIME=m
++# CONFIG_OSS_OBSOLETE_DRIVER is not set
++CONFIG_SOUND_BT878=m
++CONFIG_SOUND_ES1371=m
++CONFIG_SOUND_ICH=m
++CONFIG_SOUND_TRIDENT=m
++# CONFIG_SOUND_MSNDCLAS is not set
++# CONFIG_SOUND_MSNDPIN is not set
++CONFIG_SOUND_VIA82CXXX=m
++# CONFIG_MIDI_VIA82CXXX is not set
++# CONFIG_SOUND_OSS is not set
++CONFIG_SOUND_TVMIXER=m
 +
-+void send_IPI_self(int vector)
-+{
-+#ifndef CONFIG_XEN
-+	__send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
-+#else
-+	xen_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
-+#endif
-+}
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/head64-xen.c tmp-linux-2.6-xen.patch/arch/x86_64/kernel/head64-xen.c
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/head64-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/head64-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,162 @@
-+/*
-+ *  linux/arch/x86_64/kernel/head64.c -- prepare to run common code
-+ *
-+ *  Copyright (C) 2000 Andrea Arcangeli <andrea at suse.de> SuSE
-+ *
-+ *  Jun Nakajima <jun.nakajima at intel.com>
-+ *	Modified for Xen.
-+ */
++#
++# USB support
++#
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++CONFIG_USB=m
++# CONFIG_USB_DEBUG is not set
 +
-+#include <linux/init.h>
-+#include <linux/linkage.h>
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+#include <linux/percpu.h>
-+#include <linux/module.h>
++#
++# Miscellaneous USB options
++#
++CONFIG_USB_DEVICEFS=y
++CONFIG_USB_BANDWIDTH=y
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_SUSPEND is not set
++# CONFIG_USB_OTG is not set
 +
-+#include <asm/processor.h>
-+#include <asm/proto.h>
-+#include <asm/smp.h>
-+#include <asm/bootsetup.h>
-+#include <asm/setup.h>
-+#include <asm/desc.h>
-+#include <asm/pgtable.h>
-+#include <asm/sections.h>
++#
++# USB Host Controller Drivers
++#
++CONFIG_USB_EHCI_HCD=m
++CONFIG_USB_EHCI_SPLIT_ISO=y
++CONFIG_USB_EHCI_ROOT_HUB_TT=y
++CONFIG_USB_EHCI_TT_NEWSCHED=y
++CONFIG_USB_ISP116X_HCD=m
++CONFIG_USB_OHCI_HCD=m
++# CONFIG_USB_OHCI_BIG_ENDIAN is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_UHCI_HCD=m
++CONFIG_USB_SL811_HCD=m
++CONFIG_USB_SL811_CS=m
 +
-+unsigned long start_pfn;
++#
++# USB Device Class drivers
++#
++CONFIG_USB_ACM=m
++CONFIG_USB_PRINTER=m
 +
-+/* Don't add a printk in there. printk relies on the PDA which is not initialized 
-+   yet. */
-+#if 0
-+static void __init clear_bss(void)
-+{
-+	memset(__bss_start, 0,
-+	       (unsigned long) __bss_stop - (unsigned long) __bss_start);
-+}
-+#endif
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
 +
-+#define NEW_CL_POINTER		0x228	/* Relative to real mode data */
-+#define OLD_CL_MAGIC_ADDR	0x90020
-+#define OLD_CL_MAGIC            0xA33F
-+#define OLD_CL_BASE_ADDR        0x90000
-+#define OLD_CL_OFFSET           0x90022
++#
++# may also be needed; see USB_STORAGE Help for more information
++#
++CONFIG_USB_STORAGE=m
++# CONFIG_USB_STORAGE_DEBUG is not set
++CONFIG_USB_STORAGE_DATAFAB=y
++CONFIG_USB_STORAGE_FREECOM=y
++CONFIG_USB_STORAGE_ISD200=y
++CONFIG_USB_STORAGE_DPCM=y
++CONFIG_USB_STORAGE_USBAT=y
++CONFIG_USB_STORAGE_SDDR09=y
++CONFIG_USB_STORAGE_SDDR55=y
++CONFIG_USB_STORAGE_JUMPSHOT=y
++CONFIG_USB_STORAGE_ALAUDA=y
++CONFIG_USB_LIBUSUAL=y
 +
-+extern char saved_command_line[];
++#
++# USB Input Devices
++#
++CONFIG_USB_HID=m
++CONFIG_USB_HIDINPUT=y
++# CONFIG_USB_HIDINPUT_POWERBOOK is not set
++CONFIG_HID_FF=y
++CONFIG_HID_PID=y
++CONFIG_LOGITECH_FF=y
++CONFIG_THRUSTMASTER_FF=y
++CONFIG_USB_HIDDEV=y
 +
-+static void __init copy_bootdata(char *real_mode_data)
-+{
-+#ifndef CONFIG_XEN
-+	int new_data;
-+	char * command_line;
++#
++# USB HID Boot Protocol drivers
++#
++# CONFIG_USB_KBD is not set
++# CONFIG_USB_MOUSE is not set
++CONFIG_USB_AIPTEK=m
++CONFIG_USB_WACOM=m
++CONFIG_USB_ACECAD=m
++CONFIG_USB_KBTAB=m
++CONFIG_USB_POWERMATE=m
++CONFIG_USB_TOUCHSCREEN=m
++CONFIG_USB_TOUCHSCREEN_EGALAX=y
++CONFIG_USB_TOUCHSCREEN_PANJIT=y
++CONFIG_USB_TOUCHSCREEN_3M=y
++CONFIG_USB_TOUCHSCREEN_ITM=y
++CONFIG_USB_YEALINK=m
++CONFIG_USB_XPAD=m
++CONFIG_USB_ATI_REMOTE=m
++CONFIG_USB_ATI_REMOTE2=m
++CONFIG_USB_KEYSPAN_REMOTE=m
++CONFIG_USB_APPLETOUCH=m
 +
-+	memcpy(x86_boot_params, real_mode_data, BOOT_PARAM_SIZE);
-+	new_data = *(int *) (x86_boot_params + NEW_CL_POINTER);
-+	if (!new_data) {
-+		if (OLD_CL_MAGIC != * (u16 *) OLD_CL_MAGIC_ADDR) {
-+			printk("so old bootloader that it does not support commandline?!\n");
-+			return;
-+		}
-+		new_data = OLD_CL_BASE_ADDR + * (u16 *) OLD_CL_OFFSET;
-+		printk("old bootloader convention, maybe loadlin?\n");
-+	}
-+	command_line = (char *) ((u64)(new_data));
-+	memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
-+#else
-+	int max_cmdline;
-+	
-+	if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
-+		max_cmdline = COMMAND_LINE_SIZE;
-+	memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
-+	saved_command_line[max_cmdline-1] = '\0';
-+#endif
-+	printk("Bootdata ok (command line is %s)\n", saved_command_line);
-+}
++#
++# USB Imaging devices
++#
++CONFIG_USB_MDC800=m
++CONFIG_USB_MICROTEK=m
 +
-+static void __init setup_boot_cpu_data(void)
-+{
-+	unsigned int dummy, eax;
++#
++# USB Network Adapters
++#
++CONFIG_USB_CATC=m
++CONFIG_USB_KAWETH=m
++CONFIG_USB_PEGASUS=m
++CONFIG_USB_RTL8150=m
++CONFIG_USB_USBNET=m
++CONFIG_USB_NET_AX8817X=m
++CONFIG_USB_NET_CDCETHER=m
++CONFIG_USB_NET_GL620A=m
++CONFIG_USB_NET_NET1080=m
++CONFIG_USB_NET_PLUSB=m
++CONFIG_USB_NET_RNDIS_HOST=m
++CONFIG_USB_NET_CDC_SUBSET=m
++CONFIG_USB_ALI_M5632=y
++CONFIG_USB_AN2720=y
++CONFIG_USB_BELKIN=y
++CONFIG_USB_ARMLINUX=y
++# CONFIG_USB_EPSON2888 is not set
++CONFIG_USB_NET_ZAURUS=m
++CONFIG_USB_MON=y
 +
-+	/* get vendor info */
-+	cpuid(0, (unsigned int *)&boot_cpu_data.cpuid_level,
-+	      (unsigned int *)&boot_cpu_data.x86_vendor_id[0],
-+	      (unsigned int *)&boot_cpu_data.x86_vendor_id[8],
-+	      (unsigned int *)&boot_cpu_data.x86_vendor_id[4]);
++#
++# USB port drivers
++#
++CONFIG_USB_USS720=m
 +
-+	/* get cpu type */
-+	cpuid(1, &eax, &dummy, &dummy,
-+		(unsigned int *) &boot_cpu_data.x86_capability);
-+	boot_cpu_data.x86 = (eax >> 8) & 0xf;
-+	boot_cpu_data.x86_model = (eax >> 4) & 0xf;
-+	boot_cpu_data.x86_mask = eax & 0xf;
-+}
++#
++# USB Serial Converter support
++#
++CONFIG_USB_SERIAL=m
++CONFIG_USB_SERIAL_GENERIC=y
++CONFIG_USB_SERIAL_AIRPRIME=m
++CONFIG_USB_SERIAL_ARK3116=m
++CONFIG_USB_SERIAL_BELKIN=m
++CONFIG_USB_SERIAL_WHITEHEAT=m
++CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
++CONFIG_USB_SERIAL_CP2101=m
++CONFIG_USB_SERIAL_CYPRESS_M8=m
++CONFIG_USB_SERIAL_EMPEG=m
++CONFIG_USB_SERIAL_FTDI_SIO=m
++CONFIG_USB_SERIAL_FUNSOFT=m
++CONFIG_USB_SERIAL_VISOR=m
++CONFIG_USB_SERIAL_IPAQ=m
++CONFIG_USB_SERIAL_IR=m
++CONFIG_USB_SERIAL_EDGEPORT=m
++CONFIG_USB_SERIAL_EDGEPORT_TI=m
++CONFIG_USB_SERIAL_GARMIN=m
++CONFIG_USB_SERIAL_IPW=m
++CONFIG_USB_SERIAL_KEYSPAN_PDA=m
++CONFIG_USB_SERIAL_KEYSPAN=m
++CONFIG_USB_SERIAL_KEYSPAN_MPR=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19=y
++CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
++CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
++CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
++CONFIG_USB_SERIAL_KLSI=m
++CONFIG_USB_SERIAL_KOBIL_SCT=m
++CONFIG_USB_SERIAL_MCT_U232=m
++CONFIG_USB_SERIAL_NAVMAN=m
++CONFIG_USB_SERIAL_PL2303=m
++CONFIG_USB_SERIAL_HP4X=m
++CONFIG_USB_SERIAL_SAFE=m
++CONFIG_USB_SERIAL_SAFE_PADDED=y
++CONFIG_USB_SERIAL_SIERRAWIRELESS=m
++CONFIG_USB_SERIAL_TI=m
++CONFIG_USB_SERIAL_CYBERJACK=m
++CONFIG_USB_SERIAL_XIRCOM=m
++CONFIG_USB_SERIAL_OPTION=m
++CONFIG_USB_SERIAL_OMNINET=m
++CONFIG_USB_EZUSB=y
 +
-+#include <xen/interface/memory.h>
-+unsigned long *machine_to_phys_mapping;
-+EXPORT_SYMBOL(machine_to_phys_mapping);
-+unsigned int machine_to_phys_order;
-+EXPORT_SYMBOL(machine_to_phys_order);
++#
++# USB Miscellaneous drivers
++#
++CONFIG_USB_EMI62=m
++CONFIG_USB_EMI26=m
++CONFIG_USB_AUERSWALD=m
++CONFIG_USB_RIO500=m
++CONFIG_USB_LEGOTOWER=m
++CONFIG_USB_LCD=m
++CONFIG_USB_LED=m
++CONFIG_USB_CYPRESS_CY7C63=m
++CONFIG_USB_CYTHERM=m
++CONFIG_USB_PHIDGETKIT=m
++CONFIG_USB_PHIDGETSERVO=m
++CONFIG_USB_IDMOUSE=m
++CONFIG_USB_APPLEDISPLAY=m
++CONFIG_USB_SISUSBVGA=m
++CONFIG_USB_SISUSBVGA_CON=y
++CONFIG_USB_LD=m
++CONFIG_USB_TEST=m
 +
-+void __init x86_64_start_kernel(char * real_mode_data)
-+{
-+	struct xen_machphys_mapping mapping;
-+	unsigned long machine_to_phys_nr_ents;
-+	char *s;
-+	int i;
++#
++# USB DSL modem support
++#
++CONFIG_USB_ATM=m
++CONFIG_USB_SPEEDTOUCH=m
++CONFIG_USB_CXACRU=m
++CONFIG_USB_UEAGLEATM=m
++CONFIG_USB_XUSBATM=m
 +
-+	setup_xen_features();
++#
++# USB Gadget Support
++#
++CONFIG_USB_GADGET=m
++# CONFIG_USB_GADGET_DEBUG_FILES is not set
++CONFIG_USB_GADGET_SELECTED=y
++CONFIG_USB_GADGET_NET2280=y
++CONFIG_USB_NET2280=m
++# CONFIG_USB_GADGET_PXA2XX is not set
++# CONFIG_USB_GADGET_GOKU is not set
++# CONFIG_USB_GADGET_LH7A40X is not set
++# CONFIG_USB_GADGET_OMAP is not set
++# CONFIG_USB_GADGET_AT91 is not set
++# CONFIG_USB_GADGET_DUMMY_HCD is not set
++CONFIG_USB_GADGET_DUALSPEED=y
++CONFIG_USB_ZERO=m
++CONFIG_USB_ETH=m
++CONFIG_USB_ETH_RNDIS=y
++CONFIG_USB_GADGETFS=m
++CONFIG_USB_FILE_STORAGE=m
++# CONFIG_USB_FILE_STORAGE_TEST is not set
++CONFIG_USB_G_SERIAL=m
 +
-+	xen_start_info = (struct start_info *)real_mode_data;
-+	if (!xen_feature(XENFEAT_auto_translated_physmap))
-+		phys_to_machine_mapping =
-+			(unsigned long *)xen_start_info->mfn_list;
-+	start_pfn = (__pa(xen_start_info->pt_base) >> PAGE_SHIFT) +
-+		xen_start_info->nr_pt_frames;
++#
++# MMC/SD Card support
++#
++CONFIG_MMC=m
++# CONFIG_MMC_DEBUG is not set
++CONFIG_MMC_BLOCK=m
++CONFIG_MMC_SDHCI=m
++CONFIG_MMC_WBSD=m
 +
-+	machine_to_phys_mapping = (unsigned long *)MACH2PHYS_VIRT_START;
-+	machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
-+	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
-+		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
-+		machine_to_phys_nr_ents = mapping.max_mfn + 1;
-+	}
-+	while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents )
-+		machine_to_phys_order++;
++#
++# LED devices
++#
++CONFIG_NEW_LEDS=y
++CONFIG_LEDS_CLASS=m
 +
-+#if 0
-+	for (i = 0; i < 256; i++)
-+		set_intr_gate(i, early_idt_handler);
-+	asm volatile("lidt %0" :: "m" (idt_descr));
-+#endif
++#
++# LED drivers
++#
++CONFIG_LEDS_NET48XX=m
 +
-+	/*
-+	 * This must be called really, really early:
-+	 */
-+	lockdep_init();
++#
++# LED Triggers
++#
++CONFIG_LEDS_TRIGGERS=y
++CONFIG_LEDS_TRIGGER_TIMER=m
++CONFIG_LEDS_TRIGGER_IDE_DISK=y
++CONFIG_LEDS_TRIGGER_HEARTBEAT=m
 +
-+ 	for (i = 0; i < NR_CPUS; i++)
-+ 		cpu_pda(i) = &boot_cpu_pda[i];
++#
++# InfiniBand support
++#
++CONFIG_INFINIBAND=m
++CONFIG_INFINIBAND_USER_MAD=m
++CONFIG_INFINIBAND_USER_ACCESS=m
++CONFIG_INFINIBAND_ADDR_TRANS=y
++CONFIG_INFINIBAND_MTHCA=m
++CONFIG_INFINIBAND_MTHCA_DEBUG=y
++CONFIG_INFINIBAND_IPOIB=m
++CONFIG_INFINIBAND_IPOIB_DEBUG=y
++# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set
++CONFIG_INFINIBAND_SRP=m
++CONFIG_INFINIBAND_ISER=m
 +
-+	pda_init(0);
-+	copy_bootdata(real_mode_data);
-+#ifdef CONFIG_SMP
-+	cpu_set(0, cpu_online_map);
-+#endif
-+	s = strstr(saved_command_line, "earlyprintk=");
-+	if (s != NULL)
-+		setup_early_printk(strchr(s, '=') + 1);
-+#ifdef CONFIG_NUMA
-+	s = strstr(saved_command_line, "numa=");
-+	if (s != NULL)
-+		numa_setup(s+5);
-+#endif
-+#ifdef CONFIG_X86_IO_APIC
-+	if (strstr(saved_command_line, "disableapic"))
-+		disable_apic = 1;
-+#endif
-+	/* You need early console to see that */
-+	if (__pa_symbol(&_end) >= KERNEL_TEXT_SIZE)
-+		panic("Kernel too big for kernel mapping\n");
++#
++# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
++#
++CONFIG_EDAC=m
 +
-+	setup_boot_cpu_data();
-+	start_kernel();
-+}
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/head-xen.S tmp-linux-2.6-xen.patch/arch/x86_64/kernel/head-xen.S
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/head-xen.S	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/head-xen.S	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,203 @@
-+/*
-+ *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
-+ *
-+ *  Copyright (C) 2000 Andrea Arcangeli <andrea at suse.de> SuSE
-+ *  Copyright (C) 2000 Pavel Machek <pavel at suse.cz>
-+ *  Copyright (C) 2000 Karsten Keil <kkeil at suse.de>
-+ *  Copyright (C) 2001,2002 Andi Kleen <ak at suse.de>
-+ *
-+ *  $Id: head.S,v 1.49 2002/03/19 17:39:25 ak Exp $
-+ *
-+ *  Jun Nakajima <jun.nakajima at intel.com>
-+ *    Modified for Xen                                
-+ */
++#
++# Reporting subsystems
++#
++# CONFIG_EDAC_DEBUG is not set
++CONFIG_EDAC_MM_EDAC=m
++CONFIG_EDAC_AMD76X=m
++CONFIG_EDAC_E7XXX=m
++CONFIG_EDAC_E752X=m
++CONFIG_EDAC_I82875P=m
++CONFIG_EDAC_I82860=m
++CONFIG_EDAC_R82600=m
++CONFIG_EDAC_POLL=y
 +
++#
++# Real Time Clock
++#
++CONFIG_RTC_LIB=m
++CONFIG_RTC_CLASS=m
 +
-+#include <linux/linkage.h>
-+#include <linux/threads.h>
-+#include <linux/init.h>
-+#include <linux/elfnote.h>
-+#include <asm/desc.h>
-+#include <asm/segment.h>
-+#include <asm/page.h>
-+#include <asm/msr.h>
-+#include <asm/cache.h>
-+#include <asm/dwarf2.h>
-+#include <xen/interface/elfnote.h>
++#
++# RTC interfaces
++#
++CONFIG_RTC_INTF_SYSFS=m
++CONFIG_RTC_INTF_PROC=m
++CONFIG_RTC_INTF_DEV=m
++CONFIG_RTC_INTF_DEV_UIE_EMUL=y
 +
-+	.section .bootstrap.text, "ax", @progbits
-+	.code64
-+#define VIRT_ENTRY_OFFSET 0x0
-+.org VIRT_ENTRY_OFFSET
-+	.globl startup_64
-+startup_64:
-+ENTRY(_start)
-+	movq $(init_thread_union+THREAD_SIZE-8),%rsp
++#
++# RTC drivers
++#
++CONFIG_RTC_DRV_X1205=m
++CONFIG_RTC_DRV_DS1307=m
++CONFIG_RTC_DRV_DS1553=m
++CONFIG_RTC_DRV_ISL1208=m
++CONFIG_RTC_DRV_DS1672=m
++CONFIG_RTC_DRV_DS1742=m
++CONFIG_RTC_DRV_PCF8563=m
++CONFIG_RTC_DRV_PCF8583=m
++CONFIG_RTC_DRV_RS5C348=m
++CONFIG_RTC_DRV_RS5C372=m
++CONFIG_RTC_DRV_M48T86=m
++CONFIG_RTC_DRV_TEST=m
++CONFIG_RTC_DRV_MAX6902=m
++CONFIG_RTC_DRV_V3020=m
 +
-+	/* rsi is pointer to startup info structure.
-+	   pass it to C */
-+	movq %rsi,%rdi
-+	pushq $0		# fake return address
-+	jmp x86_64_start_kernel
++#
++# DMA Engine support
++#
++CONFIG_DMA_ENGINE=y
 +
-+ENTRY(stext)
-+ENTRY(_stext)
++#
++# DMA Clients
++#
++CONFIG_NET_DMA=y
 +
-+	$page = 0
-+#define NEXT_PAGE(name) \
-+	$page = $page + 1; \
-+	.org $page * 0x1000; \
-+	phys_##name = $page * 0x1000 + __PHYSICAL_START; \
-+ENTRY(name)
++#
++# DMA Devices
++#
++CONFIG_INTEL_IOATDMA=m
 +
-+NEXT_PAGE(init_level4_pgt)
-+	/* This gets initialized in x86_64_start_kernel */
-+	.fill	512,8,0
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++CONFIG_EXT2_FS_POSIX_ACL=y
++CONFIG_EXT2_FS_SECURITY=y
++CONFIG_EXT2_FS_XIP=y
++CONFIG_FS_XIP=y
++CONFIG_EXT3_FS=m
++CONFIG_EXT3_FS_XATTR=y
++CONFIG_EXT3_FS_POSIX_ACL=y
++CONFIG_EXT3_FS_SECURITY=y
++CONFIG_JBD=m
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=m
++# CONFIG_REISERFS_CHECK is not set
++# CONFIG_REISERFS_PROC_INFO is not set
++CONFIG_REISERFS_FS_XATTR=y
++CONFIG_REISERFS_FS_POSIX_ACL=y
++CONFIG_REISERFS_FS_SECURITY=y
++CONFIG_JFS_FS=m
++CONFIG_JFS_POSIX_ACL=y
++# CONFIG_JFS_SECURITY is not set
++# CONFIG_JFS_DEBUG is not set
++CONFIG_JFS_STATISTICS=y
++CONFIG_FS_POSIX_ACL=y
++CONFIG_XFS_FS=m
++CONFIG_XFS_QUOTA=y
++CONFIG_XFS_SECURITY=y
++CONFIG_XFS_POSIX_ACL=y
++CONFIG_XFS_RT=y
++CONFIG_OCFS2_FS=m
++CONFIG_OCFS2_DEBUG_MASKLOG=y
++CONFIG_MINIX_FS=m
++CONFIG_ROMFS_FS=m
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++CONFIG_QUOTA=y
++CONFIG_QFMT_V1=m
++CONFIG_QFMT_V2=m
++CONFIG_QUOTACTL=y
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=m
++CONFIG_AUTOFS4_FS=m
++CONFIG_FUSE_FS=m
 +
-+        /*
-+         * We update two pgd entries to make kernel and user pgd consistent
-+         * at pgd_populate(). It can be used for kernel modules. So we place 
-+         * this page here for those cases to avoid memory corruption.
-+         * We also use this page to establish the initial mapping for
-+         * vsyscall area.
-+         */
-+NEXT_PAGE(init_level4_user_pgt)
-+	.fill	512,8,0
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=m
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_ZISOFS_FS=m
++CONFIG_UDF_FS=m
++CONFIG_UDF_NLS=y
 +
-+NEXT_PAGE(level3_kernel_pgt)
-+	.fill	512,8,0
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=m
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++CONFIG_NTFS_FS=m
++# CONFIG_NTFS_DEBUG is not set
++# CONFIG_NTFS_RW is not set
 +
-+        /*
-+         * This is used for vsyscall area mapping as we have a different
-+         * level4 page table for user.
-+         */
-+NEXT_PAGE(level3_user_pgt)
-+        .fill	512,8,0
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++CONFIG_CONFIGFS_FS=m
 +
-+NEXT_PAGE(level2_kernel_pgt)
-+	.fill	512,8,0
++#
++# Miscellaneous filesystems
++#
++CONFIG_ADFS_FS=m
++# CONFIG_ADFS_FS_RW is not set
++CONFIG_AFFS_FS=m
++CONFIG_HFS_FS=m
++CONFIG_HFSPLUS_FS=m
++CONFIG_BEFS_FS=m
++# CONFIG_BEFS_DEBUG is not set
++CONFIG_BFS_FS=m
++CONFIG_EFS_FS=m
++CONFIG_JFFS_FS=m
++CONFIG_JFFS_FS_VERBOSE=0
++CONFIG_JFFS_PROC_FS=y
++CONFIG_JFFS2_FS=m
++CONFIG_JFFS2_FS_DEBUG=0
++CONFIG_JFFS2_FS_WRITEBUFFER=y
++# CONFIG_JFFS2_SUMMARY is not set
++# CONFIG_JFFS2_FS_XATTR is not set
++# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
++CONFIG_JFFS2_ZLIB=y
++CONFIG_JFFS2_RTIME=y
++# CONFIG_JFFS2_RUBIN is not set
++CONFIG_CRAMFS=m
++CONFIG_VXFS_FS=m
++CONFIG_HPFS_FS=m
++CONFIG_QNX4FS_FS=m
++CONFIG_SYSV_FS=m
++CONFIG_UFS_FS=m
++# CONFIG_UFS_FS_WRITE is not set
++# CONFIG_UFS_DEBUG is not set
 +
-+NEXT_PAGE(hypercall_page)
-+	CFI_STARTPROC
-+	.rept 0x1000 / 0x20
-+	.skip 1 /* push %rcx */
-+	CFI_ADJUST_CFA_OFFSET	8
-+	CFI_REL_OFFSET	rcx,0
-+	.skip 2 /* push %r11 */
-+	CFI_ADJUST_CFA_OFFSET	8
-+	CFI_REL_OFFSET	r11,0
-+	.skip 5 /* mov $#,%eax */
-+	.skip 2 /* syscall */
-+	.skip 2 /* pop %r11 */
-+	CFI_ADJUST_CFA_OFFSET -8
-+	CFI_RESTORE r11
-+	.skip 1 /* pop %rcx */
-+	CFI_ADJUST_CFA_OFFSET -8
-+	CFI_RESTORE rcx
-+	.align 0x20,0 /* ret */
-+	.endr
-+	CFI_ENDPROC
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=m
++CONFIG_NFS_V3=y
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=y
++CONFIG_NFS_DIRECTIO=y
++CONFIG_NFSD=m
++CONFIG_NFSD_V2_ACL=y
++CONFIG_NFSD_V3=y
++CONFIG_NFSD_V3_ACL=y
++CONFIG_NFSD_V4=y
++CONFIG_NFSD_TCP=y
++CONFIG_LOCKD=m
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=m
++CONFIG_NFS_ACL_SUPPORT=m
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=m
++CONFIG_SUNRPC_GSS=m
++CONFIG_RPCSEC_GSS_KRB5=m
++CONFIG_RPCSEC_GSS_SPKM3=m
++CONFIG_SMB_FS=m
++CONFIG_SMB_NLS_DEFAULT=y
++CONFIG_SMB_NLS_REMOTE="cp850"
++CONFIG_CIFS=m
++CONFIG_CIFS_STATS=y
++CONFIG_CIFS_STATS2=y
++# CONFIG_CIFS_WEAK_PW_HASH is not set
++CONFIG_CIFS_XATTR=y
++# CONFIG_CIFS_POSIX is not set
++# CONFIG_CIFS_DEBUG2 is not set
++# CONFIG_CIFS_EXPERIMENTAL is not set
++CONFIG_NCP_FS=m
++CONFIG_NCPFS_PACKET_SIGNING=y
++CONFIG_NCPFS_IOCTL_LOCKING=y
++CONFIG_NCPFS_STRONG=y
++CONFIG_NCPFS_NFS_NS=y
++CONFIG_NCPFS_OS2_NS=y
++# CONFIG_NCPFS_SMALLDOS is not set
++CONFIG_NCPFS_NLS=y
++CONFIG_NCPFS_EXTRAS=y
++CONFIG_CODA_FS=m
++# CONFIG_CODA_FS_OLD_API is not set
++CONFIG_AFS_FS=m
++CONFIG_RXRPC=m
++CONFIG_9P_FS=m
 +
-+#undef NEXT_PAGE
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++CONFIG_OSF_PARTITION=y
++# CONFIG_AMIGA_PARTITION is not set
++CONFIG_ATARI_PARTITION=y
++CONFIG_MAC_PARTITION=y
++CONFIG_MSDOS_PARTITION=y
++CONFIG_BSD_DISKLABEL=y
++# CONFIG_MINIX_SUBPARTITION is not set
++CONFIG_SOLARIS_X86_PARTITION=y
++CONFIG_UNIXWARE_DISKLABEL=y
++CONFIG_LDM_PARTITION=y
++# CONFIG_LDM_DEBUG is not set
++CONFIG_SGI_PARTITION=y
++CONFIG_ULTRIX_PARTITION=y
++CONFIG_SUN_PARTITION=y
++CONFIG_KARMA_PARTITION=y
++CONFIG_EFI_PARTITION=y
 +
-+	.data
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="utf8"
++CONFIG_NLS_CODEPAGE_437=m
++CONFIG_NLS_CODEPAGE_737=m
++CONFIG_NLS_CODEPAGE_775=m
++CONFIG_NLS_CODEPAGE_850=m
++CONFIG_NLS_CODEPAGE_852=m
++CONFIG_NLS_CODEPAGE_855=m
++CONFIG_NLS_CODEPAGE_857=m
++CONFIG_NLS_CODEPAGE_860=m
++CONFIG_NLS_CODEPAGE_861=m
++CONFIG_NLS_CODEPAGE_862=m
++CONFIG_NLS_CODEPAGE_863=m
++CONFIG_NLS_CODEPAGE_864=m
++CONFIG_NLS_CODEPAGE_865=m
++CONFIG_NLS_CODEPAGE_866=m
++CONFIG_NLS_CODEPAGE_869=m
++CONFIG_NLS_CODEPAGE_936=m
++CONFIG_NLS_CODEPAGE_950=m
++CONFIG_NLS_CODEPAGE_932=m
++CONFIG_NLS_CODEPAGE_949=m
++CONFIG_NLS_CODEPAGE_874=m
++CONFIG_NLS_ISO8859_8=m
++CONFIG_NLS_CODEPAGE_1250=m
++CONFIG_NLS_CODEPAGE_1251=m
++CONFIG_NLS_ASCII=m
++CONFIG_NLS_ISO8859_1=m
++CONFIG_NLS_ISO8859_2=m
++CONFIG_NLS_ISO8859_3=m
++CONFIG_NLS_ISO8859_4=m
++CONFIG_NLS_ISO8859_5=m
++CONFIG_NLS_ISO8859_6=m
++CONFIG_NLS_ISO8859_7=m
++CONFIG_NLS_ISO8859_9=m
++CONFIG_NLS_ISO8859_13=m
++CONFIG_NLS_ISO8859_14=m
++CONFIG_NLS_ISO8859_15=m
++CONFIG_NLS_KOI8_R=m
++CONFIG_NLS_KOI8_U=m
++CONFIG_NLS_UTF8=m
 +
-+	.align 16
-+	.globl cpu_gdt_descr
-+cpu_gdt_descr:
-+	.word	gdt_end-cpu_gdt_table-1
-+gdt:
-+	.quad	cpu_gdt_table
-+#ifdef CONFIG_SMP
-+	.rept	NR_CPUS-1
-+	.word	0
-+	.quad	0
-+	.endr
-+#endif
++#
++# Instrumentation Support
++#
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
 +
-+/* We need valid kernel segments for data and code in long mode too
-+ * IRET will check the segment types  kkeil 2000/10/28
-+ * Also sysret mandates a special GDT layout 
-+ */
-+		 		
-+	.section .data.page_aligned, "aw"
-+	.align PAGE_SIZE
++#
++# Kernel hacking
++#
++CONFIG_TRACE_IRQFLAGS_SUPPORT=y
++# CONFIG_PRINTK_TIME is not set
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_UNUSED_SYMBOLS=y
++CONFIG_DEBUG_KERNEL=y
++CONFIG_LOG_BUF_SHIFT=14
++CONFIG_DETECT_SOFTLOCKUP=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_RWSEMS is not set
++# CONFIG_DEBUG_LOCK_ALLOC is not set
++# CONFIG_PROVE_LOCKING is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_HIGHMEM is not set
++CONFIG_DEBUG_BUGVERBOSE=y
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_DEBUG_VM is not set
++# CONFIG_FRAME_POINTER is not set
++# CONFIG_UNWIND_INFO is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_RCU_TORTURE_TEST is not set
++CONFIG_EARLY_PRINTK=y
++CONFIG_DEBUG_STACKOVERFLOW=y
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_DEBUG_RODATA is not set
++# CONFIG_4KSTACKS is not set
++CONFIG_X86_FIND_SMP_CONFIG=y
++CONFIG_X86_MPPARSE=y
 +
-+/* The TLS descriptors are currently at a different place compared to i386.
-+   Hopefully nobody expects them at a fixed place (Wine?) */
++#
++# Security options
++#
++CONFIG_KEYS=y
++# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
++CONFIG_SECURITY=y
++CONFIG_SECURITY_NETWORK=y
++# CONFIG_SECURITY_NETWORK_XFRM is not set
++CONFIG_SECURITY_CAPABILITIES=y
++CONFIG_SECURITY_ROOTPLUG=m
++CONFIG_SECURITY_SECLVL=m
++# CONFIG_SECURITY_SELINUX is not set
 +
-+ENTRY(cpu_gdt_table)
-+	.quad	0x0000000000000000	/* NULL descriptor */
-+	.quad	0x0			/* unused */
-+	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
-+	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
-+	.quad	0x00cffa000000ffff	/* __USER32_CS */
-+	.quad	0x00cff2000000ffff	/* __USER_DS, __USER32_DS  */
-+	.quad	0x00affa000000ffff	/* __USER_CS */
-+	.quad	0x00cf9a000000ffff	/* __KERNEL32_CS */
-+	.quad	0,0			/* TSS */
-+	.quad	0,0			/* LDT */
-+	.quad   0,0,0			/* three TLS descriptors */
-+	.quad	0			/* unused */
-+gdt_end:
-+	/* asm/segment.h:GDT_ENTRIES must match this */
-+	/* This should be a multiple of the cache line size */
-+	/* GDTs of other CPUs are now dynamically allocated */
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_HMAC=y
++CONFIG_CRYPTO_NULL=m
++CONFIG_CRYPTO_MD4=m
++CONFIG_CRYPTO_MD5=y
++CONFIG_CRYPTO_SHA1=m
++CONFIG_CRYPTO_SHA256=m
++CONFIG_CRYPTO_SHA512=m
++CONFIG_CRYPTO_WP512=m
++CONFIG_CRYPTO_TGR192=m
++CONFIG_CRYPTO_DES=m
++CONFIG_CRYPTO_BLOWFISH=m
++CONFIG_CRYPTO_TWOFISH=m
++CONFIG_CRYPTO_SERPENT=m
++CONFIG_CRYPTO_AES=m
++CONFIG_CRYPTO_AES_586=m
++CONFIG_CRYPTO_CAST5=m
++CONFIG_CRYPTO_CAST6=m
++CONFIG_CRYPTO_TEA=m
++CONFIG_CRYPTO_ARC4=m
++CONFIG_CRYPTO_KHAZAD=m
++CONFIG_CRYPTO_ANUBIS=m
++CONFIG_CRYPTO_DEFLATE=m
++CONFIG_CRYPTO_MICHAEL_MIC=m
++CONFIG_CRYPTO_CRC32C=m
++CONFIG_CRYPTO_TEST=m
 +
-+	/* zero the remaining page */
-+	.fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
++#
++# Hardware crypto devices
++#
++# CONFIG_CRYPTO_DEV_PADLOCK is not set
++CONFIG_XEN=y
++CONFIG_XEN_INTERFACE_VERSION=0x00030207
 +
-+	.section .bss.page_aligned, "aw", @nobits
-+	.align PAGE_SIZE
-+ENTRY(empty_zero_page)
-+	.skip PAGE_SIZE
++#
++# XEN
++#
++CONFIG_XEN_PRIVILEGED_GUEST=y
++# CONFIG_XEN_UNPRIVILEGED_GUEST is not set
++CONFIG_XEN_PRIVCMD=y
++CONFIG_XEN_XENBUS_DEV=y
++CONFIG_XEN_BACKEND=y
++CONFIG_XEN_BLKDEV_BACKEND=y
++CONFIG_XEN_BLKDEV_TAP=y
++CONFIG_XEN_NETDEV_BACKEND=y
++# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_BACKEND is not set
++# CONFIG_XEN_NETDEV_LOOPBACK is not set
++CONFIG_XEN_PCIDEV_BACKEND=m
++CONFIG_XEN_PCIDEV_BACKEND_VPCI=y
++# CONFIG_XEN_PCIDEV_BACKEND_PASS is not set
++# CONFIG_XEN_PCIDEV_BACKEND_SLOT is not set
++# CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER is not set
++# CONFIG_XEN_PCIDEV_BE_DEBUG is not set
++CONFIG_XEN_TPMDEV_BACKEND=m
++CONFIG_XEN_SCSI_BACKEND=m
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++CONFIG_XEN_SCSI_FRONTEND=m
++CONFIG_XEN_GRANT_DEV=y
++# CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND is not set
++CONFIG_XEN_FRAMEBUFFER=y
++CONFIG_XEN_KEYBOARD=y
++CONFIG_XEN_SCRUB_PAGES=y
++CONFIG_XEN_DISABLE_SERIAL=y
++CONFIG_XEN_SYSFS=y
++CONFIG_XEN_COMPAT_030002_AND_LATER=y
++# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
++# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
++# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
++CONFIG_XEN_COMPAT=0x030002
++CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
++CONFIG_NO_IDLE_HZ=y
++CONFIG_XEN_SMPBOOT=y
++CONFIG_XEN_BALLOON=y
++CONFIG_XEN_DEVMEM=y
 +
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+/*
-+ * __xen_guest information
-+ */
-+.macro utoh value
-+ .if (\value) < 0 || (\value) >= 0x10
-+	utoh (((\value)>>4)&0x0fffffffffffffff)
-+ .endif
-+ .if ((\value) & 0xf) < 10
-+  .byte '0' + ((\value) & 0xf)
-+ .else
-+  .byte 'A' + ((\value) & 0xf) - 10
-+ .endif
-+.endm
++#
++# Library routines
++#
++CONFIG_CRC_CCITT=m
++CONFIG_CRC16=m
++CONFIG_CRC32=y
++CONFIG_LIBCRC32C=m
++CONFIG_AUDIT_GENERIC=y
++CONFIG_ZLIB_INFLATE=m
++CONFIG_ZLIB_DEFLATE=m
++CONFIG_REED_SOLOMON=m
++CONFIG_REED_SOLOMON_DEC16=y
++CONFIG_TEXTSEARCH=y
++CONFIG_TEXTSEARCH_KMP=m
++CONFIG_TEXTSEARCH_BM=m
++CONFIG_TEXTSEARCH_FSM=m
++CONFIG_PLIST=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_GENERIC_PENDING_IRQ=y
++CONFIG_X86_SMP=y
++CONFIG_X86_BIOS_REBOOT=y
++CONFIG_X86_TRAMPOLINE=y
++CONFIG_X86_NO_TSS=y
++CONFIG_X86_NO_IDT=y
++CONFIG_KTIME_SCALAR=y
+diff -r d894e36cfc30 -r 0aa021803deb buildconfigs/linux-defconfig_xen_x86_64
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/buildconfigs/linux-defconfig_xen_x86_64	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,3146 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.18.8
++# Mon Feb 18 10:38:24 2008
++#
++CONFIG_X86_64=y
++CONFIG_64BIT=y
++CONFIG_X86=y
++CONFIG_LOCKDEP_SUPPORT=y
++CONFIG_STACKTRACE_SUPPORT=y
++CONFIG_SEMAPHORE_SLEEPERS=y
++CONFIG_MMU=y
++CONFIG_RWSEM_GENERIC_SPINLOCK=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_X86_CMPXCHG=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_DMI=y
++CONFIG_AUDIT_ARCH=y
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
 +
-+.section __xen_guest
-+	.ascii	"GUEST_OS=linux,GUEST_VER=2.6"
-+	.ascii	",XEN_VER=xen-3.0"
-+	.ascii	",VIRT_BASE=0x"
-+		utoh __START_KERNEL_map
-+	.ascii	",ELF_PADDR_OFFSET=0x"
-+		utoh __START_KERNEL_map
-+	.ascii	",VIRT_ENTRY=0x"
-+		utoh (__START_KERNEL_map + __PHYSICAL_START + VIRT_ENTRY_OFFSET)
-+	.ascii	",HYPERCALL_PAGE=0x"
-+		utoh (phys_hypercall_page >> PAGE_SHIFT)
-+	.ascii  ",FEATURES=writable_page_tables"
-+	.ascii		 "|writable_descriptor_tables"
-+	.ascii		 "|auto_translated_physmap"
-+	.ascii	         "|supervisor_mode_kernel"
-+	.ascii	",LOADER=generic"
-+	.byte	0
-+#endif /* CONFIG_XEN_COMPAT <= 0x030002 */
-+	
-+	ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS,       .asciz, "linux")
-+	ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION,  .asciz, "2.6")
-+	ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION,    .asciz, "xen-3.0")
-+	ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE,      .quad,  __START_KERNEL_map)
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+	ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET,   .quad,  __START_KERNEL_map)
-+#else
-+	ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET,   .quad,  0)
-+#endif
-+	ELFNOTE(Xen, XEN_ELFNOTE_ENTRY,          .quad,  startup_64)
-+	ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .quad,  hypercall_page)
-+	ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID,   .quad,  _PAGE_PRESENT,_PAGE_PRESENT)
-+	ELFNOTE(Xen, XEN_ELFNOTE_FEATURES,       .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel")
-+	ELFNOTE(Xen, XEN_ELFNOTE_LOADER,         .asciz, "generic")
-+	ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long,  1)
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/init_task.c tmp-linux-2.6-xen.patch/arch/x86_64/kernel/init_task.c
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/init_task.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/init_task.c	2007-07-30 16:35:11.000000000 +0200
-@@ -37,6 +37,8 @@ union thread_union init_thread_union 
- struct task_struct init_task = INIT_TASK(init_task);
- 
- EXPORT_SYMBOL(init_task);
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
 +
-+#ifndef CONFIG_X86_NO_TSS
- /*
-  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
-  * no more per-task TSS's. The TSS size is kept cacheline-aligned
-@@ -50,5 +52,6 @@ DEFINE_PER_CPU(struct tss_struct, init_t
-  * debugging, no special alignment required.
-  */
- DEFINE_PER_CPU(struct orig_ist, orig_ist);
-+#endif
- 
- #define ALIGN_TO_4K __attribute__((section(".data.init_task")))
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/io_apic-xen.c tmp-linux-2.6-xen.patch/arch/x86_64/kernel/io_apic-xen.c
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/io_apic-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/io_apic-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,2269 @@
-+/*
-+ *	Intel IO-APIC support for multi-Pentium hosts.
-+ *
-+ *	Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
-+ *
-+ *	Many thanks to Stig Venaas for trying out countless experimental
-+ *	patches and reporting/debugging problems patiently!
-+ *
-+ *	(c) 1999, Multiple IO-APIC support, developed by
-+ *	Ken-ichi Yaku <yaku at css1.kbnes.nec.co.jp> and
-+ *      Hidemi Kishimoto <kisimoto at css1.kbnes.nec.co.jp>,
-+ *	further tested and cleaned up by Zach Brown <zab at redhat.com>
-+ *	and Ingo Molnar <mingo at redhat.com>
-+ *
-+ *	Fixes
-+ *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
-+ *					thanks to Eric Gilmore
-+ *					and Rolf G. Tews
-+ *					for testing these extensively
-+ *	Paul Diefenbaugh	:	Added full ACPI support
-+ */
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++# CONFIG_LOCALVERSION_AUTO is not set
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_BSD_PROCESS_ACCT=y
++CONFIG_BSD_PROCESS_ACCT_V3=y
++CONFIG_TASKSTATS=y
++CONFIG_TASK_DELAY_ACCT=y
++CONFIG_AUDIT=y
++CONFIG_AUDITSYSCALL=y
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++CONFIG_CPUSETS=y
++# CONFIG_RELAY is not set
++CONFIG_INITRAMFS_SOURCE=""
++CONFIG_CC_OPTIMIZE_FOR_SIZE=y
++# CONFIG_EMBEDDED is not set
++CONFIG_UID16=y
++CONFIG_SYSCTL=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++CONFIG_KALLSYMS_EXTRA_PASS=y
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_SLAB=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_SLOB is not set
 +
-+#include <linux/mm.h>
-+#include <linux/interrupt.h>
-+#include <linux/init.h>
-+#include <linux/delay.h>
-+#include <linux/sched.h>
-+#include <linux/smp_lock.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/acpi.h>
-+#include <linux/sysdev.h>
-+#ifdef CONFIG_ACPI
-+#include <acpi/acpi_bus.h>
-+#endif
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++CONFIG_MODULE_FORCE_UNLOAD=y
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++CONFIG_KMOD=y
++CONFIG_STOP_MACHINE=y
 +
-+#include <asm/io.h>
-+#include <asm/smp.h>
-+#include <asm/desc.h>
-+#include <asm/proto.h>
-+#include <asm/mach_apic.h>
-+#include <asm/acpi.h>
-+#include <asm/dma.h>
-+#include <asm/nmi.h>
++#
++# Block layer
++#
++CONFIG_LBD=y
++# CONFIG_BLK_DEV_IO_TRACE is not set
++CONFIG_LSF=y
 +
-+#define __apicdebuginit  __init
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++# CONFIG_DEFAULT_AS is not set
++# CONFIG_DEFAULT_DEADLINE is not set
++CONFIG_DEFAULT_CFQ=y
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="cfq"
 +
-+int sis_apic_bug; /* not actually supported, dummy for compile */
++#
++# Processor type and features
++#
++CONFIG_X86_PC=y
++# CONFIG_X86_VSMP is not set
++# CONFIG_MK8 is not set
++# CONFIG_MPSC is not set
++CONFIG_GENERIC_CPU=y
++CONFIG_X86_64_XEN=y
++CONFIG_X86_NO_TSS=y
++CONFIG_X86_NO_IDT=y
++CONFIG_X86_L1_CACHE_BYTES=128
++CONFIG_X86_L1_CACHE_SHIFT=7
++CONFIG_X86_INTERNODE_CACHE_BYTES=128
++CONFIG_X86_GOOD_APIC=y
++CONFIG_MICROCODE=y
++CONFIG_X86_MSR=m
++CONFIG_X86_CPUID=m
++CONFIG_X86_IO_APIC=y
++CONFIG_X86_XEN_GENAPIC=y
++CONFIG_X86_LOCAL_APIC=y
++CONFIG_MTRR=y
++CONFIG_SMP=y
++# CONFIG_PREEMPT_NONE is not set
++CONFIG_PREEMPT_VOLUNTARY=y
++# CONFIG_PREEMPT is not set
++CONFIG_PREEMPT_BKL=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_RESOURCES_64BIT=y
++CONFIG_NR_CPUS=32
++CONFIG_HOTPLUG_CPU=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_SWIOTLB=y
++CONFIG_KEXEC=y
++# CONFIG_CRASH_DUMP is not set
++CONFIG_PHYSICAL_START=0x200000
++CONFIG_SECCOMP=y
++CONFIG_HZ_100=y
++# CONFIG_HZ_250 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=100
++# CONFIG_REORDER is not set
++CONFIG_K8_NB=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_ISA_DMA_API=y
++CONFIG_GENERIC_PENDING_IRQ=y
 +
-+static int no_timer_check;
++#
++# Power management options
++#
++CONFIG_PM=y
++# CONFIG_PM_LEGACY is not set
++CONFIG_PM_DEBUG=y
++# CONFIG_SOFTWARE_SUSPEND is not set
++CONFIG_SUSPEND_SMP=y
 +
-+int disable_timer_pin_1 __initdata;
++#
++# ACPI (Advanced Configuration and Power Interface) Support
++#
++CONFIG_ACPI=y
++CONFIG_ACPI_SLEEP=y
++CONFIG_ACPI_SLEEP_PROC_FS=y
++# CONFIG_ACPI_SLEEP_PROC_SLEEP is not set
++CONFIG_ACPI_AC=m
++CONFIG_ACPI_BATTERY=m
++CONFIG_ACPI_BUTTON=m
++CONFIG_ACPI_VIDEO=m
++CONFIG_ACPI_HOTKEY=m
++CONFIG_ACPI_FAN=m
++CONFIG_ACPI_DOCK=m
++CONFIG_ACPI_PROCESSOR=m
++CONFIG_ACPI_HOTPLUG_CPU=y
++CONFIG_ACPI_THERMAL=m
++CONFIG_ACPI_ASUS=m
++CONFIG_ACPI_IBM=m
++CONFIG_ACPI_TOSHIBA=m
++CONFIG_ACPI_BLACKLIST_YEAR=0
++# CONFIG_ACPI_DEBUG is not set
++CONFIG_ACPI_EC=y
++CONFIG_ACPI_POWER=y
++CONFIG_ACPI_SYSTEM=y
++CONFIG_ACPI_CONTAINER=m
++CONFIG_ACPI_SBS=m
++CONFIG_ACPI_PV_SLEEP=y
 +
-+#ifndef CONFIG_XEN
-+int timer_over_8254 __initdata = 0;
++#
++# CPU Frequency scaling
++#
++# CONFIG_CPU_FREQ is not set
 +
-+/* Where if anywhere is the i8259 connect in external int mode */
-+static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
-+#endif
++#
++# Bus options (PCI etc.)
++#
++CONFIG_PCI=y
++CONFIG_PCI_DIRECT=y
++# CONFIG_PCI_MMCONFIG is not set
++CONFIG_XEN_PCIDEV_FRONTEND=y
++# CONFIG_XEN_PCIDEV_FE_DEBUG is not set
++# CONFIG_PCIEPORTBUS is not set
++CONFIG_PCI_MSI=y
++# CONFIG_PCI_DEBUG is not set
 +
-+static DEFINE_SPINLOCK(ioapic_lock);
-+static DEFINE_SPINLOCK(vector_lock);
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++CONFIG_PCCARD=m
++# CONFIG_PCMCIA_DEBUG is not set
++CONFIG_PCMCIA=m
++CONFIG_PCMCIA_LOAD_CIS=y
++CONFIG_PCMCIA_IOCTL=y
++CONFIG_CARDBUS=y
 +
-+/*
-+ * # of IRQ routing registers
-+ */
-+int nr_ioapic_registers[MAX_IO_APICS];
++#
++# PC-card bridges
++#
++CONFIG_YENTA=m
++CONFIG_YENTA_O2=y
++CONFIG_YENTA_RICOH=y
++CONFIG_YENTA_TI=y
++CONFIG_YENTA_ENE_TUNE=y
++CONFIG_YENTA_TOSHIBA=y
++CONFIG_PD6729=m
++CONFIG_I82092=m
++CONFIG_PCCARD_NONSTATIC=m
 +
-+/*
-+ * Rough estimation of how many shared IRQs there are, can
-+ * be changed anytime.
-+ */
-+#define MAX_PLUS_SHARED_IRQS NR_IRQ_VECTORS
-+#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
++#
++# PCI Hotplug Support
++#
++CONFIG_HOTPLUG_PCI=m
++# CONFIG_HOTPLUG_PCI_FAKE is not set
++# CONFIG_HOTPLUG_PCI_ACPI is not set
++# CONFIG_HOTPLUG_PCI_CPCI is not set
++# CONFIG_HOTPLUG_PCI_SHPC is not set
 +
-+/*
-+ * This is performance-critical, we want to do it O(1)
-+ *
-+ * the indexing order of this array favors 1:1 mappings
-+ * between pins and IRQs.
-+ */
++#
++# Executable file formats / Emulations
++#
++CONFIG_BINFMT_ELF=y
++CONFIG_BINFMT_MISC=m
++CONFIG_IA32_EMULATION=y
++CONFIG_IA32_AOUT=y
++CONFIG_COMPAT=y
++CONFIG_SYSVIPC_COMPAT=y
 +
-+static struct irq_pin_list {
-+	short apic, pin, next;
-+} irq_2_pin[PIN_MAP_SIZE];
++#
++# Networking
++#
++CONFIG_NET=y
 +
-+int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
-+#ifdef CONFIG_PCI_MSI
-+#define vector_to_irq(vector) 	\
-+	(platform_legacy_irq(vector) ? vector : vector_irq[vector])
-+#else
-+#define vector_to_irq(vector)	(vector)
-+#endif
++#
++# Networking options
++#
++# CONFIG_NETDEBUG is not set
++CONFIG_PACKET=y
++CONFIG_PACKET_MMAP=y
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++CONFIG_XFRM_USER=m
++CONFIG_NET_KEY=m
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++CONFIG_IP_ADVANCED_ROUTER=y
++CONFIG_ASK_IP_FIB_HASH=y
++# CONFIG_IP_FIB_TRIE is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_MULTIPLE_TABLES=y
++CONFIG_IP_ROUTE_FWMARK=y
++CONFIG_IP_ROUTE_MULTIPATH=y
++# CONFIG_IP_ROUTE_MULTIPATH_CACHED is not set
++CONFIG_IP_ROUTE_VERBOSE=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++CONFIG_IP_PNP_RARP=y
++CONFIG_NET_IPIP=m
++CONFIG_NET_IPGRE=m
++CONFIG_NET_IPGRE_BROADCAST=y
++CONFIG_IP_MROUTE=y
++CONFIG_IP_PIMSM_V1=y
++CONFIG_IP_PIMSM_V2=y
++# CONFIG_ARPD is not set
++CONFIG_SYN_COOKIES=y
++CONFIG_INET_AH=m
++CONFIG_INET_ESP=m
++CONFIG_INET_IPCOMP=m
++CONFIG_INET_XFRM_TUNNEL=m
++CONFIG_INET_TUNNEL=m
++CONFIG_INET_XFRM_MODE_TRANSPORT=m
++CONFIG_INET_XFRM_MODE_TUNNEL=m
++CONFIG_INET_DIAG=m
++CONFIG_INET_TCP_DIAG=m
++CONFIG_TCP_CONG_ADVANCED=y
 +
-+#ifdef CONFIG_XEN
++#
++# TCP congestion control
++#
++CONFIG_TCP_CONG_BIC=m
++CONFIG_TCP_CONG_CUBIC=m
++CONFIG_TCP_CONG_WESTWOOD=m
++CONFIG_TCP_CONG_HTCP=m
++CONFIG_TCP_CONG_HSTCP=m
++CONFIG_TCP_CONG_HYBLA=m
++CONFIG_TCP_CONG_VEGAS=m
++CONFIG_TCP_CONG_SCALABLE=m
++CONFIG_TCP_CONG_LP=m
++CONFIG_TCP_CONG_VENO=m
 +
-+#include <xen/interface/xen.h>
-+#include <xen/interface/physdev.h>
++#
++# IP: Virtual Server Configuration
++#
++CONFIG_IP_VS=m
++# CONFIG_IP_VS_DEBUG is not set
++CONFIG_IP_VS_TAB_BITS=12
 +
-+/* Fake i8259 */
-+#define make_8259A_irq(_irq)     (io_apic_irqs &= ~(1UL<<(_irq)))
-+#define disable_8259A_irq(_irq)  ((void)0)
-+#define i8259A_irq_pending(_irq) (0)
++#
++# IPVS transport protocol load balancing support
++#
++CONFIG_IP_VS_PROTO_TCP=y
++CONFIG_IP_VS_PROTO_UDP=y
++CONFIG_IP_VS_PROTO_ESP=y
++CONFIG_IP_VS_PROTO_AH=y
 +
-+unsigned long io_apic_irqs;
++#
++# IPVS scheduler
++#
++CONFIG_IP_VS_RR=m
++CONFIG_IP_VS_WRR=m
++CONFIG_IP_VS_LC=m
++CONFIG_IP_VS_WLC=m
++CONFIG_IP_VS_LBLC=m
++CONFIG_IP_VS_LBLCR=m
++CONFIG_IP_VS_DH=m
++CONFIG_IP_VS_SH=m
++CONFIG_IP_VS_SED=m
++CONFIG_IP_VS_NQ=m
 +
-+static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg)
-+{
-+	struct physdev_apic apic_op;
-+	int ret;
++#
++# IPVS application helper
++#
++CONFIG_IP_VS_FTP=m
++CONFIG_IPV6=m
++CONFIG_IPV6_PRIVACY=y
++# CONFIG_IPV6_ROUTER_PREF is not set
++CONFIG_INET6_AH=m
++CONFIG_INET6_ESP=m
++CONFIG_INET6_IPCOMP=m
++CONFIG_INET6_XFRM_TUNNEL=m
++CONFIG_INET6_TUNNEL=m
++CONFIG_INET6_XFRM_MODE_TRANSPORT=m
++CONFIG_INET6_XFRM_MODE_TUNNEL=m
++CONFIG_IPV6_TUNNEL=m
++CONFIG_NETWORK_SECMARK=y
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_BRIDGE_NETFILTER=y
 +
-+	apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
-+	apic_op.reg = reg;
-+	ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
-+	if (ret)
-+		return ret;
-+	return apic_op.value;
-+}
++#
++# Core Netfilter Configuration
++#
++CONFIG_NETFILTER_NETLINK=m
++CONFIG_NETFILTER_NETLINK_QUEUE=m
++CONFIG_NETFILTER_NETLINK_LOG=m
++CONFIG_NETFILTER_XTABLES=m
++CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
++CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
++CONFIG_NETFILTER_XT_TARGET_MARK=m
++CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
++CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
++CONFIG_NETFILTER_XT_TARGET_SECMARK=m
++CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
++CONFIG_NETFILTER_XT_MATCH_COMMENT=m
++CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
++CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
++CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
++CONFIG_NETFILTER_XT_MATCH_DCCP=m
++CONFIG_NETFILTER_XT_MATCH_ESP=m
++CONFIG_NETFILTER_XT_MATCH_HELPER=m
++CONFIG_NETFILTER_XT_MATCH_LENGTH=m
++CONFIG_NETFILTER_XT_MATCH_LIMIT=m
++CONFIG_NETFILTER_XT_MATCH_MAC=m
++CONFIG_NETFILTER_XT_MATCH_MARK=m
++CONFIG_NETFILTER_XT_MATCH_POLICY=m
++CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
++CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
++CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
++CONFIG_NETFILTER_XT_MATCH_QUOTA=m
++CONFIG_NETFILTER_XT_MATCH_REALM=m
++CONFIG_NETFILTER_XT_MATCH_SCTP=m
++CONFIG_NETFILTER_XT_MATCH_STATE=m
++CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
++CONFIG_NETFILTER_XT_MATCH_STRING=m
++CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 +
-+static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
-+{
-+	struct physdev_apic apic_op;
++#
++# IP: Netfilter Configuration
++#
++CONFIG_IP_NF_CONNTRACK=m
++CONFIG_IP_NF_CT_ACCT=y
++CONFIG_IP_NF_CONNTRACK_MARK=y
++CONFIG_IP_NF_CONNTRACK_SECMARK=y
++CONFIG_IP_NF_CONNTRACK_EVENTS=y
++CONFIG_IP_NF_CONNTRACK_NETLINK=m
++CONFIG_IP_NF_CT_PROTO_SCTP=m
++CONFIG_IP_NF_FTP=m
++CONFIG_IP_NF_IRC=m
++CONFIG_IP_NF_NETBIOS_NS=m
++CONFIG_IP_NF_TFTP=m
++CONFIG_IP_NF_AMANDA=m
++CONFIG_IP_NF_PPTP=m
++CONFIG_IP_NF_H323=m
++CONFIG_IP_NF_SIP=m
++CONFIG_IP_NF_QUEUE=m
++CONFIG_IP_NF_IPTABLES=m
++CONFIG_IP_NF_MATCH_IPRANGE=m
++CONFIG_IP_NF_MATCH_TOS=m
++CONFIG_IP_NF_MATCH_RECENT=m
++CONFIG_IP_NF_MATCH_ECN=m
++CONFIG_IP_NF_MATCH_DSCP=m
++CONFIG_IP_NF_MATCH_AH=m
++CONFIG_IP_NF_MATCH_TTL=m
++CONFIG_IP_NF_MATCH_OWNER=m
++CONFIG_IP_NF_MATCH_ADDRTYPE=m
++CONFIG_IP_NF_MATCH_HASHLIMIT=m
++CONFIG_IP_NF_FILTER=m
++CONFIG_IP_NF_TARGET_REJECT=m
++CONFIG_IP_NF_TARGET_LOG=m
++CONFIG_IP_NF_TARGET_ULOG=m
++CONFIG_IP_NF_TARGET_TCPMSS=m
++CONFIG_IP_NF_NAT=m
++CONFIG_IP_NF_NAT_NEEDED=y
++CONFIG_IP_NF_TARGET_MASQUERADE=m
++CONFIG_IP_NF_TARGET_REDIRECT=m
++CONFIG_IP_NF_TARGET_NETMAP=m
++CONFIG_IP_NF_TARGET_SAME=m
++CONFIG_IP_NF_NAT_SNMP_BASIC=m
++CONFIG_IP_NF_NAT_IRC=m
++CONFIG_IP_NF_NAT_FTP=m
++CONFIG_IP_NF_NAT_TFTP=m
++CONFIG_IP_NF_NAT_AMANDA=m
++CONFIG_IP_NF_NAT_PPTP=m
++CONFIG_IP_NF_NAT_H323=m
++CONFIG_IP_NF_NAT_SIP=m
++CONFIG_IP_NF_MANGLE=m
++CONFIG_IP_NF_TARGET_TOS=m
++CONFIG_IP_NF_TARGET_ECN=m
++CONFIG_IP_NF_TARGET_DSCP=m
++CONFIG_IP_NF_TARGET_TTL=m
++CONFIG_IP_NF_TARGET_CLUSTERIP=m
++CONFIG_IP_NF_RAW=m
++CONFIG_IP_NF_ARPTABLES=m
++CONFIG_IP_NF_ARPFILTER=m
++CONFIG_IP_NF_ARP_MANGLE=m
 +
-+	apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
-+	apic_op.reg = reg;
-+	apic_op.value = value;
-+	HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
-+}
++#
++# IPv6: Netfilter Configuration (EXPERIMENTAL)
++#
++CONFIG_IP6_NF_QUEUE=m
++CONFIG_IP6_NF_IPTABLES=m
++CONFIG_IP6_NF_MATCH_RT=m
++CONFIG_IP6_NF_MATCH_OPTS=m
++CONFIG_IP6_NF_MATCH_FRAG=m
++CONFIG_IP6_NF_MATCH_HL=m
++CONFIG_IP6_NF_MATCH_OWNER=m
++CONFIG_IP6_NF_MATCH_IPV6HEADER=m
++CONFIG_IP6_NF_MATCH_AH=m
++CONFIG_IP6_NF_MATCH_EUI64=m
++CONFIG_IP6_NF_FILTER=m
++CONFIG_IP6_NF_TARGET_LOG=m
++CONFIG_IP6_NF_TARGET_REJECT=m
++CONFIG_IP6_NF_MANGLE=m
++CONFIG_IP6_NF_TARGET_HL=m
++CONFIG_IP6_NF_RAW=m
 +
-+#define io_apic_read(a,r)    xen_io_apic_read(a,r)
-+#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v)
++#
++# DECnet: Netfilter Configuration
++#
++# CONFIG_DECNET_NF_GRABULATOR is not set
 +
-+#define clear_IO_APIC() ((void)0)
++#
++# Bridge: Netfilter Configuration
++#
++CONFIG_BRIDGE_NF_EBTABLES=m
++CONFIG_BRIDGE_EBT_BROUTE=m
++CONFIG_BRIDGE_EBT_T_FILTER=m
++CONFIG_BRIDGE_EBT_T_NAT=m
++CONFIG_BRIDGE_EBT_802_3=m
++CONFIG_BRIDGE_EBT_AMONG=m
++CONFIG_BRIDGE_EBT_ARP=m
++CONFIG_BRIDGE_EBT_IP=m
++CONFIG_BRIDGE_EBT_LIMIT=m
++CONFIG_BRIDGE_EBT_MARK=m
++CONFIG_BRIDGE_EBT_PKTTYPE=m
++CONFIG_BRIDGE_EBT_STP=m
++CONFIG_BRIDGE_EBT_VLAN=m
++CONFIG_BRIDGE_EBT_ARPREPLY=m
++CONFIG_BRIDGE_EBT_DNAT=m
++CONFIG_BRIDGE_EBT_MARK_T=m
++CONFIG_BRIDGE_EBT_REDIRECT=m
++CONFIG_BRIDGE_EBT_SNAT=m
++CONFIG_BRIDGE_EBT_LOG=m
++CONFIG_BRIDGE_EBT_ULOG=m
 +
-+#else
++#
++# DCCP Configuration (EXPERIMENTAL)
++#
++CONFIG_IP_DCCP=m
++CONFIG_INET_DCCP_DIAG=m
++CONFIG_IP_DCCP_ACKVEC=y
++
++#
++# DCCP CCIDs Configuration (EXPERIMENTAL)
++#
++CONFIG_IP_DCCP_CCID2=m
++CONFIG_IP_DCCP_CCID3=m
++CONFIG_IP_DCCP_TFRC_LIB=m
++
++#
++# DCCP Kernel Hacking
++#
++# CONFIG_IP_DCCP_DEBUG is not set
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++CONFIG_IP_SCTP=m
++# CONFIG_SCTP_DBG_MSG is not set
++# CONFIG_SCTP_DBG_OBJCNT is not set
++# CONFIG_SCTP_HMAC_NONE is not set
++# CONFIG_SCTP_HMAC_SHA1 is not set
++CONFIG_SCTP_HMAC_MD5=y
++
++#
++# TIPC Configuration (EXPERIMENTAL)
++#
++CONFIG_TIPC=m
++# CONFIG_TIPC_ADVANCED is not set
++# CONFIG_TIPC_DEBUG is not set
++CONFIG_ATM=m
++CONFIG_ATM_CLIP=m
++# CONFIG_ATM_CLIP_NO_ICMP is not set
++CONFIG_ATM_LANE=m
++# CONFIG_ATM_MPOA is not set
++CONFIG_ATM_BR2684=m
++# CONFIG_ATM_BR2684_IPFILTER is not set
++CONFIG_BRIDGE=m
++CONFIG_VLAN_8021Q=m
++CONFIG_DECNET=m
++# CONFIG_DECNET_ROUTER is not set
++CONFIG_LLC=y
++CONFIG_LLC2=m
++CONFIG_IPX=m
++CONFIG_IPX_INTERN=y
++CONFIG_ATALK=m
++CONFIG_DEV_APPLETALK=m
++CONFIG_IPDDP=m
++CONFIG_IPDDP_ENCAP=y
++CONFIG_IPDDP_DECAP=y
++CONFIG_X25=m
++CONFIG_LAPB=m
++CONFIG_ECONET=m
++# CONFIG_ECONET_AUNUDP is not set
++# CONFIG_ECONET_NATIVE is not set
++CONFIG_WAN_ROUTER=m
 +
-+#ifdef CONFIG_SMP
-+static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
-+{
-+	unsigned long flags;
-+	unsigned int dest;
-+	cpumask_t tmp;
++#
++# QoS and/or fair queueing
++#
++CONFIG_NET_SCHED=y
++CONFIG_NET_SCH_CLK_JIFFIES=y
++# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
++# CONFIG_NET_SCH_CLK_CPU is not set
 +
-+	cpus_and(tmp, mask, cpu_online_map);
-+	if (cpus_empty(tmp))
-+		tmp = TARGET_CPUS;
++#
++# Queueing/Scheduling
++#
++CONFIG_NET_SCH_CBQ=m
++CONFIG_NET_SCH_HTB=m
++CONFIG_NET_SCH_HFSC=m
++CONFIG_NET_SCH_ATM=m
++CONFIG_NET_SCH_PRIO=m
++CONFIG_NET_SCH_RED=m
++CONFIG_NET_SCH_SFQ=m
++CONFIG_NET_SCH_TEQL=m
++CONFIG_NET_SCH_TBF=m
++CONFIG_NET_SCH_GRED=m
++CONFIG_NET_SCH_DSMARK=m
++CONFIG_NET_SCH_NETEM=m
++CONFIG_NET_SCH_INGRESS=m
 +
-+	cpus_and(mask, tmp, CPU_MASK_ALL);
++#
++# Classification
++#
++CONFIG_NET_CLS=y
++CONFIG_NET_CLS_BASIC=m
++CONFIG_NET_CLS_TCINDEX=m
++CONFIG_NET_CLS_ROUTE4=m
++CONFIG_NET_CLS_ROUTE=y
++CONFIG_NET_CLS_FW=m
++CONFIG_NET_CLS_U32=m
++CONFIG_CLS_U32_PERF=y
++CONFIG_CLS_U32_MARK=y
++CONFIG_NET_CLS_RSVP=m
++CONFIG_NET_CLS_RSVP6=m
++CONFIG_NET_EMATCH=y
++CONFIG_NET_EMATCH_STACK=32
++CONFIG_NET_EMATCH_CMP=m
++CONFIG_NET_EMATCH_NBYTE=m
++CONFIG_NET_EMATCH_U32=m
++CONFIG_NET_EMATCH_META=m
++CONFIG_NET_EMATCH_TEXT=m
++# CONFIG_NET_CLS_ACT is not set
++CONFIG_NET_CLS_POLICE=y
++CONFIG_NET_CLS_IND=y
++CONFIG_NET_ESTIMATOR=y
 +
-+	dest = cpu_mask_to_apicid(mask);
++#
++# Network testing
++#
++CONFIG_NET_PKTGEN=m
++CONFIG_HAMRADIO=y
 +
-+	/*
-+	 * Only the high 8 bits are valid.
-+	 */
-+	dest = SET_APIC_LOGICAL_ID(dest);
++#
++# Packet Radio protocols
++#
++CONFIG_AX25=m
++CONFIG_AX25_DAMA_SLAVE=y
++CONFIG_NETROM=m
++CONFIG_ROSE=m
 +
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	__DO_ACTION(1, = dest, )
-+	set_irq_info(irq, mask);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+#endif
++#
++# AX.25 network device drivers
++#
++CONFIG_MKISS=m
++CONFIG_6PACK=m
++CONFIG_BPQETHER=m
++CONFIG_BAYCOM_SER_FDX=m
++CONFIG_BAYCOM_SER_HDX=m
++CONFIG_BAYCOM_PAR=m
++CONFIG_YAM=m
++CONFIG_IRDA=m
 +
-+#endif /* !CONFIG_XEN */
++#
++# IrDA protocols
++#
++CONFIG_IRLAN=m
++CONFIG_IRNET=m
++CONFIG_IRCOMM=m
++CONFIG_IRDA_ULTRA=y
 +
-+/*
-+ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
-+ * shared ISA-space IRQs, so we have to support them. We are super
-+ * fast in the common case, and fast for shared ISA-space IRQs.
-+ */
-+static void add_pin_to_irq(unsigned int irq, int apic, int pin)
-+{
-+	static int first_free_entry = NR_IRQS;
-+	struct irq_pin_list *entry = irq_2_pin + irq;
++#
++# IrDA options
++#
++CONFIG_IRDA_CACHE_LAST_LSAP=y
++CONFIG_IRDA_FAST_RR=y
++# CONFIG_IRDA_DEBUG is not set
 +
-+	BUG_ON(irq >= NR_IRQS);
-+	while (entry->next)
-+		entry = irq_2_pin + entry->next;
++#
++# Infrared-port device drivers
++#
 +
-+	if (entry->pin != -1) {
-+		entry->next = first_free_entry;
-+		entry = irq_2_pin + entry->next;
-+		if (++first_free_entry >= PIN_MAP_SIZE)
-+			panic("io_apic.c: ran out of irq_2_pin entries!");
-+	}
-+	entry->apic = apic;
-+	entry->pin = pin;
-+}
++#
++# SIR device drivers
++#
++CONFIG_IRTTY_SIR=m
 +
-+#ifndef CONFIG_XEN
-+#define __DO_ACTION(R, ACTION, FINAL)					\
-+									\
-+{									\
-+	int pin;							\
-+	struct irq_pin_list *entry = irq_2_pin + irq;			\
-+									\
-+	BUG_ON(irq >= NR_IRQS);						\
-+	for (;;) {							\
-+		unsigned int reg;					\
-+		pin = entry->pin;					\
-+		if (pin == -1)						\
-+			break;						\
-+		reg = io_apic_read(entry->apic, 0x10 + R + pin*2);	\
-+		reg ACTION;						\
-+		io_apic_modify(entry->apic, reg);			\
-+		if (!entry->next)					\
-+			break;						\
-+		entry = irq_2_pin + entry->next;			\
-+	}								\
-+	FINAL;								\
-+}
++#
++# Dongle support
++#
++CONFIG_DONGLE=y
++CONFIG_ESI_DONGLE=m
++CONFIG_ACTISYS_DONGLE=m
++CONFIG_TEKRAM_DONGLE=m
++CONFIG_TOIM3232_DONGLE=m
++CONFIG_LITELINK_DONGLE=m
++CONFIG_MA600_DONGLE=m
++CONFIG_GIRBIL_DONGLE=m
++CONFIG_MCP2120_DONGLE=m
++CONFIG_OLD_BELKIN_DONGLE=m
++CONFIG_ACT200L_DONGLE=m
 +
-+#define DO_ACTION(name,R,ACTION, FINAL)					\
-+									\
-+	static void name##_IO_APIC_irq (unsigned int irq)		\
-+	__DO_ACTION(R, ACTION, FINAL)
++#
++# Old SIR device drivers
++#
 +
-+DO_ACTION( __mask,             0, |= 0x00010000, io_apic_sync(entry->apic) )
-+						/* mask = 1 */
-+DO_ACTION( __unmask,           0, &= 0xfffeffff, )
-+						/* mask = 0 */
++#
++# Old Serial dongle support
++#
 +
-+static void mask_IO_APIC_irq (unsigned int irq)
-+{
-+	unsigned long flags;
++#
++# FIR device drivers
++#
++CONFIG_USB_IRDA=m
++CONFIG_SIGMATEL_FIR=m
++CONFIG_NSC_FIR=m
++CONFIG_WINBOND_FIR=m
++CONFIG_SMC_IRCC_FIR=m
++CONFIG_ALI_FIR=m
++CONFIG_VLSI_FIR=m
++CONFIG_VIA_FIR=m
++CONFIG_MCS_FIR=m
++CONFIG_BT=m
++CONFIG_BT_L2CAP=m
++CONFIG_BT_SCO=m
++CONFIG_BT_RFCOMM=m
++CONFIG_BT_RFCOMM_TTY=y
++CONFIG_BT_BNEP=m
++CONFIG_BT_BNEP_MC_FILTER=y
++CONFIG_BT_BNEP_PROTO_FILTER=y
++CONFIG_BT_CMTP=m
++CONFIG_BT_HIDP=m
 +
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	__mask_IO_APIC_irq(irq);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
++#
++# Bluetooth device drivers
++#
++CONFIG_BT_HCIUSB=m
++CONFIG_BT_HCIUSB_SCO=y
++CONFIG_BT_HCIUART=m
++CONFIG_BT_HCIUART_H4=y
++CONFIG_BT_HCIUART_BCSP=y
++CONFIG_BT_HCIBCM203X=m
++CONFIG_BT_HCIBPA10X=m
++CONFIG_BT_HCIBFUSB=m
++# CONFIG_BT_HCIDTL1 is not set
++# CONFIG_BT_HCIBT3C is not set
++# CONFIG_BT_HCIBLUECARD is not set
++# CONFIG_BT_HCIBTUART is not set
++CONFIG_BT_HCIVHCI=m
++CONFIG_IEEE80211=m
++# CONFIG_IEEE80211_DEBUG is not set
++CONFIG_IEEE80211_CRYPT_WEP=m
++CONFIG_IEEE80211_CRYPT_CCMP=m
++CONFIG_IEEE80211_CRYPT_TKIP=m
++CONFIG_IEEE80211_SOFTMAC=m
++# CONFIG_IEEE80211_SOFTMAC_DEBUG is not set
++CONFIG_WIRELESS_EXT=y
 +
-+static void unmask_IO_APIC_irq (unsigned int irq)
-+{
-+	unsigned long flags;
++#
++# Device Drivers
++#
 +
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	__unmask_IO_APIC_irq(irq);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=m
++# CONFIG_DEBUG_DRIVER is not set
++CONFIG_SYS_HYPERVISOR=y
 +
-+static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
-+{
-+	struct IO_APIC_route_entry entry;
-+	unsigned long flags;
++#
++# Connector - unified userspace <-> kernelspace linker
++#
++CONFIG_CONNECTOR=y
++CONFIG_PROC_EVENTS=y
 +
-+	/* Check delivery_mode to be sure we're not clearing an SMI pin */
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	*(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
-+	*(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+	if (entry.delivery_mode == dest_SMI)
-+		return;
-+	/*
-+	 * Disable it in the IO-APIC irq-routing table:
-+	 */
-+	memset(&entry, 0, sizeof(entry));
-+	entry.mask = 1;
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
-+	io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
++#
++# Memory Technology Devices (MTD)
++#
++CONFIG_MTD=m
++# CONFIG_MTD_DEBUG is not set
++CONFIG_MTD_CONCAT=m
++CONFIG_MTD_PARTITIONS=y
++CONFIG_MTD_REDBOOT_PARTS=m
++CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
++# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
++# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
++CONFIG_MTD_CMDLINE_PARTS=y
 +
-+static void clear_IO_APIC (void)
-+{
-+	int apic, pin;
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_CHAR=m
++CONFIG_MTD_BLOCK=m
++CONFIG_MTD_BLOCK_RO=m
++CONFIG_FTL=m
++CONFIG_NFTL=m
++CONFIG_NFTL_RW=y
++CONFIG_INFTL=m
++CONFIG_RFD_FTL=m
 +
-+	for (apic = 0; apic < nr_ioapics; apic++)
-+		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
-+			clear_IO_APIC_pin(apic, pin);
-+}
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=m
++CONFIG_MTD_JEDECPROBE=m
++CONFIG_MTD_GEN_PROBE=m
++CONFIG_MTD_CFI_ADV_OPTIONS=y
++CONFIG_MTD_CFI_NOSWAP=y
++# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set
++# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set
++# CONFIG_MTD_CFI_GEOMETRY is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++# CONFIG_MTD_OTP is not set
++CONFIG_MTD_CFI_INTELEXT=m
++CONFIG_MTD_CFI_AMDSTD=m
++CONFIG_MTD_CFI_STAA=m
++CONFIG_MTD_CFI_UTIL=m
++CONFIG_MTD_RAM=m
++# CONFIG_MTD_ROM is not set
++CONFIG_MTD_ABSENT=m
++# CONFIG_MTD_OBSOLETE_CHIPS is not set
 +
-+#endif /* !CONFIG_XEN */
++#
++# Mapping drivers for chip access
++#
++CONFIG_MTD_COMPLEX_MAPPINGS=y
++CONFIG_MTD_PHYSMAP=m
++CONFIG_MTD_PHYSMAP_START=0x8000000
++CONFIG_MTD_PHYSMAP_LEN=0x4000000
++CONFIG_MTD_PHYSMAP_BANKWIDTH=2
++# CONFIG_MTD_PNC2000 is not set
++CONFIG_MTD_SC520CDP=m
++CONFIG_MTD_NETSC520=m
++CONFIG_MTD_TS5500=m
++CONFIG_MTD_SBC_GXX=m
++# CONFIG_MTD_AMD76XROM is not set
++# CONFIG_MTD_ICHXROM is not set
++CONFIG_MTD_SCB2_FLASH=m
++# CONFIG_MTD_NETtel is not set
++# CONFIG_MTD_DILNETPC is not set
++# CONFIG_MTD_L440GX is not set
++CONFIG_MTD_PCI=m
++CONFIG_MTD_PLATRAM=m
 +
-+static u8 gsi_2_irq[NR_IRQ_VECTORS] = { [0 ... NR_IRQ_VECTORS-1] = 0xFF };
++#
++# Self-contained MTD device drivers
++#
++CONFIG_MTD_PMC551=m
++# CONFIG_MTD_PMC551_BUGFIX is not set
++# CONFIG_MTD_PMC551_DEBUG is not set
++CONFIG_MTD_DATAFLASH=m
++CONFIG_MTD_M25P80=m
++# CONFIG_MTD_SLRAM is not set
++# CONFIG_MTD_PHRAM is not set
++CONFIG_MTD_MTDRAM=m
++CONFIG_MTDRAM_TOTAL_SIZE=4096
++CONFIG_MTDRAM_ERASE_SIZE=128
++CONFIG_MTD_BLOCK2MTD=m
 +
-+/*
-+ * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
-+ * specific CPU-side IRQs.
-+ */
++#
++# Disk-On-Chip Device Drivers
++#
++CONFIG_MTD_DOC2000=m
++CONFIG_MTD_DOC2001=m
++CONFIG_MTD_DOC2001PLUS=m
++CONFIG_MTD_DOCPROBE=m
++CONFIG_MTD_DOCECC=m
++CONFIG_MTD_DOCPROBE_ADVANCED=y
++CONFIG_MTD_DOCPROBE_ADDRESS=0x0000
++CONFIG_MTD_DOCPROBE_HIGH=y
++CONFIG_MTD_DOCPROBE_55AA=y
 +
-+#define MAX_PIRQS 8
-+static int pirq_entries [MAX_PIRQS];
-+static int pirqs_enabled;
-+int skip_ioapic_setup;
-+int ioapic_force;
++#
++# NAND Flash Device Drivers
++#
++CONFIG_MTD_NAND=m
++# CONFIG_MTD_NAND_VERIFY_WRITE is not set
++# CONFIG_MTD_NAND_ECC_SMC is not set
++CONFIG_MTD_NAND_IDS=m
++CONFIG_MTD_NAND_DISKONCHIP=m
++# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
++CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
++CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE=y
++CONFIG_MTD_NAND_NANDSIM=m
 +
-+/* dummy parsing: see setup.c */
++#
++# OneNAND Flash Device Drivers
++#
++CONFIG_MTD_ONENAND=m
++# CONFIG_MTD_ONENAND_VERIFY_WRITE is not set
++# CONFIG_MTD_ONENAND_OTP is not set
 +
-+static int __init disable_ioapic_setup(char *str)
-+{
-+	skip_ioapic_setup = 1;
-+	return 1;
-+}
++#
++# Parallel port support
++#
++CONFIG_PARPORT=m
++CONFIG_PARPORT_PC=m
++# CONFIG_PARPORT_PC_FIFO is not set
++# CONFIG_PARPORT_PC_SUPERIO is not set
++# CONFIG_PARPORT_PC_PCMCIA is not set
++CONFIG_PARPORT_NOT_PC=y
++# CONFIG_PARPORT_GSC is not set
++CONFIG_PARPORT_AX88796=m
++CONFIG_PARPORT_1284=y
 +
-+static int __init enable_ioapic_setup(char *str)
-+{
-+	ioapic_force = 1;
-+	skip_ioapic_setup = 0;
-+	return 1;
-+}
++#
++# Plug and Play support
++#
++CONFIG_PNP=y
++CONFIG_PNP_DEBUG=y
 +
-+__setup("noapic", disable_ioapic_setup);
-+__setup("apic", enable_ioapic_setup);
++#
++# Protocols
++#
++CONFIG_PNPACPI=y
 +
-+#ifndef CONFIG_XEN
-+static int __init setup_disable_8254_timer(char *s)
-+{
-+	timer_over_8254 = -1;
-+	return 1;
-+}
-+static int __init setup_enable_8254_timer(char *s)
-+{
-+	timer_over_8254 = 2;
-+	return 1;
-+}
++#
++# Block devices
++#
++CONFIG_BLK_DEV_FD=m
++CONFIG_PARIDE=m
++CONFIG_PARIDE_PARPORT=m
 +
-+__setup("disable_8254_timer", setup_disable_8254_timer);
-+__setup("enable_8254_timer", setup_enable_8254_timer);
-+#endif /* !CONFIG_XEN */
++#
++# Parallel IDE high-level drivers
++#
++CONFIG_PARIDE_PD=m
++CONFIG_PARIDE_PCD=m
++CONFIG_PARIDE_PF=m
++CONFIG_PARIDE_PT=m
++CONFIG_PARIDE_PG=m
 +
-+#include <asm/pci-direct.h>
-+#include <linux/pci_ids.h>
-+#include <linux/pci.h>
++#
++# Parallel IDE protocol modules
++#
++CONFIG_PARIDE_ATEN=m
++CONFIG_PARIDE_BPCK=m
++CONFIG_PARIDE_COMM=m
++CONFIG_PARIDE_DSTR=m
++CONFIG_PARIDE_FIT2=m
++CONFIG_PARIDE_FIT3=m
++CONFIG_PARIDE_EPAT=m
++CONFIG_PARIDE_EPATC8=y
++CONFIG_PARIDE_EPIA=m
++CONFIG_PARIDE_FRIQ=m
++CONFIG_PARIDE_FRPW=m
++CONFIG_PARIDE_KBIC=m
++CONFIG_PARIDE_KTTI=m
++CONFIG_PARIDE_ON20=m
++CONFIG_PARIDE_ON26=m
++CONFIG_BLK_CPQ_DA=m
++CONFIG_BLK_CPQ_CISS_DA=m
++CONFIG_CISS_SCSI_TAPE=y
++CONFIG_BLK_DEV_DAC960=m
++CONFIG_BLK_DEV_UMEM=m
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_CRYPTOLOOP=m
++CONFIG_BLK_DEV_NBD=m
++CONFIG_BLK_DEV_SX8=m
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=16384
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_CDROM_PKTCDVD=m
++CONFIG_CDROM_PKTCDVD_BUFFERS=8
++# CONFIG_CDROM_PKTCDVD_WCACHE is not set
++CONFIG_ATA_OVER_ETH=m
 +
++#
++# ATA/ATAPI/MFM/RLL support
++#
++CONFIG_IDE=y
++CONFIG_BLK_DEV_IDE=y
 +
-+#ifdef CONFIG_ACPI
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++# CONFIG_BLK_DEV_HD_IDE is not set
++CONFIG_BLK_DEV_IDEDISK=m
++CONFIG_IDEDISK_MULTI_MODE=y
++# CONFIG_BLK_DEV_IDECS is not set
++CONFIG_BLK_DEV_IDECD=m
++CONFIG_BLK_DEV_IDETAPE=m
++CONFIG_BLK_DEV_IDEFLOPPY=m
++CONFIG_BLK_DEV_IDESCSI=m
++# CONFIG_IDE_TASK_IOCTL is not set
 +
-+static int nvidia_hpet_detected __initdata;
++#
++# IDE chipset support/bugfixes
++#
++CONFIG_IDE_GENERIC=m
++CONFIG_BLK_DEV_CMD640=y
++CONFIG_BLK_DEV_CMD640_ENHANCED=y
++CONFIG_BLK_DEV_IDEPNP=y
++CONFIG_BLK_DEV_IDEPCI=y
++CONFIG_IDEPCI_SHARE_IRQ=y
++CONFIG_BLK_DEV_OFFBOARD=y
++CONFIG_BLK_DEV_GENERIC=y
++# CONFIG_BLK_DEV_OPTI621 is not set
++CONFIG_BLK_DEV_RZ1000=m
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
++CONFIG_IDEDMA_PCI_AUTO=y
++# CONFIG_IDEDMA_ONLYDISK is not set
++CONFIG_BLK_DEV_AEC62XX=m
++CONFIG_BLK_DEV_ALI15X3=m
++# CONFIG_WDC_ALI15X3 is not set
++CONFIG_BLK_DEV_AMD74XX=m
++CONFIG_BLK_DEV_ATIIXP=m
++CONFIG_BLK_DEV_CMD64X=m
++CONFIG_BLK_DEV_TRIFLEX=m
++CONFIG_BLK_DEV_CY82C693=m
++CONFIG_BLK_DEV_CS5520=m
++CONFIG_BLK_DEV_CS5530=m
++CONFIG_BLK_DEV_HPT34X=m
++CONFIG_HPT34X_AUTODMA=y
++CONFIG_BLK_DEV_HPT366=m
++CONFIG_BLK_DEV_SC1200=m
++CONFIG_BLK_DEV_PIIX=m
++CONFIG_BLK_DEV_IT821X=m
++CONFIG_BLK_DEV_NS87415=m
++CONFIG_BLK_DEV_PDC202XX_OLD=m
++CONFIG_PDC202XX_BURST=y
++CONFIG_BLK_DEV_PDC202XX_NEW=m
++CONFIG_BLK_DEV_SVWKS=m
++CONFIG_BLK_DEV_SIIMAGE=m
++CONFIG_BLK_DEV_SIS5513=m
++CONFIG_BLK_DEV_SLC90E66=m
++# CONFIG_BLK_DEV_TRM290 is not set
++CONFIG_BLK_DEV_VIA82CXXX=m
++# CONFIG_IDE_ARM is not set
++CONFIG_BLK_DEV_IDEDMA=y
++# CONFIG_IDEDMA_IVB is not set
++CONFIG_IDEDMA_AUTO=y
++# CONFIG_BLK_DEV_HD is not set
 +
-+static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
-+{
-+	nvidia_hpet_detected = 1;
-+	return 0;
-+}
-+#endif
++#
++# SCSI device support
++#
++CONFIG_RAID_ATTRS=m
++CONFIG_SCSI=m
++CONFIG_SCSI_PROC_FS=y
 +
-+/* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC
-+   off. Check for an Nvidia or VIA PCI bridge and turn it off.
-+   Use pci direct infrastructure because this runs before the PCI subsystem. 
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=m
++CONFIG_CHR_DEV_ST=m
++CONFIG_CHR_DEV_OSST=m
++CONFIG_BLK_DEV_SR=m
++CONFIG_BLK_DEV_SR_VENDOR=y
++CONFIG_CHR_DEV_SG=m
++CONFIG_CHR_DEV_SCH=m
 +
-+   Can be overwritten with "apic"
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
 +
-+   And another hack to disable the IOMMU on VIA chipsets.
++#
++# SCSI Transport Attributes
++#
++CONFIG_SCSI_SPI_ATTRS=m
++CONFIG_SCSI_FC_ATTRS=m
++CONFIG_SCSI_ISCSI_ATTRS=m
++CONFIG_SCSI_SAS_ATTRS=m
 +
-+   ... and others. Really should move this somewhere else.
++#
++# SCSI low-level drivers
++#
++CONFIG_ISCSI_TCP=m
++CONFIG_BLK_DEV_3W_XXXX_RAID=m
++CONFIG_SCSI_3W_9XXX=m
++CONFIG_SCSI_ACARD=m
++CONFIG_SCSI_AACRAID=m
++CONFIG_SCSI_AIC7XXX=m
++CONFIG_AIC7XXX_CMDS_PER_DEVICE=4
++CONFIG_AIC7XXX_RESET_DELAY_MS=15000
++# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
++CONFIG_AIC7XXX_DEBUG_MASK=0
++# CONFIG_AIC7XXX_REG_PRETTY_PRINT is not set
++CONFIG_SCSI_AIC7XXX_OLD=m
++CONFIG_SCSI_AIC79XX=m
++CONFIG_AIC79XX_CMDS_PER_DEVICE=4
++CONFIG_AIC79XX_RESET_DELAY_MS=15000
++# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
++# CONFIG_AIC79XX_DEBUG_ENABLE is not set
++CONFIG_AIC79XX_DEBUG_MASK=0
++# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
++CONFIG_MEGARAID_NEWGEN=y
++CONFIG_MEGARAID_MM=m
++CONFIG_MEGARAID_MAILBOX=m
++CONFIG_MEGARAID_LEGACY=m
++CONFIG_MEGARAID_SAS=m
++CONFIG_SCSI_SATA=m
++CONFIG_SCSI_SATA_AHCI=m
++CONFIG_SCSI_SATA_SVW=m
++CONFIG_SCSI_ATA_PIIX=m
++CONFIG_SCSI_SATA_MV=m
++CONFIG_SCSI_SATA_NV=m
++CONFIG_SCSI_PDC_ADMA=m
++CONFIG_SCSI_HPTIOP=m
++CONFIG_SCSI_SATA_QSTOR=m
++CONFIG_SCSI_SATA_PROMISE=m
++CONFIG_SCSI_SATA_SX4=m
++CONFIG_SCSI_SATA_SIL=m
++CONFIG_SCSI_SATA_SIL24=m
++CONFIG_SCSI_SATA_SIS=m
++CONFIG_SCSI_SATA_ULI=m
++CONFIG_SCSI_SATA_VIA=m
++CONFIG_SCSI_SATA_VITESSE=m
++CONFIG_SCSI_SATA_INTEL_COMBINED=y
++CONFIG_SCSI_BUSLOGIC=m
++# CONFIG_SCSI_OMIT_FLASHPOINT is not set
++CONFIG_SCSI_DMX3191D=m
++CONFIG_SCSI_EATA=m
++CONFIG_SCSI_EATA_TAGGED_QUEUE=y
++CONFIG_SCSI_EATA_LINKED_COMMANDS=y
++CONFIG_SCSI_EATA_MAX_TAGS=16
++CONFIG_SCSI_FUTURE_DOMAIN=m
++CONFIG_SCSI_GDTH=m
++CONFIG_SCSI_IPS=m
++CONFIG_SCSI_INITIO=m
++CONFIG_SCSI_INIA100=m
++CONFIG_SCSI_PPA=m
++CONFIG_SCSI_IMM=m
++# CONFIG_SCSI_IZIP_EPP16 is not set
++# CONFIG_SCSI_IZIP_SLOW_CTR is not set
++CONFIG_SCSI_SYM53C8XX_2=m
++CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
++CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
++CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
++CONFIG_SCSI_SYM53C8XX_MMIO=y
++# CONFIG_SCSI_IPR is not set
++CONFIG_SCSI_QLOGIC_1280=m
++CONFIG_SCSI_QLA_FC=m
++CONFIG_SCSI_LPFC=m
++CONFIG_SCSI_DC395x=m
++CONFIG_SCSI_DC390T=m
++# CONFIG_SCSI_DEBUG is not set
 +
-+   Kludge-O-Rama. */
-+void __init check_ioapic(void) 
-+{ 
-+	int num,slot,func; 
-+	/* Poor man's PCI discovery */
-+	for (num = 0; num < 32; num++) { 
-+		for (slot = 0; slot < 32; slot++) { 
-+			for (func = 0; func < 8; func++) { 
-+				u32 class;
-+				u32 vendor;
-+				u8 type;
-+				class = read_pci_config(num,slot,func,
-+							PCI_CLASS_REVISION);
-+				if (class == 0xffffffff)
-+					break; 
++#
++# PCMCIA SCSI adapter support
++#
++# CONFIG_PCMCIA_FDOMAIN is not set
++# CONFIG_PCMCIA_QLOGIC is not set
++# CONFIG_PCMCIA_SYM53C500 is not set
 +
-+		       		if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
-+					continue; 
++#
++# Multi-device support (RAID and LVM)
++#
++CONFIG_MD=y
++CONFIG_BLK_DEV_MD=y
++CONFIG_MD_LINEAR=m
++CONFIG_MD_RAID0=m
++CONFIG_MD_RAID1=m
++CONFIG_MD_RAID10=m
++CONFIG_MD_RAID456=m
++CONFIG_MD_RAID5_RESHAPE=y
++CONFIG_MD_MULTIPATH=m
++CONFIG_MD_FAULTY=m
++CONFIG_BLK_DEV_DM=m
++CONFIG_DM_CRYPT=m
++CONFIG_DM_SNAPSHOT=m
++CONFIG_DM_MIRROR=m
++CONFIG_DM_ZERO=m
++CONFIG_DM_MULTIPATH=m
++CONFIG_DM_MULTIPATH_EMC=m
 +
-+				vendor = read_pci_config(num, slot, func, 
-+							 PCI_VENDOR_ID);
-+				vendor &= 0xffff;
-+				switch (vendor) { 
-+				case PCI_VENDOR_ID_VIA:
-+#ifdef CONFIG_IOMMU
-+					if ((end_pfn > MAX_DMA32_PFN ||
-+					     force_iommu) &&
-+					    !iommu_aperture_allowed) {
-+						printk(KERN_INFO
-+    "Looks like a VIA chipset. Disabling IOMMU. Override with \"iommu=allowed\"\n");
-+						iommu_aperture_disabled = 1;
-+					}
-+#endif
-+					return;
-+				case PCI_VENDOR_ID_NVIDIA:
-+#ifdef CONFIG_ACPI
-+					/*
-+					 * All timer overrides on Nvidia are
-+					 * wrong unless HPET is enabled.
-+					 */
-+					nvidia_hpet_detected = 0;
-+					acpi_table_parse(ACPI_HPET,
-+							nvidia_hpet_check);
-+					if (nvidia_hpet_detected == 0) {
-+						acpi_skip_timer_override = 1;
-+						printk(KERN_INFO "Nvidia board "
-+						    "detected. Ignoring ACPI "
-+						    "timer override.\n");
-+					}
-+#endif
-+					/* RED-PEN skip them on mptables too? */
-+					return;
-+				case PCI_VENDOR_ID_ATI:
++#
++# Fusion MPT device support
++#
++CONFIG_FUSION=y
++CONFIG_FUSION_SPI=m
++CONFIG_FUSION_FC=m
++CONFIG_FUSION_SAS=m
++CONFIG_FUSION_MAX_SGE=128
++CONFIG_FUSION_CTL=m
++CONFIG_FUSION_LAN=m
 +
-+				/* This should be actually default, but
-+				   for 2.6.16 let's do it for ATI only where
-+				   it's really needed. */
-+#ifndef CONFIG_XEN
-+					if (timer_over_8254 == 1) {	
-+						timer_over_8254 = 0;	
-+					printk(KERN_INFO
-+		"ATI board detected. Disabling timer routing over 8254.\n");
-+					}	
-+#endif
-+					return;
-+				} 
++#
++# IEEE 1394 (FireWire) support
++#
++CONFIG_IEEE1394=m
 +
++#
++# Subsystem Options
++#
++# CONFIG_IEEE1394_VERBOSEDEBUG is not set
++CONFIG_IEEE1394_OUI_DB=y
++CONFIG_IEEE1394_EXTRA_CONFIG_ROMS=y
++CONFIG_IEEE1394_CONFIG_ROM_IP1394=y
++# CONFIG_IEEE1394_EXPORT_FULL_API is not set
 +
-+				/* No multi-function device? */
-+				type = read_pci_config_byte(num,slot,func,
-+							    PCI_HEADER_TYPE);
-+				if (!(type & 0x80))
-+					break;
-+			} 
-+		}
-+	}
-+} 
++#
++# Device Drivers
++#
++CONFIG_IEEE1394_PCILYNX=m
++CONFIG_IEEE1394_OHCI1394=m
 +
-+static int __init ioapic_pirq_setup(char *str)
-+{
-+	int i, max;
-+	int ints[MAX_PIRQS+1];
++#
++# Protocol Drivers
++#
++CONFIG_IEEE1394_VIDEO1394=m
++CONFIG_IEEE1394_SBP2=m
++CONFIG_IEEE1394_ETH1394=m
++CONFIG_IEEE1394_DV1394=m
++CONFIG_IEEE1394_RAWIO=m
 +
-+	get_options(str, ARRAY_SIZE(ints), ints);
++#
++# I2O device support
++#
++CONFIG_I2O=m
++CONFIG_I2O_LCT_NOTIFY_ON_CHANGES=y
++CONFIG_I2O_EXT_ADAPTEC=y
++CONFIG_I2O_EXT_ADAPTEC_DMA64=y
++CONFIG_I2O_CONFIG=m
++CONFIG_I2O_CONFIG_OLD_IOCTL=y
++CONFIG_I2O_BUS=m
++CONFIG_I2O_BLOCK=m
++CONFIG_I2O_SCSI=m
++CONFIG_I2O_PROC=m
 +
-+	for (i = 0; i < MAX_PIRQS; i++)
-+		pirq_entries[i] = -1;
++#
++# Network device support
++#
++CONFIG_NETDEVICES=y
++CONFIG_DUMMY=m
++CONFIG_BONDING=m
++CONFIG_EQUALIZER=m
++CONFIG_TUN=m
++CONFIG_NET_SB1000=m
 +
-+	pirqs_enabled = 1;
-+	apic_printk(APIC_VERBOSE, "PIRQ redirection, working around broken MP-BIOS.\n");
-+	max = MAX_PIRQS;
-+	if (ints[0] < MAX_PIRQS)
-+		max = ints[0];
++#
++# ARCnet devices
++#
++CONFIG_ARCNET=m
++CONFIG_ARCNET_1201=m
++CONFIG_ARCNET_1051=m
++CONFIG_ARCNET_RAW=m
++CONFIG_ARCNET_CAP=m
++CONFIG_ARCNET_COM90xx=m
++CONFIG_ARCNET_COM90xxIO=m
++CONFIG_ARCNET_RIM_I=m
++# CONFIG_ARCNET_COM20020 is not set
 +
-+	for (i = 0; i < max; i++) {
-+		apic_printk(APIC_VERBOSE, "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
-+		/*
-+		 * PIRQs are mapped upside down, usually.
-+		 */
-+		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
-+	}
-+	return 1;
-+}
++#
++# PHY device support
++#
++CONFIG_PHYLIB=m
 +
-+__setup("pirq=", ioapic_pirq_setup);
++#
++# MII PHY device drivers
++#
++CONFIG_MARVELL_PHY=m
++CONFIG_DAVICOM_PHY=m
++CONFIG_QSEMI_PHY=m
++CONFIG_LXT_PHY=m
++CONFIG_CICADA_PHY=m
++CONFIG_VITESSE_PHY=m
++CONFIG_SMSC_PHY=m
++CONFIG_FIXED_PHY=m
++# CONFIG_FIXED_MII_10_FDX is not set
++# CONFIG_FIXED_MII_100_FDX is not set
 +
-+/*
-+ * Find the IRQ entry number of a certain pin.
-+ */
-+static int find_irq_entry(int apic, int pin, int type)
-+{
-+	int i;
++#
++# Ethernet (10 or 100Mbit)
++#
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=m
++CONFIG_HAPPYMEAL=m
++CONFIG_SUNGEM=m
++CONFIG_CASSINI=m
++CONFIG_NET_VENDOR_3COM=y
++CONFIG_VORTEX=m
++CONFIG_TYPHOON=m
 +
-+	for (i = 0; i < mp_irq_entries; i++)
-+		if (mp_irqs[i].mpc_irqtype == type &&
-+		    (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
-+		     mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
-+		    mp_irqs[i].mpc_dstirq == pin)
-+			return i;
++#
++# Tulip family network device support
++#
++CONFIG_NET_TULIP=y
++CONFIG_DE2104X=m
++CONFIG_TULIP=m
++# CONFIG_TULIP_MWI is not set
++# CONFIG_TULIP_MMIO is not set
++CONFIG_TULIP_NAPI=y
++CONFIG_TULIP_NAPI_HW_MITIGATION=y
++CONFIG_DE4X5=m
++CONFIG_WINBOND_840=m
++CONFIG_DM9102=m
++CONFIG_ULI526X=m
++CONFIG_PCMCIA_XIRCOM=m
++CONFIG_HP100=m
++CONFIG_NET_PCI=y
++CONFIG_PCNET32=m
++CONFIG_AMD8111_ETH=m
++CONFIG_AMD8111E_NAPI=y
++CONFIG_ADAPTEC_STARFIRE=m
++CONFIG_ADAPTEC_STARFIRE_NAPI=y
++CONFIG_B44=m
++CONFIG_FORCEDETH=m
++CONFIG_DGRS=m
++CONFIG_EEPRO100=m
++CONFIG_E100=m
++CONFIG_FEALNX=m
++CONFIG_NATSEMI=m
++CONFIG_NE2K_PCI=m
++CONFIG_8139CP=m
++CONFIG_8139TOO=m
++# CONFIG_8139TOO_PIO is not set
++# CONFIG_8139TOO_TUNE_TWISTER is not set
++CONFIG_8139TOO_8129=y
++# CONFIG_8139_OLD_RX_RESET is not set
++CONFIG_SIS900=m
++CONFIG_EPIC100=m
++CONFIG_SUNDANCE=m
++# CONFIG_SUNDANCE_MMIO is not set
++CONFIG_VIA_RHINE=m
++# CONFIG_VIA_RHINE_MMIO is not set
++# CONFIG_VIA_RHINE_NAPI is not set
++# CONFIG_NET_POCKET is not set
 +
-+	return -1;
-+}
++#
++# Ethernet (1000 Mbit)
++#
++CONFIG_ACENIC=m
++# CONFIG_ACENIC_OMIT_TIGON_I is not set
++CONFIG_DL2K=m
++CONFIG_E1000=m
++CONFIG_E1000_NAPI=y
++# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
++CONFIG_NS83820=m
++CONFIG_HAMACHI=m
++CONFIG_YELLOWFIN=m
++CONFIG_R8169=m
++CONFIG_R8169_NAPI=y
++CONFIG_R8169_VLAN=y
++CONFIG_SIS190=m
++CONFIG_SKGE=m
++CONFIG_SKY2=m
++CONFIG_SK98LIN=m
++CONFIG_VIA_VELOCITY=m
++CONFIG_TIGON3=m
++CONFIG_BNX2=m
 +
-+#ifndef CONFIG_XEN
-+/*
-+ * Find the pin to which IRQ[irq] (ISA) is connected
-+ */
-+static int __init find_isa_irq_pin(int irq, int type)
-+{
-+	int i;
++#
++# Ethernet (10000 Mbit)
++#
++CONFIG_CHELSIO_T1=m
++CONFIG_IXGB=m
++CONFIG_IXGB_NAPI=y
++CONFIG_S2IO=m
++CONFIG_S2IO_NAPI=y
++CONFIG_MYRI10GE=m
++# CONFIG_SFC is not set
 +
-+	for (i = 0; i < mp_irq_entries; i++) {
-+		int lbus = mp_irqs[i].mpc_srcbus;
++#
++# Token Ring devices
++#
++CONFIG_TR=y
++CONFIG_IBMOL=m
++CONFIG_3C359=m
++CONFIG_TMS380TR=m
++CONFIG_TMSPCI=m
++CONFIG_ABYSS=m
 +
-+		if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
-+		     mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
-+		     mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
-+		    (mp_irqs[i].mpc_irqtype == type) &&
-+		    (mp_irqs[i].mpc_srcbusirq == irq))
++#
++# Wireless LAN (non-hamradio)
++#
++CONFIG_NET_RADIO=y
++CONFIG_NET_WIRELESS_RTNETLINK=y
 +
-+			return mp_irqs[i].mpc_dstirq;
-+	}
-+	return -1;
-+}
++#
++# Obsolete Wireless cards support (pre-802.11)
++#
++CONFIG_STRIP=m
++CONFIG_PCMCIA_WAVELAN=m
++CONFIG_PCMCIA_NETWAVE=m
 +
-+static int __init find_isa_irq_apic(int irq, int type)
-+{
-+	int i;
++#
++# Wireless 802.11 Frequency Hopping cards support
++#
++CONFIG_PCMCIA_RAYCS=m
 +
-+	for (i = 0; i < mp_irq_entries; i++) {
-+		int lbus = mp_irqs[i].mpc_srcbus;
++#
++# Wireless 802.11b ISA/PCI cards support
++#
++CONFIG_IPW2100=m
++CONFIG_IPW2100_MONITOR=y
++# CONFIG_IPW2100_DEBUG is not set
++CONFIG_IPW2200=m
++CONFIG_IPW2200_MONITOR=y
++# CONFIG_IPW2200_RADIOTAP is not set
++# CONFIG_IPW2200_PROMISCUOUS is not set
++CONFIG_IPW2200_QOS=y
++# CONFIG_IPW2200_DEBUG is not set
++CONFIG_AIRO=m
++CONFIG_HERMES=m
++CONFIG_PLX_HERMES=m
++CONFIG_TMD_HERMES=m
++CONFIG_NORTEL_HERMES=m
++CONFIG_PCI_HERMES=m
++CONFIG_ATMEL=m
++CONFIG_PCI_ATMEL=m
 +
-+		if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
-+		     mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
-+		     mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
-+		    (mp_irqs[i].mpc_irqtype == type) &&
-+		    (mp_irqs[i].mpc_srcbusirq == irq))
-+			break;
-+	}
-+	if (i < mp_irq_entries) {
-+		int apic;
-+		for(apic = 0; apic < nr_ioapics; apic++) {
-+			if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
-+				return apic;
-+		}
-+	}
++#
++# Wireless 802.11b Pcmcia/Cardbus cards support
++#
++# CONFIG_PCMCIA_HERMES is not set
++# CONFIG_PCMCIA_SPECTRUM is not set
++# CONFIG_AIRO_CS is not set
++# CONFIG_PCMCIA_ATMEL is not set
++# CONFIG_PCMCIA_WL3501 is not set
 +
-+	return -1;
-+}
-+#endif
++#
++# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
++#
++CONFIG_PRISM54=m
++CONFIG_USB_ZD1201=m
++CONFIG_HOSTAP=m
++CONFIG_HOSTAP_FIRMWARE=y
++CONFIG_HOSTAP_FIRMWARE_NVRAM=y
++CONFIG_HOSTAP_PLX=m
++CONFIG_HOSTAP_PCI=m
++# CONFIG_HOSTAP_CS is not set
++CONFIG_BCM43XX=m
++CONFIG_BCM43XX_DEBUG=y
++CONFIG_BCM43XX_DMA=y
++CONFIG_BCM43XX_PIO=y
++CONFIG_BCM43XX_DMA_AND_PIO_MODE=y
++# CONFIG_BCM43XX_DMA_MODE is not set
++# CONFIG_BCM43XX_PIO_MODE is not set
++CONFIG_ZD1211RW=m
++# CONFIG_ZD1211RW_DEBUG is not set
++CONFIG_NET_WIRELESS=y
 +
-+/*
-+ * Find a specific PCI IRQ entry.
-+ * Not an __init, possibly needed by modules
-+ */
-+static int pin_2_irq(int idx, int apic, int pin);
++#
++# PCMCIA network device support
++#
++# CONFIG_NET_PCMCIA is not set
 +
-+int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
-+{
-+	int apic, i, best_guess = -1;
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
 +
-+	apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
-+		bus, slot, pin);
-+	if (mp_bus_id_to_pci_bus[bus] == -1) {
-+		apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
-+		return -1;
-+	}
-+	for (i = 0; i < mp_irq_entries; i++) {
-+		int lbus = mp_irqs[i].mpc_srcbus;
++#
++# ATM drivers
++#
++CONFIG_ATM_DUMMY=m
++CONFIG_ATM_TCP=m
++CONFIG_ATM_LANAI=m
++CONFIG_ATM_ENI=m
++# CONFIG_ATM_ENI_DEBUG is not set
++# CONFIG_ATM_ENI_TUNE_BURST is not set
++CONFIG_ATM_FIRESTREAM=m
++# CONFIG_ATM_ZATM is not set
++CONFIG_ATM_IDT77252=m
++# CONFIG_ATM_IDT77252_DEBUG is not set
++# CONFIG_ATM_IDT77252_RCV_ALL is not set
++CONFIG_ATM_IDT77252_USE_SUNI=y
++CONFIG_ATM_AMBASSADOR=m
++# CONFIG_ATM_AMBASSADOR_DEBUG is not set
++CONFIG_ATM_HORIZON=m
++# CONFIG_ATM_HORIZON_DEBUG is not set
++CONFIG_ATM_FORE200E_MAYBE=m
++# CONFIG_ATM_FORE200E_PCA is not set
++CONFIG_ATM_HE=m
++# CONFIG_ATM_HE_USE_SUNI is not set
++CONFIG_FDDI=y
++# CONFIG_DEFXX is not set
++CONFIG_SKFP=m
++CONFIG_HIPPI=y
++CONFIG_ROADRUNNER=m
++CONFIG_ROADRUNNER_LARGE_RINGS=y
++CONFIG_PLIP=m
++CONFIG_PPP=m
++CONFIG_PPP_MULTILINK=y
++CONFIG_PPP_FILTER=y
++CONFIG_PPP_ASYNC=m
++CONFIG_PPP_SYNC_TTY=m
++CONFIG_PPP_DEFLATE=m
++CONFIG_PPP_BSDCOMP=m
++CONFIG_PPP_MPPE=m
++CONFIG_PPPOE=m
++CONFIG_PPPOATM=m
++CONFIG_SLIP=m
++CONFIG_SLIP_COMPRESSED=y
++CONFIG_SLIP_SMART=y
++CONFIG_SLIP_MODE_SLIP6=y
++CONFIG_NET_FC=y
++CONFIG_SHAPER=m
++CONFIG_NETCONSOLE=m
++CONFIG_NETPOLL=y
++CONFIG_NETPOLL_RX=y
++CONFIG_NETPOLL_TRAP=y
++CONFIG_NET_POLL_CONTROLLER=y
 +
-+		for (apic = 0; apic < nr_ioapics; apic++)
-+			if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
-+			    mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
-+				break;
++#
++# ISDN subsystem
++#
++CONFIG_ISDN=m
 +
-+		if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
-+		    !mp_irqs[i].mpc_irqtype &&
-+		    (bus == lbus) &&
-+		    (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
-+			int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
++#
++# Old ISDN4Linux
++#
++CONFIG_ISDN_I4L=m
++CONFIG_ISDN_PPP=y
++CONFIG_ISDN_PPP_VJ=y
++CONFIG_ISDN_MPP=y
++CONFIG_IPPP_FILTER=y
++CONFIG_ISDN_PPP_BSDCOMP=m
++CONFIG_ISDN_AUDIO=y
++CONFIG_ISDN_TTY_FAX=y
++CONFIG_ISDN_X25=y
 +
-+			if (!(apic || IO_APIC_IRQ(irq)))
-+				continue;
++#
++# ISDN feature submodules
++#
++CONFIG_ISDN_DIVERSION=m
 +
-+			if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
-+				return irq;
-+			/*
-+			 * Use the first all-but-pin matching entry as a
-+			 * best-guess fuzzy result for broken mptables.
-+			 */
-+			if (best_guess < 0)
-+				best_guess = irq;
-+		}
-+	}
-+	BUG_ON(best_guess >= NR_IRQS);
-+	return best_guess;
-+}
++#
++# ISDN4Linux hardware drivers
++#
 +
-+/*
-+ * EISA Edge/Level control register, ELCR
-+ */
-+static int EISA_ELCR(unsigned int irq)
-+{
-+	if (irq < 16) {
-+		unsigned int port = 0x4d0 + (irq >> 3);
-+		return (inb(port) >> (irq & 7)) & 1;
-+	}
-+	apic_printk(APIC_VERBOSE, "Broken MPtable reports ISA irq %d\n", irq);
-+	return 0;
-+}
++#
++# Passive cards
++#
++CONFIG_ISDN_DRV_HISAX=m
 +
-+/* EISA interrupts are always polarity zero and can be edge or level
-+ * trigger depending on the ELCR value.  If an interrupt is listed as
-+ * EISA conforming in the MP table, that means its trigger type must
-+ * be read in from the ELCR */
++#
++# D-channel protocol features
++#
++CONFIG_HISAX_EURO=y
++CONFIG_DE_AOC=y
++CONFIG_HISAX_NO_SENDCOMPLETE=y
++CONFIG_HISAX_NO_LLC=y
++CONFIG_HISAX_NO_KEYPAD=y
++CONFIG_HISAX_1TR6=y
++CONFIG_HISAX_NI1=y
++CONFIG_HISAX_MAX_CARDS=8
 +
-+#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
-+#define default_EISA_polarity(idx)	(0)
++#
++# HiSax supported cards
++#
++CONFIG_HISAX_16_3=y
++CONFIG_HISAX_TELESPCI=y
++CONFIG_HISAX_S0BOX=y
++CONFIG_HISAX_FRITZPCI=y
++CONFIG_HISAX_AVM_A1_PCMCIA=y
++CONFIG_HISAX_ELSA=y
++CONFIG_HISAX_DIEHLDIVA=y
++CONFIG_HISAX_SEDLBAUER=y
++CONFIG_HISAX_NETJET=y
++CONFIG_HISAX_NETJET_U=y
++CONFIG_HISAX_NICCY=y
++CONFIG_HISAX_BKM_A4T=y
++CONFIG_HISAX_SCT_QUADRO=y
++CONFIG_HISAX_GAZEL=y
++CONFIG_HISAX_HFC_PCI=y
++CONFIG_HISAX_W6692=y
++CONFIG_HISAX_HFC_SX=y
++CONFIG_HISAX_ENTERNOW_PCI=y
++# CONFIG_HISAX_DEBUG is not set
 +
-+/* ISA interrupts are always polarity zero edge triggered,
-+ * when listed as conforming in the MP table. */
++#
++# HiSax PCMCIA card service modules
++#
++# CONFIG_HISAX_SEDLBAUER_CS is not set
++# CONFIG_HISAX_ELSA_CS is not set
++# CONFIG_HISAX_AVM_A1_CS is not set
++# CONFIG_HISAX_TELES_CS is not set
 +
-+#define default_ISA_trigger(idx)	(0)
-+#define default_ISA_polarity(idx)	(0)
++#
++# HiSax sub driver modules
++#
++CONFIG_HISAX_ST5481=m
++CONFIG_HISAX_HFCUSB=m
++CONFIG_HISAX_HFC4S8S=m
++CONFIG_HISAX_FRITZ_PCIPNP=m
++CONFIG_HISAX_HDLC=y
 +
-+/* PCI interrupts are always polarity one level triggered,
-+ * when listed as conforming in the MP table. */
++#
++# Active cards
++#
 +
-+#define default_PCI_trigger(idx)	(1)
-+#define default_PCI_polarity(idx)	(1)
++#
++# Siemens Gigaset
++#
++CONFIG_ISDN_DRV_GIGASET=m
++CONFIG_GIGASET_BASE=m
++CONFIG_GIGASET_M105=m
++# CONFIG_GIGASET_DEBUG is not set
++# CONFIG_GIGASET_UNDOCREQ is not set
 +
-+/* MCA interrupts are always polarity zero level triggered,
-+ * when listed as conforming in the MP table. */
++#
++# CAPI subsystem
++#
++CONFIG_ISDN_CAPI=m
++CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y
++CONFIG_ISDN_CAPI_MIDDLEWARE=y
++CONFIG_ISDN_CAPI_CAPI20=m
++CONFIG_ISDN_CAPI_CAPIFS_BOOL=y
++CONFIG_ISDN_CAPI_CAPIFS=m
++CONFIG_ISDN_CAPI_CAPIDRV=m
 +
-+#define default_MCA_trigger(idx)	(1)
-+#define default_MCA_polarity(idx)	(0)
++#
++# CAPI hardware drivers
++#
 +
-+static int __init MPBIOS_polarity(int idx)
-+{
-+	int bus = mp_irqs[idx].mpc_srcbus;
-+	int polarity;
++#
++# Active AVM cards
++#
++CONFIG_CAPI_AVM=y
++CONFIG_ISDN_DRV_AVMB1_B1PCI=m
++CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
++CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
++# CONFIG_ISDN_DRV_AVMB1_AVM_CS is not set
++CONFIG_ISDN_DRV_AVMB1_T1PCI=m
++CONFIG_ISDN_DRV_AVMB1_C4=m
 +
-+	/*
-+	 * Determine IRQ line polarity (high active or low active):
-+	 */
-+	switch (mp_irqs[idx].mpc_irqflag & 3)
-+	{
-+		case 0: /* conforms, ie. bus-type dependent polarity */
-+		{
-+			switch (mp_bus_id_to_type[bus])
-+			{
-+				case MP_BUS_ISA: /* ISA pin */
-+				{
-+					polarity = default_ISA_polarity(idx);
-+					break;
-+				}
-+				case MP_BUS_EISA: /* EISA pin */
-+				{
-+					polarity = default_EISA_polarity(idx);
-+					break;
-+				}
-+				case MP_BUS_PCI: /* PCI pin */
-+				{
-+					polarity = default_PCI_polarity(idx);
-+					break;
-+				}
-+				case MP_BUS_MCA: /* MCA pin */
-+				{
-+					polarity = default_MCA_polarity(idx);
-+					break;
-+				}
-+				default:
-+				{
-+					printk(KERN_WARNING "broken BIOS!!\n");
-+					polarity = 1;
-+					break;
-+				}
-+			}
-+			break;
-+		}
-+		case 1: /* high active */
-+		{
-+			polarity = 0;
-+			break;
-+		}
-+		case 2: /* reserved */
-+		{
-+			printk(KERN_WARNING "broken BIOS!!\n");
-+			polarity = 1;
-+			break;
-+		}
-+		case 3: /* low active */
-+		{
-+			polarity = 1;
-+			break;
-+		}
-+		default: /* invalid */
-+		{
-+			printk(KERN_WARNING "broken BIOS!!\n");
-+			polarity = 1;
-+			break;
-+		}
-+	}
-+	return polarity;
-+}
++#
++# Active Eicon DIVA Server cards
++#
++# CONFIG_CAPI_EICON is not set
 +
-+static int MPBIOS_trigger(int idx)
-+{
-+	int bus = mp_irqs[idx].mpc_srcbus;
-+	int trigger;
++#
++# Telephony Support
++#
++CONFIG_PHONE=m
++CONFIG_PHONE_IXJ=m
++CONFIG_PHONE_IXJ_PCMCIA=m
 +
-+	/*
-+	 * Determine IRQ trigger mode (edge or level sensitive):
-+	 */
-+	switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
-+	{
-+		case 0: /* conforms, ie. bus-type dependent */
-+		{
-+			switch (mp_bus_id_to_type[bus])
-+			{
-+				case MP_BUS_ISA: /* ISA pin */
-+				{
-+					trigger = default_ISA_trigger(idx);
-+					break;
-+				}
-+				case MP_BUS_EISA: /* EISA pin */
-+				{
-+					trigger = default_EISA_trigger(idx);
-+					break;
-+				}
-+				case MP_BUS_PCI: /* PCI pin */
-+				{
-+					trigger = default_PCI_trigger(idx);
-+					break;
-+				}
-+				case MP_BUS_MCA: /* MCA pin */
-+				{
-+					trigger = default_MCA_trigger(idx);
-+					break;
-+				}
-+				default:
-+				{
-+					printk(KERN_WARNING "broken BIOS!!\n");
-+					trigger = 1;
-+					break;
-+				}
-+			}
-+			break;
-+		}
-+		case 1: /* edge */
-+		{
-+			trigger = 0;
-+			break;
-+		}
-+		case 2: /* reserved */
-+		{
-+			printk(KERN_WARNING "broken BIOS!!\n");
-+			trigger = 1;
-+			break;
-+		}
-+		case 3: /* level */
-+		{
-+			trigger = 1;
-+			break;
-+		}
-+		default: /* invalid */
-+		{
-+			printk(KERN_WARNING "broken BIOS!!\n");
-+			trigger = 0;
-+			break;
-+		}
-+	}
-+	return trigger;
-+}
++#
++# Input device support
++#
++CONFIG_INPUT=y
 +
-+static inline int irq_polarity(int idx)
-+{
-+	return MPBIOS_polarity(idx);
-+}
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++CONFIG_INPUT_JOYDEV=m
++CONFIG_INPUT_TSDEV=m
++CONFIG_INPUT_TSDEV_SCREEN_X=240
++CONFIG_INPUT_TSDEV_SCREEN_Y=320
++CONFIG_INPUT_EVDEV=y
++# CONFIG_INPUT_EVBUG is not set
 +
-+static inline int irq_trigger(int idx)
-+{
-+	return MPBIOS_trigger(idx);
-+}
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ATKBD=y
++CONFIG_KEYBOARD_SUNKBD=m
++# CONFIG_KEYBOARD_LKKBD is not set
++CONFIG_KEYBOARD_XTKBD=m
++CONFIG_KEYBOARD_NEWTON=m
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++CONFIG_MOUSE_SERIAL=m
++# CONFIG_MOUSE_VSXXXAA is not set
++CONFIG_INPUT_JOYSTICK=y
++CONFIG_JOYSTICK_ANALOG=m
++CONFIG_JOYSTICK_A3D=m
++CONFIG_JOYSTICK_ADI=m
++CONFIG_JOYSTICK_COBRA=m
++CONFIG_JOYSTICK_GF2K=m
++CONFIG_JOYSTICK_GRIP=m
++CONFIG_JOYSTICK_GRIP_MP=m
++CONFIG_JOYSTICK_GUILLEMOT=m
++CONFIG_JOYSTICK_INTERACT=m
++CONFIG_JOYSTICK_SIDEWINDER=m
++CONFIG_JOYSTICK_TMDC=m
++CONFIG_JOYSTICK_IFORCE=m
++CONFIG_JOYSTICK_IFORCE_USB=y
++CONFIG_JOYSTICK_IFORCE_232=y
++CONFIG_JOYSTICK_WARRIOR=m
++CONFIG_JOYSTICK_MAGELLAN=m
++CONFIG_JOYSTICK_SPACEORB=m
++CONFIG_JOYSTICK_SPACEBALL=m
++CONFIG_JOYSTICK_STINGER=m
++CONFIG_JOYSTICK_TWIDJOY=m
++CONFIG_JOYSTICK_DB9=m
++CONFIG_JOYSTICK_GAMECON=m
++CONFIG_JOYSTICK_TURBOGRAFX=m
++CONFIG_JOYSTICK_JOYDUMP=m
++CONFIG_INPUT_TOUCHSCREEN=y
++CONFIG_TOUCHSCREEN_ADS7846=m
++CONFIG_TOUCHSCREEN_GUNZE=m
++CONFIG_TOUCHSCREEN_ELO=m
++CONFIG_TOUCHSCREEN_MTOUCH=m
++CONFIG_TOUCHSCREEN_MK712=m
++CONFIG_INPUT_MISC=y
++CONFIG_INPUT_PCSPKR=m
++CONFIG_INPUT_UINPUT=m
 +
-+static int next_irq = 16;
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_SERPORT=m
++CONFIG_SERIO_CT82C710=m
++CONFIG_SERIO_PARKBD=m
++CONFIG_SERIO_PCIPS2=m
++CONFIG_SERIO_LIBPS2=y
++CONFIG_SERIO_RAW=m
++CONFIG_GAMEPORT=m
++CONFIG_GAMEPORT_NS558=m
++CONFIG_GAMEPORT_L4=m
++CONFIG_GAMEPORT_EMU10K1=m
++CONFIG_GAMEPORT_FM801=m
 +
-+/*
-+ * gsi_irq_sharing -- Name overload!  "irq" can be either a legacy IRQ
-+ * in the range 0-15, a linux IRQ in the range 0-223, or a GSI number
-+ * from ACPI, which can reach 800 in large boxen.
-+ *
-+ * Compact the sparse GSI space into a sequential IRQ series and reuse
-+ * vectors if possible.
-+ */
-+int gsi_irq_sharing(int gsi)
-+{
-+	int i, tries, vector;
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++CONFIG_VT_HW_CONSOLE_BINDING=y
++# CONFIG_SERIAL_NONSTANDARD is not set
 +
-+	BUG_ON(gsi >= NR_IRQ_VECTORS);
++#
++# Serial drivers
++#
 +
-+	if (platform_legacy_irq(gsi))
-+		return gsi;
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_JSM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=64
++CONFIG_PRINTER=m
++CONFIG_LP_CONSOLE=y
++CONFIG_PPDEV=m
++CONFIG_TIPAR=m
 +
-+	if (gsi_2_irq[gsi] != 0xFF)
-+		return (int)gsi_2_irq[gsi];
++#
++# IPMI
++#
++CONFIG_IPMI_HANDLER=m
++CONFIG_IPMI_PANIC_EVENT=y
++# CONFIG_IPMI_PANIC_STRING is not set
++CONFIG_IPMI_DEVICE_INTERFACE=m
++CONFIG_IPMI_SI=m
++CONFIG_IPMI_WATCHDOG=m
++CONFIG_IPMI_POWEROFF=m
 +
-+	tries = NR_IRQS;
-+  try_again:
-+	vector = assign_irq_vector(gsi);
++#
++# Watchdog Cards
++#
++CONFIG_WATCHDOG=y
++# CONFIG_WATCHDOG_NOWAYOUT is not set
 +
-+	/*
-+	 * Sharing vectors means sharing IRQs, so scan irq_vectors for previous
-+	 * use of vector and if found, return that IRQ.  However, we never want
-+	 * to share legacy IRQs, which usually have a different trigger mode
-+	 * than PCI.
-+	 */
-+	for (i = 0; i < NR_IRQS; i++)
-+		if (IO_APIC_VECTOR(i) == vector)
-+			break;
-+	if (platform_legacy_irq(i)) {
-+		if (--tries >= 0) {
-+			IO_APIC_VECTOR(i) = 0;
-+			goto try_again;
-+		}
-+		panic("gsi_irq_sharing: didn't find an IRQ using vector 0x%02X for GSI %d", vector, gsi);
-+	}
-+	if (i < NR_IRQS) {
-+		gsi_2_irq[gsi] = i;
-+		printk(KERN_INFO "GSI %d sharing vector 0x%02X and IRQ %d\n",
-+				gsi, vector, i);
-+		return i;
-+	}
++#
++# Watchdog Device Drivers
++#
++CONFIG_SOFT_WATCHDOG=m
++CONFIG_ACQUIRE_WDT=m
++CONFIG_ADVANTECH_WDT=m
++CONFIG_ALIM1535_WDT=m
++CONFIG_ALIM7101_WDT=m
++CONFIG_SC520_WDT=m
++CONFIG_EUROTECH_WDT=m
++CONFIG_IB700_WDT=m
++CONFIG_IBMASR=m
++CONFIG_WAFER_WDT=m
++CONFIG_I6300ESB_WDT=m
++CONFIG_I8XX_TCO=m
++CONFIG_SC1200_WDT=m
++CONFIG_60XX_WDT=m
++CONFIG_SBC8360_WDT=m
++CONFIG_CPU5_WDT=m
++CONFIG_W83627HF_WDT=m
++CONFIG_W83877F_WDT=m
++CONFIG_W83977F_WDT=m
++CONFIG_MACHZ_WDT=m
++CONFIG_SBC_EPX_C3_WATCHDOG=m
 +
-+	i = next_irq++;
-+	BUG_ON(i >= NR_IRQS);
-+	gsi_2_irq[gsi] = i;
-+	IO_APIC_VECTOR(i) = vector;
-+	printk(KERN_INFO "GSI %d assigned vector 0x%02X and IRQ %d\n",
-+			gsi, vector, i);
-+	return i;
-+}
++#
++# PCI-based Watchdog Cards
++#
++CONFIG_PCIPCWATCHDOG=m
++CONFIG_WDTPCI=m
++CONFIG_WDT_501_PCI=y
 +
-+static int pin_2_irq(int idx, int apic, int pin)
-+{
-+	int irq, i;
-+	int bus = mp_irqs[idx].mpc_srcbus;
++#
++# USB-based Watchdog Cards
++#
++CONFIG_USBPCWATCHDOG=m
++CONFIG_HW_RANDOM=y
++CONFIG_HW_RANDOM_INTEL=m
++CONFIG_HW_RANDOM_AMD=m
++CONFIG_HW_RANDOM_GEODE=m
++CONFIG_NVRAM=y
++CONFIG_RTC=y
++CONFIG_DTLK=m
++CONFIG_R3964=m
++CONFIG_APPLICOM=m
 +
-+	/*
-+	 * Debugging check, we are in big trouble if this message pops up!
-+	 */
-+	if (mp_irqs[idx].mpc_dstirq != pin)
-+		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
++#
++# Ftape, the floppy tape device driver
++#
++CONFIG_AGP=m
++CONFIG_AGP_AMD64=m
++CONFIG_AGP_INTEL=m
++CONFIG_AGP_SIS=m
++CONFIG_AGP_VIA=m
++CONFIG_DRM=m
++CONFIG_DRM_TDFX=m
++CONFIG_DRM_R128=m
++CONFIG_DRM_RADEON=m
++CONFIG_DRM_I810=m
++# CONFIG_DRM_I830 is not set
++CONFIG_DRM_I915=m
++CONFIG_DRM_MGA=m
++# CONFIG_DRM_SIS is not set
++CONFIG_DRM_VIA=m
++CONFIG_DRM_SAVAGE=m
 +
-+	switch (mp_bus_id_to_type[bus])
-+	{
-+		case MP_BUS_ISA: /* ISA pin */
-+		case MP_BUS_EISA:
-+		case MP_BUS_MCA:
-+		{
-+			irq = mp_irqs[idx].mpc_srcbusirq;
-+			break;
-+		}
-+		case MP_BUS_PCI: /* PCI pin */
-+		{
-+			/*
-+			 * PCI IRQs are mapped in order
-+			 */
-+			i = irq = 0;
-+			while (i < apic)
-+				irq += nr_ioapic_registers[i++];
-+			irq += pin;
-+			irq = gsi_irq_sharing(irq);
-+			break;
-+		}
-+		default:
-+		{
-+			printk(KERN_ERR "unknown bus type %d.\n",bus); 
-+			irq = 0;
-+			break;
-+		}
-+	}
-+	BUG_ON(irq >= NR_IRQS);
++#
++# PCMCIA character devices
++#
++# CONFIG_SYNCLINK_CS is not set
++# CONFIG_CARDMAN_4000 is not set
++# CONFIG_CARDMAN_4040 is not set
++# CONFIG_MWAVE is not set
++CONFIG_PC8736x_GPIO=m
++CONFIG_NSC_GPIO=m
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_HPET is not set
++CONFIG_HANGCHECK_TIMER=m
 +
-+	/*
-+	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
-+	 */
-+	if ((pin >= 16) && (pin <= 23)) {
-+		if (pirq_entries[pin-16] != -1) {
-+			if (!pirq_entries[pin-16]) {
-+				apic_printk(APIC_VERBOSE, "disabling PIRQ%d\n", pin-16);
-+			} else {
-+				irq = pirq_entries[pin-16];
-+				apic_printk(APIC_VERBOSE, "using PIRQ%d -> IRQ %d\n",
-+						pin-16, irq);
-+			}
-+		}
-+	}
-+	BUG_ON(irq >= NR_IRQS);
-+	return irq;
-+}
++#
++# TPM devices
++#
++CONFIG_TCG_TPM=m
++CONFIG_TCG_TIS=m
++CONFIG_TCG_NSC=m
++CONFIG_TCG_ATMEL=m
++CONFIG_TCG_INFINEON=m
++CONFIG_TCG_XEN=m
++CONFIG_TELCLOCK=m
 +
-+static inline int IO_APIC_irq_trigger(int irq)
-+{
-+	int apic, idx, pin;
++#
++# I2C support
++#
++CONFIG_I2C=m
++CONFIG_I2C_CHARDEV=m
 +
-+	for (apic = 0; apic < nr_ioapics; apic++) {
-+		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-+			idx = find_irq_entry(apic,pin,mp_INT);
-+			if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
-+				return irq_trigger(idx);
-+		}
-+	}
-+	/*
-+	 * nonexistent IRQs are edge default
-+	 */
-+	return 0;
-+}
++#
++# I2C Algorithms
++#
++CONFIG_I2C_ALGOBIT=m
++CONFIG_I2C_ALGOPCF=m
++CONFIG_I2C_ALGOPCA=m
 +
-+/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
-+u8 irq_vector[NR_IRQ_VECTORS] __read_mostly;
++#
++# I2C Hardware Bus support
++#
++CONFIG_I2C_ALI1535=m
++CONFIG_I2C_ALI1563=m
++CONFIG_I2C_ALI15X3=m
++CONFIG_I2C_AMD756=m
++CONFIG_I2C_AMD756_S4882=m
++CONFIG_I2C_AMD8111=m
++CONFIG_I2C_I801=m
++CONFIG_I2C_I810=m
++CONFIG_I2C_PIIX4=m
++CONFIG_I2C_ISA=m
++CONFIG_I2C_NFORCE2=m
++CONFIG_I2C_OCORES=m
++CONFIG_I2C_PARPORT=m
++CONFIG_I2C_PARPORT_LIGHT=m
++CONFIG_I2C_PROSAVAGE=m
++CONFIG_I2C_SAVAGE4=m
++CONFIG_I2C_SIS5595=m
++CONFIG_I2C_SIS630=m
++CONFIG_I2C_SIS96X=m
++CONFIG_I2C_STUB=m
++CONFIG_I2C_VIA=m
++CONFIG_I2C_VIAPRO=m
++CONFIG_I2C_VOODOO3=m
++CONFIG_I2C_PCA_ISA=m
 +
-+int assign_irq_vector(int irq)
-+{
-+	unsigned long flags;
-+	int vector;
-+	struct physdev_irq irq_op;
-+  
-+  	BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
++#
++# Miscellaneous I2C Chip support
++#
++CONFIG_SENSORS_DS1337=m
++CONFIG_SENSORS_DS1374=m
++CONFIG_SENSORS_EEPROM=m
++CONFIG_SENSORS_PCF8574=m
++CONFIG_SENSORS_PCA9539=m
++CONFIG_SENSORS_PCF8591=m
++CONFIG_SENSORS_MAX6875=m
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
 +
-+	spin_lock_irqsave(&vector_lock, flags);
++#
++# SPI support
++#
++CONFIG_SPI=y
++# CONFIG_SPI_DEBUG is not set
++CONFIG_SPI_MASTER=y
 +
-+  	if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
-+		spin_unlock_irqrestore(&vector_lock, flags);
-+  		return IO_APIC_VECTOR(irq);
-+	}
++#
++# SPI Master Controller Drivers
++#
++CONFIG_SPI_BITBANG=m
++CONFIG_SPI_BUTTERFLY=m
 +
-+	irq_op.irq = irq;
-+	if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
-+		spin_unlock_irqrestore(&vector_lock, flags);
-+		return -ENOSPC;
-+	}
++#
++# SPI Protocol Masters
++#
 +
-+	vector = irq_op.vector;
-+	vector_irq[vector] = irq;
-+	if (irq != AUTO_ASSIGN)
-+		IO_APIC_VECTOR(irq) = vector;
++#
++# Dallas's 1-wire bus
++#
++CONFIG_W1=m
++CONFIG_W1_CON=y
 +
-+	spin_unlock_irqrestore(&vector_lock, flags);
++#
++# 1-wire Bus Masters
++#
++CONFIG_W1_MASTER_MATROX=m
++CONFIG_W1_MASTER_DS2490=m
++CONFIG_W1_MASTER_DS2482=m
 +
-+	return vector;
-+}
++#
++# 1-wire Slaves
++#
++CONFIG_W1_SLAVE_THERM=m
++CONFIG_W1_SLAVE_SMEM=m
++CONFIG_W1_SLAVE_DS2433=m
++# CONFIG_W1_SLAVE_DS2433_CRC is not set
 +
-+extern void (*interrupt[NR_IRQS])(void);
-+#ifndef CONFIG_XEN
-+static struct hw_interrupt_type ioapic_level_type;
-+static struct hw_interrupt_type ioapic_edge_type;
++#
++# Hardware Monitoring support
++#
++CONFIG_HWMON=m
++CONFIG_HWMON_VID=m
++CONFIG_SENSORS_ABITUGURU=m
++CONFIG_SENSORS_ADM1021=m
++CONFIG_SENSORS_ADM1025=m
++CONFIG_SENSORS_ADM1026=m
++CONFIG_SENSORS_ADM1031=m
++CONFIG_SENSORS_ADM9240=m
++CONFIG_SENSORS_ASB100=m
++CONFIG_SENSORS_ATXP1=m
++CONFIG_SENSORS_DS1621=m
++CONFIG_SENSORS_F71805F=m
++CONFIG_SENSORS_FSCHER=m
++CONFIG_SENSORS_FSCPOS=m
++CONFIG_SENSORS_GL518SM=m
++CONFIG_SENSORS_GL520SM=m
++CONFIG_SENSORS_IT87=m
++CONFIG_SENSORS_LM63=m
++CONFIG_SENSORS_LM70=m
++CONFIG_SENSORS_LM75=m
++CONFIG_SENSORS_LM77=m
++CONFIG_SENSORS_LM78=m
++CONFIG_SENSORS_LM80=m
++CONFIG_SENSORS_LM83=m
++CONFIG_SENSORS_LM85=m
++CONFIG_SENSORS_LM87=m
++CONFIG_SENSORS_LM90=m
++CONFIG_SENSORS_LM92=m
++CONFIG_SENSORS_MAX1619=m
++CONFIG_SENSORS_PC87360=m
++CONFIG_SENSORS_SIS5595=m
++CONFIG_SENSORS_SMSC47M1=m
++CONFIG_SENSORS_SMSC47M192=m
++CONFIG_SENSORS_SMSC47B397=m
++CONFIG_SENSORS_VIA686A=m
++CONFIG_SENSORS_VT8231=m
++CONFIG_SENSORS_W83781D=m
++CONFIG_SENSORS_W83791D=m
++CONFIG_SENSORS_W83792D=m
++CONFIG_SENSORS_W83L785TS=m
++CONFIG_SENSORS_W83627HF=m
++CONFIG_SENSORS_W83627EHF=m
++CONFIG_SENSORS_HDAPS=m
++# CONFIG_HWMON_DEBUG_CHIP is not set
 +
-+#define IOAPIC_AUTO	-1
-+#define IOAPIC_EDGE	0
-+#define IOAPIC_LEVEL	1
++#
++# Misc devices
++#
++CONFIG_IBM_ASM=m
 +
-+static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
-+{
-+	unsigned idx;
++#
++# Multimedia devices
++#
++CONFIG_VIDEO_DEV=m
++CONFIG_VIDEO_V4L1=y
++CONFIG_VIDEO_V4L1_COMPAT=y
++CONFIG_VIDEO_V4L2=y
 +
-+	idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
++#
++# Video Capture Adapters
++#
 +
-+	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
-+			trigger == IOAPIC_LEVEL)
-+		irq_desc[idx].chip = &ioapic_level_type;
-+	else
-+		irq_desc[idx].chip = &ioapic_edge_type;
-+	set_intr_gate(vector, interrupt[idx]);
-+}
-+#else
-+#define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
-+#endif /* !CONFIG_XEN */
++#
++# Video Capture Adapters
++#
++# CONFIG_VIDEO_ADV_DEBUG is not set
++CONFIG_VIDEO_VIVI=m
++CONFIG_VIDEO_BT848=m
++CONFIG_VIDEO_BT848_DVB=y
++CONFIG_VIDEO_SAA6588=m
++CONFIG_VIDEO_BWQCAM=m
++CONFIG_VIDEO_CQCAM=m
++CONFIG_VIDEO_W9966=m
++CONFIG_VIDEO_CPIA=m
++CONFIG_VIDEO_CPIA_PP=m
++CONFIG_VIDEO_CPIA_USB=m
++CONFIG_VIDEO_CPIA2=m
++CONFIG_VIDEO_SAA5246A=m
++CONFIG_VIDEO_SAA5249=m
++CONFIG_TUNER_3036=m
++CONFIG_VIDEO_STRADIS=m
++CONFIG_VIDEO_ZORAN=m
++CONFIG_VIDEO_ZORAN_BUZ=m
++CONFIG_VIDEO_ZORAN_DC10=m
++CONFIG_VIDEO_ZORAN_DC30=m
++CONFIG_VIDEO_ZORAN_LML33=m
++CONFIG_VIDEO_ZORAN_LML33R10=m
++CONFIG_VIDEO_ZORAN_AVS6EYES=m
++CONFIG_VIDEO_SAA7134=m
++CONFIG_VIDEO_SAA7134_ALSA=m
++CONFIG_VIDEO_SAA7134_DVB=m
++CONFIG_VIDEO_SAA7134_DVB_ALL_FRONTENDS=y
++CONFIG_VIDEO_MXB=m
++CONFIG_VIDEO_DPC=m
++CONFIG_VIDEO_HEXIUM_ORION=m
++CONFIG_VIDEO_HEXIUM_GEMINI=m
++CONFIG_VIDEO_CX88_VP3054=m
++CONFIG_VIDEO_CX88=m
++CONFIG_VIDEO_CX88_ALSA=m
++CONFIG_VIDEO_CX88_BLACKBIRD=m
++CONFIG_VIDEO_CX88_DVB=m
++CONFIG_VIDEO_CX88_DVB_ALL_FRONTENDS=y
 +
-+static void __init setup_IO_APIC_irqs(void)
-+{
-+	struct IO_APIC_route_entry entry;
-+	int apic, pin, idx, irq, first_notcon = 1, vector;
-+	unsigned long flags;
++#
++# Encoders and Decoders
++#
++CONFIG_VIDEO_MSP3400=m
++CONFIG_VIDEO_CS53L32A=m
++CONFIG_VIDEO_TLV320AIC23B=m
++CONFIG_VIDEO_WM8775=m
++CONFIG_VIDEO_WM8739=m
++CONFIG_VIDEO_CX2341X=m
++CONFIG_VIDEO_CX25840=m
++CONFIG_VIDEO_SAA711X=m
++CONFIG_VIDEO_SAA7127=m
++CONFIG_VIDEO_UPD64031A=m
++CONFIG_VIDEO_UPD64083=m
 +
-+	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
++#
++# V4L USB devices
++#
++CONFIG_VIDEO_PVRUSB2=m
++CONFIG_VIDEO_PVRUSB2_24XXX=y
++CONFIG_VIDEO_PVRUSB2_SYSFS=y
++# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
++CONFIG_VIDEO_EM28XX=m
++CONFIG_VIDEO_USBVIDEO=m
++CONFIG_USB_VICAM=m
++CONFIG_USB_IBMCAM=m
++CONFIG_USB_KONICAWC=m
++CONFIG_USB_QUICKCAM_MESSENGER=m
++CONFIG_USB_ET61X251=m
++CONFIG_VIDEO_OVCAMCHIP=m
++CONFIG_USB_W9968CF=m
++CONFIG_USB_OV511=m
++CONFIG_USB_SE401=m
++CONFIG_USB_SN9C102=m
++CONFIG_USB_STV680=m
++CONFIG_USB_ZC0301=m
++CONFIG_USB_PWC=m
++# CONFIG_USB_PWC_DEBUG is not set
 +
-+	for (apic = 0; apic < nr_ioapics; apic++) {
-+	for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++#
++# Radio Adapters
++#
++CONFIG_RADIO_GEMTEK_PCI=m
++CONFIG_RADIO_MAXIRADIO=m
++CONFIG_RADIO_MAESTRO=m
++CONFIG_USB_DSBR=m
 +
-+		/*
-+		 * add it to the IO-APIC irq-routing table:
-+		 */
-+		memset(&entry,0,sizeof(entry));
++#
++# Digital Video Broadcasting Devices
++#
++CONFIG_DVB=y
++CONFIG_DVB_CORE=m
 +
-+		entry.delivery_mode = INT_DELIVERY_MODE;
-+		entry.dest_mode = INT_DEST_MODE;
-+		entry.mask = 0;				/* enable IRQ */
-+		entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++#
++# Supported SAA7146 based PCI Adapters
++#
++CONFIG_DVB_AV7110=m
++CONFIG_DVB_AV7110_OSD=y
++CONFIG_DVB_BUDGET=m
++CONFIG_DVB_BUDGET_CI=m
++CONFIG_DVB_BUDGET_AV=m
++CONFIG_DVB_BUDGET_PATCH=m
 +
-+		idx = find_irq_entry(apic,pin,mp_INT);
-+		if (idx == -1) {
-+			if (first_notcon) {
-+				apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mpc_apicid, pin);
-+				first_notcon = 0;
-+			} else
-+				apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin);
-+			continue;
-+		}
++#
++# Supported USB Adapters
++#
++CONFIG_DVB_USB=m
++# CONFIG_DVB_USB_DEBUG is not set
++CONFIG_DVB_USB_A800=m
++CONFIG_DVB_USB_DIBUSB_MB=m
++# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set
++CONFIG_DVB_USB_DIBUSB_MC=m
++CONFIG_DVB_USB_UMT_010=m
++CONFIG_DVB_USB_CXUSB=m
++CONFIG_DVB_USB_DIGITV=m
++CONFIG_DVB_USB_VP7045=m
++CONFIG_DVB_USB_VP702X=m
++CONFIG_DVB_USB_GP8PSK=m
++CONFIG_DVB_USB_NOVA_T_USB2=m
++CONFIG_DVB_USB_DTT200U=m
++CONFIG_DVB_TTUSB_BUDGET=m
++CONFIG_DVB_TTUSB_DEC=m
++CONFIG_DVB_CINERGYT2=m
++CONFIG_DVB_CINERGYT2_TUNING=y
++CONFIG_DVB_CINERGYT2_STREAM_URB_COUNT=32
++CONFIG_DVB_CINERGYT2_STREAM_BUF_SIZE=512
++CONFIG_DVB_CINERGYT2_QUERY_INTERVAL=250
++CONFIG_DVB_CINERGYT2_ENABLE_RC_INPUT_DEVICE=y
++CONFIG_DVB_CINERGYT2_RC_QUERY_INTERVAL=100
 +
-+		entry.trigger = irq_trigger(idx);
-+		entry.polarity = irq_polarity(idx);
++#
++# Supported FlexCopII (B2C2) Adapters
++#
++CONFIG_DVB_B2C2_FLEXCOP=m
++CONFIG_DVB_B2C2_FLEXCOP_PCI=m
++CONFIG_DVB_B2C2_FLEXCOP_USB=m
++# CONFIG_DVB_B2C2_FLEXCOP_DEBUG is not set
 +
-+		if (irq_trigger(idx)) {
-+			entry.trigger = 1;
-+			entry.mask = 1;
-+			entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-+		}
++#
++# Supported BT878 Adapters
++#
++CONFIG_DVB_BT8XX=m
 +
-+		irq = pin_2_irq(idx, apic, pin);
-+		add_pin_to_irq(irq, apic, pin);
++#
++# Supported Pluto2 Adapters
++#
++CONFIG_DVB_PLUTO2=m
 +
-+		if (/* !apic && */ !IO_APIC_IRQ(irq))
-+			continue;
++#
++# Supported DVB Frontends
++#
 +
-+		if (IO_APIC_IRQ(irq)) {
-+			vector = assign_irq_vector(irq);
-+			entry.vector = vector;
++#
++# Customise DVB Frontends
++#
 +
-+			ioapic_register_intr(irq, vector, IOAPIC_AUTO);
-+			if (!apic && (irq < 16))
-+				disable_8259A_irq(irq);
-+		}
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
-+		io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
-+		set_native_irq_info(irq, TARGET_CPUS);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+	}
-+	}
++#
++# DVB-S (satellite) frontends
++#
++CONFIG_DVB_STV0299=m
++CONFIG_DVB_CX24110=m
++CONFIG_DVB_CX24123=m
++CONFIG_DVB_TDA8083=m
++CONFIG_DVB_MT312=m
++CONFIG_DVB_VES1X93=m
++CONFIG_DVB_S5H1420=m
 +
-+	if (!first_notcon)
-+		apic_printk(APIC_VERBOSE," not connected.\n");
-+}
++#
++# DVB-T (terrestrial) frontends
++#
++CONFIG_DVB_SP8870=m
++CONFIG_DVB_SP887X=m
++CONFIG_DVB_CX22700=m
++CONFIG_DVB_CX22702=m
++CONFIG_DVB_L64781=m
++CONFIG_DVB_TDA1004X=m
++CONFIG_DVB_NXT6000=m
++CONFIG_DVB_MT352=m
++CONFIG_DVB_ZL10353=m
++CONFIG_DVB_DIB3000MB=m
++CONFIG_DVB_DIB3000MC=m
 +
-+#ifndef CONFIG_XEN
-+/*
-+ * Set up the 8259A-master output pin as broadcast to all
-+ * CPUs.
-+ */
-+static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
-+{
-+	struct IO_APIC_route_entry entry;
-+	unsigned long flags;
++#
++# DVB-C (cable) frontends
++#
++CONFIG_DVB_VES1820=m
++CONFIG_DVB_TDA10021=m
++CONFIG_DVB_STV0297=m
 +
-+	memset(&entry,0,sizeof(entry));
++#
++# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
++#
++CONFIG_DVB_NXT200X=m
++CONFIG_DVB_OR51211=m
++CONFIG_DVB_OR51132=m
++CONFIG_DVB_BCM3510=m
++CONFIG_DVB_LGDT330X=m
 +
-+	disable_8259A_irq(0);
++#
++# Miscellaneous devices
++#
++CONFIG_DVB_PLL=m
++CONFIG_DVB_LNBP21=m
++CONFIG_DVB_ISL6421=m
++CONFIG_VIDEO_SAA7146=m
++CONFIG_VIDEO_SAA7146_VV=m
++CONFIG_VIDEO_VIDEOBUF=m
++CONFIG_VIDEO_TUNER=m
++CONFIG_VIDEO_BUF=m
++CONFIG_VIDEO_BUF_DVB=m
++CONFIG_VIDEO_BTCX=m
++CONFIG_VIDEO_IR=m
++CONFIG_VIDEO_TVEEPROM=m
++CONFIG_USB_DABUSB=m
 +
-+	/* mask LVT0 */
-+	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
++#
++# Graphics support
++#
++CONFIG_FIRMWARE_EDID=y
++CONFIG_FB=y
++CONFIG_FB_CFB_FILLRECT=y
++CONFIG_FB_CFB_COPYAREA=y
++CONFIG_FB_CFB_IMAGEBLIT=y
++# CONFIG_FB_MACMODES is not set
++# CONFIG_FB_BACKLIGHT is not set
++CONFIG_FB_MODE_HELPERS=y
++CONFIG_FB_TILEBLITTING=y
++CONFIG_FB_CIRRUS=m
++# CONFIG_FB_PM2 is not set
++CONFIG_FB_CYBER2000=m
++CONFIG_FB_ARC=m
++# CONFIG_FB_ASILIANT is not set
++# CONFIG_FB_IMSTT is not set
++CONFIG_FB_VGA16=m
++CONFIG_FB_VESA=y
++CONFIG_FB_HGA=m
++CONFIG_FB_HGA_ACCEL=y
++CONFIG_FB_S1D13XXX=m
++CONFIG_FB_NVIDIA=m
++CONFIG_FB_NVIDIA_I2C=y
++CONFIG_FB_RIVA=m
++CONFIG_FB_RIVA_I2C=y
++# CONFIG_FB_RIVA_DEBUG is not set
++CONFIG_FB_INTEL=m
++# CONFIG_FB_INTEL_DEBUG is not set
++CONFIG_FB_MATROX=m
++CONFIG_FB_MATROX_MILLENIUM=y
++CONFIG_FB_MATROX_MYSTIQUE=y
++CONFIG_FB_MATROX_G=y
++CONFIG_FB_MATROX_I2C=m
++CONFIG_FB_MATROX_MAVEN=m
++CONFIG_FB_MATROX_MULTIHEAD=y
++CONFIG_FB_RADEON=m
++CONFIG_FB_RADEON_I2C=y
++# CONFIG_FB_RADEON_DEBUG is not set
++CONFIG_FB_ATY128=m
++CONFIG_FB_ATY=m
++CONFIG_FB_ATY_CT=y
++CONFIG_FB_ATY_GENERIC_LCD=y
++CONFIG_FB_ATY_GX=y
++CONFIG_FB_SAVAGE=m
++CONFIG_FB_SAVAGE_I2C=y
++CONFIG_FB_SAVAGE_ACCEL=y
++CONFIG_FB_SIS=m
++CONFIG_FB_SIS_300=y
++CONFIG_FB_SIS_315=y
++CONFIG_FB_NEOMAGIC=m
++CONFIG_FB_KYRO=m
++CONFIG_FB_3DFX=m
++CONFIG_FB_3DFX_ACCEL=y
++CONFIG_FB_VOODOO1=m
++CONFIG_FB_TRIDENT=m
++CONFIG_FB_TRIDENT_ACCEL=y
++CONFIG_FB_GEODE=y
++CONFIG_FB_GEODE_GX=m
++CONFIG_FB_GEODE_GX1=m
++CONFIG_FB_VIRTUAL=m
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++# CONFIG_VGACON_SOFT_SCROLLBACK is not set
++CONFIG_DUMMY_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
++# CONFIG_FONTS is not set
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
 +
-+	/*
-+	 * We use logical delivery to get the timer IRQ
-+	 * to the first CPU.
-+	 */
-+	entry.dest_mode = INT_DEST_MODE;
-+	entry.mask = 0;					/* unmask IRQ now */
-+	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-+	entry.delivery_mode = INT_DELIVERY_MODE;
-+	entry.polarity = 0;
-+	entry.trigger = 0;
-+	entry.vector = vector;
++#
++# Logo configuration
++#
++CONFIG_LOGO=y
++# CONFIG_LOGO_LINUX_MONO is not set
++# CONFIG_LOGO_LINUX_VGA16 is not set
++CONFIG_LOGO_LINUX_CLUT224=y
++CONFIG_BACKLIGHT_LCD_SUPPORT=y
++CONFIG_BACKLIGHT_CLASS_DEVICE=m
++CONFIG_BACKLIGHT_DEVICE=y
++CONFIG_LCD_CLASS_DEVICE=m
++CONFIG_LCD_DEVICE=y
 +
-+	/*
-+	 * The timer IRQ doesn't have to know that behind the
-+	 * scene we have a 8259A-master in AEOI mode ...
-+	 */
-+	irq_desc[0].chip = &ioapic_edge_type;
++#
++# Sound
++#
++CONFIG_SOUND=m
 +
-+	/*
-+	 * Add it to the IO-APIC irq-routing table:
-+	 */
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
-+	io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
++#
++# Advanced Linux Sound Architecture
++#
++CONFIG_SND=m
++CONFIG_SND_TIMER=m
++CONFIG_SND_PCM=m
++CONFIG_SND_HWDEP=m
++CONFIG_SND_RAWMIDI=m
++CONFIG_SND_SEQUENCER=m
++CONFIG_SND_SEQ_DUMMY=m
++CONFIG_SND_OSSEMUL=y
++CONFIG_SND_MIXER_OSS=m
++CONFIG_SND_PCM_OSS=m
++CONFIG_SND_PCM_OSS_PLUGINS=y
++CONFIG_SND_SEQUENCER_OSS=y
++CONFIG_SND_RTCTIMER=m
++CONFIG_SND_SEQ_RTCTIMER_DEFAULT=y
++CONFIG_SND_DYNAMIC_MINORS=y
++CONFIG_SND_SUPPORT_OLD_API=y
++CONFIG_SND_VERBOSE_PROCFS=y
++# CONFIG_SND_VERBOSE_PRINTK is not set
++# CONFIG_SND_DEBUG is not set
 +
-+	enable_8259A_irq(0);
-+}
++#
++# Generic devices
++#
++CONFIG_SND_MPU401_UART=m
++CONFIG_SND_OPL3_LIB=m
++CONFIG_SND_VX_LIB=m
++CONFIG_SND_AC97_CODEC=m
++CONFIG_SND_AC97_BUS=m
++CONFIG_SND_DUMMY=m
++CONFIG_SND_VIRMIDI=m
++CONFIG_SND_MTPAV=m
++CONFIG_SND_SERIAL_U16550=m
++CONFIG_SND_MPU401=m
 +
-+void __init UNEXPECTED_IO_APIC(void)
-+{
-+}
++#
++# PCI devices
++#
++CONFIG_SND_AD1889=m
++CONFIG_SND_ALS300=m
++CONFIG_SND_ALS4000=m
++CONFIG_SND_ALI5451=m
++CONFIG_SND_ATIIXP=m
++CONFIG_SND_ATIIXP_MODEM=m
++CONFIG_SND_AU8810=m
++CONFIG_SND_AU8820=m
++CONFIG_SND_AU8830=m
++CONFIG_SND_AZT3328=m
++CONFIG_SND_BT87X=m
++# CONFIG_SND_BT87X_OVERCLOCK is not set
++CONFIG_SND_CA0106=m
++CONFIG_SND_CMIPCI=m
++CONFIG_SND_CS4281=m
++CONFIG_SND_CS46XX=m
++CONFIG_SND_CS46XX_NEW_DSP=y
++CONFIG_SND_DARLA20=m
++CONFIG_SND_GINA20=m
++CONFIG_SND_LAYLA20=m
++CONFIG_SND_DARLA24=m
++CONFIG_SND_GINA24=m
++CONFIG_SND_LAYLA24=m
++CONFIG_SND_MONA=m
++CONFIG_SND_MIA=m
++CONFIG_SND_ECHO3G=m
++CONFIG_SND_INDIGO=m
++CONFIG_SND_INDIGOIO=m
++CONFIG_SND_INDIGODJ=m
++CONFIG_SND_EMU10K1=m
++CONFIG_SND_EMU10K1X=m
++CONFIG_SND_ENS1370=m
++CONFIG_SND_ENS1371=m
++CONFIG_SND_ES1938=m
++CONFIG_SND_ES1968=m
++CONFIG_SND_FM801=m
++CONFIG_SND_FM801_TEA575X_BOOL=y
++CONFIG_SND_FM801_TEA575X=m
++CONFIG_SND_HDA_INTEL=m
++CONFIG_SND_HDSP=m
++CONFIG_SND_HDSPM=m
++CONFIG_SND_ICE1712=m
++CONFIG_SND_ICE1724=m
++CONFIG_SND_INTEL8X0=m
++CONFIG_SND_INTEL8X0M=m
++CONFIG_SND_KORG1212=m
++CONFIG_SND_MAESTRO3=m
++CONFIG_SND_MIXART=m
++CONFIG_SND_NM256=m
++CONFIG_SND_PCXHR=m
++CONFIG_SND_RIPTIDE=m
++CONFIG_SND_RME32=m
++CONFIG_SND_RME96=m
++CONFIG_SND_RME9652=m
++CONFIG_SND_SONICVIBES=m
++CONFIG_SND_TRIDENT=m
++CONFIG_SND_VIA82XX=m
++CONFIG_SND_VIA82XX_MODEM=m
++CONFIG_SND_VX222=m
++CONFIG_SND_YMFPCI=m
 +
-+void __apicdebuginit print_IO_APIC(void)
-+{
-+	int apic, i;
-+	union IO_APIC_reg_00 reg_00;
-+	union IO_APIC_reg_01 reg_01;
-+	union IO_APIC_reg_02 reg_02;
-+	unsigned long flags;
++#
++# USB devices
++#
++CONFIG_SND_USB_AUDIO=m
++CONFIG_SND_USB_USX2Y=m
 +
-+	if (apic_verbosity == APIC_QUIET)
-+		return;
++#
++# PCMCIA devices
++#
++CONFIG_SND_VXPOCKET=m
++CONFIG_SND_PDAUDIOCF=m
 +
-+	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
-+	for (i = 0; i < nr_ioapics; i++)
-+		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
-+		       mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
++#
++# Open Sound System
++#
++# CONFIG_SOUND_PRIME is not set
 +
-+	/*
-+	 * We are a bit conservative about what we expect.  We have to
-+	 * know about every hardware change ASAP.
-+	 */
-+	printk(KERN_INFO "testing the IO APIC.......................\n");
++#
++# USB support
++#
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++CONFIG_USB=m
++# CONFIG_USB_DEBUG is not set
 +
-+	for (apic = 0; apic < nr_ioapics; apic++) {
++#
++# Miscellaneous USB options
++#
++CONFIG_USB_DEVICEFS=y
++# CONFIG_USB_BANDWIDTH is not set
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_SUSPEND is not set
++# CONFIG_USB_OTG is not set
 +
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	reg_00.raw = io_apic_read(apic, 0);
-+	reg_01.raw = io_apic_read(apic, 1);
-+	if (reg_01.bits.version >= 0x10)
-+		reg_02.raw = io_apic_read(apic, 2);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
++#
++# USB Host Controller Drivers
++#
++CONFIG_USB_EHCI_HCD=m
++CONFIG_USB_EHCI_SPLIT_ISO=y
++CONFIG_USB_EHCI_ROOT_HUB_TT=y
++CONFIG_USB_EHCI_TT_NEWSCHED=y
++CONFIG_USB_ISP116X_HCD=m
++CONFIG_USB_OHCI_HCD=m
++# CONFIG_USB_OHCI_BIG_ENDIAN is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_UHCI_HCD=m
++CONFIG_USB_SL811_HCD=m
++# CONFIG_USB_SL811_CS is not set
 +
-+	printk("\n");
-+	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
-+	printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
-+	printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
-+	if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
-+		UNEXPECTED_IO_APIC();
++#
++# USB Device Class drivers
++#
++CONFIG_USB_ACM=m
++CONFIG_USB_PRINTER=m
 +
-+	printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
-+	printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);
-+	if (	(reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
-+		(reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
-+		(reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
-+		(reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
-+		(reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
-+		(reg_01.bits.entries != 0x2E) &&
-+		(reg_01.bits.entries != 0x3F) &&
-+		(reg_01.bits.entries != 0x03) 
-+	)
-+		UNEXPECTED_IO_APIC();
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
 +
-+	printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
-+	printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);
-+	if (	(reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
-+		(reg_01.bits.version != 0x02) && /* 82801BA IO-APICs (ICH2) */
-+		(reg_01.bits.version != 0x10) && /* oldest IO-APICs */
-+		(reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
-+		(reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
-+		(reg_01.bits.version != 0x20)    /* Intel P64H (82806 AA) */
-+	)
-+		UNEXPECTED_IO_APIC();
-+	if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
-+		UNEXPECTED_IO_APIC();
++#
++# may also be needed; see USB_STORAGE Help for more information
++#
++CONFIG_USB_STORAGE=m
++# CONFIG_USB_STORAGE_DEBUG is not set
++CONFIG_USB_STORAGE_DATAFAB=y
++CONFIG_USB_STORAGE_FREECOM=y
++CONFIG_USB_STORAGE_ISD200=y
++CONFIG_USB_STORAGE_DPCM=y
++CONFIG_USB_STORAGE_USBAT=y
++CONFIG_USB_STORAGE_SDDR09=y
++CONFIG_USB_STORAGE_SDDR55=y
++CONFIG_USB_STORAGE_JUMPSHOT=y
++# CONFIG_USB_STORAGE_ALAUDA is not set
++# CONFIG_USB_LIBUSUAL is not set
 +
-+	if (reg_01.bits.version >= 0x10) {
-+		printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
-+		printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
-+		if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
-+			UNEXPECTED_IO_APIC();
-+	}
++#
++# USB Input Devices
++#
++CONFIG_USB_HID=m
++CONFIG_USB_HIDINPUT=y
++# CONFIG_USB_HIDINPUT_POWERBOOK is not set
++CONFIG_HID_FF=y
++CONFIG_HID_PID=y
++CONFIG_LOGITECH_FF=y
++CONFIG_THRUSTMASTER_FF=y
++CONFIG_USB_HIDDEV=y
 +
-+	printk(KERN_DEBUG ".... IRQ redirection table:\n");
++#
++# USB HID Boot Protocol drivers
++#
++CONFIG_USB_KBD=m
++CONFIG_USB_MOUSE=m
++CONFIG_USB_AIPTEK=m
++CONFIG_USB_WACOM=m
++CONFIG_USB_ACECAD=m
++CONFIG_USB_KBTAB=m
++CONFIG_USB_POWERMATE=m
++CONFIG_USB_TOUCHSCREEN=m
++CONFIG_USB_TOUCHSCREEN_EGALAX=y
++CONFIG_USB_TOUCHSCREEN_PANJIT=y
++CONFIG_USB_TOUCHSCREEN_3M=y
++CONFIG_USB_TOUCHSCREEN_ITM=y
++CONFIG_USB_YEALINK=m
++CONFIG_USB_XPAD=m
++CONFIG_USB_ATI_REMOTE=m
++CONFIG_USB_ATI_REMOTE2=m
++CONFIG_USB_KEYSPAN_REMOTE=m
++CONFIG_USB_APPLETOUCH=m
 +
-+	printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
-+			  " Stat Dest Deli Vect:   \n");
++#
++# USB Imaging devices
++#
++CONFIG_USB_MDC800=m
++CONFIG_USB_MICROTEK=m
 +
-+	for (i = 0; i <= reg_01.bits.entries; i++) {
-+		struct IO_APIC_route_entry entry;
++#
++# USB Network Adapters
++#
++CONFIG_USB_CATC=m
++CONFIG_USB_KAWETH=m
++CONFIG_USB_PEGASUS=m
++CONFIG_USB_RTL8150=m
++CONFIG_USB_USBNET=m
++CONFIG_USB_NET_AX8817X=m
++CONFIG_USB_NET_CDCETHER=m
++CONFIG_USB_NET_GL620A=m
++CONFIG_USB_NET_NET1080=m
++CONFIG_USB_NET_PLUSB=m
++CONFIG_USB_NET_RNDIS_HOST=m
++CONFIG_USB_NET_CDC_SUBSET=m
++CONFIG_USB_ALI_M5632=y
++CONFIG_USB_AN2720=y
++CONFIG_USB_BELKIN=y
++CONFIG_USB_ARMLINUX=y
++CONFIG_USB_EPSON2888=y
++CONFIG_USB_NET_ZAURUS=m
++CONFIG_USB_MON=y
 +
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		*(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
-+		*(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
++#
++# USB port drivers
++#
++CONFIG_USB_USS720=m
 +
-+		printk(KERN_DEBUG " %02x %03X %02X  ",
-+			i,
-+			entry.dest.logical.logical_dest,
-+			entry.dest.physical.physical_dest
-+		);
++#
++# USB Serial Converter support
++#
++CONFIG_USB_SERIAL=m
++CONFIG_USB_SERIAL_GENERIC=y
++CONFIG_USB_SERIAL_AIRPRIME=m
++CONFIG_USB_SERIAL_ARK3116=m
++CONFIG_USB_SERIAL_BELKIN=m
++CONFIG_USB_SERIAL_WHITEHEAT=m
++CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
++CONFIG_USB_SERIAL_CP2101=m
++CONFIG_USB_SERIAL_CYPRESS_M8=m
++CONFIG_USB_SERIAL_EMPEG=m
++CONFIG_USB_SERIAL_FTDI_SIO=m
++CONFIG_USB_SERIAL_FUNSOFT=m
++CONFIG_USB_SERIAL_VISOR=m
++CONFIG_USB_SERIAL_IPAQ=m
++CONFIG_USB_SERIAL_IR=m
++CONFIG_USB_SERIAL_EDGEPORT=m
++CONFIG_USB_SERIAL_EDGEPORT_TI=m
++CONFIG_USB_SERIAL_GARMIN=m
++CONFIG_USB_SERIAL_IPW=m
++CONFIG_USB_SERIAL_KEYSPAN_PDA=m
++CONFIG_USB_SERIAL_KEYSPAN=m
++CONFIG_USB_SERIAL_KEYSPAN_MPR=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19=y
++CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
++CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
++CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
++CONFIG_USB_SERIAL_KLSI=m
++CONFIG_USB_SERIAL_KOBIL_SCT=m
++CONFIG_USB_SERIAL_MCT_U232=m
++CONFIG_USB_SERIAL_NAVMAN=m
++CONFIG_USB_SERIAL_PL2303=m
++CONFIG_USB_SERIAL_HP4X=m
++CONFIG_USB_SERIAL_SAFE=m
++CONFIG_USB_SERIAL_SAFE_PADDED=y
++CONFIG_USB_SERIAL_SIERRAWIRELESS=m
++CONFIG_USB_SERIAL_TI=m
++CONFIG_USB_SERIAL_CYBERJACK=m
++CONFIG_USB_SERIAL_XIRCOM=m
++# CONFIG_USB_SERIAL_OPTION is not set
++CONFIG_USB_SERIAL_OMNINET=m
++CONFIG_USB_EZUSB=y
 +
-+		printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
-+			entry.mask,
-+			entry.trigger,
-+			entry.irr,
-+			entry.polarity,
-+			entry.delivery_status,
-+			entry.dest_mode,
-+			entry.delivery_mode,
-+			entry.vector
-+		);
-+	}
-+	}
-+	if (use_pci_vector())
-+		printk(KERN_INFO "Using vector-based indexing\n");
-+	printk(KERN_DEBUG "IRQ to pin mappings:\n");
-+	for (i = 0; i < NR_IRQS; i++) {
-+		struct irq_pin_list *entry = irq_2_pin + i;
-+		if (entry->pin < 0)
-+			continue;
-+ 		if (use_pci_vector() && !platform_legacy_irq(i))
-+			printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
-+		else
-+			printk(KERN_DEBUG "IRQ%d ", i);
-+		for (;;) {
-+			printk("-> %d:%d", entry->apic, entry->pin);
-+			if (!entry->next)
-+				break;
-+			entry = irq_2_pin + entry->next;
-+		}
-+		printk("\n");
-+	}
++#
++# USB Miscellaneous drivers
++#
++CONFIG_USB_EMI62=m
++CONFIG_USB_EMI26=m
++CONFIG_USB_AUERSWALD=m
++CONFIG_USB_RIO500=m
++CONFIG_USB_LEGOTOWER=m
++CONFIG_USB_LCD=m
++CONFIG_USB_LED=m
++CONFIG_USB_CYPRESS_CY7C63=m
++CONFIG_USB_CYTHERM=m
++CONFIG_USB_PHIDGETKIT=m
++CONFIG_USB_PHIDGETSERVO=m
++CONFIG_USB_IDMOUSE=m
++CONFIG_USB_APPLEDISPLAY=m
++CONFIG_USB_SISUSBVGA=m
++CONFIG_USB_SISUSBVGA_CON=y
++CONFIG_USB_LD=m
++CONFIG_USB_TEST=m
 +
-+	printk(KERN_INFO ".................................... done.\n");
++#
++# USB DSL modem support
++#
++CONFIG_USB_ATM=m
++CONFIG_USB_SPEEDTOUCH=m
++CONFIG_USB_CXACRU=m
++CONFIG_USB_UEAGLEATM=m
++CONFIG_USB_XUSBATM=m
 +
-+	return;
-+}
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
 +
-+#if 0
++#
++# MMC/SD Card support
++#
++CONFIG_MMC=m
++# CONFIG_MMC_DEBUG is not set
++CONFIG_MMC_BLOCK=m
++CONFIG_MMC_SDHCI=m
++CONFIG_MMC_WBSD=m
 +
-+static __apicdebuginit void print_APIC_bitfield (int base)
-+{
-+	unsigned int v;
-+	int i, j;
++#
++# LED devices
++#
++CONFIG_NEW_LEDS=y
++CONFIG_LEDS_CLASS=m
 +
-+	if (apic_verbosity == APIC_QUIET)
-+		return;
++#
++# LED drivers
++#
 +
-+	printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
-+	for (i = 0; i < 8; i++) {
-+		v = apic_read(base + i*0x10);
-+		for (j = 0; j < 32; j++) {
-+			if (v & (1<<j))
-+				printk("1");
-+			else
-+				printk("0");
-+		}
-+		printk("\n");
-+	}
-+}
++#
++# LED Triggers
++#
++CONFIG_LEDS_TRIGGERS=y
++CONFIG_LEDS_TRIGGER_TIMER=m
++CONFIG_LEDS_TRIGGER_IDE_DISK=y
++CONFIG_LEDS_TRIGGER_HEARTBEAT=m
 +
-+void __apicdebuginit print_local_APIC(void * dummy)
-+{
-+	unsigned int v, ver, maxlvt;
++#
++# InfiniBand support
++#
++CONFIG_INFINIBAND=m
++CONFIG_INFINIBAND_USER_MAD=m
++CONFIG_INFINIBAND_USER_ACCESS=m
++CONFIG_INFINIBAND_ADDR_TRANS=y
++CONFIG_INFINIBAND_MTHCA=m
++CONFIG_INFINIBAND_MTHCA_DEBUG=y
++CONFIG_INFINIBAND_IPOIB=m
++CONFIG_INFINIBAND_IPOIB_DEBUG=y
++# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set
++CONFIG_INFINIBAND_SRP=m
++CONFIG_INFINIBAND_ISER=m
 +
-+	if (apic_verbosity == APIC_QUIET)
-+		return;
++#
++# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
++#
++CONFIG_EDAC=m
 +
-+	printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
-+		smp_processor_id(), hard_smp_processor_id());
-+	v = apic_read(APIC_ID);
-+	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, GET_APIC_ID(v));
-+	v = apic_read(APIC_LVR);
-+	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
-+	ver = GET_APIC_VERSION(v);
-+	maxlvt = get_maxlvt();
++#
++# Reporting subsystems
++#
++# CONFIG_EDAC_DEBUG is not set
++CONFIG_EDAC_MM_EDAC=m
++CONFIG_EDAC_E752X=m
++CONFIG_EDAC_POLL=y
 +
-+	v = apic_read(APIC_TASKPRI);
-+	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
++#
++# Real Time Clock
++#
++CONFIG_RTC_LIB=m
++CONFIG_RTC_CLASS=m
 +
-+	v = apic_read(APIC_ARBPRI);
-+	printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
-+		v & APIC_ARBPRI_MASK);
-+	v = apic_read(APIC_PROCPRI);
-+	printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
++#
++# RTC interfaces
++#
++CONFIG_RTC_INTF_SYSFS=m
++CONFIG_RTC_INTF_PROC=m
++CONFIG_RTC_INTF_DEV=m
++CONFIG_RTC_INTF_DEV_UIE_EMUL=y
 +
-+	v = apic_read(APIC_EOI);
-+	printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
-+	v = apic_read(APIC_RRR);
-+	printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
-+	v = apic_read(APIC_LDR);
-+	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
-+	v = apic_read(APIC_DFR);
-+	printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
-+	v = apic_read(APIC_SPIV);
-+	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
++#
++# RTC drivers
++#
++CONFIG_RTC_DRV_X1205=m
++CONFIG_RTC_DRV_DS1307=m
++CONFIG_RTC_DRV_DS1553=m
++CONFIG_RTC_DRV_ISL1208=m
++CONFIG_RTC_DRV_DS1672=m
++CONFIG_RTC_DRV_DS1742=m
++CONFIG_RTC_DRV_PCF8563=m
++CONFIG_RTC_DRV_PCF8583=m
++CONFIG_RTC_DRV_RS5C348=m
++CONFIG_RTC_DRV_RS5C372=m
++CONFIG_RTC_DRV_M48T86=m
++CONFIG_RTC_DRV_TEST=m
++CONFIG_RTC_DRV_MAX6902=m
++CONFIG_RTC_DRV_V3020=m
 +
-+	printk(KERN_DEBUG "... APIC ISR field:\n");
-+	print_APIC_bitfield(APIC_ISR);
-+	printk(KERN_DEBUG "... APIC TMR field:\n");
-+	print_APIC_bitfield(APIC_TMR);
-+	printk(KERN_DEBUG "... APIC IRR field:\n");
-+	print_APIC_bitfield(APIC_IRR);
++#
++# DMA Engine support
++#
++CONFIG_DMA_ENGINE=y
 +
-+	v = apic_read(APIC_ESR);
-+	printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
++#
++# DMA Clients
++#
++CONFIG_NET_DMA=y
 +
-+	v = apic_read(APIC_ICR);
-+	printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
-+	v = apic_read(APIC_ICR2);
-+	printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
++#
++# DMA Devices
++#
++CONFIG_INTEL_IOATDMA=m
 +
-+	v = apic_read(APIC_LVTT);
-+	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
++#
++# Firmware Drivers
++#
++CONFIG_EDD=y
++CONFIG_DELL_RBU=m
++CONFIG_DCDBAS=m
 +
-+	if (maxlvt > 3) {                       /* PC is LVT#4. */
-+		v = apic_read(APIC_LVTPC);
-+		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
-+	}
-+	v = apic_read(APIC_LVT0);
-+	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
-+	v = apic_read(APIC_LVT1);
-+	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++CONFIG_EXT2_FS_POSIX_ACL=y
++CONFIG_EXT2_FS_SECURITY=y
++CONFIG_EXT2_FS_XIP=y
++CONFIG_FS_XIP=y
++CONFIG_EXT3_FS=m
++CONFIG_EXT3_FS_XATTR=y
++CONFIG_EXT3_FS_POSIX_ACL=y
++CONFIG_EXT3_FS_SECURITY=y
++CONFIG_JBD=m
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=m
++# CONFIG_REISERFS_CHECK is not set
++CONFIG_REISERFS_PROC_INFO=y
++CONFIG_REISERFS_FS_XATTR=y
++CONFIG_REISERFS_FS_POSIX_ACL=y
++CONFIG_REISERFS_FS_SECURITY=y
++CONFIG_JFS_FS=m
++CONFIG_JFS_POSIX_ACL=y
++CONFIG_JFS_SECURITY=y
++# CONFIG_JFS_DEBUG is not set
++# CONFIG_JFS_STATISTICS is not set
++CONFIG_FS_POSIX_ACL=y
++CONFIG_XFS_FS=m
++CONFIG_XFS_QUOTA=y
++CONFIG_XFS_SECURITY=y
++CONFIG_XFS_POSIX_ACL=y
++CONFIG_XFS_RT=y
++CONFIG_OCFS2_FS=m
++CONFIG_OCFS2_DEBUG_MASKLOG=y
++CONFIG_MINIX_FS=m
++CONFIG_ROMFS_FS=m
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++CONFIG_QUOTA=y
++# CONFIG_QFMT_V1 is not set
++CONFIG_QFMT_V2=m
++CONFIG_QUOTACTL=y
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=m
++CONFIG_AUTOFS4_FS=m
++CONFIG_FUSE_FS=m
 +
-+	if (maxlvt > 2) {			/* ERR is LVT#3. */
-+		v = apic_read(APIC_LVTERR);
-+		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
-+	}
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_ZISOFS_FS=y
++CONFIG_UDF_FS=m
++CONFIG_UDF_NLS=y
 +
-+	v = apic_read(APIC_TMICT);
-+	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
-+	v = apic_read(APIC_TMCCT);
-+	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
-+	v = apic_read(APIC_TDCR);
-+	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
-+	printk("\n");
-+}
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=m
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++CONFIG_NTFS_FS=m
++# CONFIG_NTFS_DEBUG is not set
++# CONFIG_NTFS_RW is not set
 +
-+void print_all_local_APICs (void)
-+{
-+	on_each_cpu(print_local_APIC, NULL, 1, 1);
-+}
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++CONFIG_CONFIGFS_FS=m
 +
-+void __apicdebuginit print_PIC(void)
-+{
-+	unsigned int v;
-+	unsigned long flags;
++#
++# Miscellaneous filesystems
++#
++CONFIG_ADFS_FS=m
++# CONFIG_ADFS_FS_RW is not set
++CONFIG_AFFS_FS=m
++CONFIG_HFS_FS=m
++CONFIG_HFSPLUS_FS=m
++CONFIG_BEFS_FS=m
++# CONFIG_BEFS_DEBUG is not set
++CONFIG_BFS_FS=m
++CONFIG_EFS_FS=m
++CONFIG_JFFS_FS=m
++CONFIG_JFFS_FS_VERBOSE=0
++CONFIG_JFFS_PROC_FS=y
++CONFIG_JFFS2_FS=m
++CONFIG_JFFS2_FS_DEBUG=0
++CONFIG_JFFS2_FS_WRITEBUFFER=y
++CONFIG_JFFS2_SUMMARY=y
++CONFIG_JFFS2_FS_XATTR=y
++CONFIG_JFFS2_FS_POSIX_ACL=y
++CONFIG_JFFS2_FS_SECURITY=y
++CONFIG_JFFS2_COMPRESSION_OPTIONS=y
++CONFIG_JFFS2_ZLIB=y
++CONFIG_JFFS2_RTIME=y
++# CONFIG_JFFS2_RUBIN is not set
++# CONFIG_JFFS2_CMODE_NONE is not set
++CONFIG_JFFS2_CMODE_PRIORITY=y
++# CONFIG_JFFS2_CMODE_SIZE is not set
++CONFIG_CRAMFS=m
++CONFIG_VXFS_FS=m
++CONFIG_HPFS_FS=m
++CONFIG_QNX4FS_FS=m
++CONFIG_SYSV_FS=m
++CONFIG_UFS_FS=m
++# CONFIG_UFS_FS_WRITE is not set
++# CONFIG_UFS_DEBUG is not set
 +
-+	if (apic_verbosity == APIC_QUIET)
-+		return;
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=m
++CONFIG_NFS_V3=y
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=y
++CONFIG_NFS_DIRECTIO=y
++CONFIG_NFSD=m
++CONFIG_NFSD_V2_ACL=y
++CONFIG_NFSD_V3=y
++CONFIG_NFSD_V3_ACL=y
++CONFIG_NFSD_V4=y
++CONFIG_NFSD_TCP=y
++CONFIG_LOCKD=m
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=m
++CONFIG_NFS_ACL_SUPPORT=m
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=m
++CONFIG_SUNRPC_GSS=m
++CONFIG_RPCSEC_GSS_KRB5=m
++CONFIG_RPCSEC_GSS_SPKM3=m
++CONFIG_SMB_FS=m
++CONFIG_SMB_NLS_DEFAULT=y
++CONFIG_SMB_NLS_REMOTE="cp850"
++CONFIG_CIFS=m
++CONFIG_CIFS_STATS=y
++# CONFIG_CIFS_STATS2 is not set
++# CONFIG_CIFS_WEAK_PW_HASH is not set
++CONFIG_CIFS_XATTR=y
++CONFIG_CIFS_POSIX=y
++# CONFIG_CIFS_DEBUG2 is not set
++# CONFIG_CIFS_EXPERIMENTAL is not set
++CONFIG_NCP_FS=m
++CONFIG_NCPFS_PACKET_SIGNING=y
++CONFIG_NCPFS_IOCTL_LOCKING=y
++CONFIG_NCPFS_STRONG=y
++CONFIG_NCPFS_NFS_NS=y
++CONFIG_NCPFS_OS2_NS=y
++CONFIG_NCPFS_SMALLDOS=y
++CONFIG_NCPFS_NLS=y
++CONFIG_NCPFS_EXTRAS=y
++CONFIG_CODA_FS=m
++# CONFIG_CODA_FS_OLD_API is not set
++CONFIG_AFS_FS=m
++CONFIG_RXRPC=m
++CONFIG_9P_FS=m
 +
-+	printk(KERN_DEBUG "\nprinting PIC contents\n");
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++CONFIG_OSF_PARTITION=y
++CONFIG_AMIGA_PARTITION=y
++# CONFIG_ATARI_PARTITION is not set
++CONFIG_MAC_PARTITION=y
++CONFIG_MSDOS_PARTITION=y
++CONFIG_BSD_DISKLABEL=y
++CONFIG_MINIX_SUBPARTITION=y
++CONFIG_SOLARIS_X86_PARTITION=y
++CONFIG_UNIXWARE_DISKLABEL=y
++# CONFIG_LDM_PARTITION is not set
++CONFIG_SGI_PARTITION=y
++# CONFIG_ULTRIX_PARTITION is not set
++CONFIG_SUN_PARTITION=y
++# CONFIG_KARMA_PARTITION is not set
++CONFIG_EFI_PARTITION=y
 +
-+	spin_lock_irqsave(&i8259A_lock, flags);
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="utf8"
++CONFIG_NLS_CODEPAGE_437=m
++CONFIG_NLS_CODEPAGE_737=m
++CONFIG_NLS_CODEPAGE_775=m
++CONFIG_NLS_CODEPAGE_850=m
++CONFIG_NLS_CODEPAGE_852=m
++CONFIG_NLS_CODEPAGE_855=m
++CONFIG_NLS_CODEPAGE_857=m
++CONFIG_NLS_CODEPAGE_860=m
++CONFIG_NLS_CODEPAGE_861=m
++CONFIG_NLS_CODEPAGE_862=m
++CONFIG_NLS_CODEPAGE_863=m
++CONFIG_NLS_CODEPAGE_864=m
++CONFIG_NLS_CODEPAGE_865=m
++CONFIG_NLS_CODEPAGE_866=m
++CONFIG_NLS_CODEPAGE_869=m
++CONFIG_NLS_CODEPAGE_936=m
++CONFIG_NLS_CODEPAGE_950=m
++CONFIG_NLS_CODEPAGE_932=m
++CONFIG_NLS_CODEPAGE_949=m
++CONFIG_NLS_CODEPAGE_874=m
++CONFIG_NLS_ISO8859_8=m
++CONFIG_NLS_CODEPAGE_1250=m
++CONFIG_NLS_CODEPAGE_1251=m
++CONFIG_NLS_ASCII=m
++CONFIG_NLS_ISO8859_1=m
++CONFIG_NLS_ISO8859_2=m
++CONFIG_NLS_ISO8859_3=m
++CONFIG_NLS_ISO8859_4=m
++CONFIG_NLS_ISO8859_5=m
++CONFIG_NLS_ISO8859_6=m
++CONFIG_NLS_ISO8859_7=m
++CONFIG_NLS_ISO8859_9=m
++CONFIG_NLS_ISO8859_13=m
++CONFIG_NLS_ISO8859_14=m
++CONFIG_NLS_ISO8859_15=m
++CONFIG_NLS_KOI8_R=m
++CONFIG_NLS_KOI8_U=m
++CONFIG_NLS_UTF8=m
 +
-+	v = inb(0xa1) << 8 | inb(0x21);
-+	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);
++#
++# Instrumentation Support
++#
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
 +
-+	v = inb(0xa0) << 8 | inb(0x20);
-+	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);
++#
++# Kernel hacking
++#
++CONFIG_TRACE_IRQFLAGS_SUPPORT=y
++# CONFIG_PRINTK_TIME is not set
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_UNUSED_SYMBOLS=y
++CONFIG_DEBUG_KERNEL=y
++CONFIG_LOG_BUF_SHIFT=15
++CONFIG_DETECT_SOFTLOCKUP=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_RWSEMS is not set
++# CONFIG_DEBUG_LOCK_ALLOC is not set
++# CONFIG_PROVE_LOCKING is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_DEBUG_VM is not set
++# CONFIG_FRAME_POINTER is not set
++# CONFIG_UNWIND_INFO is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_RCU_TORTURE_TEST is not set
++# CONFIG_DEBUG_RODATA is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
 +
-+	outb(0x0b,0xa0);
-+	outb(0x0b,0x20);
-+	v = inb(0xa0) << 8 | inb(0x20);
-+	outb(0x0a,0xa0);
-+	outb(0x0a,0x20);
++#
++# Security options
++#
++CONFIG_KEYS=y
++CONFIG_KEYS_DEBUG_PROC_KEYS=y
++CONFIG_SECURITY=y
++CONFIG_SECURITY_NETWORK=y
++# CONFIG_SECURITY_NETWORK_XFRM is not set
++CONFIG_SECURITY_CAPABILITIES=y
++CONFIG_SECURITY_ROOTPLUG=m
++CONFIG_SECURITY_SECLVL=m
++# CONFIG_SECURITY_SELINUX is not set
 +
-+	spin_unlock_irqrestore(&i8259A_lock, flags);
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_HMAC=y
++CONFIG_CRYPTO_NULL=m
++CONFIG_CRYPTO_MD4=m
++CONFIG_CRYPTO_MD5=y
++CONFIG_CRYPTO_SHA1=m
++CONFIG_CRYPTO_SHA256=m
++CONFIG_CRYPTO_SHA512=m
++CONFIG_CRYPTO_WP512=m
++CONFIG_CRYPTO_TGR192=m
++CONFIG_CRYPTO_DES=m
++CONFIG_CRYPTO_BLOWFISH=m
++CONFIG_CRYPTO_TWOFISH=m
++CONFIG_CRYPTO_SERPENT=m
++CONFIG_CRYPTO_AES=m
++CONFIG_CRYPTO_AES_X86_64=m
++CONFIG_CRYPTO_CAST5=m
++CONFIG_CRYPTO_CAST6=m
++CONFIG_CRYPTO_TEA=m
++CONFIG_CRYPTO_ARC4=m
++CONFIG_CRYPTO_KHAZAD=m
++CONFIG_CRYPTO_ANUBIS=m
++CONFIG_CRYPTO_DEFLATE=m
++CONFIG_CRYPTO_MICHAEL_MIC=m
++CONFIG_CRYPTO_CRC32C=m
++CONFIG_CRYPTO_TEST=m
 +
-+	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);
++#
++# Hardware crypto devices
++#
++CONFIG_XEN=y
++CONFIG_XEN_INTERFACE_VERSION=0x00030207
 +
-+	v = inb(0x4d1) << 8 | inb(0x4d0);
-+	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
-+}
++#
++# XEN
++#
++CONFIG_XEN_PRIVILEGED_GUEST=y
++# CONFIG_XEN_UNPRIVILEGED_GUEST is not set
++CONFIG_XEN_PRIVCMD=y
++CONFIG_XEN_XENBUS_DEV=y
++CONFIG_XEN_BACKEND=y
++CONFIG_XEN_BLKDEV_BACKEND=y
++CONFIG_XEN_BLKDEV_TAP=y
++CONFIG_XEN_NETDEV_BACKEND=y
++# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_BACKEND is not set
++# CONFIG_XEN_NETDEV_LOOPBACK is not set
++CONFIG_XEN_PCIDEV_BACKEND=m
++# CONFIG_XEN_PCIDEV_BACKEND_VPCI is not set
++CONFIG_XEN_PCIDEV_BACKEND_PASS=y
++# CONFIG_XEN_PCIDEV_BACKEND_SLOT is not set
++# CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER is not set
++# CONFIG_XEN_PCIDEV_BE_DEBUG is not set
++CONFIG_XEN_TPMDEV_BACKEND=m
++CONFIG_XEN_SCSI_BACKEND=m
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++CONFIG_XEN_SCSI_FRONTEND=m
++CONFIG_XEN_GRANT_DEV=y
++# CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND is not set
++CONFIG_XEN_FRAMEBUFFER=y
++CONFIG_XEN_KEYBOARD=y
++CONFIG_XEN_SCRUB_PAGES=y
++CONFIG_XEN_DISABLE_SERIAL=y
++CONFIG_XEN_SYSFS=y
++CONFIG_XEN_COMPAT_030002_AND_LATER=y
++# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
++# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
++# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
++CONFIG_XEN_COMPAT=0x030002
++CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
++CONFIG_NO_IDLE_HZ=y
++CONFIG_XEN_SMPBOOT=y
++CONFIG_XEN_BALLOON=y
++CONFIG_XEN_DEVMEM=y
 +
-+#endif  /*  0  */
++#
++# Library routines
++#
++CONFIG_CRC_CCITT=m
++CONFIG_CRC16=m
++CONFIG_CRC32=y
++CONFIG_LIBCRC32C=m
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=m
++CONFIG_REED_SOLOMON=m
++CONFIG_REED_SOLOMON_DEC16=y
++CONFIG_TEXTSEARCH=y
++CONFIG_TEXTSEARCH_KMP=m
++CONFIG_TEXTSEARCH_BM=m
++CONFIG_TEXTSEARCH_FSM=m
++CONFIG_PLIST=y
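
A note on the Xen block near the end of the config above: CONFIG_XEN_COMPAT=0x030002 pins the oldest guest interface this kernel stays compatible with, selected by the XEN_COMPAT_030002_AND_LATER choice. The value appears to pack interface version 3.0.2 one byte per component; a self-contained sketch of that encoding (the packing is inferred from 0x030002 corresponding to 3.0.2, not stated in the patch):

    #include <stdio.h>

    #define XEN_COMPAT 0x030002  /* mirrors CONFIG_XEN_COMPAT above */

    int main(void)
    {
            unsigned int v = XEN_COMPAT;

            /* inferred packing: 0xMMmmuu -> MM.mm.uu */
            printf("oldest supported Xen interface: %u.%u.%u\n",
                   (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff);
            return 0;
    }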
+diff -r d894e36cfc30 -r 0aa021803deb drivers/Makefile
+--- a/drivers/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -31,6 +31,7 @@
+ obj-$(CONFIG_NUBUS)		+= nubus/
+ obj-$(CONFIG_ATM)		+= atm/
+ obj-$(CONFIG_PPC_PMAC)		+= macintosh/
++obj-$(CONFIG_XEN)		+= xen/
+ obj-$(CONFIG_IDE)		+= ide/
+ obj-$(CONFIG_FC4)		+= fc4/
+ obj-$(CONFIG_SCSI)		+= scsi/
+diff -r d894e36cfc30 -r 0aa021803deb drivers/acpi/Kconfig
+--- a/drivers/acpi/Kconfig	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/acpi/Kconfig	Wed Sep 10 10:54:08 2008 +0100
+@@ -305,6 +305,7 @@
+ config X86_PM_TIMER
+ 	bool "Power Management Timer Support" if EMBEDDED
+ 	depends on X86
++	depends on !XEN
+ 	default y
+ 	help
+ 	  The Power Management Timer is available on all ACPI-capable,
+@@ -362,6 +363,15 @@
+ 	  A "Smart Battery" is quite old and quite rare compared
+ 	  to today's ACPI "Control Method" battery.
+ 
++config ACPI_PV_SLEEP
++	bool
++	depends on X86 && XEN && ACPI_SLEEP
++	default y
 +
-+#else
-+void __init print_IO_APIC(void) { }
-+#endif /* !CONFIG_XEN */
++config PROCESSOR_EXTERNAL_CONTROL
++	bool
++	depends on X86 && XEN
++	default y
+ endif	# ACPI
+ 
+ endmenu
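
The two symbols added to this Kconfig, ACPI_PV_SLEEP and PROCESSOR_EXTERNAL_CONTROL, carry no prompt string, so they never show up in menuconfig; they are derived symbols that become y purely through their "depends on"/"default y" pairs whenever the kernel is built for X86 && XEN (plus ACPI_SLEEP for the former). Code then selects on them at compile time. A minimal sketch of that consumption pattern; the inline helper is illustrative only — the series' real processor_cntl_external() is defined elsewhere and consults the registered ops:

    /* Illustrative stub only -- not the patch's actual helper. */
    #ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL
    static inline int processor_cntl_external(void) { return 1; }
    #else
    static inline int processor_cntl_external(void) { return 0; }
    #endif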
+diff -r d894e36cfc30 -r 0aa021803deb drivers/acpi/Makefile
+--- a/drivers/acpi/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/acpi/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -34,6 +34,9 @@
+ ifdef CONFIG_CPU_FREQ
+ processor-objs	+= processor_perflib.o			
+ endif
++ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL
++processor-objs	+= processor_perflib.o processor_extcntl.o
++endif
+ 
+ obj-y				+= sleep/
+ obj-y				+= bus.o glue.o
+diff -r d894e36cfc30 -r 0aa021803deb drivers/acpi/dispatcher/dsobject.c
+--- a/drivers/acpi/dispatcher/dsobject.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/acpi/dispatcher/dsobject.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -137,6 +137,71 @@
+ 				return_ACPI_STATUS(status);
+ 			}
+ 		}
 +
-+static void __init enable_IO_APIC(void)
-+{
-+	union IO_APIC_reg_01 reg_01;
-+#ifndef CONFIG_XEN
-+	int i8259_apic, i8259_pin;
-+#endif
-+	int i, apic;
-+	unsigned long flags;
++		/* Special object resolution for elements of a package */
 +
-+	for (i = 0; i < PIN_MAP_SIZE; i++) {
-+		irq_2_pin[i].pin = -1;
-+		irq_2_pin[i].next = 0;
-+	}
-+	if (!pirqs_enabled)
-+		for (i = 0; i < MAX_PIRQS; i++)
-+			pirq_entries[i] = -1;
++		if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
++		    (op->common.parent->common.aml_opcode ==
++		     AML_VAR_PACKAGE_OP)) {
++			/*
++			 * Attempt to resolve the node to a value before we insert it into
++			 * the package. If this is a reference to a common data type,
++			 * resolve it immediately. According to the ACPI spec, package
++			 * elements can only be "data objects" or method references.
++			 * Attempt to resolve to an Integer, Buffer, String or Package.
++			 * If cannot, return the named reference (for things like Devices,
++			 * Methods, etc.) Buffer Fields and Fields will resolve to simple
++			 * objects (int/buf/str/pkg).
++			 *
++			 * NOTE: References to things like Devices, Methods, Mutexes, etc.
++			 * will remain as named references. This behavior is not described
++			 * in the ACPI spec, but it appears to be an oversight.
++			 */
++			obj_desc = (union acpi_operand_object *)op->common.node;
 +
-+	/*
-+	 * The number of IO-APIC IRQ registers (== #pins):
-+	 */
-+	for (apic = 0; apic < nr_ioapics; apic++) {
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		reg_01.raw = io_apic_read(apic, 1);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
-+	}
-+#ifndef CONFIG_XEN
-+	for(apic = 0; apic < nr_ioapics; apic++) {
-+		int pin;
-+		/* See if any of the pins is in ExtINT mode */
-+		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-+			struct IO_APIC_route_entry entry;
-+			spin_lock_irqsave(&ioapic_lock, flags);
-+			*(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
-+			*(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
-+			spin_unlock_irqrestore(&ioapic_lock, flags);
++			status =
++			    acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR
++							  (struct
++							   acpi_namespace_node,
++							   &obj_desc),
++							  walk_state);
++			if (ACPI_FAILURE(status)) {
++				return_ACPI_STATUS(status);
++			}
++
++			switch (op->common.node->type) {
++				/*
++				 * For these types, we need the actual node, not the subobject.
++				 * However, the subobject got an extra reference count above.
++				 */
++			case ACPI_TYPE_MUTEX:
++			case ACPI_TYPE_METHOD:
++			case ACPI_TYPE_POWER:
++			case ACPI_TYPE_PROCESSOR:
++			case ACPI_TYPE_EVENT:
++			case ACPI_TYPE_REGION:
++			case ACPI_TYPE_DEVICE:
++			case ACPI_TYPE_THERMAL:
++
++				obj_desc =
++				    (union acpi_operand_object *)op->common.
++				    node;
++				break;
 +
++			default:
++				break;
++			}
 +
-+			/* If the interrupt line is enabled and in ExtInt mode
-+			 * I have found the pin where the i8259 is connected.
++			/*
++			 * If above resolved to an operand object, we are done. Otherwise,
++			 * we have a NS node, we must create the package entry as a named
++			 * reference.
 +			 */
-+			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
-+				ioapic_i8259.apic = apic;
-+				ioapic_i8259.pin  = pin;
-+				goto found_i8259;
++			if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) !=
++			    ACPI_DESC_TYPE_NAMED) {
++				goto exit;
 +			}
 +		}
-+	}
-+ found_i8259:
-+	/* Look to see what if the MP table has reported the ExtINT */
-+	i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
-+	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
-+	/* Trust the MP table if nothing is setup in the hardware */
-+	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
-+		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
-+		ioapic_i8259.pin  = i8259_pin;
-+		ioapic_i8259.apic = i8259_apic;
-+	}
-+	/* Complain if the MP table and the hardware disagree */
-+	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
-+		(i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
-+	{
-+		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
+ 	}
+ 
+ 	/* Create and init a new internal ACPI object */
+@@ -156,6 +221,7 @@
+ 		return_ACPI_STATUS(status);
+ 	}
+ 
++      exit:
+ 	*obj_desc_ptr = obj_desc;
+ 	return_ACPI_STATUS(AE_OK);
+ }
+@@ -358,12 +424,25 @@
+ 	arg = arg->common.next;
+ 	for (i = 0; arg; i++) {
+ 		if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
++			if (arg->common.node->type == ACPI_TYPE_METHOD) {
++				/*
++				 * A method reference "looks" to the parser to be a method
++				 * invocation, so we special case it here
++				 */
++				arg->common.aml_opcode = AML_INT_NAMEPATH_OP;
++				status =
++				    acpi_ds_build_internal_object(walk_state,
++								  arg,
++								  &obj_desc->
++								  package.
++								  elements[i]);
++			} else {
++				/* This package element is already built, just get it */
+ 
+-			/* Object (package or buffer) is already built */
+-
+-			obj_desc->package.elements[i] =
+-			    ACPI_CAST_PTR(union acpi_operand_object,
+-					  arg->common.node);
++				obj_desc->package.elements[i] =
++				    ACPI_CAST_PTR(union acpi_operand_object,
++						  arg->common.node);
++			}
+ 		} else {
+ 			status = acpi_ds_build_internal_object(walk_state, arg,
+ 							       &obj_desc->
+diff -r d894e36cfc30 -r 0aa021803deb drivers/acpi/hardware/hwregs.c
+--- a/drivers/acpi/hardware/hwregs.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/acpi/hardware/hwregs.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -665,8 +665,6 @@
+ 
+ 		/*
+ 		 * Perform a read first to preserve certain bits (per ACPI spec)
+-		 *
+-		 * Note: This includes SCI_EN, we never want to change this bit
+ 		 */
+ 		status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK,
+ 					       ACPI_REGISTER_PM1_CONTROL,
+diff -r d894e36cfc30 -r 0aa021803deb drivers/acpi/hardware/hwsleep.c
+--- a/drivers/acpi/hardware/hwsleep.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/acpi/hardware/hwsleep.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -227,7 +227,11 @@
+ 	u32 PM1Bcontrol;
+ 	struct acpi_bit_register_info *sleep_type_reg_info;
+ 	struct acpi_bit_register_info *sleep_enable_reg_info;
++#if !(defined(CONFIG_XEN) && defined(CONFIG_X86))
+ 	u32 in_value;
++#else
++	int err;
++#endif
+ 	acpi_status status;
+ 
+ 	ACPI_FUNCTION_TRACE(acpi_enter_sleep_state);
+@@ -327,6 +331,7 @@
+ 
+ 	ACPI_FLUSH_CPU_CACHE();
+ 
++#if !(defined(CONFIG_XEN) && defined(CONFIG_X86))
+ 	status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
+ 					ACPI_REGISTER_PM1A_CONTROL,
+ 					PM1Acontrol);
+@@ -376,6 +381,16 @@
+ 		/* Spin until we wake */
+ 
+ 	} while (!in_value);
++#else
++	/* For PV ACPI we just need to check the hypercall return value */
++	err = acpi_notify_hypervisor_state(sleep_state,
++			PM1Acontrol, PM1Bcontrol);
++	if (err) {
++		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
++				  "Hypervisor failure [%d]\n", err));
++		return_ACPI_STATUS(AE_ERROR);
 +	}
 +#endif
+ 
+ 	return_ACPI_STATUS(AE_OK);
+ }
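
The hwsleep.c change above is the heart of ACPI_PV_SLEEP: dom0 still runs the ACPI interpreter and computes the PM1A/PM1B control values, but it must not write SLP_TYP/SLP_EN itself, because the hypervisor owns the physical platform and has to carry out the final register writes. The values are therefore handed over via acpi_notify_hypervisor_state(). A sketch of what that helper can look like, assuming the XENPF_enter_acpi_sleep platform op of this Xen generation (field names should be checked against include/xen/interface/platform.h):

    /* Sketch under the stated assumption, not the patch's own code. */
    int acpi_notify_hypervisor_state(u8 sleep_state,
                                     u32 pm1a_cnt, u32 pm1b_cnt)
    {
            struct xen_platform_op op = {
                    .cmd = XENPF_enter_acpi_sleep,
                    .interface_version = XENPF_INTERFACE_VERSION,
                    .u.enter_acpi_sleep = {
                            .pm1a_cnt_val = (u16)pm1a_cnt,
                            .pm1b_cnt_val = (u16)pm1b_cnt,
                            .sleep_state  = sleep_state,
                    },
            };

            /* Xen performs the PM1x writes and resumes us on wakeup. */
            return HYPERVISOR_platform_op(&op);
    }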
+diff -r d894e36cfc30 -r 0aa021803deb drivers/acpi/processor_core.c
+--- a/drivers/acpi/processor_core.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/acpi/processor_core.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -475,7 +475,8 @@
+ 	 */
+ 	if (cpu_index == -1) {
+ 		if (ACPI_FAILURE
+-		    (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
++		    (acpi_processor_hotadd_init(pr->handle, &pr->id)) &&
++		    !processor_cntl_external()) {
+ 			printk(KERN_ERR PREFIX
+ 				    "Getting cpuindex for acpiid 0x%x\n",
+ 				    pr->acpi_id);
+@@ -508,7 +509,7 @@
+ 		request_region(pr->throttling.address, 6, "ACPI CPU throttle");
+ 	}
+ 
+-#ifdef CONFIG_CPU_FREQ
++#if defined(CONFIG_CPU_FREQ) || defined(CONFIG_PROCESSOR_EXTERNAL_CONTROL)
+ 	acpi_processor_ppc_has_changed(pr);
+ #endif
+ 	acpi_processor_get_throttling_info(pr);
+@@ -517,7 +518,7 @@
+ 	return 0;
+ }
+ 
+-static void *processor_device_array[NR_CPUS];
++static void *processor_device_array[NR_ACPI_CPUS];
+ 
+ static int acpi_processor_start(struct acpi_device *device)
+ {
+@@ -525,31 +526,46 @@
+ 	acpi_status status = AE_OK;
+ 	struct acpi_processor *pr;
+ 
++	processor_extcntl_init();
+ 
+ 	pr = acpi_driver_data(device);
+ 
+ 	result = acpi_processor_get_info(pr);
+-	if (result) {
++	if (result || 
++	    ((pr->id == -1) && !processor_cntl_external())) {
+ 		/* Processor is physically not present */
+ 		return 0;
+ 	}
+ 
+-	BUG_ON((pr->id >= NR_CPUS) || (pr->id < 0));
++	BUG_ON(!processor_cntl_external() &&
++	       ((pr->id >= NR_CPUS) || (pr->id < 0)));
+ 
+ 	/*
+ 	 * Buggy BIOS check
+ 	 * ACPI id of processors can be reported wrongly by the BIOS.
+ 	 * Don't trust it blindly
+ 	 */
++#ifdef CONFIG_XEN
++	BUG_ON(pr->acpi_id >= NR_ACPI_CPUS);
++	if (processor_device_array[pr->acpi_id] != NULL &&
++	    processor_device_array[pr->acpi_id] != (void *)device) {
++#else
+ 	if (processor_device_array[pr->id] != NULL &&
+ 	    processor_device_array[pr->id] != (void *)device) {
++#endif /* CONFIG_XEN */
+ 		printk(KERN_WARNING "BIOS reported wrong ACPI id"
+ 			"for the processor\n");
+ 		return -ENODEV;
+ 	}
++#ifdef CONFIG_XEN
++	processor_device_array[pr->acpi_id] = (void *)device;
++	if (pr->id != -1)
++		processors[pr->id] = pr;
++#else
+ 	processor_device_array[pr->id] = (void *)device;
+ 
+ 	processors[pr->id] = pr;
++#endif /* CONFIG_XEN */
+ 
+ 	result = acpi_processor_add_fs(device);
+ 	if (result)
+@@ -563,6 +579,10 @@
+ 	acpi_processor_set_pdc(pr);
+ 
+ 	acpi_processor_power_init(pr, device);
 +
-+	/*
-+	 * Do not trust the IO-APIC being empty at bootup
-+	 */
-+	clear_IO_APIC();
-+}
-+
-+/*
-+ * Not an __init, needed by the reboot code
-+ */
-+void disable_IO_APIC(void)
-+{
-+	/*
-+	 * Clear the IO-APIC before rebooting:
-+	 */
-+	clear_IO_APIC();
-+
-+#ifndef CONFIG_XEN
-+	/*
-+	 * If the i8259 is routed through an IOAPIC
-+	 * Put that IOAPIC in virtual wire mode
-+	 * so legacy interrupts can be delivered.
-+	 */
-+	if (ioapic_i8259.pin != -1) {
-+		struct IO_APIC_route_entry entry;
-+		unsigned long flags;
++	result = processor_extcntl_prepare(pr);
++	if (result)
++		goto end;
+ 
+ 	if (pr->flags.throttling) {
+ 		printk(KERN_INFO PREFIX "%s [%s] (supports",
+@@ -656,7 +676,13 @@
+ 
+ 	acpi_processor_remove_fs(device);
+ 
++#ifdef CONFIG_XEN
++	if (pr->id != -1)
++		processors[pr->id] = NULL;
++#else
+ 	processors[pr->id] = NULL;
++#endif /* CONFIG_XEN */
 +
-+		memset(&entry, 0, sizeof(entry));
-+		entry.mask            = 0; /* Enabled */
-+		entry.trigger         = 0; /* Edge */
-+		entry.irr             = 0;
-+		entry.polarity        = 0; /* High */
-+		entry.delivery_status = 0;
-+		entry.dest_mode       = 0; /* Physical */
-+		entry.delivery_mode   = dest_ExtINT; /* ExtInt */
-+		entry.vector          = 0;
-+		entry.dest.physical.physical_dest =
-+					GET_APIC_ID(apic_read(APIC_ID));
+ 
+ 	kfree(pr);
+ 
+@@ -709,6 +735,10 @@
+ 	pr = acpi_driver_data(*device);
+ 	if (!pr)
+ 		return -ENODEV;
 +
-+		/*
-+		 * Add it to the IO-APIC irq-routing table:
-+		 */
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
-+			*(((int *)&entry)+1));
-+		io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
-+			*(((int *)&entry)+0));
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+	}
++	if (processor_cntl_external())
++		processor_notify_external(pr,
++			PROCESSOR_HOTPLUG, HOTPLUG_TYPE_ADD);
+ 
+ 	if ((pr->id >= 0) && (pr->id < NR_CPUS)) {
+ 		kobject_uevent(&(*device)->kobj, KOBJ_ONLINE);
+@@ -747,6 +777,10 @@
+ 			printk(KERN_ERR PREFIX "Driver data is NULL\n");
+ 			break;
+ 		}
 +
-+	disconnect_bsp_APIC(ioapic_i8259.pin != -1);
-+#endif
-+}
++		if (processor_cntl_external())
++			processor_notify_external(pr,
++					PROCESSOR_HOTPLUG, HOTPLUG_TYPE_ADD);
+ 
+ 		if (pr->id >= 0 && (pr->id < NR_CPUS)) {
+ 			kobject_uevent(&device->kobj, KOBJ_OFFLINE);
+@@ -777,8 +811,18 @@
+ 			return;
+ 		}
+ 
++#ifdef CONFIG_XEN
++		if ((pr->id >= 0) && (pr->id < NR_CPUS)
++		    && (cpu_present(pr->id)))
++#else
+ 		if ((pr->id < NR_CPUS) && (cpu_present(pr->id)))
++#endif /* CONFIG_XEN */
+ 			kobject_uevent(&device->kobj, KOBJ_OFFLINE);
++
++		if (processor_cntl_external())
++			processor_notify_external(pr, PROCESSOR_HOTPLUG,
++							HOTPLUG_TYPE_REMOVE);
++
+ 		break;
+ 	default:
+ 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+@@ -843,6 +887,11 @@
+ 
+ static int acpi_processor_handle_eject(struct acpi_processor *pr)
+ {
++#ifdef CONFIG_XEN
++	if (pr->id == -1)
++		return (0);
++#endif /* CONFIG_XEN */
 +
+ 	if (cpu_online(pr->id)) {
+ 		return (-EINVAL);
+ 	}
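
The processor_core.c hunks all follow from one fact: under external control, dom0 may run fewer vCPUs than the machine has ACPI processor objects, so pr->id (the Linux cpu number) can legitimately be -1 while pr->acpi_id stays meaningful — hence the duplicate-detection array is re-keyed by acpi_id and grown to NR_ACPI_CPUS. A toy user-space illustration of the resulting mapping; all values are invented:

    #include <stdio.h>

    int main(void)
    {
            /* 8-pCPU host, 2-vCPU dom0: every ACPI object is parsed,
             * but only two of them map to Linux cpu ids. */
            int linux_id[8] = { 0, 1, -1, -1, -1, -1, -1, -1 };
            int acpi_id;

            for (acpi_id = 0; acpi_id < 8; acpi_id++)
                    printf("acpi_id %d -> pr->id %d%s\n", acpi_id,
                           linux_id[acpi_id],
                           linux_id[acpi_id] < 0 ? " (external only)" : "");
            return 0;
    }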
+diff -r d894e36cfc30 -r 0aa021803deb drivers/acpi/processor_extcntl.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/acpi/processor_extcntl.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,241 @@
 +/*
-+ * function to set the IO-APIC physical IDs based on the
-+ * values stored in the MPC table.
++ * processor_extcntl.c - channel to external control logic
++ *
++ *  Copyright (C) 2008, Intel corporation
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ *  This program is free software; you can redistribute it and/or modify
++ *  it under the terms of the GNU General Public License as published by
++ *  the Free Software Foundation; either version 2 of the License, or (at
++ *  your option) any later version.
++ *
++ *  This program is distributed in the hope that it will be useful, but
++ *  WITHOUT ANY WARRANTY; without even the implied warranty of
++ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ *  General Public License for more details.
++ *
++ *  You should have received a copy of the GNU General Public License along
++ *  with this program; if not, write to the Free Software Foundation, Inc.,
++ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 + *
-+ * by Matt Domsch <Matt_Domsch at dell.com>  Tue Dec 21 12:25:05 CST 1999
 + */
 +
-+#ifndef CONFIG_XEN
-+static void __init setup_ioapic_ids_from_mpc (void)
-+{
-+	union IO_APIC_reg_00 reg_00;
-+	int apic;
-+	int i;
-+	unsigned char old_id;
-+	unsigned long flags;
-+
-+	/*
-+	 * Set the IOAPIC ID to the value stored in the MPC table.
-+	 */
-+	for (apic = 0; apic < nr_ioapics; apic++) {
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/types.h>
++#include <linux/acpi.h>
++#include <linux/pm.h>
++#include <linux/cpu.h>
 +
-+		/* Read the register 0 value */
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		reg_00.raw = io_apic_read(apic, 0);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+		
-+		old_id = mp_ioapics[apic].mpc_apicid;
++#include <acpi/processor.h>
 +
++#define ACPI_PROCESSOR_COMPONENT        0x01000000
++#define ACPI_PROCESSOR_CLASS            "processor"
++#define ACPI_PROCESSOR_DRIVER_NAME      "ACPI Processor Driver"
++#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
++ACPI_MODULE_NAME("acpi_processor")
 +
-+		printk(KERN_INFO "Using IO-APIC %d\n", mp_ioapics[apic].mpc_apicid);
++static int processor_extcntl_parse_csd(struct acpi_processor *pr);
++static int processor_extcntl_get_performance(struct acpi_processor *pr);
++/*
++ * External processor control logic may register its own set of ops
++ * to receive ACPI-related notifications. One example is a VMM.
++ */
++const struct processor_extcntl_ops *processor_extcntl_ops;
++EXPORT_SYMBOL(processor_extcntl_ops);
 +
++static int processor_notify_smm(void)
++{
++	acpi_status status;
++	static int is_done = 0;
 +
-+		/*
-+		 * We need to adjust the IRQ routing table
-+		 * if the ID changed.
-+		 */
-+		if (old_id != mp_ioapics[apic].mpc_apicid)
-+			for (i = 0; i < mp_irq_entries; i++)
-+				if (mp_irqs[i].mpc_dstapic == old_id)
-+					mp_irqs[i].mpc_dstapic
-+						= mp_ioapics[apic].mpc_apicid;
++	/* only need successfully notify BIOS once */
++	/* avoid double notification which may lead to unexpected result */
++	if (is_done)
++		return 0;
 +
-+		/*
-+		 * Read the right value from the MPC table and
-+		 * write it into the ID register.
-+	 	 */
-+		apic_printk(APIC_VERBOSE,KERN_INFO "...changing IO-APIC physical APIC ID to %d ...",
-+				mp_ioapics[apic].mpc_apicid);
++	/* Can't write pstate_cnt to smi_cmd if either value is zero */
++	if ((!acpi_fadt.smi_cmd) || (!acpi_fadt.pstate_cnt)) {
++		ACPI_DEBUG_PRINT((ACPI_DB_INFO,"No SMI port or pstate_cnt\n"));
++		return 0;
++	}
 +
-+		reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		io_apic_write(apic, 0, reg_00.raw);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
++	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
++		"Writing pstate_cnt [0x%x] to smi_cmd [0x%x]\n",
++		acpi_fadt.pstate_cnt, acpi_fadt.smi_cmd));
 +
-+		/*
-+		 * Sanity check
-+		 */
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		reg_00.raw = io_apic_read(apic, 0);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+		if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
-+			printk("could not set ID!\n");
-+		else
-+			apic_printk(APIC_VERBOSE," ok.\n");
++	/* FADT v1 doesn't support pstate_cnt, but many BIOS vendors
++	 * use it anyway, so we need to support it... */
++	if (acpi_fadt_is_v1) {
++		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
++			"Using v1.0 FADT reserved value for pstate_cnt\n"));
 +	}
-+}
-+#else
-+static void __init setup_ioapic_ids_from_mpc(void) { }
-+#endif
-+
-+/*
-+ * There is a nasty bug in some older SMP boards, their mptable lies
-+ * about the timer IRQ. We do the following to work around the situation:
-+ *
-+ *	- timer IRQ defaults to IO-APIC IRQ
-+ *	- if this function detects that timer IRQs are defunct, then we fall
-+ *	  back to ISA timer IRQs
-+ */
-+#ifndef CONFIG_XEN
-+static int __init timer_irq_works(void)
-+{
-+	unsigned long t1 = jiffies;
 +
-+	local_irq_enable();
-+	/* Let ten ticks pass... */
-+	mdelay((10 * 1000) / HZ);
++	status = acpi_os_write_port(acpi_fadt.smi_cmd,
++				    (u32) acpi_fadt.pstate_cnt, 8);
++	if (ACPI_FAILURE(status))
++		return status;
 +
-+	/*
-+	 * Expect a few ticks at least, to be sure some possible
-+	 * glue logic does not lock up after one or two first
-+	 * ticks in a non-ExtINT mode.  Also the local APIC
-+	 * might have cached one ExtINT interrupt.  Finally, at
-+	 * least one tick may be lost due to delays.
-+	 */
++	is_done = 1;
 +
-+	/* jiffies wrap? */
-+	if (jiffies - t1 > 4)
-+		return 1;
 +	return 0;
 +}
 +
-+/*
-+ * In the SMP+IOAPIC case it might happen that there are an unspecified
-+ * number of pending IRQ events unhandled. These cases are very rare,
-+ * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
-+ * better to do it this way as thus we do not have to be aware of
-+ * 'pending' interrupts in the IRQ path, except at this point.
-+ */
-+/*
-+ * Edge triggered needs to resend any interrupt
-+ * that was delayed but this is now handled in the device
-+ * independent code.
-+ */
++int processor_notify_external(struct acpi_processor *pr, int event, int type)
++{
++	int ret = -EINVAL;
 +
-+/*
-+ * Starting up a edge-triggered IO-APIC interrupt is
-+ * nasty - we need to make sure that we get the edge.
-+ * If it is already asserted for some reason, we need
-+ * return 1 to indicate that is was pending.
-+ *
-+ * This is not complete - we should be able to fake
-+ * an edge even if it isn't on the 8259A...
-+ */
++	if (!processor_cntl_external())
++		return -EINVAL;
 +
-+static unsigned int startup_edge_ioapic_irq(unsigned int irq)
-+{
-+	int was_pending = 0;
-+	unsigned long flags;
++	switch (event) {
++	case PROCESSOR_PM_INIT:
++	case PROCESSOR_PM_CHANGE:
++		if ((type >= PM_TYPE_MAX) ||
++			!processor_extcntl_ops->pm_ops[type])
++			break;
 +
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	if (irq < 16) {
-+		disable_8259A_irq(irq);
-+		if (i8259A_irq_pending(irq))
-+			was_pending = 1;
++		ret = processor_extcntl_ops->pm_ops[type](pr, event);
++		break;
++	case PROCESSOR_HOTPLUG:
++		if (processor_extcntl_ops->hotplug)
++			ret = processor_extcntl_ops->hotplug(pr, type);
++		break;
++	default:
++		printk(KERN_ERR "Unsupport processor events %d.\n", event);
++		break;
 +	}
-+	__unmask_IO_APIC_irq(irq);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
 +
-+	return was_pending;
++	return ret;
 +}
 +
 +/*
-+ * Once we have recorded IRQ_PENDING already, we can mask the
-+ * interrupt for real. This prevents IRQ storms from unhandled
-+ * devices.
++ * External control logic may take over all or part of the physical
++ * processor control. Take a VMM, for example: the physical processors
++ * are owned by the VMM, so existence information such as hotplug
++ * events must always be forwarded to it, and the processor idle
++ * states are likewise necessarily under its control. Other control
++ * bits, such as performance/throttle states, the VMM may or may not
++ * take over, according to its own policy.
 + */
-+static void ack_edge_ioapic_irq(unsigned int irq)
++void processor_extcntl_init(void)
 +{
-+	move_irq(irq);
-+	if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
-+					== (IRQ_PENDING | IRQ_DISABLED))
-+		mask_IO_APIC_irq(irq);
-+	ack_APIC_irq();
++	if (!processor_extcntl_ops)
++		arch_acpi_processor_init_extcntl(&processor_extcntl_ops);
 +}
 +
 +/*
-+ * Level triggered interrupts can just be masked,
-+ * and shutting down and starting up the interrupt
-+ * is the same as enabling and disabling them -- except
-+ * with a startup need to return a "was pending" value.
-+ *
-+ * Level triggered interrupts are special because we
-+ * do not touch any IO-APIC register while handling
-+ * them. We ack the APIC in the end-IRQ handler, not
-+ * in the start-IRQ-handler. Protection against reentrance
-+ * from the same interrupt is still provided, both by the
-+ * generic IRQ layer and by the fact that an unacked local
-+ * APIC does not accept IRQs.
++ * This is called from ACPI processor init and holds the tricky
++ * housekeeping needed to satisfy the external control model; for
++ * example, the dependency-parsing stubs for the idle and performance
++ * states live here. That information may not be available to control
++ * logic split off from dom0, such as the cpufreq driver.
 + */
-+static unsigned int startup_level_ioapic_irq (unsigned int irq)
-+{
-+	unmask_IO_APIC_irq(irq);
-+
-+	return 0; /* don't check for pending */
-+}
-+
-+static void end_level_ioapic_irq (unsigned int irq)
++int processor_extcntl_prepare(struct acpi_processor *pr)
 +{
-+	move_irq(irq);
-+	ack_APIC_irq();
-+}
++	/* parse cstate dependency information */
++	if (processor_pm_external())
++		processor_extcntl_parse_csd(pr);
 +
-+#ifdef CONFIG_PCI_MSI
-+static unsigned int startup_edge_ioapic_vector(unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
++	/* Initialize performance states */
++	if (processor_pmperf_external())
++		processor_extcntl_get_performance(pr);
 +
-+	return startup_edge_ioapic_irq(irq);
++	return 0;
 +}
 +
-+static void ack_edge_ioapic_vector(unsigned int vector)
++/*
++ * No _CSD support is implemented currently, which is why the
++ * existing ACPI code does not parse _CSD at all. To keep the
++ * interface to the external control logic complete, we put a
++ * placeholder here for future compatibility.
++ */
++static int processor_extcntl_parse_csd(struct acpi_processor *pr)
 +{
-+	int irq = vector_to_irq(vector);
++	int i;
 +
-+	move_native_irq(vector);
-+	ack_edge_ioapic_irq(irq);
-+}
++	for (i = 0; i < pr->power.count; i++) {
++		if (!pr->power.states[i].valid)
++			continue;
 +
-+static unsigned int startup_level_ioapic_vector (unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
++		/* No dependency by default */
++		pr->power.states[i].domain_info = NULL;
++		pr->power.states[i].csd_count = 0;
++	}
 +
-+	return startup_level_ioapic_irq (irq);
++	return 0;
 +}
 +
-+static void end_level_ioapic_vector (unsigned int vector)
++/*
++ * The existing ACPI module does parse performance states, but only
++ * when the acpi-cpufreq driver is loaded, and we want that driver
++ * disabled to avoid conflicts with the external control logic. So
++ * we have to collect the raw performance information here, when the
++ * ACPI processor object is found and started.
++ */
++static int processor_extcntl_get_performance(struct acpi_processor *pr)
 +{
-+	int irq = vector_to_irq(vector);
-+
-+	move_native_irq(vector);
-+	end_level_ioapic_irq(irq);
-+}
++	int ret;
++	struct acpi_processor_performance *perf;
++	struct acpi_psd_package *pdomain;
 +
-+static void mask_IO_APIC_vector (unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
++	if (pr->performance)
++		return -EBUSY;
 +
-+	mask_IO_APIC_irq(irq);
-+}
++	perf = kzalloc(sizeof(struct acpi_processor_performance), GFP_KERNEL);
++	if (!perf)
++		return -ENOMEM;
 +
-+static void unmask_IO_APIC_vector (unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
++	pr->performance = perf;
++	/* Get basic performance state information */
++	ret = acpi_processor_get_performance_info(pr);
++	if (ret < 0)
++		goto err_out;
 +
-+	unmask_IO_APIC_irq(irq);
-+}
++	/*
++	 * Here we retrieve the performance dependency information from
++	 * the _PSD object. The existing interface is not used because
++	 * it sticks to Linux cpu ids when constructing its bitmaps,
++	 * whereas we want to decouple ACPI processor objects from the
++	 * Linux cpu id logic. For example, even when Linux is configured
++	 * as UP, we still want to report all ACPI processor objects to
++	 * the external logic. It is therefore preferable to key on the
++	 * ACPI ID instead.
++	 */
++	pdomain = &pr->performance->domain_info;
++	pdomain->num_processors = 0;
++	ret = acpi_processor_get_psd(pr);
++	if (ret < 0) {
++		/*
++		 * _PSD is optional - assume no coordination if absent (or
++		 * broken), matching native kernels' behavior.
++		 */
++		pdomain->num_entries = ACPI_PSD_REV0_ENTRIES;
++		pdomain->revision = ACPI_PSD_REV0_REVISION;
++		pdomain->domain = pr->acpi_id;
++		pdomain->coord_type = DOMAIN_COORD_TYPE_SW_ALL;
++		pdomain->num_processors = 1;
++	}
 +
-+#ifdef CONFIG_SMP
-+static void set_ioapic_affinity_vector (unsigned int vector,
-+					cpumask_t cpu_mask)
-+{
-+	int irq = vector_to_irq(vector);
++	/* Some sanity check */
++	if ((pdomain->revision != ACPI_PSD_REV0_REVISION) ||
++	    (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) ||
++	    ((pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL) &&
++	     (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY) &&
++	     (pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL))) {
++		ret = -EINVAL;
++		goto err_out;
++	}
 +
-+	set_native_irq_info(vector, cpu_mask);
-+	set_ioapic_affinity_irq(irq, cpu_mask);
-+}
-+#endif // CONFIG_SMP
-+#endif // CONFIG_PCI_MSI
++	/* Last step is to notify BIOS that external logic exists */
++	processor_notify_smm();
 +
-+static int ioapic_retrigger(unsigned int irq)
-+{
-+	send_IPI_self(IO_APIC_VECTOR(irq));
++	processor_notify_external(pr, PROCESSOR_PM_INIT, PM_TYPE_PERF);
 +
-+	return 1;
++	return 0;
++err_out:
++	pr->performance = NULL;
++	kfree(perf);
++	return ret;
 +}
+diff -r d894e36cfc30 -r 0aa021803deb drivers/acpi/processor_idle.c
+--- a/drivers/acpi/processor_idle.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/acpi/processor_idle.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -714,8 +714,12 @@
+ 		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
+ 			continue;
+ 
+-		cx.address = (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) ?
+-		    0 : reg->address;
++		if (!processor_pm_external())
++			cx.address = (reg->space_id ==
++				      ACPI_ADR_SPACE_FIXED_HARDWARE) ?
++		    		      0 : reg->address;
++		else
++			cx.address = reg->address;
+ 
+ 		/* There should be an easy way to extract an integer... */
+ 		obj = (union acpi_object *)&(element->package.elements[1]);
+@@ -724,7 +728,9 @@
+ 
+ 		cx.type = obj->integer.value;
+ 
+-		if ((cx.type != ACPI_STATE_C1) &&
++		/* The following check doesn't apply to the external control case */
++		if (!processor_pm_external() &&
++		    (cx.type != ACPI_STATE_C1) &&
+ 		    (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO))
+ 			continue;
+ 
+@@ -742,6 +748,12 @@
+ 			continue;
+ 
+ 		cx.power = obj->integer.value;
 +
-+/*
-+ * Level and edge triggered IO-APIC interrupts need different handling,
-+ * so we use two separate IRQ descriptors. Edge triggered IRQs can be
-+ * handled with the level-triggered descriptor, but that one has slightly
-+ * more overhead. Level-triggered interrupts cannot be handled with the
-+ * edge-triggered handler, without risking IRQ storms and other ugly
-+ * races.
-+ */
-+
-+static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
-+	.typename = "IO-APIC-edge",
-+	.startup 	= startup_edge_ioapic,
-+	.shutdown 	= shutdown_edge_ioapic,
-+	.enable 	= enable_edge_ioapic,
-+	.disable 	= disable_edge_ioapic,
-+	.ack 		= ack_edge_ioapic,
-+	.end 		= end_edge_ioapic,
-+#ifdef CONFIG_SMP
-+	.set_affinity = set_ioapic_affinity,
++#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL
++		/* cache control methods to notify external logic */
++		if (processor_pm_external())
++			memcpy(&cx.reg, reg, sizeof(*reg));
 +#endif
-+	.retrigger	= ioapic_retrigger,
-+};
-+
-+static struct hw_interrupt_type ioapic_level_type __read_mostly = {
-+	.typename = "IO-APIC-level",
-+	.startup 	= startup_level_ioapic,
-+	.shutdown 	= shutdown_level_ioapic,
-+	.enable 	= enable_level_ioapic,
-+	.disable 	= disable_level_ioapic,
-+	.ack 		= mask_and_ack_level_ioapic,
-+	.end 		= end_level_ioapic,
-+#ifdef CONFIG_SMP
-+	.set_affinity = set_ioapic_affinity,
+ 
+ 		current_count++;
+ 		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
+@@ -985,12 +997,16 @@
+ 		return -ENODEV;
+ 
+ 	/* Fall back to the default idle loop */
+-	pm_idle = pm_idle_save;
++	if (!processor_pm_external())
++		pm_idle = pm_idle_save;
+ 	synchronize_sched();	/* Relies on interrupts forcing exit from idle. */
+ 
+ 	pr->flags.power = 0;
+ 	result = acpi_processor_get_power_info(pr);
+-	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
++	if (processor_pm_external())
++		processor_notify_external(pr,
++			PROCESSOR_PM_CHANGE, PM_TYPE_IDLE);
++	else if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
+ 		pm_idle = acpi_processor_idle;
+ 
+ 	return result;
+@@ -1122,7 +1138,7 @@
+ 				       pr->power.states[i].type);
+ 		printk(")\n");
+ 
+-		if (pr->id == 0) {
++		if (!processor_pm_external() && (pr->id == 0)) {
+ 			pm_idle_save = pm_idle;
+ 			pm_idle = acpi_processor_idle;
+ 		}
+@@ -1141,6 +1157,9 @@
+ 
+ 	pr->flags.power_setup_done = 1;
+ 
++	if (processor_pm_external())
++		processor_notify_external(pr,
++			PROCESSOR_PM_INIT, PM_TYPE_IDLE);
+ 	return 0;
+ }
+ 
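
The processor_idle.c changes above all follow one pattern: when processor_pm_external() reports that the hypervisor owns power management, dom0 still parses the ACPI tables (_CST) but forwards the result instead of driving the idle loop itself. A condensed, hedged sketch of that split; power_setup() is an illustrative name, while the predicate and notify calls are the ones used in the hunks:

  /* Sketch only, kernel context assumed. */
  static int power_setup(struct acpi_processor *pr)
  {
          int err = acpi_processor_get_power_info(pr);  /* parse _CST */
          if (err)
                  return err;

          if (processor_pm_external())
                  /* Xen drives idle; dom0 only reports the Cx data. */
                  return processor_notify_external(pr, PROCESSOR_PM_INIT,
                                                   PM_TYPE_IDLE);

          /* Native case: install the ACPI idle handler ourselves
           * (the real code also saves the previous pm_idle). */
          pm_idle = acpi_processor_idle;
          return 0;
  }
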
+diff -r d894e36cfc30 -r 0aa021803deb drivers/acpi/processor_perflib.c
+--- a/drivers/acpi/processor_perflib.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/acpi/processor_perflib.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -66,6 +66,7 @@
+ 
+ static int acpi_processor_ppc_status = 0;
+ 
++#ifdef CONFIG_CPU_FREQ
+ static int acpi_processor_ppc_notifier(struct notifier_block *nb,
+ 				       unsigned long event, void *data)
+ {
+@@ -102,6 +103,7 @@
+ static struct notifier_block acpi_ppc_notifier_block = {
+ 	.notifier_call = acpi_processor_ppc_notifier,
+ };
++#endif	/* CONFIG_CPU_FREQ */
+ 
+ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
+ {
+@@ -137,9 +139,15 @@
+ 	if (ret < 0)
+ 		return (ret);
+ 	else
++#ifdef CONFIG_CPU_FREQ
+ 		return cpufreq_update_policy(pr->id);
++#elif CONFIG_PROCESSOR_EXTERNAL_CONTROL
++		return processor_notify_external(pr,
++				PROCESSOR_PM_CHANGE, PM_TYPE_PERF);
 +#endif
-+	.retrigger	= ioapic_retrigger,
-+};
-+#endif /* !CONFIG_XEN */
-+
-+static inline void init_IO_APIC_traps(void)
-+{
-+	int irq;
-+
-+	/*
-+	 * NOTE! The local APIC isn't very good at handling
-+	 * multiple interrupts at the same interrupt level.
-+	 * As the interrupt level is determined by taking the
-+	 * vector number and shifting that right by 4, we
-+	 * want to spread these out a bit so that they don't
-+	 * all fall in the same interrupt level.
-+	 *
-+	 * Also, we've got to be careful not to trash gate
-+	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
-+	 */
-+	for (irq = 0; irq < NR_IRQS ; irq++) {
-+		int tmp = irq;
-+		if (use_pci_vector()) {
-+			if (!platform_legacy_irq(tmp))
-+				if ((tmp = vector_to_irq(tmp)) == -1)
-+					continue;
-+		}
-+		if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
-+			/*
-+			 * Hmm.. We don't have an entry for this,
-+			 * so default to an old-fashioned 8259
-+			 * interrupt if we can..
-+			 */
-+			if (irq < 16)
-+				make_8259A_irq(irq);
-+#ifndef CONFIG_XEN
-+			else
-+				/* Strange. Oh, well.. */
-+				irq_desc[irq].chip = &no_irq_type;
+ }
+ 
++#ifdef CONFIG_CPU_FREQ
+ void acpi_processor_ppc_init(void)
+ {
+ 	if (!cpufreq_register_notifier
+@@ -158,6 +166,7 @@
+ 
+ 	acpi_processor_ppc_status &= ~PPC_REGISTERED;
+ }
++#endif	/* CONFIG_CPU_FREQ */
+ 
+ static int acpi_processor_get_performance_control(struct acpi_processor *pr)
+ {
+@@ -299,7 +308,10 @@
+ 	return result;
+ }
+ 
+-static int acpi_processor_get_performance_info(struct acpi_processor *pr)
++#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL
++static
++#endif
++int acpi_processor_get_performance_info(struct acpi_processor *pr)
+ {
+ 	int result = 0;
+ 	acpi_status status = AE_OK;
+@@ -331,6 +343,7 @@
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_CPU_FREQ
+ int acpi_processor_notify_smm(struct module *calling_module)
+ {
+ 	acpi_status status;
+@@ -398,6 +411,7 @@
+ }
+ 
+ EXPORT_SYMBOL(acpi_processor_notify_smm);
++#endif	/* CONFIG_CPU_FREQ */
+ 
+ #ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
+ /* /proc/acpi/processor/../performance interface (DEPRECATED) */
+@@ -538,7 +552,10 @@
+ }
+ #endif				/* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */
+ 
+-static int acpi_processor_get_psd(struct acpi_processor	*pr)
++#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL
++static
++#endif
++int acpi_processor_get_psd(struct acpi_processor *pr)
+ {
+ 	int result = 0;
+ 	acpi_status status = AE_OK;
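
Two of the perflib hunks use the same preprocessor trick to widen a symbol's linkage only for the external-control build, so the Xen glue can call functions that stay static in native kernels. In isolation, with do_foo() as a placeholder:

  #ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL
  static
  #endif
  int do_foo(void)
  {
          return 0;       /* static natively, extern under Xen */
  }
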
+diff -r d894e36cfc30 -r 0aa021803deb drivers/acpi/resources/rsxface.c
+--- a/drivers/acpi/resources/rsxface.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/acpi/resources/rsxface.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -476,8 +476,6 @@
+ 	return (AE_CTRL_TERMINATE);
+ }
+ 
+-ACPI_EXPORT_SYMBOL(acpi_rs_match_vendor_resource)
+-
+ /*******************************************************************************
+  *
+  * FUNCTION:    acpi_walk_resources
+diff -r d894e36cfc30 -r 0aa021803deb drivers/acpi/sleep/main.c
+--- a/drivers/acpi/sleep/main.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/acpi/sleep/main.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -91,7 +91,14 @@
+ 		break;
+ 
+ 	case PM_SUSPEND_MEM:
++#ifdef CONFIG_ACPI_PV_SLEEP
++		/* Hypervisor will save and restore CPU context,
++		 * so we can skip the low-level housekeeping here.
++		 */
++		acpi_enter_sleep_state(acpi_state);
++#else
+ 		do_suspend_lowlevel();
++#endif
+ 		break;
+ 
+ 	case PM_SUSPEND_DISK:
+@@ -145,10 +152,12 @@
+ 	/* reset firmware waking vector */
+ 	acpi_set_firmware_waking_vector((acpi_physical_address) 0);
+ 
++#ifndef CONFIG_ACPI_PV_SLEEP
+ 	if (init_8259A_after_S1) {
+ 		printk("Broken toshiba laptop -> kicking interrupts\n");
+ 		init_8259A(0);
+ 	}
++#endif
+ 	return 0;
+ }
+ 
+diff -r d894e36cfc30 -r 0aa021803deb drivers/acpi/sleep/poweroff.c
+--- a/drivers/acpi/sleep/poweroff.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/acpi/sleep/poweroff.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -20,6 +20,7 @@
+ int acpi_sleep_prepare(u32 acpi_state)
+ {
+ #ifdef CONFIG_ACPI_SLEEP
++#ifndef CONFIG_ACPI_PV_SLEEP
+ 	/* do we have a wakeup address for S2 and S3? */
+ 	if (acpi_state == ACPI_STATE_S3) {
+ 		if (!acpi_wakeup_address) {
+@@ -30,6 +31,7 @@
+ 							     acpi_wakeup_address));
+ 
+ 	}
 +#endif
+ 	ACPI_FLUSH_CPU_CACHE();
+ 	acpi_enable_wakeup_device_prep(acpi_state);
+ #endif
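
Both CONFIG_ACPI_PV_SLEEP hunks above encode the same idea: in a paravirtualized guest the hypervisor saves and restores CPU context across S3, so the kernel neither needs a real-mode wakeup vector nor do_suspend_lowlevel(); it simply asks ACPI to enter the sleep state. A condensed sketch under that assumption (pv_suspend_mem() is an illustrative wrapper, not a function from the patch):

  /* Kernel-context sketch of the S3 entry split. */
  static int pv_suspend_mem(u32 acpi_state)
  {
  #ifdef CONFIG_ACPI_PV_SLEEP
          /* Xen restores CPU state; no wakeup trampoline needed. */
          return acpi_enter_sleep_state(acpi_state);
  #else
          do_suspend_lowlevel();
          return 0;
  #endif
  }
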
+diff -r d894e36cfc30 -r 0aa021803deb drivers/acpi/sleep/proc.c
+--- a/drivers/acpi/sleep/proc.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/acpi/sleep/proc.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -251,6 +251,8 @@
+ 
+ 	if (adjust) {
+ 		yr += CMOS_READ(RTC_YEAR);
++		if (acpi_gbl_FADT->century)
++			yr += CMOS_READ(acpi_gbl_FADT->century) * 100;
+ 		mo += CMOS_READ(RTC_MONTH);
+ 		day += CMOS_READ(RTC_DAY_OF_MONTH);
+ 		hr += CMOS_READ(RTC_HOURS);
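
The proc.c hunk makes alarm years honour the FADT's century CMOS register when the platform advertises one. CMOS registers hold BCD values, so the arithmetic, illustrated in user space (bcd2bin() mirrors the kernel's BCD_TO_BIN; register offsets elided):

  #include <stdio.h>

  /* Decode one BCD byte: 0x20 -> 20. */
  static unsigned int bcd2bin(unsigned int v)
  {
          return (v & 0x0f) + (v >> 4) * 10;
  }

  int main(void)
  {
          unsigned int century = bcd2bin(0x20); /* FADT century register */
          unsigned int year    = bcd2bin(0x08); /* RTC_YEAR register */
          printf("%u\n", century * 100 + year); /* prints 2008 */
          return 0;
  }
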
+diff -r d894e36cfc30 -r 0aa021803deb drivers/block/floppy.c
+--- a/drivers/block/floppy.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/block/floppy.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -4392,11 +4392,15 @@
+ 	if (fd_request_dma()) {
+ 		DPRINT("Unable to grab DMA%d for the floppy driver\n",
+ 		       FLOPPY_DMA);
+-		fd_free_irq();
+-		spin_lock_irqsave(&floppy_usage_lock, flags);
+-		usage_count--;
+-		spin_unlock_irqrestore(&floppy_usage_lock, flags);
+-		return -1;
++		if (can_use_virtual_dma & 2)
++			use_virtual_dma = can_use_virtual_dma = 1;
++		if (!(can_use_virtual_dma & 1)) {
++			fd_free_irq();
++			spin_lock_irqsave(&floppy_usage_lock, flags);
++			usage_count--;
++			spin_unlock_irqrestore(&floppy_usage_lock, flags);
++			return -1;
 +		}
+ 	}
+ 
+ 	for (fdc = 0; fdc < N_FDC; fdc++) {
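
The floppy.c hunk softens a hard failure: if the DMA channel cannot be claimed, the driver now falls back to virtual (PIO-based) DMA when that mode is permitted, instead of always giving up. My reading of the bit semantics, as a runnable illustration rather than a quote from the driver:

  #include <stdio.h>

  /* Bit 0: virtual DMA in use; bit 1: may fall back to it. */
  static int can_use_virtual_dma = 2;     /* fallback allowed */
  static int use_virtual_dma;

  /* What the new code does after fd_request_dma() fails:
   * returns 0 if the driver can continue, -1 to give up. */
  static int dma_grab_failed(void)
  {
          if (can_use_virtual_dma & 2)
                  use_virtual_dma = can_use_virtual_dma = 1;
          if (!(can_use_virtual_dma & 1))
                  return -1;      /* old behaviour: bail out */
          return 0;
  }

  int main(void)
  {
          printf("%d %d\n", dma_grab_failed(), use_virtual_dma); /* 0 1 */
          return 0;
  }
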
+diff -r d894e36cfc30 -r 0aa021803deb drivers/char/Kconfig
+--- a/drivers/char/Kconfig	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/char/Kconfig	Wed Sep 10 10:54:08 2008 +0100
+@@ -1013,7 +1013,7 @@
+ config HPET
+ 	bool "HPET - High Precision Event Timer" if (X86 || IA64)
+ 	default n
+-	depends on ACPI
++	depends on ACPI && !XEN
+ 	help
+ 	  If you say Y here, you will have a miscdevice named "/dev/hpet/".  Each
+ 	  open selects one of the timers supported by the HPET.  The timers are
+diff -r d894e36cfc30 -r 0aa021803deb drivers/char/agp/amd64-agp.c
+--- a/drivers/char/agp/amd64-agp.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/char/agp/amd64-agp.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -15,6 +15,7 @@
+ #include <linux/mmzone.h>
+ #include <asm/page.h>		/* PAGE_SIZE */
+ #include <asm/k8.h>
++#include <asm/e820.h>
+ #include "agp.h"
+ 
+ /* PTE bits. */
+@@ -252,7 +253,6 @@
+ /* Some basic sanity checks for the aperture. */
+ static int __devinit aperture_valid(u64 aper, u32 size)
+ {
+-	u32 pfn, c;
+ 	if (aper == 0) {
+ 		printk(KERN_ERR PFX "No aperture\n");
+ 		return 0;
+@@ -265,14 +265,9 @@
+ 		printk(KERN_ERR PFX "Aperture out of bounds\n");
+ 		return 0;
+ 	}
+-	pfn = aper >> PAGE_SHIFT;
+-	for (c = 0; c < size/PAGE_SIZE; c++) {
+-		if (!pfn_valid(pfn + c))
+-			break;
+-		if (!PageReserved(pfn_to_page(pfn + c))) {
+-			printk(KERN_ERR PFX "Aperture pointing to RAM\n");
+-			return 0;
+-		}
++	if (e820_any_mapped(aper, aper + size, E820_RAM)) {
++		printk(KERN_ERR PFX "Aperture pointing to RAM\n");
++		return 0;
+ 	}
+ 
+ 	/* Request the Aperture. This catches cases when someone else
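
aperture_valid() now asks the e820 map directly whether the aperture overlaps RAM instead of walking struct page flags, which cannot be trusted when frames outside the kernel's mem_map are involved (as under Xen). The overlap test e820_any_mapped() performs is essentially the following; the table layout here is illustrative:

  #include <stdint.h>
  #include <stdio.h>

  #define E820_RAM 1
  struct e820_entry { uint64_t addr, size; int type; };

  /* Does any entry of the given type overlap [start, end)? */
  static int any_mapped(const struct e820_entry *map, int n,
                        uint64_t start, uint64_t end, int type)
  {
          int i;
          for (i = 0; i < n; i++)
                  if (map[i].type == type &&
                      map[i].addr < end &&
                      map[i].addr + map[i].size > start)
                          return 1;
          return 0;
  }

  int main(void)
  {
          struct e820_entry map[] = { { 0, 0x9fc00, E820_RAM } };
          /* An aperture at 256 MiB misses low RAM: prints 0. */
          printf("%d\n", any_mapped(map, 1, 0x10000000ULL,
                                    0x14000000ULL, E820_RAM));
          return 0;
  }
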
+diff -r d894e36cfc30 -r 0aa021803deb drivers/char/agp/generic.c
+--- a/drivers/char/agp/generic.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/char/agp/generic.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -50,28 +50,6 @@
+  * nice to do this some other way instead of needing this export.
+  */
+ EXPORT_SYMBOL_GPL(agp_memory_reserved);
+-
+-#if defined(CONFIG_X86)
+-int map_page_into_agp(struct page *page)
+-{
+-	int i;
+-	i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
+-	/* Caller's responsibility to call global_flush_tlb() for
+-	 * performance reasons */
+-	return i;
+-}
+-EXPORT_SYMBOL_GPL(map_page_into_agp);
+-
+-int unmap_page_from_agp(struct page *page)
+-{
+-	int i;
+-	i = change_page_attr(page, 1, PAGE_KERNEL);
+-	/* Caller's responsibility to call global_flush_tlb() for
+-	 * performance reasons */
+-	return i;
+-}
+-EXPORT_SYMBOL_GPL(unmap_page_from_agp);
+-#endif
+ 
+ /*
+  * Generic routines for handling agp_memory structures -
+diff -r d894e36cfc30 -r 0aa021803deb drivers/char/agp/intel-agp.c
+--- a/drivers/char/agp/intel-agp.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/char/agp/intel-agp.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -173,9 +173,17 @@
+ 	if (page == NULL)
+ 		return NULL;
+ 
++#ifdef CONFIG_XEN
++	if (xen_create_contiguous_region((unsigned long)page_address(page), 2, 32)) {
++		__free_pages(page, 2);
++		return NULL;
 +	}
-+}
++#endif
 +
-+#ifndef CONFIG_XEN
-+static void enable_lapic_irq (unsigned int irq)
-+{
-+	unsigned long v;
+ 	if (change_page_attr(page, 4, PAGE_KERNEL_NOCACHE) < 0) {
++		change_page_attr(page, 4, PAGE_KERNEL);
+ 		global_flush_tlb();
+-		__free_page(page);
++		__free_pages(page, 2);
+ 		return NULL;
+ 	}
+ 	global_flush_tlb();
+@@ -195,9 +203,12 @@
+ 	page = virt_to_page(addr);
+ 	change_page_attr(page, 4, PAGE_KERNEL);
+ 	global_flush_tlb();
++#ifdef CONFIG_XEN
++	xen_destroy_contiguous_region((unsigned long)page_address(page), 2);
++#endif
+ 	put_page(page);
+ 	unlock_page(page);
+-	free_pages((unsigned long)addr, 2);
++	__free_pages(page, 2);
+ 	atomic_dec(&agp_bridge->current_memory_agp);
+ }
+ 
+diff -r d894e36cfc30 -r 0aa021803deb drivers/char/mem.c
+--- a/drivers/char/mem.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/char/mem.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -101,6 +101,7 @@
+ }
+ #endif
+ 
++#ifndef ARCH_HAS_DEV_MEM
+ /*
+  * This funcion reads the *physical* memory. The f_pos points directly to the 
+  * memory location. 
+@@ -223,6 +224,7 @@
+ 	*ppos += written;
+ 	return written;
+ }
++#endif
+ 
+ #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
+ static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+@@ -262,6 +264,9 @@
+ static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
+ {
+ 	unsigned long pfn;
++#ifdef CONFIG_XEN
++	unsigned long i, count;
++#endif
+ 
+ 	/* Turn a kernel-virtual address into a physical page frame */
+ 	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
+@@ -275,6 +280,13 @@
+ 	 */
+ 	if (!pfn_valid(pfn))
+ 		return -EIO;
++
++#ifdef CONFIG_XEN
++	count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++	for (i = 0; i < count; i++)
++		if ((pfn + i) != mfn_to_local_pfn(pfn_to_mfn(pfn + i)))
++			return -EIO;
++#endif
+ 
+ 	vma->vm_pgoff = pfn;
+ 	return mmap_mem(file, vma);
+@@ -780,6 +792,7 @@
+ #define open_kmem	open_mem
+ #define open_oldmem	open_mem
+ 
++#ifndef ARCH_HAS_DEV_MEM
+ static const struct file_operations mem_fops = {
+ 	.llseek		= memory_lseek,
+ 	.read		= read_mem,
+@@ -787,6 +800,9 @@
+ 	.mmap		= mmap_mem,
+ 	.open		= open_mem,
+ };
++#else
++extern const struct file_operations mem_fops;
++#endif
+ 
+ static const struct file_operations kmem_fops = {
+ 	.llseek		= memory_lseek,
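
The Xen-specific hunk in mmap_kmem() above refuses to map any page whose pseudo-physical frame does not round-trip through its machine frame number, i.e. anything foreign or not currently backed by a local frame. Isolated, with pfn_to_mfn()/mfn_to_local_pfn() being the Xen helpers the hunk already uses:

  /* Kernel-context sketch: a pfn is safe to hand to mmap_mem()
   * only if pfn -> mfn -> pfn yields the same local pfn. */
  static int pfn_is_local(unsigned long pfn)
  {
          return mfn_to_local_pfn(pfn_to_mfn(pfn)) == pfn;
  }
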
+diff -r d894e36cfc30 -r 0aa021803deb drivers/char/tpm/Kconfig
+--- a/drivers/char/tpm/Kconfig	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/char/tpm/Kconfig	Wed Sep 10 10:54:08 2008 +0100
+@@ -58,5 +58,13 @@
+ 	  Further information on this driver and the supported hardware
+ 	  can be found at http://www.prosec.rub.de/tpm
+ 
++config TCG_XEN
++	tristate "XEN TPM Interface"
++	depends on TCG_TPM && XEN
++	---help---
++	  If you want to make TPM support available to a Xen user domain,
++	  say Yes and it will be accessible from within Linux.
++	  To compile this driver as a module, choose M here; the module
++	  will be called tpm_xenu.
 +
-+	v = apic_read(APIC_LVT0);
-+	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
-+}
+ endmenu
+-
+diff -r d894e36cfc30 -r 0aa021803deb drivers/char/tpm/Makefile
+--- a/drivers/char/tpm/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/char/tpm/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -9,3 +9,5 @@
+ obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
+ obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
+ obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
++obj-$(CONFIG_TCG_XEN) += tpm_xenu.o
++tpm_xenu-y = tpm_xen.o tpm_vtpm.o
+diff -r d894e36cfc30 -r 0aa021803deb drivers/char/tpm/tpm.h
+--- a/drivers/char/tpm/tpm.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/char/tpm/tpm.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -105,6 +105,9 @@
+ 	struct dentry **bios_dir;
+ 
+ 	struct list_head list;
++#ifdef CONFIG_XEN
++	void *priv;
++#endif
+ };
+ 
+ #define to_tpm_chip(n) container_of(n, struct tpm_chip, vendor)
+@@ -120,6 +123,18 @@
+ 	outb(index, base);
+ 	outb(value & 0xFF, base+1);
+ }
 +
-+static void disable_lapic_irq (unsigned int irq)
++#ifdef CONFIG_XEN
++static inline void *chip_get_private(const struct tpm_chip *chip)
 +{
-+	unsigned long v;
-+
-+	v = apic_read(APIC_LVT0);
-+	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
++	return chip->priv;
 +}
 +
-+static void ack_lapic_irq (unsigned int irq)
++static inline void chip_set_private(struct tpm_chip *chip, void *priv)
 +{
-+	ack_APIC_irq();
++	chip->priv = priv;
 +}
++#endif
+ 
+ extern void tpm_get_timeouts(struct tpm_chip *);
+ extern void tpm_gen_interrupt(struct tpm_chip *);
+diff -r d894e36cfc30 -r 0aa021803deb drivers/char/tpm/tpm_vtpm.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/char/tpm/tpm_vtpm.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,542 @@
++/*
++ * Copyright (C) 2006 IBM Corporation
++ *
++ * Authors:
++ * Stefan Berger <stefanb at us.ibm.com>
++ *
++ * Generic device driver part for device drivers in a virtualized
++ * environment.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation, version 2 of the
++ * License.
++ *
++ */
 +
-+static void end_lapic_irq (unsigned int i) { /* nothing */ }
++#include <asm/uaccess.h>
++#include <linux/list.h>
++#include <linux/device.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include "tpm.h"
++#include "tpm_vtpm.h"
 +
-+static struct hw_interrupt_type lapic_irq_type __read_mostly = {
-+	.typename = "local-APIC-edge",
-+	.startup = NULL, /* startup_irq() not used for IRQ0 */
-+	.shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
-+	.enable = enable_lapic_irq,
-+	.disable = disable_lapic_irq,
-+	.ack = ack_lapic_irq,
-+	.end = end_lapic_irq,
++/* read status bits */
++enum {
++	STATUS_BUSY = 0x01,
++	STATUS_DATA_AVAIL = 0x02,
++	STATUS_READY = 0x04
 +};
 +
-+static void setup_nmi (void)
-+{
-+	/*
-+ 	 * Dirty trick to enable the NMI watchdog ...
-+	 * We put the 8259A master into AEOI mode and
-+	 * unmask on all local APICs LVT0 as NMI.
-+	 *
-+	 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
-+	 * is from Maciej W. Rozycki - so we do not have to EOI from
-+	 * the NMI handler or the timer interrupt.
-+	 */ 
-+	printk(KERN_INFO "activating NMI Watchdog ...");
-+
-+	enable_NMI_through_LVT0(NULL);
-+
-+	printk(" done.\n");
-+}
-+
-+/*
-+ * This looks a bit hackish but it's about the only one way of sending
-+ * a few INTA cycles to 8259As and any associated glue logic.  ICR does
-+ * not support the ExtINT mode, unfortunately.  We need to send these
-+ * cycles as some i82489DX-based boards have glue logic that keeps the
-+ * 8259A interrupt line asserted until INTA.  --macro
-+ */
-+static inline void unlock_ExtINT_logic(void)
-+{
-+	int apic, pin, i;
-+	struct IO_APIC_route_entry entry0, entry1;
-+	unsigned char save_control, save_freq_select;
-+	unsigned long flags;
++struct transmission {
++	struct list_head next;
 +
-+	pin  = find_isa_irq_pin(8, mp_INT);
-+	apic = find_isa_irq_apic(8, mp_INT);
-+	if (pin == -1)
-+		return;
++	unsigned char *request;
++	size_t  request_len;
++	size_t  request_buflen;
 +
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	*(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
-+	*(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+	clear_IO_APIC_pin(apic, pin);
++	unsigned char *response;
++	size_t  response_len;
++	size_t  response_buflen;
 +
-+	memset(&entry1, 0, sizeof(entry1));
++	unsigned int flags;
++};
 +
-+	entry1.dest_mode = 0;			/* physical delivery */
-+	entry1.mask = 0;			/* unmask IRQ now */
-+	entry1.dest.physical.physical_dest = hard_smp_processor_id();
-+	entry1.delivery_mode = dest_ExtINT;
-+	entry1.polarity = entry0.polarity;
-+	entry1.trigger = 0;
-+	entry1.vector = 0;
++enum {
++	TRANSMISSION_FLAG_WAS_QUEUED = 0x1
++};
 +
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
-+	io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
 +
-+	save_control = CMOS_READ(RTC_CONTROL);
-+	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
-+	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
-+		   RTC_FREQ_SELECT);
-+	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
++enum {
++	DATAEX_FLAG_QUEUED_ONLY = 0x1
++};
 +
-+	i = 100;
-+	while (i-- > 0) {
-+		mdelay(10);
-+		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
-+			i -= 10;
-+	}
 +
-+	CMOS_WRITE(save_control, RTC_CONTROL);
-+	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
-+	clear_IO_APIC_pin(apic, pin);
++/* local variables */
 +
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
-+	io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
++/* local function prototypes */
++static int _vtpm_send_queued(struct tpm_chip *chip);
 +
-+int timer_uses_ioapic_pin_0;
 +
-+/*
-+ * This code may look a bit paranoid, but it's supposed to cooperate with
-+ * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
-+ * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
-+ * fanatically on his truly buggy board.
-+ *
-+ * FIXME: really need to revamp this for modern platforms only.
++/* =============================================================
++ * Some utility functions
++ * =============================================================
 + */
-+static inline void check_timer(void)
++static void vtpm_state_init(struct vtpm_state *vtpms)
 +{
-+	int apic1, pin1, apic2, pin2;
-+	int vector;
-+
-+	/*
-+	 * get/set the timer IRQ vector:
-+	 */
-+	disable_8259A_irq(0);
-+	vector = assign_irq_vector(0);
-+	set_intr_gate(vector, interrupt[0]);
-+
-+	/*
-+	 * Subtle, code in do_timer_interrupt() expects an AEOI
-+	 * mode for the 8259A whenever interrupts are routed
-+	 * through I/O APICs.  Also IRQ0 has to be enabled in
-+	 * the 8259A which implies the virtual wire has to be
-+	 * disabled in the local APIC.
-+	 */
-+	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
-+	init_8259A(1);
-+	if (timer_over_8254 > 0)
-+		enable_8259A_irq(0);
++	vtpms->current_request = NULL;
++	spin_lock_init(&vtpms->req_list_lock);
++	init_waitqueue_head(&vtpms->req_wait_queue);
++	INIT_LIST_HEAD(&vtpms->queued_requests);
 +
-+	pin1  = find_isa_irq_pin(0, mp_INT);
-+	apic1 = find_isa_irq_apic(0, mp_INT);
-+	pin2  = ioapic_i8259.pin;
-+	apic2 = ioapic_i8259.apic;
++	vtpms->current_response = NULL;
++	spin_lock_init(&vtpms->resp_list_lock);
++	init_waitqueue_head(&vtpms->resp_wait_queue);
 +
-+	if (pin1 == 0)
-+		timer_uses_ioapic_pin_0 = 1;
++	vtpms->disconnect_time = jiffies;
++}
 +
-+	apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
-+		vector, apic1, pin1, apic2, pin2);
 +
-+	if (pin1 != -1) {
-+		/*
-+		 * Ok, does IRQ0 through the IOAPIC work?
-+		 */
-+		unmask_IO_APIC_irq(0);
-+		if (!no_timer_check && timer_irq_works()) {
-+			nmi_watchdog_default();
-+			if (nmi_watchdog == NMI_IO_APIC) {
-+				disable_8259A_irq(0);
-+				setup_nmi();
-+				enable_8259A_irq(0);
-+			}
-+			if (disable_timer_pin_1 > 0)
-+				clear_IO_APIC_pin(0, pin1);
-+			return;
-+		}
-+		clear_IO_APIC_pin(apic1, pin1);
-+		apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not "
-+				"connected to IO-APIC\n");
-+	}
++static inline struct transmission *transmission_alloc(void)
++{
++	return kzalloc(sizeof(struct transmission), GFP_ATOMIC);
++}
 +
-+	apic_printk(APIC_VERBOSE,KERN_INFO "...trying to set up timer (IRQ0) "
-+				"through the 8259A ... ");
-+	if (pin2 != -1) {
-+		apic_printk(APIC_VERBOSE,"\n..... (found apic %d pin %d) ...",
-+			apic2, pin2);
-+		/*
-+		 * legacy devices should be connected to IO APIC #0
-+		 */
-+		setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
-+		if (timer_irq_works()) {
-+			apic_printk(APIC_VERBOSE," works.\n");
-+			nmi_watchdog_default();
-+			if (nmi_watchdog == NMI_IO_APIC) {
-+				setup_nmi();
-+			}
-+			return;
++static unsigned char *
++transmission_set_req_buffer(struct transmission *t,
++                            unsigned char *buffer, size_t len)
++{
++	if (t->request_buflen < len) {
++		kfree(t->request);
++		t->request = kmalloc(len, GFP_KERNEL);
++		if (!t->request) {
++			t->request_buflen = 0;
++			return NULL;
 +		}
-+		/*
-+		 * Cleanup, just in case ...
-+		 */
-+		clear_IO_APIC_pin(apic2, pin2);
-+	}
-+	apic_printk(APIC_VERBOSE," failed.\n");
-+
-+	if (nmi_watchdog == NMI_IO_APIC) {
-+		printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
-+		nmi_watchdog = 0;
++		t->request_buflen = len;
 +	}
 +
-+	apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
++	memcpy(t->request, buffer, len);
++	t->request_len = len;
 +
-+	disable_8259A_irq(0);
-+	irq_desc[0].chip = &lapic_irq_type;
-+	apic_write(APIC_LVT0, APIC_DM_FIXED | vector);	/* Fixed mode */
-+	enable_8259A_irq(0);
++	return t->request;
++}
 +
-+	if (timer_irq_works()) {
-+		apic_printk(APIC_VERBOSE," works.\n");
-+		return;
++static unsigned char *
++transmission_set_res_buffer(struct transmission *t,
++                            const unsigned char *buffer, size_t len)
++{
++	if (t->response_buflen < len) {
++		kfree(t->response);
++		t->response = kmalloc(len, GFP_ATOMIC);
++		if (!t->response) {
++			t->response_buflen = 0;
++			return NULL;
++		}
++		t->response_buflen = len;
 +	}
-+	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
-+	apic_printk(APIC_VERBOSE," failed.\n");
-+
-+	apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as ExtINT IRQ...");
-+
-+	init_8259A(0);
-+	make_8259A_irq(0);
-+	apic_write(APIC_LVT0, APIC_DM_EXTINT);
 +
-+	unlock_ExtINT_logic();
++	memcpy(t->response, buffer, len);
++	t->response_len = len;
 +
-+	if (timer_irq_works()) {
-+		apic_printk(APIC_VERBOSE," works.\n");
-+		return;
-+	}
-+	apic_printk(APIC_VERBOSE," failed :(.\n");
-+	panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
++	return t->response;
 +}
-+#else
-+#define check_timer() ((void)0)
-+int timer_uses_ioapic_pin_0 = 0;
-+#endif /* !CONFIG_XEN */
 +
-+static int __init notimercheck(char *s)
++static inline void transmission_free(struct transmission *t)
 +{
-+	no_timer_check = 1;
-+	return 1;
++	kfree(t->request);
++	kfree(t->response);
++	kfree(t);
 +}
-+__setup("no_timer_check", notimercheck);
 +
++/* =============================================================
++ * Interface with the lower layer driver
++ * =============================================================
++ */
 +/*
-+ *
-+ * IRQ's that are handled by the PIC in the MPS IOAPIC case.
-+ * - IRQ2 is the cascade IRQ, and cannot be a io-apic IRQ.
-+ *   Linux doesn't really care, as it's not actually used
-+ *   for any interrupt handling anyway.
++ * Lower layer uses this function to make a response available.
 + */
-+#define PIC_IRQS	(1<<2)
-+
-+void __init setup_IO_APIC(void)
++int vtpm_vd_recv(const struct tpm_chip *chip,
++                 const unsigned char *buffer, size_t count,
++                 void *ptr)
 +{
-+	enable_IO_APIC();
-+
-+	if (acpi_ioapic)
-+		io_apic_irqs = ~0;	/* all IRQs go through IOAPIC */
-+	else
-+		io_apic_irqs = ~PIC_IRQS;
++	unsigned long flags;
++	int ret_size = 0;
++	struct transmission *t;
++	struct vtpm_state *vtpms;
 +
-+	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
++	vtpms = (struct vtpm_state *)chip_get_private(chip);
 +
 +	/*
-+	 * Set up the IO-APIC IRQ routing table.
++	 * The list with requests must contain one request
++	 * only and the element there must be the one that
++	 * was passed to me from the front-end.
 +	 */
-+	if (!acpi_ioapic)
-+		setup_ioapic_ids_from_mpc();
-+#ifndef CONFIG_XEN
-+	sync_Arb_IDs();
-+#endif /* !CONFIG_XEN */
-+	setup_IO_APIC_irqs();
-+	init_IO_APIC_traps();
-+	check_timer();
-+	if (!acpi_ioapic)
-+		print_IO_APIC();
-+}
-+
-+struct sysfs_ioapic_data {
-+	struct sys_device dev;
-+	struct IO_APIC_route_entry entry[0];
-+};
-+static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
-+
-+static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
-+{
-+	struct IO_APIC_route_entry *entry;
-+	struct sysfs_ioapic_data *data;
-+	unsigned long flags;
-+	int i;
-+
-+	data = container_of(dev, struct sysfs_ioapic_data, dev);
-+	entry = data->entry;
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
-+		*(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
-+		*(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
++	spin_lock_irqsave(&vtpms->resp_list_lock, flags);
++	if (vtpms->current_request != ptr) {
++		spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++		return 0;
 +	}
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	return 0;
-+}
-+
-+static int ioapic_resume(struct sys_device *dev)
-+{
-+	struct IO_APIC_route_entry *entry;
-+	struct sysfs_ioapic_data *data;
-+	unsigned long flags;
-+	union IO_APIC_reg_00 reg_00;
-+	int i;
-+
-+	data = container_of(dev, struct sysfs_ioapic_data, dev);
-+	entry = data->entry;
 +
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	reg_00.raw = io_apic_read(dev->id, 0);
-+	if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
-+		reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
-+		io_apic_write(dev->id, 0, reg_00.raw);
-+	}
-+	for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
-+		io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
-+		io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
++	if ((t = vtpms->current_request)) {
++		transmission_free(t);
++		vtpms->current_request = NULL;
 +	}
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	return 0;
-+}
-+
-+static struct sysdev_class ioapic_sysdev_class = {
-+	set_kset_name("ioapic"),
-+	.suspend = ioapic_suspend,
-+	.resume = ioapic_resume,
-+};
 +
-+static int __init ioapic_init_sysfs(void)
-+{
-+	struct sys_device * dev;
-+	int i, size, error = 0;
-+
-+	error = sysdev_class_register(&ioapic_sysdev_class);
-+	if (error)
-+		return error;
-+
-+	for (i = 0; i < nr_ioapics; i++ ) {
-+		size = sizeof(struct sys_device) + nr_ioapic_registers[i]
-+			* sizeof(struct IO_APIC_route_entry);
-+		mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
-+		if (!mp_ioapic_data[i]) {
-+			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
-+			continue;
-+		}
-+		memset(mp_ioapic_data[i], 0, size);
-+		dev = &mp_ioapic_data[i]->dev;
-+		dev->id = i;
-+		dev->cls = &ioapic_sysdev_class;
-+		error = sysdev_register(dev);
-+		if (error) {
-+			kfree(mp_ioapic_data[i]);
-+			mp_ioapic_data[i] = NULL;
-+			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
-+			continue;
++	t = transmission_alloc();
++	if (t) {
++		if (!transmission_set_res_buffer(t, buffer, count)) {
++			transmission_free(t);
++			spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++			return -ENOMEM;
 +		}
++		ret_size = count;
++		vtpms->current_response = t;
++		wake_up_interruptible(&vtpms->resp_wait_queue);
 +	}
++	spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
 +
-+	return 0;
-+}
-+
-+device_initcall(ioapic_init_sysfs);
-+
-+/* --------------------------------------------------------------------------
-+                          ACPI-based IOAPIC Configuration
-+   -------------------------------------------------------------------------- */
-+
-+#ifdef CONFIG_ACPI
-+
-+#define IO_APIC_MAX_ID		0xFE
-+
-+int __init io_apic_get_version (int ioapic)
-+{
-+	union IO_APIC_reg_01	reg_01;
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	reg_01.raw = io_apic_read(ioapic, 1);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	return reg_01.bits.version;
++	return ret_size;
 +}
 +
 +
-+int __init io_apic_get_redir_entries (int ioapic)
++/*
++ * Lower layer indicates its status (connected/disconnected)
++ */
++void vtpm_vd_status(const struct tpm_chip *chip, u8 vd_status)
 +{
-+	union IO_APIC_reg_01	reg_01;
-+	unsigned long flags;
++	struct vtpm_state *vtpms;
 +
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	reg_01.raw = io_apic_read(ioapic, 1);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
++	vtpms = (struct vtpm_state *)chip_get_private(chip);
 +
-+	return reg_01.bits.entries;
++	vtpms->vd_status = vd_status;
++	if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
++		vtpms->disconnect_time = jiffies;
++	}
 +}
 +
-+
-+int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
++/* =============================================================
++ * Interface with the generic TPM driver
++ * =============================================================
++ */
++static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 +{
-+	struct IO_APIC_route_entry entry;
++	int rc = 0;
 +	unsigned long flags;
++	struct vtpm_state *vtpms;
 +
-+	if (!IO_APIC_IRQ(irq)) {
-+		apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
-+			ioapic);
-+		return -EINVAL;
-+	}
++	vtpms = (struct vtpm_state *)chip_get_private(chip);
 +
 +	/*
-+	 * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
-+	 * Note that we mask (disable) IRQs now -- these get enabled when the
-+	 * corresponding device driver registers for this IRQ.
++	 * Check if the previous operation only queued the command
++	 * In this case there won't be a response, so I just
++	 * return from here and reset that flag. In any other
++	 * case I should receive a response from the back-end.
 +	 */
++	spin_lock_irqsave(&vtpms->resp_list_lock, flags);
++	if ((vtpms->flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
++		vtpms->flags &= ~DATAEX_FLAG_QUEUED_ONLY;
++		spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++		/*
++		 * The first few commands (measurements) must be
++		 * queued since it might not be possible to talk to the
++		 * TPM yet.
++		 * Return a response of up to 30 '0's.
++		 */
 +
-+	memset(&entry,0,sizeof(entry));
-+
-+	entry.delivery_mode = INT_DELIVERY_MODE;
-+	entry.dest_mode = INT_DEST_MODE;
-+	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-+	entry.trigger = edge_level;
-+	entry.polarity = active_high_low;
-+	entry.mask = 1;					 /* Disabled (masked) */
-+
-+	irq = gsi_irq_sharing(irq);
++		count = min_t(size_t, count, 30);
++		memset(buf, 0x0, count);
++		return count;
++	}
 +	/*
-+	 * IRQs < 16 are already in the irq_2_pin[] map
++	 * Check whether something is in the responselist and if
++	 * there's nothing in the list wait for something to appear.
 +	 */
-+	if (irq >= 16)
-+		add_pin_to_irq(irq, ioapic, pin);
-+
-+	entry.vector = assign_irq_vector(irq);
-+
-+	apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
-+		"IRQ %d Mode:%i Active:%i)\n", ioapic, 
-+	       mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
-+	       edge_level, active_high_low);
-+
-+	ioapic_register_intr(irq, entry.vector, edge_level);
 +
-+	if (!ioapic && (irq < 16))
-+		disable_8259A_irq(irq);
++	if (!vtpms->current_response) {
++		spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++		interruptible_sleep_on_timeout(&vtpms->resp_wait_queue,
++		                               1000);
++		spin_lock_irqsave(&vtpms->resp_list_lock, flags);
++	}
 +
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
-+	io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
-+	set_native_irq_info(use_pci_vector() ?  entry.vector : irq, TARGET_CPUS);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
++	if (vtpms->current_response) {
++		struct transmission *t = vtpms->current_response;
++		vtpms->current_response = NULL;
++		rc = min(count, t->response_len);
++		memcpy(buf, t->response, rc);
++		transmission_free(t);
++	}
 +
-+	return 0;
++	spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++	return rc;
 +}
 +
-+#endif /* CONFIG_ACPI */
-+
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * This function currently is only a helper for the i386 smp boot process where
-+ * we need to reprogram the ioredtbls to cater for the cpus which have come online
-+ * so mask in all cases should simply be TARGET_CPUS
-+ */
-+#ifdef CONFIG_SMP
-+void __init setup_ioapic_dest(void)
++static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
 +{
-+	int pin, ioapic, irq, irq_entry;
-+
-+	if (skip_ioapic_setup == 1)
-+		return;
++	int rc = 0;
++	unsigned long flags;
++	struct transmission *t = transmission_alloc();
++	struct vtpm_state *vtpms;
 +
-+	for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
-+		for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
-+			irq_entry = find_irq_entry(ioapic, pin, mp_INT);
-+			if (irq_entry == -1)
-+				continue;
-+			irq = pin_2_irq(irq_entry, ioapic, pin);
-+			set_ioapic_affinity_irq(irq, TARGET_CPUS);
-+		}
++	vtpms = (struct vtpm_state *)chip_get_private(chip);
 +
++	if (!t)
++		return -ENOMEM;
++	/*
++	 * If there's a current request, it must be the
++	 * previous request that has timed out.
++	 */
++	spin_lock_irqsave(&vtpms->req_list_lock, flags);
++	if (vtpms->current_request != NULL) {
++		printk("WARNING: Sending although there is a request outstanding.\n"
++		       "         Previous request must have timed out.\n");
++		transmission_free(vtpms->current_request);
++		vtpms->current_request = NULL;
 +	}
-+}
-+#endif
-+#endif /* !CONFIG_XEN */
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/ioport-xen.c tmp-linux-2.6-xen.patch/arch/x86_64/kernel/ioport-xen.c
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/ioport-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/ioport-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,99 @@
-+/*
-+ *	linux/arch/x86_64/kernel/ioport.c
-+ *
-+ * This contains the io-permission bitmap code - written by obz, with changes
-+ * by Linus.
-+ */
++	spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
 +
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/capability.h>
-+#include <linux/errno.h>
-+#include <linux/types.h>
-+#include <linux/ioport.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/stddef.h>
-+#include <linux/slab.h>
-+#include <linux/thread_info.h>
-+#include <xen/interface/physdev.h>
++	/*
++	 * Queue the packet if the driver below is not
++	 * yet ready, or if a packet is already
++	 * in the queue.
++	 * If the driver below is ready, unqueue all
++	 * packets first before sending our current
++	 * packet.
++	 * For each unqueued packet, except for the
++	 * last (=current) packet, call the function
++	 * tpm_xen_recv to wait for the response to come
++	 * back.
++	 */
++	if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
++		if (time_after(jiffies,
++		               vtpms->disconnect_time + HZ * 10)) {
++			rc = -ENOENT;
++		} else {
++			goto queue_it;
++		}
++	} else {
++		/*
++		 * Send all queued packets.
++		 */
++		if (_vtpm_send_queued(chip) == 0) {
 +
-+/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
-+static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
-+{
-+	int i;
++			vtpms->current_request = t;
 +
-+	if (new_value)
-+		for (i = base; i < base + extent; i++)
-+			__set_bit(i, bitmap);
-+	else
-+		for (i = base; i < base + extent; i++)
-+			clear_bit(i, bitmap);
++			rc = vtpm_vd_send(vtpms->tpm_private,
++			                  buf,
++			                  count,
++			                  t);
++			/*
++			 * The generic TPM driver will call
++			 * the function to receive the response.
++			 */
++			if (rc < 0) {
++				vtpms->current_request = NULL;
++				goto queue_it;
++			}
++		} else {
++queue_it:
++			if (!transmission_set_req_buffer(t, buf, count)) {
++				transmission_free(t);
++				rc = -ENOMEM;
++				goto exit;
++			}
++			/*
++			 * An error occurred. Don't even try
++			 * to send the current request. Just
++			 * queue it.
++			 */
++			spin_lock_irqsave(&vtpms->req_list_lock, flags);
++			vtpms->flags |= DATAEX_FLAG_QUEUED_ONLY;
++			list_add_tail(&t->next, &vtpms->queued_requests);
++			spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
++		}
++	}
++
++exit:
++	return rc;
 +}
 +
++
 +/*
-+ * this changes the io permissions bitmap in the current task.
++ * Send all queued requests.
 + */
-+asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
++static int _vtpm_send_queued(struct tpm_chip *chip)
 +{
-+	struct thread_struct * t = &current->thread;
-+	unsigned long *bitmap;
-+	struct physdev_set_iobitmap set_iobitmap;
++	int rc;
++	int error = 0;
++	unsigned long flags;
++	unsigned char buffer[1];
++	struct vtpm_state *vtpms;
++	vtpms = (struct vtpm_state *)chip_get_private(chip);
 +
-+	if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
-+		return -EINVAL;
-+	if (turn_on && !capable(CAP_SYS_RAWIO))
-+		return -EPERM;
++	spin_lock_irqsave(&vtpms->req_list_lock, flags);
 +
-+	/*
-+	 * If it's the first ioperm() call in this thread's lifetime, set the
-+	 * IO bitmap up. ioperm() is much less timing critical than clone(),
-+	 * this is why we delay this operation until now:
-+	 */
-+	if (!t->io_bitmap_ptr) {
-+		bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
-+		if (!bitmap)
-+			return -ENOMEM;
++	while (!list_empty(&vtpms->queued_requests)) {
++		/*
++		 * Need to dequeue them.
++		 * Read the result into a dummy buffer.
++		 */
++		struct transmission *qt = (struct transmission *)
++		                          vtpms->queued_requests.next;
++		list_del(&qt->next);
++		vtpms->current_request = qt;
++		spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
 +
-+		memset(bitmap, 0xff, IO_BITMAP_BYTES);
-+		t->io_bitmap_ptr = bitmap;
++		rc = vtpm_vd_send(vtpms->tpm_private,
++		                  qt->request,
++		                  qt->request_len,
++		                  qt);
 +
-+		set_xen_guest_handle(set_iobitmap.bitmap, (char *)bitmap);
-+		set_iobitmap.nr_ports = IO_BITMAP_BITS;
-+		HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &set_iobitmap);
++		if (rc < 0) {
++			spin_lock_irqsave(&vtpms->req_list_lock, flags);
++			if ((qt = vtpms->current_request) != NULL) {
++				/*
++				 * requeue it at the beginning
++				 * of the list
++				 */
++				list_add(&qt->next,
++				         &vtpms->queued_requests);
++			}
++			vtpms->current_request = NULL;
++			error = 1;
++			break;
++		}
++		/*
++		 * After this point qt is not valid anymore!
++		 * It is freed when the front-end is delivering
++		 * the data by calling tpm_recv
++		 */
++		/*
++		 * Receive response into provided dummy buffer
++		 */
++		rc = vtpm_recv(chip, buffer, sizeof(buffer));
++		spin_lock_irqsave(&vtpms->req_list_lock, flags);
 +	}
 +
-+	set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
++	spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
 +
-+	return 0;
++	return error;
 +}
 +
-+/*
-+ * sys_iopl has to be used when you want to access the IO ports
-+ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
-+ * you'd need 8kB of bitmaps/process, which is a bit excessive.
-+ *
-+ */
-+
-+asmlinkage long sys_iopl(unsigned int new_iopl, struct pt_regs *regs)
++static void vtpm_cancel(struct tpm_chip *chip)
 +{
-+	unsigned int old_iopl = current->thread.iopl;
-+	struct physdev_set_iopl set_iopl;
-+
-+	if (new_iopl > 3)
-+		return -EINVAL;
++	unsigned long flags;
++	struct vtpm_state *vtpms = (struct vtpm_state *)chip_get_private(chip);
 +
-+	/* Need "raw I/O" privileges for direct port access. */
-+	if ((new_iopl > old_iopl) && !capable(CAP_SYS_RAWIO))
-+		return -EPERM;
++	spin_lock_irqsave(&vtpms->resp_list_lock, flags);
 +
-+	/* Change our version of the privilege levels. */
-+	current->thread.iopl = new_iopl;
++	if (!vtpms->current_response && vtpms->current_request) {
++		spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++		interruptible_sleep_on(&vtpms->resp_wait_queue);
++		spin_lock_irqsave(&vtpms->resp_list_lock, flags);
++	}
 +
-+	/* Force the change at ring 0. */
-+	set_iopl.iopl = (new_iopl == 0) ? 1 : new_iopl;
-+	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
++	if (vtpms->current_response) {
++		struct transmission *t = vtpms->current_response;
++		vtpms->current_response = NULL;
++		transmission_free(t);
++	}
 +
-+	return 0;
++	spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
 +}
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/irq-xen.c tmp-linux-2.6-xen.patch/arch/x86_64/kernel/irq-xen.c
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/irq-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/irq-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,197 @@
-+/*
-+ *	linux/arch/x86_64/kernel/irq.c
-+ *
-+ *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
-+ *
-+ * This file contains the lowest level x86_64-specific interrupt
-+ * entry and irq statistics code. All the remaining irq logic is
-+ * done by the generic kernel/irq/ code and in the
-+ * x86_64-specific irq controller code. (e.g. i8259.c and
-+ * io_apic.c.)
-+ */
 +
-+#include <linux/kernel_stat.h>
-+#include <linux/interrupt.h>
-+#include <linux/seq_file.h>
-+#include <linux/module.h>
-+#include <linux/delay.h>
-+#include <asm/uaccess.h>
-+#include <asm/io_apic.h>
-+#include <asm/idle.h>
-+
-+atomic_t irq_err_count;
-+#ifdef CONFIG_X86_IO_APIC
-+#ifdef APIC_MISMATCH_DEBUG
-+atomic_t irq_mis_count;
-+#endif
-+#endif
-+
-+#ifdef CONFIG_DEBUG_STACKOVERFLOW
-+/*
-+ * Probabilistic stack overflow check:
-+ *
-+ * Only check the stack in process context, because everything else
-+ * runs on the big interrupt stacks. Checking reliably is too expensive,
-+ * so we just check from interrupts.
-+ */
-+static inline void stack_overflow_check(struct pt_regs *regs)
++static u8 vtpm_status(struct tpm_chip *chip)
 +{
-+	u64 curbase = (u64) current->thread_info;
-+	static unsigned long warned = -60*HZ;
++	u8 rc = 0;
++	unsigned long flags;
++	struct vtpm_state *vtpms;
 +
-+	if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE &&
-+	    regs->rsp <  curbase + sizeof(struct thread_info) + 128 &&
-+	    time_after(jiffies, warned + 60*HZ)) {
-+		printk("do_IRQ: %s near stack overflow (cur:%Lx,rsp:%lx)\n",
-+		       current->comm, curbase, regs->rsp);
-+		show_stack(NULL,NULL);
-+		warned = jiffies;
++	vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++	spin_lock_irqsave(&vtpms->resp_list_lock, flags);
++	/*
++	 * Data are available if:
++	 *  - there's a current response
++	 *  - the last packet was queued only (this is fake, but necessary to
++	 *      get the generic TPM layer to call the receive function).
++	 */
++	if (vtpms->current_response ||
++	    0 != (vtpms->flags & DATAEX_FLAG_QUEUED_ONLY)) {
++		rc = STATUS_DATA_AVAIL;
++	} else if (!vtpms->current_response && !vtpms->current_request) {
++		rc = STATUS_READY;
 +	}
++
++	spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++	return rc;
 +}
-+#endif
 +
-+/*
-+ * Generic, controller-independent functions:
-+ */
++static struct file_operations vtpm_ops = {
++	.owner = THIS_MODULE,
++	.llseek = no_llseek,
++	.open = tpm_open,
++	.read = tpm_read,
++	.write = tpm_write,
++	.release = tpm_release,
++};
 +
-+int show_interrupts(struct seq_file *p, void *v)
-+{
-+	int i = *(loff_t *) v, j;
-+	struct irqaction * action;
-+	unsigned long flags;
++static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
++static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
++static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
++static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
++static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
++static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
++		   NULL);
++static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
++static DEVICE_ATTR(cancel, S_IWUSR |S_IWGRP, NULL, tpm_store_cancel);
 +
-+	if (i == 0) {
-+		seq_printf(p, "           ");
-+		for_each_online_cpu(j)
-+			seq_printf(p, "CPU%-8d",j);
-+		seq_putc(p, '\n');
-+	}
++static struct attribute *vtpm_attrs[] = {
++	&dev_attr_pubek.attr,
++	&dev_attr_pcrs.attr,
++	&dev_attr_enabled.attr,
++	&dev_attr_active.attr,
++	&dev_attr_owned.attr,
++	&dev_attr_temp_deactivated.attr,
++	&dev_attr_caps.attr,
++	&dev_attr_cancel.attr,
++	NULL,
++};
 +
-+	if (i < NR_IRQS) {
-+		spin_lock_irqsave(&irq_desc[i].lock, flags);
-+		action = irq_desc[i].action;
-+		if (!action) 
-+			goto skip;
-+		seq_printf(p, "%3d: ",i);
-+#ifndef CONFIG_SMP
-+		seq_printf(p, "%10u ", kstat_irqs(i));
-+#else
-+		for_each_online_cpu(j)
-+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
-+#endif
-+		seq_printf(p, " %14s", irq_desc[i].chip->typename);
++static struct attribute_group vtpm_attr_grp = { .attrs = vtpm_attrs };
 +
-+		seq_printf(p, "  %s", action->name);
-+		for (action=action->next; action; action = action->next)
-+			seq_printf(p, ", %s", action->name);
-+		seq_putc(p, '\n');
-+skip:
-+		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-+	} else if (i == NR_IRQS) {
-+		seq_printf(p, "NMI: ");
-+		for_each_online_cpu(j)
-+			seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
-+		seq_putc(p, '\n');
-+#ifdef CONFIG_X86_LOCAL_APIC
-+		seq_printf(p, "LOC: ");
-+		for_each_online_cpu(j)
-+			seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
-+		seq_putc(p, '\n');
-+#endif
-+		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-+#ifdef CONFIG_X86_IO_APIC
-+#ifdef APIC_MISMATCH_DEBUG
-+		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
-+#endif
-+#endif
-+	}
-+	return 0;
-+}
++#define TPM_LONG_TIMEOUT   (10 * 60 * HZ)
++
++static struct tpm_vendor_specific tpm_vtpm = {
++	.recv = vtpm_recv,
++	.send = vtpm_send,
++	.cancel = vtpm_cancel,
++	.status = vtpm_status,
++	.req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
++	.req_complete_val  = STATUS_DATA_AVAIL,
++	.req_canceled = STATUS_READY,
++	.attr_group = &vtpm_attr_grp,
++	.miscdev = {
++		.fops = &vtpm_ops,
++	},
++	.duration = {
++		TPM_LONG_TIMEOUT,
++		TPM_LONG_TIMEOUT,
++		TPM_LONG_TIMEOUT,
++	},
++};
++
++struct tpm_chip *init_vtpm(struct device *dev,
++                           struct tpm_private *tp)
++{
++	long rc;
++	struct tpm_chip *chip;
++	struct vtpm_state *vtpms;
 +
-+/*
-+ * do_IRQ handles all normal device IRQ's (the special
-+ * SMP cross-CPU interrupts have their own specific
-+ * handlers).
-+ */
-+asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
-+{	
-+	/* high bit used in ret_from_ code  */
-+	unsigned irq = ~regs->orig_rax;
++	vtpms = kzalloc(sizeof(struct vtpm_state), GFP_KERNEL);
++	if (!vtpms)
++		return ERR_PTR(-ENOMEM);
 +
-+	if (unlikely(irq >= NR_IRQS)) {
-+		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
-+					__FUNCTION__, irq);
-+		BUG();
++	vtpm_state_init(vtpms);
++	vtpms->tpm_private = tp;
++
++	chip = tpm_register_hardware(dev, &tpm_vtpm);
++	if (!chip) {
++		rc = -ENODEV;
++		goto err_free_mem;
 +	}
 +
-+	exit_idle();
-+	irq_enter();
-+#ifdef CONFIG_DEBUG_STACKOVERFLOW
-+	stack_overflow_check(regs);
-+#endif
-+	__do_IRQ(irq, regs);
-+	irq_exit();
++	chip_set_private(chip, vtpms);
 +
-+	return 1;
++	return chip;
++
++err_free_mem:
++	kfree(vtpms);
++
++	return ERR_PTR(rc);
 +}
 +
-+#ifdef CONFIG_HOTPLUG_CPU
-+void fixup_irqs(cpumask_t map)
++void cleanup_vtpm(struct device *dev)
 +{
-+	unsigned int irq;
-+	static int warned;
++	struct tpm_chip *chip = dev_get_drvdata(dev);
++	struct vtpm_state *vtpms = (struct vtpm_state*)chip_get_private(chip);
++	tpm_remove_hardware(dev);
++	kfree(vtpms);
++}
+diff -r d894e36cfc30 -r 0aa021803deb drivers/char/tpm/tpm_vtpm.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/char/tpm/tpm_vtpm.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,55 @@
++#ifndef TPM_VTPM_H
++#define TPM_VTPM_H
 +
-+	for (irq = 0; irq < NR_IRQS; irq++) {
-+		cpumask_t mask;
-+		if (irq == 2)
-+			continue;
++struct tpm_chip;
++struct tpm_private;
 +
-+		cpus_and(mask, irq_desc[irq].affinity, map);
-+		if (any_online_cpu(mask) == NR_CPUS) {
-+			printk("Breaking affinity for irq %i\n", irq);
-+			mask = map;
-+		}
-+		if (irq_desc[irq].chip->set_affinity)
-+			irq_desc[irq].chip->set_affinity(irq, mask);
-+		else if (irq_desc[irq].action && !(warned++))
-+			printk("Cannot set affinity for irq %i\n", irq);
-+	}
++struct vtpm_state {
++	struct transmission *current_request;
++	spinlock_t           req_list_lock;
++	wait_queue_head_t    req_wait_queue;
 +
-+	/* That doesn't seem sufficient.  Give it 1ms. */
-+	local_irq_enable();
-+	mdelay(1);
-+	local_irq_disable();
-+}
-+#endif
++	struct list_head     queued_requests;
 +
-+extern void call_softirq(void);
++	struct transmission *current_response;
++	spinlock_t           resp_list_lock;
++	wait_queue_head_t    resp_wait_queue;     /* processes waiting for responses */
 +
-+asmlinkage void do_softirq(void)
-+{
-+ 	__u32 pending;
-+ 	unsigned long flags;
++	u8                   vd_status;
++	u8                   flags;
 +
-+ 	if (in_interrupt())
-+ 		return;
++	unsigned long        disconnect_time;
 +
-+ 	local_irq_save(flags);
-+ 	pending = local_softirq_pending();
-+ 	/* Switch to interrupt stack */
-+ 	if (pending) {
-+		call_softirq();
-+		WARN_ON_ONCE(softirq_count());
-+	}
-+ 	local_irq_restore(flags);
-+}
-+EXPORT_SYMBOL(do_softirq);
++	/*
++	 * The following is a private structure of the underlying
++	 * driver. It is passed as parameter in the send function.
++	 */
++	struct tpm_private *tpm_private;
++};
 +
-+#ifndef CONFIG_X86_LOCAL_APIC
-+/*
-+ * 'what should we do if we get a hw irq event on an illegal vector'.
-+ * each architecture has to answer this themselves.
-+ */
-+void ack_bad_irq(unsigned int irq)
++
++enum vdev_status {
++	TPM_VD_STATUS_DISCONNECTED = 0x0,
++	TPM_VD_STATUS_CONNECTED = 0x1
++};
++
++/* this function is called from tpm_vtpm.c */
++int vtpm_vd_send(struct tpm_private * tp,
++                 const u8 * buf, size_t count, void *ptr);
++
++/* these functions are offered by tpm_vtpm.c */
++struct tpm_chip *init_vtpm(struct device *,
++                           struct tpm_private *);
++void cleanup_vtpm(struct device *);
++int vtpm_vd_recv(const struct tpm_chip* chip,
++                 const unsigned char *buffer, size_t count, void *ptr);
++void vtpm_vd_status(const struct tpm_chip *, u8 status);
++
++static inline struct tpm_private *tpm_private_from_dev(struct device *dev)
 +{
-+        printk("unexpected IRQ trap at vector %02x\n", irq);
++	struct tpm_chip *chip = dev_get_drvdata(dev);
++	struct vtpm_state *vtpms = chip_get_private(chip);
++	return vtpms->tpm_private;
 +}
++
 +#endif
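
tpm_vtpm.h pins down the layering: tpm_vtpm.c provides init_vtpm()/cleanup_vtpm() plus the vtpm_vd_recv()/vtpm_vd_status() upcalls, and the transport below (tpm_xen.c) must implement vtpm_vd_send(). A hypothetical loopback transport that satisfies the contract, useful only to show the call flow; the real implementation grants the page to the backend and completes asynchronously from tpmif_int():

  /* Hypothetical, kernel-context: echo each request straight back
   * as its own response.  'ptr' identifies the in-flight struct
   * transmission and must be handed back verbatim so the glue can
   * match the reply to the request. */
  int vtpm_vd_send(struct tpm_private *tp,
                   const u8 *buf, size_t count, void *ptr)
  {
          return vtpm_vd_recv(tp->chip, buf, count, ptr);
  }
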
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/ldt-xen.c tmp-linux-2.6-xen.patch/arch/x86_64/kernel/ldt-xen.c
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/ldt-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/ldt-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,282 @@
+diff -r d894e36cfc30 -r 0aa021803deb drivers/char/tpm/tpm_xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/char/tpm/tpm_xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,722 @@
 +/*
-+ * linux/arch/x86_64/kernel/ldt.c
++ * Copyright (c) 2005, IBM Corporation
 + *
-+ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
-+ * Copyright (C) 1999 Ingo Molnar <mingo at redhat.com>
-+ * Copyright (C) 2002 Andi Kleen
-+ * 
-+ * This handles calls from both 32bit and 64bit mode.
++ * Author: Stefan Berger, stefanb at us.ibm.com
++ * Grant table support: Mahadevan Gomathisankaran
++ *
++ * This code has been derived from drivers/xen/netfront/netfront.c
++ *
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
 + */
 +
 +#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/string.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/vmalloc.h>
-+#include <linux/slab.h>
-+
++#include <linux/err.h>
++#include <linux/interrupt.h>
++#include <linux/mutex.h>
 +#include <asm/uaccess.h>
-+#include <asm/system.h>
-+#include <asm/ldt.h>
-+#include <asm/desc.h>
-+#include <asm/proto.h>
-+#include <asm/pgalloc.h>
++#include <xen/evtchn.h>
++#include <xen/interface/grant_table.h>
++#include <xen/interface/io/tpmif.h>
++#include <xen/gnttab.h>
++#include <xen/xenbus.h>
++#include "tpm.h"
++#include "tpm_vtpm.h"
 +
-+#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
-+static void flush_ldt(void *null)
-+{
-+	if (current->active_mm)
-+               load_LDT(&current->active_mm->context);
-+}
-+#endif
++#undef DEBUG
 +
-+static int alloc_ldt(mm_context_t *pc, unsigned mincount, int reload)
-+{
-+	void *oldldt;
-+	void *newldt;
-+	unsigned oldsize;
++/* local structures */
++struct tpm_private {
++	struct tpm_chip *chip;
 +
-+	if (mincount <= (unsigned)pc->size)
-+		return 0;
-+	oldsize = pc->size;
-+	mincount = (mincount+511)&(~511);
-+	if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
-+		newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
-+	else
-+		newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
++	tpmif_tx_interface_t *tx;
++	atomic_t refcnt;
++	unsigned int irq;
++	u8 is_connected;
++	u8 is_suspended;
 +
-+	if (!newldt)
-+		return -ENOMEM;
++	spinlock_t tx_lock;
 +
-+	if (oldsize)
-+		memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
-+	oldldt = pc->ldt;
-+	memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
-+	wmb();
-+	pc->ldt = newldt;
-+	wmb();
-+	pc->size = mincount;
-+	wmb();
-+	if (reload) {
-+#ifdef CONFIG_SMP
-+		cpumask_t mask;
++	struct tx_buffer *tx_buffers[TPMIF_TX_RING_SIZE];
 +
-+		preempt_disable();
-+#endif
-+		make_pages_readonly(
-+			pc->ldt,
-+			(pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+			XENFEAT_writable_descriptor_tables);
-+		load_LDT(pc);
-+#ifdef CONFIG_SMP
-+		mask = cpumask_of_cpu(smp_processor_id());
-+		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
-+			smp_call_function(flush_ldt, NULL, 1, 1);
-+		preempt_enable();
-+#endif
-+	}
-+	if (oldsize) {
-+		make_pages_writable(
-+			oldldt,
-+			(oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+			XENFEAT_writable_descriptor_tables);
-+		if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
-+			vfree(oldldt);
-+		else
-+			kfree(oldldt);
-+	}
-+	return 0;
-+}
++	atomic_t tx_busy;
++	void *tx_remember;
 +
-+static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
-+{
-+	int err = alloc_ldt(new, old->size, 0);
-+	if (err < 0)
-+		return err;
-+	memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
-+	make_pages_readonly(
-+		new->ldt,
-+		(new->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+		XENFEAT_writable_descriptor_tables);
-+	return 0;
-+}
++	domid_t backend_id;
++	wait_queue_head_t wait_q;
 +
-+/*
-+ * we do not have to muck with descriptors here, that is
-+ * done in switch_mm() as needed.
-+ */
-+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-+{
-+	struct mm_struct * old_mm;
-+	int retval = 0;
++	struct xenbus_device *dev;
++	int ring_ref;
++};
 +
-+	memset(&mm->context, 0, sizeof(mm->context));
-+	init_MUTEX(&mm->context.sem);
-+	old_mm = current->mm;
-+	if (old_mm && old_mm->context.size > 0) {
-+		down(&old_mm->context.sem);
-+		retval = copy_ldt(&mm->context, &old_mm->context);
-+		up(&old_mm->context.sem);
-+	}
-+	if (retval == 0) {
-+		spin_lock(&mm_unpinned_lock);
-+		list_add(&mm->context.unpinned, &mm_unpinned);
-+		spin_unlock(&mm_unpinned_lock);
-+	}
-+	return retval;
-+}
++struct tx_buffer {
++	unsigned int size;	/* available space in data */
++	unsigned int len;	/* used space in data */
++	unsigned char *data;	/* pointer to a page */
++};
 +
-+/*
-+ * 
-+ * Don't touch the LDT register - we're already in the next thread.
-+ */
-+void destroy_context(struct mm_struct *mm)
++
++/* locally visible variables */
++static grant_ref_t gref_head;
++static struct tpm_private *my_priv;
++
++/* local function prototypes */
++static irqreturn_t tpmif_int(int irq,
++                             void *tpm_priv,
++                             struct pt_regs *ptregs);
++static void tpmif_rx_action(unsigned long unused);
++static int tpmif_connect(struct xenbus_device *dev,
++                         struct tpm_private *tp,
++                         domid_t domid);
++static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0);
++static int tpmif_allocate_tx_buffers(struct tpm_private *tp);
++static void tpmif_free_tx_buffers(struct tpm_private *tp);
++static void tpmif_set_connected_state(struct tpm_private *tp,
++                                      u8 newstate);
++static int tpm_xmit(struct tpm_private *tp,
++                    const u8 * buf, size_t count, int userbuffer,
++                    void *remember);
++static void destroy_tpmring(struct tpm_private *tp);
++void __exit tpmif_exit(void);
++
++#define DPRINTK(fmt, args...) \
++    pr_debug("xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
++#define IPRINTK(fmt, args...) \
++    printk(KERN_INFO "xen_tpm_fr: " fmt, ##args)
++#define WPRINTK(fmt, args...) \
++    printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args)
++
++#define GRANT_INVALID_REF	0
++
++
++static inline int
++tx_buffer_copy(struct tx_buffer *txb, const u8 *src, int len,
++               int isuserbuffer)
 +{
-+	if (mm->context.size) {
-+		if (mm == current->active_mm)
-+			clear_LDT();
-+		make_pages_writable(
-+			mm->context.ldt,
-+			(mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+			XENFEAT_writable_descriptor_tables);
-+		if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
-+			vfree(mm->context.ldt);
-+		else
-+			kfree(mm->context.ldt);
-+		mm->context.size = 0;
-+	}
-+	if (!mm->context.pinned) {
-+		spin_lock(&mm_unpinned_lock);
-+		list_del(&mm->context.unpinned);
-+		spin_unlock(&mm_unpinned_lock);
++	int copied = len;
++
++	if (len > txb->size)
++		copied = txb->size;
++	if (isuserbuffer) {
++		if (copy_from_user(txb->data, src, copied))
++			return -EFAULT;
++	} else {
++		memcpy(txb->data, src, copied);
 +	}
++	txb->len = len;
++	return copied;
 +}
 +
-+static int read_ldt(void __user * ptr, unsigned long bytecount)
++static inline struct tx_buffer *tx_buffer_alloc(void)
 +{
-+	int err;
-+	unsigned long size;
-+	struct mm_struct * mm = current->mm;
++	struct tx_buffer *txb;
 +
-+	if (!mm->context.size)
-+		return 0;
-+	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
-+		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
++	txb = kzalloc(sizeof(struct tx_buffer), GFP_KERNEL);
++	if (!txb)
++		return NULL;
 +
-+	down(&mm->context.sem);
-+	size = mm->context.size*LDT_ENTRY_SIZE;
-+	if (size > bytecount)
-+		size = bytecount;
++	txb->len = 0;
++	txb->size = PAGE_SIZE;
++	txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
++	if (txb->data == NULL) {
++		kfree(txb);
++		txb = NULL;
++	}
 +
-+	err = 0;
-+	if (copy_to_user(ptr, mm->context.ldt, size))
-+		err = -EFAULT;
-+	up(&mm->context.sem);
-+	if (err < 0)
-+		goto error_return;
-+	if (size != bytecount) {
-+		/* zero-fill the rest */
-+		if (clear_user(ptr+size, bytecount-size) != 0) {
-+			err = -EFAULT;
-+			goto error_return;
-+		}
++	return txb;
++}
++
++
++static inline void tx_buffer_free(struct tx_buffer *txb)
++{
++	if (txb) {
++		free_page((long)txb->data);
++		kfree(txb);
 +	}
-+	return bytecount;
-+error_return:
-+	return err;
 +}
 +
-+static int read_default_ldt(void __user * ptr, unsigned long bytecount)
++/**************************************************************
++ Utility functions for the tpm_private structure
++**************************************************************/
++static void tpm_private_init(struct tpm_private *tp)
 +{
-+	/* Arbitrary number */ 
-+	/* x86-64 default LDT is all zeros */
-+	if (bytecount > 128) 
-+		bytecount = 128; 	
-+	if (clear_user(ptr, bytecount))
-+		return -EFAULT;
-+	return bytecount; 
++	spin_lock_init(&tp->tx_lock);
++	init_waitqueue_head(&tp->wait_q);
++	atomic_set(&tp->refcnt, 1);
 +}
 +
-+static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
++static void tpm_private_put(void)
 +{
-+	struct task_struct *me = current;
-+	struct mm_struct * mm = me->mm;
-+	__u32 entry_1, entry_2, *lp;
-+	unsigned long mach_lp;
-+	int error;
-+	struct user_desc ldt_info;
++	if (!atomic_dec_and_test(&my_priv->refcnt))
++		return;
 +
-+	error = -EINVAL;
++	tpmif_free_tx_buffers(my_priv);
++	kfree(my_priv);
++	my_priv = NULL;
++}
 +
-+	if (bytecount != sizeof(ldt_info))
-+		goto out;
-+	error = -EFAULT; 	
-+	if (copy_from_user(&ldt_info, ptr, bytecount))
-+		goto out;
++static struct tpm_private *tpm_private_get(void)
++{
++	int err;
 +
-+	error = -EINVAL;
-+	if (ldt_info.entry_number >= LDT_ENTRIES)
-+		goto out;
-+	if (ldt_info.contents == 3) {
-+		if (oldmode)
-+			goto out;
-+		if (ldt_info.seg_not_present == 0)
-+			goto out;
++	if (my_priv) {
++		atomic_inc(&my_priv->refcnt);
++		return my_priv;
 +	}
 +
-+	down(&mm->context.sem);
-+	if (ldt_info.entry_number >= (unsigned)mm->context.size) {
-+		error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
-+		if (error < 0)
-+			goto out_unlock;
-+	}
++	my_priv = kzalloc(sizeof(struct tpm_private), GFP_KERNEL);
++	if (!my_priv)
++		return NULL;
 +
-+	lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
-+ 	mach_lp = arbitrary_virt_to_machine(lp);
++	tpm_private_init(my_priv);
++	err = tpmif_allocate_tx_buffers(my_priv);
++	if (err < 0)
++		tpm_private_put();
 +
-+   	/* Allow LDTs to be cleared by the user. */
-+   	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
-+		if (oldmode || LDT_empty(&ldt_info)) {
-+			entry_1 = 0;
-+			entry_2 = 0;
-+			goto install;
-+		}
-+	}
++	return my_priv;
++}
 +
-+	entry_1 = LDT_entry_a(&ldt_info);
-+	entry_2 = LDT_entry_b(&ldt_info);
-+	if (oldmode)
-+		entry_2 &= ~(1 << 20);
++/**************************************************************
 +
-+	/* Install the new entry ...  */
-+install:
-+	error = HYPERVISOR_update_descriptor(mach_lp, (unsigned long)((entry_1 | (unsigned long) entry_2 << 32)));
++ The interface to let the tpm plugin register its callback
++ function and send data to another partition using this module
 +
-+out_unlock:
-+	up(&mm->context.sem);
-+out:
-+	return error;
-+}
++**************************************************************/
 +
-+asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
++static DEFINE_MUTEX(suspend_lock);
++/*
++ * Send data via this module by calling this function
++ */
++int vtpm_vd_send(struct tpm_private *tp,
++                 const u8 * buf, size_t count, void *ptr)
 +{
-+	int ret = -ENOSYS;
++	int sent;
 +
-+	switch (func) {
-+	case 0:
-+		ret = read_ldt(ptr, bytecount);
-+		break;
-+	case 1:
-+		ret = write_ldt(ptr, bytecount, 1);
-+		break;
-+	case 2:
-+		ret = read_default_ldt(ptr, bytecount);
-+		break;
-+	case 0x11:
-+		ret = write_ldt(ptr, bytecount, 0);
-+		break;
-+	}
-+	return ret;
++	mutex_lock(&suspend_lock);
++	sent = tpm_xmit(tp, buf, count, 0, ptr);
++	mutex_unlock(&suspend_lock);
++
++	return sent;
 +}
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/machine_kexec.c tmp-linux-2.6-xen.patch/arch/x86_64/kernel/machine_kexec.c
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/machine_kexec.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/machine_kexec.c	2007-07-30 16:35:11.000000000 +0200
-@@ -15,6 +15,113 @@
- #include <asm/mmu_context.h>
- #include <asm/io.h>
- 
-+#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
-+static u64 kexec_pgd[512] PAGE_ALIGNED;
-+static u64 kexec_pud0[512] PAGE_ALIGNED;
-+static u64 kexec_pmd0[512] PAGE_ALIGNED;
-+static u64 kexec_pte0[512] PAGE_ALIGNED;
-+static u64 kexec_pud1[512] PAGE_ALIGNED;
-+static u64 kexec_pmd1[512] PAGE_ALIGNED;
-+static u64 kexec_pte1[512] PAGE_ALIGNED;
 +
-+#ifdef CONFIG_XEN
++/**************************************************************
++ XENBUS support code
++**************************************************************/
 +
-+/* In the case of Xen, override hypervisor functions to be able to create
-+ * a regular identity mapping page table...
-+ */
++static int setup_tpmring(struct xenbus_device *dev,
++                         struct tpm_private *tp)
++{
++	tpmif_tx_interface_t *sring;
++	int err;
 +
-+#include <xen/interface/kexec.h>
-+#include <xen/interface/memory.h>
++	tp->ring_ref = GRANT_INVALID_REF;
 +
-+#define x__pmd(x) ((pmd_t) { (x) } )
-+#define x__pud(x) ((pud_t) { (x) } )
-+#define x__pgd(x) ((pgd_t) { (x) } )
++	sring = (void *)__get_free_page(GFP_KERNEL);
++	if (!sring) {
++		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
++		return -ENOMEM;
++	}
++	tp->tx = sring;
 +
-+#define x_pmd_val(x)   ((x).pmd)
-+#define x_pud_val(x)   ((x).pud)
-+#define x_pgd_val(x)   ((x).pgd)
++	err = xenbus_grant_ring(dev, virt_to_mfn(tp->tx));
++	if (err < 0) {
++		free_page((unsigned long)sring);
++		tp->tx = NULL;
++		xenbus_dev_fatal(dev, err, "allocating grant reference");
++		goto fail;
++	}
++	tp->ring_ref = err;
 +
-+static inline void x_set_pmd(pmd_t *dst, pmd_t val)
-+{
-+	x_pmd_val(*dst) = x_pmd_val(val);
-+}
++	err = tpmif_connect(dev, tp, dev->otherend_id);
++	if (err)
++		goto fail;
 +
-+static inline void x_set_pud(pud_t *dst, pud_t val)
-+{
-+	x_pud_val(*dst) = phys_to_machine(x_pud_val(val));
++	return 0;
++fail:
++	destroy_tpmring(tp);
++	return err;
 +}
 +
-+static inline void x_pud_clear (pud_t *pud)
-+{
-+	x_pud_val(*pud) = 0;
-+}
 +
-+static inline void x_set_pgd(pgd_t *dst, pgd_t val)
++static void destroy_tpmring(struct tpm_private *tp)
 +{
-+	x_pgd_val(*dst) = phys_to_machine(x_pgd_val(val));
-+}
++	tpmif_set_connected_state(tp, 0);
 +
-+static inline void x_pgd_clear (pgd_t * pgd)
-+{
-+	x_pgd_val(*pgd) = 0;
++	if (tp->ring_ref != GRANT_INVALID_REF) {
++		gnttab_end_foreign_access(tp->ring_ref, (unsigned long)tp->tx);
++		tp->ring_ref = GRANT_INVALID_REF;
++		tp->tx = NULL;
++	}
++
++	if (tp->irq)
++		unbind_from_irqhandler(tp->irq, tp);
++
++	tp->irq = 0;
 +}
 +
-+#define X__PAGE_KERNEL_LARGE_EXEC \
-+         _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_PSE
-+#define X_KERNPG_TABLE _PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY
 +
-+#define __ma(x) (pfn_to_mfn(__pa((x)) >> PAGE_SHIFT) << PAGE_SHIFT)
++static int talk_to_backend(struct xenbus_device *dev,
++                           struct tpm_private *tp)
++{
++	const char *message = NULL;
++	int err;
++	struct xenbus_transaction xbt;
 +
-+#if PAGES_NR > KEXEC_XEN_NO_PAGES
-+#error PAGES_NR is greater than KEXEC_XEN_NO_PAGES - Xen support will break
-+#endif
++	err = setup_tpmring(dev, tp);
++	if (err) {
++		xenbus_dev_fatal(dev, err, "setting up ring");
++		goto out;
++	}
 +
-+#if PA_CONTROL_PAGE != 0
-+#error PA_CONTROL_PAGE is non zero - Xen support will break
-+#endif
++again:
++	err = xenbus_transaction_start(&xbt);
++	if (err) {
++		xenbus_dev_fatal(dev, err, "starting transaction");
++		goto destroy_tpmring;
++	}
 +
-+void machine_kexec_setup_load_arg(xen_kexec_image_t *xki, struct kimage *image)
-+{
-+	void *control_page;
-+	void *table_page;
++	err = xenbus_printf(xbt, dev->nodename,
++	                    "ring-ref", "%u", tp->ring_ref);
++	if (err) {
++		message = "writing ring-ref";
++		goto abort_transaction;
++	}
 +
-+	memset(xki->page_list, 0, sizeof(xki->page_list));
++	err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
++			    irq_to_evtchn_port(tp->irq));
++	if (err) {
++		message = "writing event-channel";
++		goto abort_transaction;
++	}
 +
-+	control_page = page_address(image->control_code_page) + PAGE_SIZE;
-+	memcpy(control_page, relocate_kernel, PAGE_SIZE);
++	err = xenbus_transaction_end(xbt, 0);
++	if (err == -EAGAIN)
++		goto again;
++	if (err) {
++		xenbus_dev_fatal(dev, err, "completing transaction");
++		goto destroy_tpmring;
++	}
 +
-+	table_page = page_address(image->control_code_page);
++	xenbus_switch_state(dev, XenbusStateConnected);
 +
-+	xki->page_list[PA_CONTROL_PAGE] = __ma(control_page);
-+	xki->page_list[PA_TABLE_PAGE] = __ma(table_page);
++	return 0;
 +
-+	xki->page_list[PA_PGD] = __ma(kexec_pgd);
-+	xki->page_list[PA_PUD_0] = __ma(kexec_pud0);
-+	xki->page_list[PA_PUD_1] = __ma(kexec_pud1);
-+	xki->page_list[PA_PMD_0] = __ma(kexec_pmd0);
-+	xki->page_list[PA_PMD_1] = __ma(kexec_pmd1);
-+	xki->page_list[PA_PTE_0] = __ma(kexec_pte0);
-+	xki->page_list[PA_PTE_1] = __ma(kexec_pte1);
++abort_transaction:
++	xenbus_transaction_end(xbt, 1);
++	if (message)
++		xenbus_dev_error(dev, err, "%s", message);
++destroy_tpmring:
++	destroy_tpmring(tp);
++out:
++	return err;
 +}
 +
-+#else /* CONFIG_XEN */
++/**
++ * Callback received when the backend's state changes.
++ */
++static void backend_changed(struct xenbus_device *dev,
++			    enum xenbus_state backend_state)
++{
++	struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
++	DPRINTK("\n");
 +
-+#define x__pmd(x) __pmd(x)
-+#define x__pud(x) __pud(x)
-+#define x__pgd(x) __pgd(x)
++	switch (backend_state) {
++	case XenbusStateInitialising:
++	case XenbusStateInitWait:
++	case XenbusStateInitialised:
++	case XenbusStateReconfiguring:
++	case XenbusStateReconfigured:
++	case XenbusStateUnknown:
++		break;
 +
-+#define x_set_pmd(x, y) set_pmd(x, y)
-+#define x_set_pud(x, y) set_pud(x, y)
-+#define x_set_pgd(x, y) set_pgd(x, y)
++	case XenbusStateConnected:
++		tpmif_set_connected_state(tp, 1);
++		break;
 +
-+#define x_pud_clear(x) pud_clear(x)
-+#define x_pgd_clear(x) pgd_clear(x)
++	case XenbusStateClosing:
++		tpmif_set_connected_state(tp, 0);
++		xenbus_frontend_closed(dev);
++		break;
 +
-+#define X__PAGE_KERNEL_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
-+#define X_KERNPG_TABLE _KERNPG_TABLE
++	case XenbusStateClosed:
++		tpmif_set_connected_state(tp, 0);
++		if (tp->is_suspended == 0)
++			device_unregister(&dev->dev);
++		xenbus_frontend_closed(dev);
++		break;
++	}
++}
 +
-+#endif /* CONFIG_XEN */
++static int tpmfront_probe(struct xenbus_device *dev,
++                          const struct xenbus_device_id *id)
++{
++	int err;
++	int handle;
++	struct tpm_private *tp = tpm_private_get();
 +
- static void init_level2_page(pmd_t *level2p, unsigned long addr)
- {
- 	unsigned long end_addr;
-@@ -22,7 +129,7 @@ static void init_level2_page(pmd_t *leve
- 	addr &= PAGE_MASK;
- 	end_addr = addr + PUD_SIZE;
- 	while (addr < end_addr) {
--		set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
-+		x_set_pmd(level2p++, x__pmd(addr | X__PAGE_KERNEL_LARGE_EXEC));
- 		addr += PMD_SIZE;
- 	}
- }
-@@ -47,12 +154,12 @@ static int init_level3_page(struct kimag
- 		}
- 		level2p = (pmd_t *)page_address(page);
- 		init_level2_page(level2p, addr);
--		set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
-+		x_set_pud(level3p++, x__pud(__pa(level2p) | X_KERNPG_TABLE));
- 		addr += PUD_SIZE;
- 	}
- 	/* clear the unused entries */
- 	while (addr < end_addr) {
--		pud_clear(level3p++);
-+		x_pud_clear(level3p++);
- 		addr += PUD_SIZE;
- 	}
- out:
-@@ -83,12 +190,12 @@ static int init_level4_page(struct kimag
- 		if (result) {
- 			goto out;
- 		}
--		set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
-+		x_set_pgd(level4p++, x__pgd(__pa(level3p) | X_KERNPG_TABLE));
- 		addr += PGDIR_SIZE;
- 	}
- 	/* clear the unused entries */
- 	while (addr < end_addr) {
--		pgd_clear(level4p++);
-+		x_pgd_clear(level4p++);
- 		addr += PGDIR_SIZE;
- 	}
- out:
-@@ -99,77 +206,29 @@ out:
- static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
- {
- 	pgd_t *level4p;
--	level4p = (pgd_t *)__va(start_pgtable);
-- 	return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT);
--}
--
--static void set_idt(void *newidt, u16 limit)
--{
--	struct desc_ptr curidt;
-+	unsigned long x_end_pfn = end_pfn;
- 
--	/* x86-64 supports unaliged loads & stores */
--	curidt.size    = limit;
--	curidt.address = (unsigned long)newidt;
-+#ifdef CONFIG_XEN
-+	x_end_pfn = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
-+#endif
- 
--	__asm__ __volatile__ (
--		"lidtq %0\n"
--		: : "m" (curidt)
--		);
--};
--
--
--static void set_gdt(void *newgdt, u16 limit)
--{
--	struct desc_ptr curgdt;
--
--	/* x86-64 supports unaligned loads & stores */
--	curgdt.size    = limit;
--	curgdt.address = (unsigned long)newgdt;
--
--	__asm__ __volatile__ (
--		"lgdtq %0\n"
--		: : "m" (curgdt)
--		);
--};
--
--static void load_segments(void)
--{
--	__asm__ __volatile__ (
--		"\tmovl %0,%%ds\n"
--		"\tmovl %0,%%es\n"
--		"\tmovl %0,%%ss\n"
--		"\tmovl %0,%%fs\n"
--		"\tmovl %0,%%gs\n"
--		: : "a" (__KERNEL_DS) : "memory"
--		);
-+	level4p = (pgd_t *)__va(start_pgtable);
-+ 	return init_level4_page(image, level4p, 0, x_end_pfn << PAGE_SHIFT);
- }
- 
--typedef NORET_TYPE void (*relocate_new_kernel_t)(unsigned long indirection_page,
--					unsigned long control_code_buffer,
--					unsigned long start_address,
--					unsigned long pgtable) ATTRIB_NORET;
--
--extern const unsigned char relocate_new_kernel[];
--extern const unsigned long relocate_new_kernel_size;
--
- int machine_kexec_prepare(struct kimage *image)
- {
--	unsigned long start_pgtable, control_code_buffer;
-+	unsigned long start_pgtable;
- 	int result;
- 
- 	/* Calculate the offsets */
- 	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
--	control_code_buffer = start_pgtable + PAGE_SIZE;
- 
- 	/* Setup the identity mapped 64bit page table */
- 	result = init_pgtable(image, start_pgtable);
- 	if (result)
- 		return result;
- 
--	/* Place the code in the reboot code buffer */
--	memcpy(__va(control_code_buffer), relocate_new_kernel,
--						relocate_new_kernel_size);
--
- 	return 0;
- }
- 
-@@ -178,51 +237,43 @@ void machine_kexec_cleanup(struct kimage
- 	return;
- }
- 
-+#ifndef CONFIG_XEN
- /*
-  * Do not allocate memory (or fail in any way) in machine_kexec().
-  * We are past the point of no return, committed to rebooting now.
-  */
- NORET_TYPE void machine_kexec(struct kimage *image)
- {
--	unsigned long page_list;
--	unsigned long control_code_buffer;
--	unsigned long start_pgtable;
--	relocate_new_kernel_t rnk;
-+	unsigned long page_list[PAGES_NR];
-+	void *control_page;
- 
- 	/* Interrupts aren't acceptable while we reboot */
- 	local_irq_disable();
- 
--	/* Calculate the offsets */
--	page_list = image->head;
--	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
--	control_code_buffer = start_pgtable + PAGE_SIZE;
-+	control_page = page_address(image->control_code_page) + PAGE_SIZE;
-+	memcpy(control_page, relocate_kernel, PAGE_SIZE);
++	if (!tp)
++		return -ENOMEM;
 +
-+	page_list[PA_CONTROL_PAGE] = __pa(control_page);
-+	page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel;
-+	page_list[PA_PGD] = __pa(kexec_pgd);
-+	page_list[VA_PGD] = (unsigned long)kexec_pgd;
-+	page_list[PA_PUD_0] = __pa(kexec_pud0);
-+	page_list[VA_PUD_0] = (unsigned long)kexec_pud0;
-+	page_list[PA_PMD_0] = __pa(kexec_pmd0);
-+	page_list[VA_PMD_0] = (unsigned long)kexec_pmd0;
-+	page_list[PA_PTE_0] = __pa(kexec_pte0);
-+	page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
-+	page_list[PA_PUD_1] = __pa(kexec_pud1);
-+	page_list[VA_PUD_1] = (unsigned long)kexec_pud1;
-+	page_list[PA_PMD_1] = __pa(kexec_pmd1);
-+	page_list[VA_PMD_1] = (unsigned long)kexec_pmd1;
-+	page_list[PA_PTE_1] = __pa(kexec_pte1);
-+	page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
++	tp->chip = init_vtpm(&dev->dev, tp);
++	if (IS_ERR(tp->chip))
++		return PTR_ERR(tp->chip);
 +
-+	page_list[PA_TABLE_PAGE] =
-+	  (unsigned long)__pa(page_address(image->control_code_page));
- 
--	/* Set the low half of the page table to my identity mapped
--	 * page table for kexec.  Leave the high half pointing at the
--	 * kernel pages.   Don't bother to flush the global pages
--	 * as that will happen when I fully switch to my identity mapped
--	 * page table anyway.
--	 */
--	memcpy(__va(read_cr3()), __va(start_pgtable), PAGE_SIZE/2);
--	__flush_tlb();
--
--
--	/* The segment registers are funny things, they have both a
--	 * visible and an invisible part.  Whenever the visible part is
--	 * set to a specific selector, the invisible part is loaded
--	 * with from a table in memory.  At no other time is the
--	 * descriptor table in memory accessed.
--	 *
--	 * I take advantage of this here by force loading the
--	 * segments, before I zap the gdt with an invalid value.
--	 */
--	load_segments();
--	/* The gdt & idt are now invalid.
--	 * If you want to load them you must set up your own idt & gdt.
--	 */
--	set_gdt(phys_to_virt(0),0);
--	set_idt(phys_to_virt(0),0);
--	/* now call it */
--	rnk = (relocate_new_kernel_t) control_code_buffer;
--	(*rnk)(page_list, control_code_buffer, image->start, start_pgtable);
-+	relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
-+			image->start);
- }
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/Makefile tmp-linux-2.6-xen.patch/arch/x86_64/kernel/Makefile
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/Makefile	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/Makefile	2007-07-30 16:35:11.000000000 +0200
-@@ -21,11 +21,13 @@ obj-$(CONFIG_MICROCODE)		+= microcode.o
- obj-$(CONFIG_X86_CPUID)		+= cpuid.o
- obj-$(CONFIG_SMP)		+= smp.o smpboot.o trampoline.o
- obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o  nmi.o
-+obj-$(CONFIG_X86_XEN_GENAPIC)	+= genapic.o genapic_xen.o
- obj-$(CONFIG_X86_IO_APIC)	+= io_apic.o mpparse.o \
- 		genapic.o genapic_cluster.o genapic_flat.o
- obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o crash.o
- obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
--obj-$(CONFIG_PM)		+= suspend.o
-+obj-$(CONFIG_SOFTWARE_SUSPEND)	+= suspend.o
-+obj-$(CONFIG_ACPI_SLEEP)	+= suspend.o
- obj-$(CONFIG_SOFTWARE_SUSPEND)	+= suspend_asm.o
- obj-$(CONFIG_CPU_FREQ)		+= cpufreq/
- obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
-@@ -55,3 +57,18 @@ i8237-y				+= ../../i386/kernel/i8237.o
- msr-$(subst m,y,$(CONFIG_X86_MSR))  += ../../i386/kernel/msr.o
- alternative-y			+= ../../i386/kernel/alternative.o
- 
-+ifdef CONFIG_XEN
-+time-y				+= ../../i386/kernel/time-xen.o
-+pci-dma-y			+= ../../i386/kernel/pci-dma-xen.o
-+microcode-$(subst m,y,$(CONFIG_MICROCODE))  := ../../i386/kernel/microcode-xen.o
-+quirks-y			:= ../../i386/kernel/quirks-xen.o
-+
-+n-obj-xen := i8259.o reboot.o i8237.o smpboot.o trampoline.o
-+
-+include $(srctree)/scripts/Makefile.xen
-+
-+obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
-+obj-y := $(call cherrypickxen, $(obj-y))
-+extra-y := $(call cherrypickxen, $(extra-y))
-+%/head-xen.o %/head-xen.s: EXTRA_AFLAGS :=
-+endif
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/mpparse-xen.c tmp-linux-2.6-xen.patch/arch/x86_64/kernel/mpparse-xen.c
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/mpparse-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/mpparse-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,1011 @@
-+/*
-+ *	Intel Multiprocessor Specification 1.1 and 1.4
-+ *	compliant MP-table parsing routines.
-+ *
-+ *	(c) 1995 Alan Cox, Building #3 <alan at redhat.com>
-+ *	(c) 1998, 1999, 2000 Ingo Molnar <mingo at redhat.com>
-+ *
-+ *	Fixes
-+ *		Erich Boleyn	:	MP v1.4 and additional changes.
-+ *		Alan Cox	:	Added EBDA scanning
-+ *		Ingo Molnar	:	various cleanups and rewrites
-+ *		Maciej W. Rozycki:	Bits for default MP configurations
-+ *		Paul Diefenbaugh:	Added full ACPI support
-+ */
++	err = xenbus_scanf(XBT_NIL, dev->nodename,
++	                   "handle", "%i", &handle);
++	if (XENBUS_EXIST_ERR(err))
++		return err;
 +
-+#include <linux/mm.h>
-+#include <linux/init.h>
-+#include <linux/delay.h>
-+#include <linux/bootmem.h>
-+#include <linux/smp_lock.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/acpi.h>
-+#include <linux/module.h>
++	if (err < 0) {
++		xenbus_dev_fatal(dev, err, "reading handle");
++		return err;
++	}
 +
-+#include <asm/smp.h>
-+#include <asm/mtrr.h>
-+#include <asm/mpspec.h>
-+#include <asm/pgalloc.h>
-+#include <asm/io_apic.h>
-+#include <asm/proto.h>
-+#include <asm/acpi.h>
++	tp->dev = dev;
++
++	err = talk_to_backend(dev, tp);
++	if (err) {
++		tpm_private_put();
++		return err;
++	}
 +
-+/* Have we found an MP table */
-+int smp_found_config;
-+unsigned int __initdata maxcpus = NR_CPUS;
++	return 0;
++}
 +
-+int acpi_found_madt;
 +
-+/*
-+ * Various Linux-internal data structures created from the
-+ * MP-table.
-+ */
-+unsigned char apic_version [MAX_APICS];
-+unsigned char mp_bus_id_to_type [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
-+int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
++static int tpmfront_remove(struct xenbus_device *dev)
++{
++	struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
++	destroy_tpmring(tp);
++	cleanup_vtpm(&dev->dev);
++	return 0;
++}
 +
-+static int mp_current_pci_id = 0;
-+/* I/O APIC entries */
-+struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
++static int tpmfront_suspend(struct xenbus_device *dev)
++{
++	struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
++	u32 ctr;
 +
-+/* # of MP IRQ source entries */
-+struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
++	/* Take the lock, preventing any application from sending. */
++	mutex_lock(&suspend_lock);
++	tp->is_suspended = 1;
 +
-+/* MP IRQ source entries */
-+int mp_irq_entries;
++	for (ctr = 0; atomic_read(&tp->tx_busy); ctr++) {
++		if ((ctr % 10) == 0)
++			printk(KERN_INFO "TPM-FE [INFO]: Waiting for outstanding "
++			       "request.\n");
++		/* Wait for a request to be responded to. */
++		interruptible_sleep_on_timeout(&tp->wait_q, 100);
++	}
 +
-+int nr_ioapics;
-+int pic_mode;
-+unsigned long mp_lapic_addr = 0;
++	return 0;
++}
 +
++static int tpmfront_suspend_finish(struct tpm_private *tp)
++{
++	tp->is_suspended = 0;
++	/* Allow applications to send again. */
++	mutex_unlock(&suspend_lock);
++	return 0;
++}
 +
++static int tpmfront_suspend_cancel(struct xenbus_device *dev)
++{
++	struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
++	return tpmfront_suspend_finish(tp);
++}
 +
-+/* Processor that is doing the boot up */
-+unsigned int boot_cpu_id = -1U;
-+/* Internal processor count */
-+unsigned int num_processors __initdata = 0;
++static int tpmfront_resume(struct xenbus_device *dev)
++{
++	struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
++	destroy_tpmring(tp);
++	return talk_to_backend(dev, tp);
++}
 +
-+unsigned disabled_cpus __initdata;
++static int tpmif_connect(struct xenbus_device *dev,
++                         struct tpm_private *tp,
++                         domid_t domid)
++{
++	int err;
 +
-+/* Bitmask of physically existing CPUs */
-+physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
++	tp->backend_id = domid;
 +
-+/* ACPI MADT entry parsing functions */
-+#ifdef CONFIG_ACPI
-+extern struct acpi_boot_flags acpi_boot;
-+#ifdef CONFIG_X86_LOCAL_APIC
-+extern int acpi_parse_lapic (acpi_table_entry_header *header);
-+extern int acpi_parse_lapic_addr_ovr (acpi_table_entry_header *header);
-+extern int acpi_parse_lapic_nmi (acpi_table_entry_header *header);
-+#endif /*CONFIG_X86_LOCAL_APIC*/
-+#ifdef CONFIG_X86_IO_APIC
-+extern int acpi_parse_ioapic (acpi_table_entry_header *header);
-+#endif /*CONFIG_X86_IO_APIC*/
-+#endif /*CONFIG_ACPI*/
++	err = bind_listening_port_to_irqhandler(
++		domid, tpmif_int, SA_SAMPLE_RANDOM, "tpmif", tp);
++	if (err <= 0) {
++		WPRINTK("bind_listening_port_to_irqhandler failed "
++			"(err=%d)\n", err);
++		return err;
++	}
++	tp->irq = err;
 +
-+u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++	return 0;
++}
 +
++static struct xenbus_device_id tpmfront_ids[] = {
++	{ "vtpm" },
++	{ "" }
++};
 +
-+/*
-+ * Intel MP BIOS table parsing routines:
-+ */
++static struct xenbus_driver tpmfront = {
++	.name = "vtpm",
++	.owner = THIS_MODULE,
++	.ids = tpmfront_ids,
++	.probe = tpmfront_probe,
++	.remove =  tpmfront_remove,
++	.resume = tpmfront_resume,
++	.otherend_changed = backend_changed,
++	.suspend = tpmfront_suspend,
++	.suspend_cancel = tpmfront_suspend_cancel,
++};
 +
-+/*
-+ * Checksum an MP configuration block.
-+ */
++static void __init init_tpm_xenbus(void)
++{
++	xenbus_register_frontend(&tpmfront);
++}
 +
-+static int __init mpf_checksum(unsigned char *mp, int len)
++static int tpmif_allocate_tx_buffers(struct tpm_private *tp)
 +{
-+	int sum = 0;
++	unsigned int i;
 +
-+	while (len--)
-+		sum += *mp++;
++	for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
++		tp->tx_buffers[i] = tx_buffer_alloc();
++		if (!tp->tx_buffers[i]) {
++			tpmif_free_tx_buffers(tp);
++			return -ENOMEM;
++		}
++	}
++	return 0;
++}
 +
-+	return sum & 0xFF;
++static void tpmif_free_tx_buffers(struct tpm_private *tp)
++{
++	unsigned int i;
++
++	for (i = 0; i < TPMIF_TX_RING_SIZE; i++)
++		tx_buffer_free(tp->tx_buffers[i]);
 +}
 +
-+#ifndef CONFIG_XEN
-+static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
++static void tpmif_rx_action(unsigned long priv)
 +{
-+	int cpu;
-+	unsigned char ver;
-+	cpumask_t tmp_map;
++	struct tpm_private *tp = (struct tpm_private *)priv;
++	int i = 0;
++	unsigned int received;
++	unsigned int offset = 0;
++	u8 *buffer;
++	tpmif_tx_request_t *tx = &tp->tx->ring[i].req;
 +
-+	if (!(m->mpc_cpuflag & CPU_ENABLED)) {
-+		disabled_cpus++;
-+		return;
-+	}
++	atomic_set(&tp->tx_busy, 0);
++	wake_up_interruptible(&tp->wait_q);
 +
-+	printk(KERN_INFO "Processor #%d %d:%d APIC version %d\n",
-+		m->mpc_apicid,
-+	       (m->mpc_cpufeature & CPU_FAMILY_MASK)>>8,
-+	       (m->mpc_cpufeature & CPU_MODEL_MASK)>>4,
-+		m->mpc_apicver);
++	received = tx->size;
 +
-+	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
-+		Dprintk("    Bootup CPU\n");
-+		boot_cpu_id = m->mpc_apicid;
-+	}
-+	if (num_processors >= NR_CPUS) {
-+		printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
-+			" Processor ignored.\n", NR_CPUS);
++	buffer = kmalloc(received, GFP_ATOMIC);
++	if (!buffer)
 +		return;
-+	}
 +
-+	num_processors++;
-+	cpus_complement(tmp_map, cpu_present_map);
-+	cpu = first_cpu(tmp_map);
++	for (i = 0; i < TPMIF_TX_RING_SIZE && offset < received; i++) {
++		struct tx_buffer *txb = tp->tx_buffers[i];
++		tpmif_tx_request_t *tx;
++		unsigned int tocopy;
 +
-+#if MAX_APICS < 255	
-+	if ((int)m->mpc_apicid > MAX_APICS) {
-+		printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n",
-+			m->mpc_apicid, MAX_APICS);
-+		return;
-+	}
-+#endif
-+	ver = m->mpc_apicver;
++		tx = &tp->tx->ring[i].req;
++		tocopy = tx->size;
++		if (tocopy > PAGE_SIZE)
++			tocopy = PAGE_SIZE;
 +
-+	physid_set(m->mpc_apicid, phys_cpu_present_map);
-+	/*
-+	 * Validate version
-+	 */
-+	if (ver == 0x0) {
-+		printk(KERN_ERR "BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid);
-+		ver = 0x10;
++		memcpy(&buffer[offset], txb->data, tocopy);
++
++		gnttab_release_grant_reference(&gref_head, tx->ref);
++
++		offset += tocopy;
 +	}
-+	apic_version[m->mpc_apicid] = ver;
-+ 	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
-+ 		/*
-+ 		 * bios_cpu_apicid is required to have processors listed
-+ 		 * in same order as logical cpu numbers. Hence the first
-+ 		 * entry is BSP, and so on.
-+ 		 */
-+		cpu = 0;
-+ 	}
-+	bios_cpu_apicid[cpu] = m->mpc_apicid;
-+	x86_cpu_to_apicid[cpu] = m->mpc_apicid;
 +
-+	cpu_set(cpu, cpu_possible_map);
-+	cpu_set(cpu, cpu_present_map);
-+}
-+#else
-+static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
-+{
-+	num_processors++;
++	vtpm_vd_recv(tp->chip, buffer, received, tp->tx_remember);
++	kfree(buffer);
 +}
-+#endif /* CONFIG_XEN */
 +
-+static void __init MP_bus_info (struct mpc_config_bus *m)
++
++static irqreturn_t tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs)
 +{
-+	char str[7];
++	struct tpm_private *tp = tpm_priv;
++	unsigned long flags;
 +
-+	memcpy(str, m->mpc_bustype, 6);
-+	str[6] = 0;
-+	Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
++	spin_lock_irqsave(&tp->tx_lock, flags);
++	tpmif_rx_tasklet.data = (unsigned long)tp;
++	tasklet_schedule(&tpmif_rx_tasklet);
++	spin_unlock_irqrestore(&tp->tx_lock, flags);
 +
-+	if (strncmp(str, "ISA", 3) == 0) {
-+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
-+	} else if (strncmp(str, "EISA", 4) == 0) {
-+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
-+	} else if (strncmp(str, "PCI", 3) == 0) {
-+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
-+		mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
-+		mp_current_pci_id++;
-+	} else if (strncmp(str, "MCA", 3) == 0) {
-+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
-+	} else {
-+		printk(KERN_ERR "Unknown bustype %s\n", str);
-+	}
++	return IRQ_HANDLED;
 +}
 +
-+static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
++
++static int tpm_xmit(struct tpm_private *tp,
++                    const u8 * buf, size_t count, int isuserbuffer,
++                    void *remember)
 +{
-+	if (!(m->mpc_flags & MPC_APIC_USABLE))
-+		return;
++	tpmif_tx_request_t *tx;
++	TPMIF_RING_IDX i;
++	unsigned int offset = 0;
 +
-+	printk("I/O APIC #%d Version %d at 0x%X.\n",
-+		m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
-+	if (nr_ioapics >= MAX_IO_APICS) {
-+		printk(KERN_ERR "Max # of I/O APICs (%d) exceeded (found %d).\n",
-+			MAX_IO_APICS, nr_ioapics);
-+		panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
++	spin_lock_irq(&tp->tx_lock);
++
++	if (unlikely(atomic_read(&tp->tx_busy))) {
++		printk(KERN_WARNING "tpm_xmit: There's an outstanding request/response "
++		       "on the way!\n");
++		spin_unlock_irq(&tp->tx_lock);
++		return -EBUSY;
 +	}
-+	if (!m->mpc_apicaddr) {
-+		printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
-+			" found in MP table, skipping!\n");
-+		return;
++
++	if (tp->is_connected != 1) {
++		spin_unlock_irq(&tp->tx_lock);
++		return -EIO;
 +	}
-+	mp_ioapics[nr_ioapics] = *m;
-+	nr_ioapics++;
++
++	for (i = 0; count > 0 && i < TPMIF_TX_RING_SIZE; i++) {
++		struct tx_buffer *txb = tp->tx_buffers[i];
++		int copied;
++
++		if (!txb) {
++			DPRINTK("txb (i=%d) is NULL. buffers initialized?\n"
++				"Not transmitting anything!\n", i);
++			spin_unlock_irq(&tp->tx_lock);
++			return -EFAULT;
++		}
++
++		copied = tx_buffer_copy(txb, &buf[offset], count,
++		                        isuserbuffer);
++		if (copied < 0) {
++			/* An error occurred */
++			spin_unlock_irq(&tp->tx_lock);
++			return copied;
++		}
++		count -= copied;
++		offset += copied;
++
++		tx = &tp->tx->ring[i].req;
++		tx->addr = virt_to_machine(txb->data);
++		tx->size = txb->len;
++		tx->unused = 0;
++
++		DPRINTK("First 4 characters sent by TPM-FE are "
++			"0x%02x 0x%02x 0x%02x 0x%02x\n",
++		        txb->data[0],txb->data[1],txb->data[2],txb->data[3]);
++
++		/* Get the grant table reference for this page. */
++		tx->ref = gnttab_claim_grant_reference(&gref_head);
++		if (tx->ref == -ENOSPC) {
++			spin_unlock_irq(&tp->tx_lock);
++			DPRINTK("Grant table claim reference failed in "
++				"func:%s line:%d file:%s\n",
++				__FUNCTION__, __LINE__, __FILE__);
++			return -ENOSPC;
++		}
++		gnttab_grant_foreign_access_ref(tx->ref,
++						tp->backend_id,
++						virt_to_mfn(txb->data),
++						0 /*RW*/);
++		wmb();
++	}
++
++	atomic_set(&tp->tx_busy, 1);
++	tp->tx_remember = remember;
++
++	mb();
++
++	notify_remote_via_irq(tp->irq);
++
++	spin_unlock_irq(&tp->tx_lock);
++	return offset;
 +}
 +
-+static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
++
++static void tpmif_notify_upperlayer(struct tpm_private *tp)
 +{
-+	mp_irqs [mp_irq_entries] = *m;
-+	Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
-+		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
-+			m->mpc_irqtype, m->mpc_irqflag & 3,
-+			(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
-+			m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
-+	if (++mp_irq_entries >= MAX_IRQ_SOURCES)
-+		panic("Max # of irq sources exceeded!!\n");
++	/* Notify upper layer about the state of the connection to the BE. */
++	vtpm_vd_status(tp->chip, (tp->is_connected
++				  ? TPM_VD_STATUS_CONNECTED
++				  : TPM_VD_STATUS_DISCONNECTED));
 +}
 +
-+static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
++
++static void tpmif_set_connected_state(struct tpm_private *tp, u8 is_connected)
 +{
-+	Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
-+		" IRQ %02x, APIC ID %x, APIC LINT %02x\n",
-+			m->mpc_irqtype, m->mpc_irqflag & 3,
-+			(m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
-+			m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
 +	/*
-+	 * Well it seems all SMP boards in existence
-+	 * use ExtINT/LVT1 == LINT0 and
-+	 * NMI/LVT2 == LINT1 - the following check
-+	 * will show us if this assumptions is false.
-+	 * Until then we do not have to add baggage.
++	 * Don't notify upper layer if we are in suspend mode and
++	 * should disconnect - assumption is that we will resume.
++	 * The mutex keeps apps from sending.
 +	 */
-+	if ((m->mpc_irqtype == mp_ExtINT) &&
-+		(m->mpc_destapiclint != 0))
-+			BUG();
-+	if ((m->mpc_irqtype == mp_NMI) &&
-+		(m->mpc_destapiclint != 1))
-+			BUG();
++	if (is_connected == 0 && tp->is_suspended == 1)
++		return;
++
++	/*
++	 * Unlock the mutex if we are connected again
++	 * after being suspended - now resuming.
++	 * This also removes the suspend state.
++	 */
++	if (is_connected == 1 && tp->is_suspended == 1)
++		tpmfront_suspend_finish(tp);
++
++	if (is_connected != tp->is_connected) {
++		tp->is_connected = is_connected;
++		tpmif_notify_upperlayer(tp);
++	}
 +}
 +
-+/*
-+ * Read/parse the MPC
++
++
++/* =================================================================
++ * Initialization function.
++ * =================================================================
 + */
 +
-+static int __init smp_read_mpc(struct mp_config_table *mpc)
++
++static int __init tpmif_init(void)
 +{
-+	char str[16];
-+	int count=sizeof(*mpc);
-+	unsigned char *mpt=((unsigned char *)mpc)+count;
++	struct tpm_private *tp;
 +
-+	if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
-+		printk("SMP mptable: bad signature [%c%c%c%c]!\n",
-+			mpc->mpc_signature[0],
-+			mpc->mpc_signature[1],
-+			mpc->mpc_signature[2],
-+			mpc->mpc_signature[3]);
-+		return 0;
-+	}
-+	if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
-+		printk("SMP mptable: checksum error!\n");
-+		return 0;
-+	}
-+	if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
-+		printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
-+			mpc->mpc_spec);
-+		return 0;
-+	}
-+	if (!mpc->mpc_lapic) {
-+		printk(KERN_ERR "SMP mptable: null local APIC address!\n");
-+		return 0;
++	if (is_initial_xendomain())
++		return -EPERM;
++
++	tp = tpm_private_get();
++	if (!tp)
++		return -ENOMEM;
++
++	IPRINTK("Initialising the vTPM driver.\n");
++	if (gnttab_alloc_grant_references(TPMIF_TX_RING_SIZE,
++					  &gref_head) < 0) {
++		tpm_private_put();
++		return -EFAULT;
 +	}
-+	memcpy(str,mpc->mpc_oem,8);
-+	str[8]=0;
-+	printk(KERN_INFO "OEM ID: %s ",str);
 +
-+	memcpy(str,mpc->mpc_productid,12);
-+	str[12]=0;
-+	printk("Product ID: %s ",str);
++	init_tpm_xenbus();
++	return 0;
++}
 +
-+	printk("APIC at: 0x%X\n",mpc->mpc_lapic);
 +
-+	/* save the local APIC address, it might be non-default */
-+	if (!acpi_lapic)
-+	mp_lapic_addr = mpc->mpc_lapic;
++module_init(tpmif_init);
 +
-+	/*
-+	 *	Now process the configuration blocks.
-+	 */
-+	while (count < mpc->mpc_length) {
-+		switch(*mpt) {
-+			case MP_PROCESSOR:
-+			{
-+				struct mpc_config_processor *m=
-+					(struct mpc_config_processor *)mpt;
-+				if (!acpi_lapic)
-+				MP_processor_info(m);
-+				mpt += sizeof(*m);
-+				count += sizeof(*m);
-+				break;
-+			}
-+			case MP_BUS:
-+			{
-+				struct mpc_config_bus *m=
-+					(struct mpc_config_bus *)mpt;
-+				MP_bus_info(m);
-+				mpt += sizeof(*m);
-+				count += sizeof(*m);
-+				break;
-+			}
-+			case MP_IOAPIC:
-+			{
-+				struct mpc_config_ioapic *m=
-+					(struct mpc_config_ioapic *)mpt;
-+				MP_ioapic_info(m);
-+				mpt+=sizeof(*m);
-+				count+=sizeof(*m);
-+				break;
-+			}
-+			case MP_INTSRC:
-+			{
-+				struct mpc_config_intsrc *m=
-+					(struct mpc_config_intsrc *)mpt;
++MODULE_LICENSE("Dual BSD/GPL");
+diff -r d894e36cfc30 -r 0aa021803deb drivers/char/tty_io.c
+--- a/drivers/char/tty_io.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/char/tty_io.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -129,6 +129,8 @@
+ /* Semaphore to protect creating and releasing a tty. This is shared with
+    vt.c for deeply disgusting hack reasons */
+ DEFINE_MUTEX(tty_mutex);
++
++int console_use_vt = 1;
+ 
+ #ifdef CONFIG_UNIX98_PTYS
+ extern struct tty_driver *ptm_driver;	/* Unix98 pty masters; for /dev/ptmx */
+@@ -2483,7 +2485,7 @@
+ 		goto got_driver;
+ 	}
+ #ifdef CONFIG_VT
+-	if (device == MKDEV(TTY_MAJOR,0)) {
++	if (console_use_vt && (device == MKDEV(TTY_MAJOR,0))) {
+ 		extern struct tty_driver *console_driver;
+ 		driver = console_driver;
+ 		index = fg_console;
+@@ -3909,6 +3911,8 @@
+ #endif
+ 
+ #ifdef CONFIG_VT
++	if (!console_use_vt)
++		goto out_vt;
+ 	cdev_init(&vc0_cdev, &console_fops);
+ 	if (cdev_add(&vc0_cdev, MKDEV(TTY_MAJOR, 0), 1) ||
+ 	    register_chrdev_region(MKDEV(TTY_MAJOR, 0), 1, "/dev/vc/0") < 0)
+@@ -3916,6 +3920,7 @@
+ 	class_device_create(tty_class, NULL, MKDEV(TTY_MAJOR, 0), NULL, "tty0");
+ 
+ 	vty_init();
++ out_vt:
+ #endif
+ 	return 0;
+ }
+diff -r d894e36cfc30 -r 0aa021803deb drivers/cpufreq/Kconfig
+--- a/drivers/cpufreq/Kconfig	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/cpufreq/Kconfig	Wed Sep 10 10:54:08 2008 +0100
+@@ -1,5 +1,6 @@
+ config CPU_FREQ
+ 	bool "CPU Frequency scaling"
++	depends on !PROCESSOR_EXTERNAL_CONTROL
+ 	help
+ 	  CPU Frequency scaling allows you to change the clock speed of 
+ 	  CPUs on the fly. This is a nice method to save power, because 
+diff -r d894e36cfc30 -r 0aa021803deb drivers/cpufreq/cpufreq_ondemand.c
+--- a/drivers/cpufreq/cpufreq_ondemand.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/cpufreq/cpufreq_ondemand.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -95,6 +95,7 @@
+ 
+ 	return retval;
+ }
 +
-+				MP_intsrc_info(m);
-+				mpt+=sizeof(*m);
-+				count+=sizeof(*m);
-+				break;
-+			}
-+			case MP_LINTSRC:
-+			{
-+				struct mpc_config_lintsrc *m=
-+					(struct mpc_config_lintsrc *)mpt;
-+				MP_lintsrc_info(m);
-+				mpt+=sizeof(*m);
-+				count+=sizeof(*m);
-+				break;
-+			}
-+		}
-+	}
-+	clustered_apic_check();
-+	if (!num_processors)
-+		printk(KERN_ERR "SMP mptable: no processors registered!\n");
-+	return num_processors;
+ 
+ /************************** sysfs interface ************************/
+ static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
+@@ -222,17 +223,14 @@
+ 
+ /************************** sysfs end ************************/
+ 
+-static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
++#ifndef CONFIG_XEN
++static int dbs_calc_load(struct cpu_dbs_info_s *this_dbs_info)
+ {
+-	unsigned int idle_ticks, total_ticks;
+-	unsigned int load;
++	struct cpufreq_policy *policy;
+ 	cputime64_t cur_jiffies;
+-
+-	struct cpufreq_policy *policy;
++	cputime64_t total_ticks, idle_ticks;
+ 	unsigned int j;
+-
+-	if (!this_dbs_info->enable)
+-		return;
++	int load;
+ 
+ 	policy = this_dbs_info->cur_policy;
+ 	cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
+@@ -240,7 +238,7 @@
+ 			this_dbs_info->prev_cpu_wall);
+ 	this_dbs_info->prev_cpu_wall = cur_jiffies;
+ 	if (!total_ticks)
+-		return;
++		return 200;
+ 	/*
+ 	 * Every sampling_rate, we check, if current idle time is less
+ 	 * than 20% (default), then we try to increase frequency
+@@ -270,6 +268,81 @@
+ 			idle_ticks = tmp_idle_ticks;
+ 	}
+ 	load = (100 * (total_ticks - idle_ticks)) / total_ticks;
++	return load;
 +}
++#else
 +
-+static int __init ELCR_trigger(unsigned int irq)
++#include <xen/interface/platform.h>
++static int dbs_calc_load(struct cpu_dbs_info_s *this_dbs_info)
 +{
-+	unsigned int port;
-+
-+	port = 0x4d0 + (irq >> 3);
-+	return (inb(port) >> (irq & 7)) & 1;
++	int load = 0;
++	struct xen_platform_op op;
++	uint64_t idletime[NR_CPUS];
++	struct cpufreq_policy *policy;
++	unsigned int j;
++	cpumask_t cpumap;
++
++	policy = this_dbs_info->cur_policy;
++	cpumap = policy->cpus;
++
++	op.cmd = XENPF_getidletime;
++	set_xen_guest_handle(op.u.getidletime.cpumap_bitmap,
++			     (uint8_t *) cpus_addr(cpumap));
++	op.u.getidletime.cpumap_nr_cpus = NR_CPUS;
++	set_xen_guest_handle(op.u.getidletime.idletime, idletime);
++	if (HYPERVISOR_platform_op(&op))
++		return 200;
++
++	for_each_cpu_mask(j, cpumap) {
++		cputime64_t total_idle_nsecs, tmp_idle_nsecs;
++		cputime64_t total_wall_nsecs, tmp_wall_nsecs;
++		struct cpu_dbs_info_s *j_dbs_info;
++		unsigned long tmp_load, tmp_wall_msecs, tmp_idle_msecs;
++
++		j_dbs_info = &per_cpu(cpu_dbs_info, j);
++		total_idle_nsecs = idletime[j];
++		tmp_idle_nsecs = cputime64_sub(total_idle_nsecs,
++				j_dbs_info->prev_cpu_idle);
++		total_wall_nsecs = op.u.getidletime.now;
++		tmp_wall_nsecs = cputime64_sub(total_wall_nsecs,
++				j_dbs_info->prev_cpu_wall);
++
++		if (tmp_wall_nsecs == 0)
++			return 200;
++
++		j_dbs_info->prev_cpu_wall = total_wall_nsecs;
++		j_dbs_info->prev_cpu_idle = total_idle_nsecs;
++
++		/* Convert nsecs to msecs and clamp times to sane values. */
++		do_div(tmp_wall_nsecs, 1000000);
++		tmp_wall_msecs = tmp_wall_nsecs;
++		do_div(tmp_idle_nsecs, 1000000);
++		tmp_idle_msecs = tmp_idle_nsecs;
++		if (tmp_wall_msecs == 0)
++			tmp_wall_msecs = 1;
++		if (tmp_idle_msecs > tmp_wall_msecs)
++			tmp_idle_msecs = tmp_wall_msecs;
++
++		tmp_load = (100 * (tmp_wall_msecs - tmp_idle_msecs)) /
++				tmp_wall_msecs;
++		load = max(load, min(100, (int) tmp_load));
++	}
++	return load;
 +}
++#endif
 +
-+static void __init construct_default_ioirq_mptable(int mpc_default_type)
++static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 +{
-+	struct mpc_config_intsrc intsrc;
-+	int i;
-+	int ELCR_fallback = 0;
++	int load;
 +
-+	intsrc.mpc_type = MP_INTSRC;
-+	intsrc.mpc_irqflag = 0;			/* conforming */
-+	intsrc.mpc_srcbus = 0;
-+	intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
++	struct cpufreq_policy *policy;
 +
-+	intsrc.mpc_irqtype = mp_INT;
++	if (!this_dbs_info->enable)
++		return;
 +
-+	/*
-+	 *  If true, we have an ISA/PCI system with no IRQ entries
-+	 *  in the MP table. To prevent the PCI interrupts from being set up
-+	 *  incorrectly, we try to use the ELCR. The sanity check to see if
-+	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
-+	 *  never be level sensitive, so we simply see if the ELCR agrees.
-+	 *  If it does, we assume it's valid.
-+	 */
-+	if (mpc_default_type == 5) {
-+		printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
++	policy = this_dbs_info->cur_policy;
++	load = dbs_calc_load(this_dbs_info);
++	if (load > 100) 
++		return;
+ 
+ 	/* Check for frequency increase */
+ 	if (load > dbs_tuners_ins.up_threshold) {
+diff -r d894e36cfc30 -r 0aa021803deb drivers/cpufreq/cpufreq_stats.c
+--- a/drivers/cpufreq/cpufreq_stats.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/cpufreq/cpufreq_stats.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -292,6 +292,9 @@
+ 	if (old_index == new_index)
+ 		return 0;
+ 
++	if ((old_index < 0) || (new_index < 0))
++		return 0;
 +
-+		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
-+			printk(KERN_ERR "ELCR contains invalid data... not using ELCR\n");
-+		else {
-+			printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
-+			ELCR_fallback = 1;
-+		}
-+	}
+ 	spin_lock(&cpufreq_stats_lock);
+ 	stat->last_index = new_index;
+ #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
+diff -r d894e36cfc30 -r 0aa021803deb drivers/ide/ide-lib.c
+--- a/drivers/ide/ide-lib.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/ide/ide-lib.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -408,10 +408,10 @@
+ {
+ 	u64 addr = BLK_BOUNCE_HIGH;	/* dma64_addr_t */
+ 
+-	if (!PCI_DMA_BUS_IS_PHYS) {
+-		addr = BLK_BOUNCE_ANY;
+-	} else if (on && drive->media == ide_disk) {
+-		if (HWIF(drive)->pci_dev)
++	if (on && drive->media == ide_disk) {
++		if (!PCI_DMA_BUS_IS_PHYS)
++			addr = BLK_BOUNCE_ANY;
++		else if (HWIF(drive)->pci_dev)
+ 			addr = HWIF(drive)->pci_dev->dma_mask;
+ 	}
+ 
+diff -r d894e36cfc30 -r 0aa021803deb drivers/oprofile/buffer_sync.c
+--- a/drivers/oprofile/buffer_sync.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/oprofile/buffer_sync.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -5,6 +5,10 @@
+  * @remark Read the file COPYING
+  *
+  * @author John Levon <levon at movementarian.org>
++ *
++ * Modified by Aravind Menon for Xen
++ * These modifications are:
++ * Copyright (C) 2005 Hewlett-Packard Co.
+  *
+  * This is the core of the buffer management. Each
+  * CPU buffer is processed and entered into the
+@@ -38,6 +42,7 @@
+ static DEFINE_SPINLOCK(task_mortuary);
+ static void process_task_mortuary(void);
+ 
++static int cpu_current_domain[NR_CPUS];
+ 
+ /* Take ownership of the task struct and place it on the
+  * list for processing. Only after two full buffer syncs
+@@ -146,6 +151,11 @@
+ int sync_start(void)
+ {
+ 	int err;
++	int i;
 +
-+	for (i = 0; i < 16; i++) {
-+		switch (mpc_default_type) {
-+		case 2:
-+			if (i == 0 || i == 13)
-+				continue;	/* IRQ0 & IRQ13 not connected */
-+			/* fall through */
-+		default:
-+			if (i == 2)
-+				continue;	/* IRQ2 is never connected */
-+		}
++	for (i = 0; i < NR_CPUS; i++) {
++		cpu_current_domain[i] = COORDINATOR_DOMAIN;
++	}
+ 
+ 	start_cpu_work();
+ 
+@@ -275,15 +285,31 @@
+ 	last_cookie = INVALID_COOKIE;
+ }
+ 
+-static void add_kernel_ctx_switch(unsigned int in_kernel)
++static void add_cpu_mode_switch(unsigned int cpu_mode)
+ {
+ 	add_event_entry(ESCAPE_CODE);
+-	if (in_kernel)
+-		add_event_entry(KERNEL_ENTER_SWITCH_CODE); 
+-	else
+-		add_event_entry(KERNEL_EXIT_SWITCH_CODE); 
++	switch (cpu_mode) {
++	case CPU_MODE_USER:
++		add_event_entry(USER_ENTER_SWITCH_CODE);
++		break;
++	case CPU_MODE_KERNEL:
++		add_event_entry(KERNEL_ENTER_SWITCH_CODE);
++		break;
++	case CPU_MODE_XEN:
++		add_event_entry(XEN_ENTER_SWITCH_CODE);
++		break;
++	default:
++		break;
++	}
+ }
+- 
 +
-+		if (ELCR_fallback) {
-+			/*
-+			 *  If the ELCR indicates a level-sensitive interrupt, we
-+			 *  copy that information over to the MP table in the
-+			 *  irqflag field (level sensitive, active high polarity).
-+			 */
-+			if (ELCR_trigger(i))
-+				intsrc.mpc_irqflag = 13;
-+			else
-+				intsrc.mpc_irqflag = 0;
-+		}
++static void add_domain_switch(unsigned long domain_id)
++{
++	add_event_entry(ESCAPE_CODE);
++	add_event_entry(DOMAIN_SWITCH_CODE);
++	add_event_entry(domain_id);
++}
 +
-+		intsrc.mpc_srcbusirq = i;
-+		intsrc.mpc_dstirq = i ? i : 2;		/* IRQ0 to INTIN2 */
-+		MP_intsrc_info(&intsrc);
+ static void
+ add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
+ {
+@@ -348,9 +374,9 @@
+  * for later lookup from userspace.
+  */
+ static int
+-add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
++add_sample(struct mm_struct * mm, struct op_sample * s, int cpu_mode)
+ {
+-	if (in_kernel) {
++	if (cpu_mode >= CPU_MODE_KERNEL) {
+ 		add_sample_entry(s->eip, s->event);
+ 		return 1;
+ 	} else if (mm) {
+@@ -496,15 +522,21 @@
+ 	struct mm_struct *mm = NULL;
+ 	struct task_struct * new;
+ 	unsigned long cookie = 0;
+-	int in_kernel = 1;
++	int cpu_mode = 1;
+ 	unsigned int i;
+ 	sync_buffer_state state = sb_buffer_start;
+ 	unsigned long available;
++	int domain_switch = 0;
+ 
+ 	mutex_lock(&buffer_mutex);
+  
+ 	add_cpu_switch(cpu);
+ 
++	/* We need to assign the first samples in this CPU buffer to the
++	   same domain that we were processing at the last sync_buffer */
++	if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
++		add_domain_switch(cpu_current_domain[cpu]);
++	}
+ 	/* Remember, only we can modify tail_pos */
+ 
+ 	available = get_slots(cpu_buf);
+@@ -512,16 +544,18 @@
+ 	for (i = 0; i < available; ++i) {
+ 		struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos];
+  
+-		if (is_code(s->eip)) {
+-			if (s->event <= CPU_IS_KERNEL) {
+-				/* kernel/userspace switch */
+-				in_kernel = s->event;
++		if (is_code(s->eip) && !domain_switch) {
++			if (s->event <= CPU_MODE_XEN) {
++				/* xen/kernel/userspace switch */
++				cpu_mode = s->event;
+ 				if (state == sb_buffer_start)
+ 					state = sb_sample_start;
+-				add_kernel_ctx_switch(s->event);
++				add_cpu_mode_switch(s->event);
+ 			} else if (s->event == CPU_TRACE_BEGIN) {
+ 				state = sb_bt_start;
+ 				add_trace_begin();
++			} else if (s->event == CPU_DOMAIN_SWITCH) {
++				domain_switch = 1;
+ 			} else {
+ 				struct mm_struct * oldmm = mm;
+ 
+@@ -535,11 +569,21 @@
+ 				add_user_ctx_switch(new, cookie);
+ 			}
+ 		} else {
+-			if (state >= sb_bt_start &&
+-			    !add_sample(mm, s, in_kernel)) {
+-				if (state == sb_bt_start) {
+-					state = sb_bt_ignore;
+-					atomic_inc(&oprofile_stats.bt_lost_no_mapping);
++			if (domain_switch) {
++				cpu_current_domain[cpu] = s->eip;
++				add_domain_switch(s->eip);
++				domain_switch = 0;
++			} else {
++				if (cpu_current_domain[cpu] !=
++				    COORDINATOR_DOMAIN) {
++					add_sample_entry(s->eip, s->event);
++				} else if (state >= sb_bt_start &&
++				    !add_sample(mm, s, cpu_mode)) {
++					if (state == sb_bt_start) {
++						state = sb_bt_ignore;
++						atomic_inc(&oprofile_stats.bt_lost_no_mapping);
++					}
+ 				}
+ 			}
+ 		}
+@@ -548,6 +592,11 @@
+ 	}
+ 	release_mm(mm);
+ 
++	/* We reset domain to COORDINATOR at each CPU switch */
++	if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
++		add_domain_switch(COORDINATOR_DOMAIN);
 +	}
 +
-+	intsrc.mpc_irqtype = mp_ExtINT;
-+	intsrc.mpc_srcbusirq = 0;
-+	intsrc.mpc_dstirq = 0;				/* 8259A to INTIN0 */
-+	MP_intsrc_info(&intsrc);
-+}
+ 	mark_done(cpu);
+ 
+ 	mutex_unlock(&buffer_mutex);
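
For readers following the stream format: after this hunk a domain switch reaches userspace as the escape sequence ESCAPE_CODE, DOMAIN_SWITCH_CODE, <domain id>, and samples taken while a passive domain is current are appended verbatim instead of being resolved against an mm. A minimal user-space sketch of a consumer of that framing (the codes mirror the event_buffer.h hunk below; ESCAPE_CODE is assumed to be ~0UL as in upstream oprofile):

    #include <stddef.h>

    #define ESCAPE_CODE        (~0UL)   /* assumed, from event_buffer.h */
    #define DOMAIN_SWITCH_CODE 11UL

    /* Walk raw event-buffer words and return the domain the trailing
     * samples belong to; -1 is the coordinator (Xen/dom0) domain. */
    static long track_domain(const unsigned long *buf, size_t n)
    {
        long dom = -1;                  /* COORDINATOR_DOMAIN */
        size_t i = 0;

        while (i + 2 < n) {
            if (buf[i] == ESCAPE_CODE && buf[i + 1] == DOMAIN_SWITCH_CODE) {
                dom = (long)buf[i + 2];
                i += 3;                 /* skip the escape sequence */
            } else {
                i++;                    /* ordinary eip/event data */
            }
        }
        return dom;
    }
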
+diff -r d894e36cfc30 -r 0aa021803deb drivers/oprofile/cpu_buffer.c
+--- a/drivers/oprofile/cpu_buffer.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/oprofile/cpu_buffer.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -5,6 +5,10 @@
+  * @remark Read the file COPYING
+  *
+  * @author John Levon <levon at movementarian.org>
++ *
++ * Modified by Aravind Menon for Xen
++ * These modifications are:
++ * Copyright (C) 2005 Hewlett-Packard Co.
+  *
+  * Each CPU has a local buffer that stores PC value/event
+  * pairs. We also log context switches when we notice them.
+@@ -34,6 +38,8 @@
+ #define DEFAULT_TIMER_EXPIRE (HZ / 10)
+ static int work_enabled;
+ 
++static int32_t current_domain = COORDINATOR_DOMAIN;
 +
-+static inline void __init construct_default_ISA_mptable(int mpc_default_type)
+ void free_cpu_buffers(void)
+ {
+ 	int i;
+@@ -57,7 +63,7 @@
+ 			goto fail;
+  
+ 		b->last_task = NULL;
+-		b->last_is_kernel = -1;
++		b->last_cpu_mode = -1;
+ 		b->tracing = 0;
+ 		b->buffer_size = buffer_size;
+ 		b->tail_pos = 0;
+@@ -113,7 +119,7 @@
+ 	 * collected will populate the buffer with proper
+ 	 * values to initialize the buffer
+ 	 */
+-	cpu_buf->last_is_kernel = -1;
++	cpu_buf->last_cpu_mode = -1;
+ 	cpu_buf->last_task = NULL;
+ }
+ 
+@@ -163,13 +169,13 @@
+  * because of the head/tail separation of the writer and reader
+  * of the CPU buffer.
+  *
+- * is_kernel is needed because on some architectures you cannot
++ * cpu_mode is needed because on some architectures you cannot
+  * tell if you are in kernel or user space simply by looking at
+- * pc. We tag this in the buffer by generating kernel enter/exit
+- * events whenever is_kernel changes
++ * pc. We tag this in the buffer by generating kernel/user (and xen)
++ * enter events whenever cpu_mode changes
+  */
+ static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
+-		      int is_kernel, unsigned long event)
++		      int cpu_mode, unsigned long event)
+ {
+ 	struct task_struct * task;
+ 
+@@ -180,18 +186,18 @@
+ 		return 0;
+ 	}
+ 
+-	is_kernel = !!is_kernel;
+-
+ 	task = current;
+ 
+ 	/* notice a switch from user->kernel or vice versa */
+-	if (cpu_buf->last_is_kernel != is_kernel) {
+-		cpu_buf->last_is_kernel = is_kernel;
+-		add_code(cpu_buf, is_kernel);
++	if (cpu_buf->last_cpu_mode != cpu_mode) {
++		cpu_buf->last_cpu_mode = cpu_mode;
++		add_code(cpu_buf, cpu_mode);
+ 	}
+-
++
+ 	/* notice a task switch */
+-	if (cpu_buf->last_task != task) {
++	/* if not processing other domain samples */
++	if ((cpu_buf->last_task != task) &&
++	    (current_domain == COORDINATOR_DOMAIN)) {
+ 		cpu_buf->last_task = task;
+ 		add_code(cpu_buf, (unsigned long)task);
+ 	}
+@@ -275,6 +281,25 @@
+ 	add_sample(cpu_buf, pc, 0);
+ }
+ 
++int oprofile_add_domain_switch(int32_t domain_id)
 +{
-+	struct mpc_config_processor processor;
-+	struct mpc_config_bus bus;
-+	struct mpc_config_ioapic ioapic;
-+	struct mpc_config_lintsrc lintsrc;
-+	int linttypes[2] = { mp_ExtINT, mp_NMI };
-+	int i;
++	struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
 +
-+	/*
-+	 * local APIC has default address
-+	 */
-+	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
++	/* Should have space for switching into and out of the domain
++	   (2 slots each) plus one sample and one cpu mode switch. */
++	if (((nr_available_slots(cpu_buf) < 6) && 
++	     (domain_id != COORDINATOR_DOMAIN)) ||
++	    (nr_available_slots(cpu_buf) < 2))
++		return 0;
 +
-+	/*
-+	 * 2 CPUs, numbered 0 & 1.
-+	 */
-+	processor.mpc_type = MP_PROCESSOR;
-+	/* Either an integrated APIC or a discrete 82489DX. */
-+	processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
-+	processor.mpc_cpuflag = CPU_ENABLED;
-+	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
-+				   (boot_cpu_data.x86_model << 4) |
-+				   boot_cpu_data.x86_mask;
-+	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
-+	processor.mpc_reserved[0] = 0;
-+	processor.mpc_reserved[1] = 0;
-+	for (i = 0; i < 2; i++) {
-+		processor.mpc_apicid = i;
-+		MP_processor_info(&processor);
-+	}
++	add_code(cpu_buf, CPU_DOMAIN_SWITCH);
++	add_sample(cpu_buf, domain_id, 0);
 +
-+	bus.mpc_type = MP_BUS;
-+	bus.mpc_busid = 0;
-+	switch (mpc_default_type) {
-+		default:
-+			printk(KERN_ERR "???\nUnknown standard configuration %d\n",
-+				mpc_default_type);
-+			/* fall through */
-+		case 1:
-+		case 5:
-+			memcpy(bus.mpc_bustype, "ISA   ", 6);
-+			break;
-+		case 2:
-+		case 6:
-+		case 3:
-+			memcpy(bus.mpc_bustype, "EISA  ", 6);
-+			break;
-+		case 4:
-+		case 7:
-+			memcpy(bus.mpc_bustype, "MCA   ", 6);
-+	}
-+	MP_bus_info(&bus);
-+	if (mpc_default_type > 4) {
-+		bus.mpc_busid = 1;
-+		memcpy(bus.mpc_bustype, "PCI   ", 6);
-+		MP_bus_info(&bus);
-+	}
++	current_domain = domain_id;
 +
-+	ioapic.mpc_type = MP_IOAPIC;
-+	ioapic.mpc_apicid = 2;
-+	ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
-+	ioapic.mpc_flags = MPC_APIC_USABLE;
-+	ioapic.mpc_apicaddr = 0xFEC00000;
-+	MP_ioapic_info(&ioapic);
++	return 1;
++}
 +
-+	/*
-+	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
-+	 */
-+	construct_default_ioirq_mptable(mpc_default_type);
+ /*
+  * This serves to avoid cpu buffer overflow, and makes sure
+  * the task mortuary progresses
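
The slot arithmetic in oprofile_add_domain_switch above deserves a gloss: switching into a foreign domain must leave room for the switch-in pair, the later switch-out pair, at least one sample and one CPU-mode switch -- six slots in total -- while switching back to the coordinator needs only its own two. For orientation, the usual head/tail arithmetic behind a helper like nr_available_slots (a sketch, assuming a single-writer ring that keeps one slot empty; the real helper lives elsewhere in cpu_buffer.c):

    #include <stddef.h>

    /* Free slots in a ring buffer with one writer (head) and one
     * reader (tail); one slot stays empty so that full and empty
     * states are distinguishable. */
    static size_t ring_free_slots(size_t head, size_t tail, size_t size)
    {
        if (tail > head)
            return tail - head - 1;
        return size - head + tail - 1;
    }
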
+diff -r d894e36cfc30 -r 0aa021803deb drivers/oprofile/cpu_buffer.h
+--- a/drivers/oprofile/cpu_buffer.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/oprofile/cpu_buffer.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -36,7 +36,7 @@
+ 	volatile unsigned long tail_pos;
+ 	unsigned long buffer_size;
+ 	struct task_struct * last_task;
+-	int last_is_kernel;
++	int last_cpu_mode;
+ 	int tracing;
+ 	struct op_sample * buffer;
+ 	unsigned long sample_received;
+@@ -51,7 +51,10 @@
+ void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf);
+ 
+ /* transient events for the CPU buffer -> event buffer */
+-#define CPU_IS_KERNEL 1
+-#define CPU_TRACE_BEGIN 2
++#define CPU_MODE_USER           0
++#define CPU_MODE_KERNEL         1
++#define CPU_MODE_XEN            2
++#define CPU_TRACE_BEGIN         3
++#define CPU_DOMAIN_SWITCH       4
+ 
+ #endif /* OPROFILE_CPU_BUFFER_H */
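
The numbering of these constants is load-bearing: sync_buffer() tests s->event <= CPU_MODE_XEN to recognise a mode switch, add_sample() treats cpu_mode >= CPU_MODE_KERNEL as kernel-resolvable (so Xen samples take the kernel path), and the bare `int cpu_mode = 1` initialiser earlier is CPU_MODE_KERNEL. As an illustration only -- the patch keeps the #defines -- an enum makes the required ordering explicit:

    enum cpu_mode {
        CPU_MODE_USER     = 0,
        CPU_MODE_KERNEL   = 1,
        CPU_MODE_XEN      = 2, /* privileged: still >= CPU_MODE_KERNEL */
        CPU_TRACE_BEGIN   = 3, /* markers, not modes: sort above XEN */
        CPU_DOMAIN_SWITCH = 4,
    };
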
+diff -r d894e36cfc30 -r 0aa021803deb drivers/oprofile/event_buffer.h
+--- a/drivers/oprofile/event_buffer.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/oprofile/event_buffer.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -29,14 +29,19 @@
+ #define CPU_SWITCH_CODE 		2
+ #define COOKIE_SWITCH_CODE 		3
+ #define KERNEL_ENTER_SWITCH_CODE	4
+-#define KERNEL_EXIT_SWITCH_CODE		5
++#define USER_ENTER_SWITCH_CODE		5
+ #define MODULE_LOADED_CODE		6
+ #define CTX_TGID_CODE			7
+ #define TRACE_BEGIN_CODE		8
+ #define TRACE_END_CODE			9
++#define XEN_ENTER_SWITCH_CODE		10
++#define DOMAIN_SWITCH_CODE		11
+  
+ #define INVALID_COOKIE ~0UL
+ #define NO_COOKIE 0UL
 +
-+	lintsrc.mpc_type = MP_LINTSRC;
-+	lintsrc.mpc_irqflag = 0;		/* conforming */
-+	lintsrc.mpc_srcbusid = 0;
-+	lintsrc.mpc_srcbusirq = 0;
-+	lintsrc.mpc_destapic = MP_APIC_ALL;
-+	for (i = 0; i < 2; i++) {
-+		lintsrc.mpc_irqtype = linttypes[i];
-+		lintsrc.mpc_destapiclint = i;
-+		MP_lintsrc_info(&lintsrc);
-+	}
-+}
++/* Constant used to refer to coordinator domain (Xen) */
++#define COORDINATOR_DOMAIN -1
+ 
+ /* add data to the event buffer */
+ void add_event_entry(unsigned long data);
+diff -r d894e36cfc30 -r 0aa021803deb drivers/oprofile/oprof.c
+--- a/drivers/oprofile/oprof.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/oprofile/oprof.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -5,6 +5,10 @@
+  * @remark Read the file COPYING
+  *
+  * @author John Levon <levon at movementarian.org>
++ *
++ * Modified by Aravind Menon for Xen
++ * These modifications are:
++ * Copyright (C) 2005 Hewlett-Packard Co.
+  */
+ 
+ #include <linux/kernel.h>
+@@ -19,7 +23,7 @@
+ #include "cpu_buffer.h"
+ #include "buffer_sync.h"
+ #include "oprofile_stats.h"
+- 
 +
-+static struct intel_mp_floating *mpf_found;
+ struct oprofile_operations oprofile_ops;
+ 
+ unsigned long oprofile_started;
+@@ -32,6 +36,32 @@
+    1 - use the timer int mechanism regardless
+  */
+ static int timer = 0;
 +
-+/*
-+ * Scan the memory blocks for an SMP configuration block.
-+ */
-+void __init get_smp_config (void)
++int oprofile_set_active(int active_domains[], unsigned int adomains)
 +{
-+	struct intel_mp_floating *mpf = mpf_found;
-+
-+	/*
-+ 	 * ACPI supports both logical (e.g. Hyper-Threading) and physical 
-+ 	 * processors, where MPS only supports physical.
-+ 	 */
-+ 	if (acpi_lapic && acpi_ioapic) {
-+ 		printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
-+ 		return;
-+	}
-+ 	else if (acpi_lapic)
-+ 		printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
-+
-+	printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
-+	if (mpf->mpf_feature2 & (1<<7)) {
-+		printk(KERN_INFO "    IMCR and PIC compatibility mode.\n");
-+		pic_mode = 1;
-+	} else {
-+		printk(KERN_INFO "    Virtual Wire compatibility mode.\n");
-+		pic_mode = 0;
-+	}
++	int err;
 +
-+	/*
-+	 * Now see if we need to read further.
-+	 */
-+	if (mpf->mpf_feature1 != 0) {
++	if (!oprofile_ops.set_active)
++		return -EINVAL;
 +
-+		printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
-+		construct_default_ISA_mptable(mpf->mpf_feature1);
++	mutex_lock(&start_mutex);
++	err = oprofile_ops.set_active(active_domains, adomains);
++	mutex_unlock(&start_mutex);
++	return err;
++}
 +
-+	} else if (mpf->mpf_physptr) {
++int oprofile_set_passive(int passive_domains[], unsigned int pdomains)
++{
++	int err;
 +
-+		/*
-+		 * Read the physical hardware table.  Anything here will
-+		 * override the defaults.
-+		 */
-+ 		if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) {
-+			smp_found_config = 0;
-+			printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
-+			printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
-+			return;
-+		}
-+		/*
-+		 * If there are no explicit MP IRQ entries, then we are
-+		 * broken.  We set up most of the low 16 IO-APIC pins to
-+		 * ISA defaults and hope it will work.
-+		 */
-+		if (!mp_irq_entries) {
-+			struct mpc_config_bus bus;
++	if (!oprofile_ops.set_passive)
++		return -EINVAL;
 +
-+			printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
++	mutex_lock(&start_mutex);
++	err = oprofile_ops.set_passive(passive_domains, pdomains);
++	mutex_unlock(&start_mutex);
++	return err;
++}
+ 
+ int oprofile_setup(void)
+ {
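
Both new entry points share one guarded-delegation shape: check that the active backend (xenoprof, here) supplies the hook, then invoke it under start_mutex so a domain-set change cannot race oprofile setup, start or shutdown. Schematically (a sketch, not patch text; start_mutex is the existing lock in oprof.c):

    /* Sketch of the pattern behind oprofile_set_active/_passive. */
    static int guarded_set(int (*hook)(int *doms, unsigned int n),
                           int *doms, unsigned int n)
    {
        int err;

        if (!hook)
            return -EINVAL;   /* backend does not support domains */

        mutex_lock(&start_mutex);
        err = hook(doms, n);
        mutex_unlock(&start_mutex);
        return err;
    }
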
+diff -r d894e36cfc30 -r 0aa021803deb drivers/oprofile/oprof.h
+--- a/drivers/oprofile/oprof.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/oprofile/oprof.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -35,5 +35,8 @@
+ void oprofile_timer_init(struct oprofile_operations * ops);
+ 
+ int oprofile_set_backtrace(unsigned long depth);
 +
-+			bus.mpc_type = MP_BUS;
-+			bus.mpc_busid = 0;
-+			memcpy(bus.mpc_bustype, "ISA   ", 6);
-+			MP_bus_info(&bus);
++int oprofile_set_active(int active_domains[], unsigned int adomains);
++int oprofile_set_passive(int passive_domains[], unsigned int pdomains);
+  
+ #endif /* OPROF_H */
+diff -r d894e36cfc30 -r 0aa021803deb drivers/oprofile/oprofile_files.c
+--- a/drivers/oprofile/oprofile_files.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/oprofile/oprofile_files.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -5,15 +5,21 @@
+  * @remark Read the file COPYING
+  *
+  * @author John Levon <levon at movementarian.org>
++ *
++ * Modified by Aravind Menon for Xen
++ * These modifications are:
++ * Copyright (C) 2005 Hewlett-Packard Co.
+  */
+ 
+ #include <linux/fs.h>
+ #include <linux/oprofile.h>
++#include <asm/uaccess.h>
++#include <linux/ctype.h>
+ 
+ #include "event_buffer.h"
+ #include "oprofile_stats.h"
+ #include "oprof.h"
+- 
 +
-+			construct_default_ioirq_mptable(0);
-+		}
+ unsigned long fs_buffer_size = 131072;
+ unsigned long fs_cpu_buffer_size = 8192;
+ unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
+@@ -117,11 +123,202 @@
+ static struct file_operations dump_fops = {
+ 	.write		= dump_write,
+ };
+- 
 +
-+	} else
-+		BUG();
++#define TMPBUFSIZE 512
 +
-+	printk(KERN_INFO "Processors: %d\n", num_processors);
-+	/*
-+	 * Only use the first configuration found.
-+	 */
-+}
++static unsigned int adomains = 0;
++static int active_domains[MAX_OPROF_DOMAINS + 1];
++static DEFINE_MUTEX(adom_mutex);
 +
-+static int __init smp_scan_config (unsigned long base, unsigned long length)
++static ssize_t adomain_write(struct file * file, char const __user * buf, 
++			     size_t count, loff_t * offset)
 +{
-+	extern void __bad_mpf_size(void); 
-+	unsigned int *bp = isa_bus_to_virt(base);
-+	struct intel_mp_floating *mpf;
-+
-+	Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
-+	if (sizeof(*mpf) != 16)
-+		__bad_mpf_size();
++	char *tmpbuf;
++	char *startp, *endp;
++	int i;
++	unsigned long val;
++	ssize_t retval = count;
++	
++	if (*offset)
++		return -EINVAL;	
++	if (count > TMPBUFSIZE - 1)
++		return -EINVAL;
 +
-+	while (length > 0) {
-+		mpf = (struct intel_mp_floating *)bp;
-+		if ((*bp == SMP_MAGIC_IDENT) &&
-+			(mpf->mpf_length == 1) &&
-+			!mpf_checksum((unsigned char *)bp, 16) &&
-+			((mpf->mpf_specification == 1)
-+				|| (mpf->mpf_specification == 4)) ) {
++	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
++		return -ENOMEM;
 +
-+			smp_found_config = 1;
-+			mpf_found = mpf;
-+			return 1;
-+		}
-+		bp += 4;
-+		length -= 16;
++	if (copy_from_user(tmpbuf, buf, count)) {
++		kfree(tmpbuf);
++		return -EFAULT;
 +	}
-+	return 0;
-+}
++	tmpbuf[count] = 0;
 +
-+void __init find_intel_smp (void)
-+{
-+	unsigned int address;
++	mutex_lock(&adom_mutex);
 +
-+	/*
-+	 * FIXME: Linux assumes you have 640K of base ram..
-+	 * this continues the error...
-+	 *
-+	 * 1) Scan the bottom 1K for a signature
-+	 * 2) Scan the top 1K of base RAM
-+	 * 3) Scan the 64K of bios
-+	 */
-+	if (smp_scan_config(0x0,0x400) ||
-+		smp_scan_config(639*0x400,0x400) ||
-+			smp_scan_config(0xF0000,0x10000))
-+		return;
-+	/*
-+	 * If it is an SMP machine we should know now, unless the
-+	 * configuration is in an EISA/MCA bus machine with an
-+	 * extended bios data area.
-+	 *
-+	 * there is a real-mode segmented pointer pointing to the
-+	 * 4K EBDA area at 0x40E, calculate and scan it here.
-+	 *
-+	 * NOTE! There are Linux loaders that will corrupt the EBDA
-+	 * area, and as such this kind of SMP config may be less
-+	 * trustworthy, simply because the SMP table may have been
-+	 * stomped on during early boot. These loaders are buggy and
-+	 * should be fixed.
-+	 */
++	startp = tmpbuf;
++	/* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
++	for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
++		val = simple_strtoul(startp, &endp, 0);
++		if (endp == startp)
++			break;
++		while (ispunct(*endp) || isspace(*endp))
++			endp++;
++		active_domains[i] = val;
++		if (active_domains[i] != val)
++			/* Overflow, force error below */
++			i = MAX_OPROF_DOMAINS + 1;
++		startp = endp;
++	}
++	/* Force error on trailing junk */
++	adomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
 +
-+	address = *(unsigned short *)phys_to_virt(0x40E);
-+	address <<= 4;
-+	if (smp_scan_config(address, 0x1000))
-+		return;
++	kfree(tmpbuf);
 +
-+	/* If we have come this far, we did not find an MP table  */
-+	 printk(KERN_INFO "No mptable found.\n");
-+}
++	if (adomains > MAX_OPROF_DOMAINS
++	    || oprofile_set_active(active_domains, adomains)) {
++		adomains = 0;
++		retval = -EINVAL;
++	}
 +
-+/*
-+ * - Intel MP Configuration Table
-+ */
-+void __init find_smp_config (void)
-+{
-+#ifdef CONFIG_X86_LOCAL_APIC
-+	find_intel_smp();
-+#endif
++	mutex_unlock(&adom_mutex);
++	return retval;
 +}
 +
++static ssize_t adomain_read(struct file * file, char __user * buf, 
++			    size_t count, loff_t * offset)
++{
++	char * tmpbuf;
++	size_t len;
++	int i;
++	ssize_t retval;
 +
-+/* --------------------------------------------------------------------------
-+                            ACPI-based MP Configuration
-+   -------------------------------------------------------------------------- */
++	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
++		return -ENOMEM;
 +
-+#ifdef CONFIG_ACPI
++	mutex_lock(&adom_mutex);
 +
-+void __init mp_register_lapic_address (
-+	u64			address)
-+{
-+#ifndef CONFIG_XEN
-+	mp_lapic_addr = (unsigned long) address;
++	len = 0;
++	for (i = 0; i < adomains; i++)
++		len += snprintf(tmpbuf + len,
++				len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
++				"%u ", active_domains[i]);
++	WARN_ON(len > TMPBUFSIZE);
++	if (len != 0 && len <= TMPBUFSIZE)
++		tmpbuf[len-1] = '\n';
 +
-+	set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
++	mutex_unlock(&adom_mutex);
 +
-+	if (boot_cpu_id == -1U)
-+		boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
++	retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
 +
-+	Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
-+#endif
++	kfree(tmpbuf);
++	return retval;
 +}
 +
 +
-+void __cpuinit mp_register_lapic (
-+	u8			id, 
-+	u8			enabled)
-+{
-+	struct mpc_config_processor processor;
-+	int			boot_cpu = 0;
-+	
-+	if (id >= MAX_APICS) {
-+		printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
-+			id, MAX_APICS);
-+		return;
-+	}
-+
-+	if (id == boot_cpu_physical_apicid)
-+		boot_cpu = 1;
++static struct file_operations active_domain_ops = {
++	.read		= adomain_read,
++	.write		= adomain_write,
++};
 +
-+#ifndef CONFIG_XEN
-+	processor.mpc_type = MP_PROCESSOR;
-+	processor.mpc_apicid = id;
-+	processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
-+	processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
-+	processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
-+	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | 
-+		(boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
-+	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
-+	processor.mpc_reserved[0] = 0;
-+	processor.mpc_reserved[1] = 0;
-+#endif
++static unsigned int pdomains = 0;
++static int passive_domains[MAX_OPROF_DOMAINS];
++static DEFINE_MUTEX(pdom_mutex);
 +
-+	MP_processor_info(&processor);
-+}
++static ssize_t pdomain_write(struct file * file, char const __user * buf, 
++			     size_t count, loff_t * offset)
++{
++	char *tmpbuf;
++	char *startp, *endp;
++	int i;
++	unsigned long val;
++	ssize_t retval = count;
++	
++	if (*offset)
++		return -EINVAL;	
++	if (count > TMPBUFSIZE - 1)
++		return -EINVAL;
 +
-+#ifdef CONFIG_X86_IO_APIC
++	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
++		return -ENOMEM;
 +
-+#define MP_ISA_BUS		0
-+#define MP_MAX_IOAPIC_PIN	127
++	if (copy_from_user(tmpbuf, buf, count)) {
++		kfree(tmpbuf);
++		return -EFAULT;
++	}
++	tmpbuf[count] = 0;
 +
-+static struct mp_ioapic_routing {
-+	int			apic_id;
-+	int			gsi_start;
-+	int			gsi_end;
-+	u32			pin_programmed[4];
-+} mp_ioapic_routing[MAX_IO_APICS];
++	mutex_lock(&pdom_mutex);
 +
++	startp = tmpbuf;
++	/* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
++	for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
++		val = simple_strtoul(startp, &endp, 0);
++		if (endp == startp)
++			break;
++		while (ispunct(*endp) || isspace(*endp))
++			endp++;
++		passive_domains[i] = val;
++		if (passive_domains[i] != val)
++			/* Overflow, force error below */
++			i = MAX_OPROF_DOMAINS + 1;
++		startp = endp;
++	}
++	/* Force error on trailing junk */
++	pdomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
 +
-+static int mp_find_ioapic (
-+	int			gsi)
-+{
-+	int			i = 0;
++	kfree(tmpbuf);
 +
-+	/* Find the IOAPIC that manages this GSI. */
-+	for (i = 0; i < nr_ioapics; i++) {
-+		if ((gsi >= mp_ioapic_routing[i].gsi_start)
-+			&& (gsi <= mp_ioapic_routing[i].gsi_end))
-+			return i;
++	if (pdomains > MAX_OPROF_DOMAINS
++	    || oprofile_set_passive(passive_domains, pdomains)) {
++		pdomains = 0;
++		retval = -EINVAL;
 +	}
 +
-+	printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
-+
-+	return -1;
++	mutex_unlock(&pdom_mutex);
++	return retval;
 +}
-+	
 +
-+void __init mp_register_ioapic (
-+	u8			id, 
-+	u32			address,
-+	u32			gsi_base)
++static ssize_t pdomain_read(struct file * file, char __user * buf, 
++			    size_t count, loff_t * offset)
 +{
-+	int			idx = 0;
++	char * tmpbuf;
++	size_t len;
++	int i;
++	ssize_t retval;
 +
-+	if (nr_ioapics >= MAX_IO_APICS) {
-+		printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
-+			"(found %d)\n", MAX_IO_APICS, nr_ioapics);
-+		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
-+	}
-+	if (!address) {
-+		printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
-+			" found in MADT table, skipping!\n");
-+		return;
-+	}
++	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
++		return -ENOMEM;
 +
-+	idx = nr_ioapics++;
++	mutex_lock(&pdom_mutex);
 +
-+	mp_ioapics[idx].mpc_type = MP_IOAPIC;
-+	mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
-+	mp_ioapics[idx].mpc_apicaddr = address;
++	len = 0;
++	for (i = 0; i < pdomains; i++)
++		len += snprintf(tmpbuf + len,
++				len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
++				"%u ", passive_domains[i]);
++	WARN_ON(len > TMPBUFSIZE);
++	if (len != 0 && len <= TMPBUFSIZE)
++		tmpbuf[len-1] = '\n';
 +
-+#ifndef CONFIG_XEN
-+	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
-+#endif
-+	mp_ioapics[idx].mpc_apicid = id;
-+	mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
-+	
-+	/* 
-+	 * Build basic IRQ lookup table to facilitate gsi->io_apic lookups
-+	 * and to prevent reprogramming of IOAPIC pins (PCI IRQs).
-+	 */
-+	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
-+	mp_ioapic_routing[idx].gsi_start = gsi_base;
-+	mp_ioapic_routing[idx].gsi_end = gsi_base + 
-+		io_apic_get_redir_entries(idx);
++	mutex_unlock(&pdom_mutex);
 +
-+	printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
-+		"GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, 
-+		mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
-+		mp_ioapic_routing[idx].gsi_start,
-+		mp_ioapic_routing[idx].gsi_end);
++	retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
 +
-+	return;
++	kfree(tmpbuf);
++	return retval;
 +}
 +
++static struct file_operations passive_domain_ops = {
++	.read		= pdomain_read,
++	.write		= pdomain_write,
++};
 +
-+void __init mp_override_legacy_irq (
-+	u8			bus_irq,
-+	u8			polarity, 
-+	u8			trigger, 
-+	u32			gsi)
-+{
-+	struct mpc_config_intsrc intsrc;
-+	int			ioapic = -1;
-+	int			pin = -1;
-+
-+	/* 
-+	 * Convert 'gsi' to 'ioapic.pin'.
-+	 */
-+	ioapic = mp_find_ioapic(gsi);
-+	if (ioapic < 0)
-+		return;
-+	pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
-+
-+	/*
-+	 * TBD: This check is for faulty timer entries, where the override
-+	 *      erroneously sets the trigger to level, resulting in a HUGE 
-+	 *      increase of timer interrupts!
-+	 */
-+	if ((bus_irq == 0) && (trigger == 3))
-+		trigger = 1;
+ void oprofile_create_files(struct super_block * sb, struct dentry * root)
+ {
+ 	oprofilefs_create_file(sb, root, "enable", &enable_fops);
+ 	oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
++	oprofilefs_create_file(sb, root, "active_domains", &active_domain_ops);
++	oprofilefs_create_file(sb, root, "passive_domains", &passive_domain_ops);
+ 	oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
+ 	oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
+ 	oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
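
The two new files accept up to MAX_OPROF_DOMAINS ids separated by any mix of punctuation and whitespace, so from dom0 something like `echo '0 1 3' > /dev/oprofile/active_domains` (the path depends on where oprofilefs is mounted) selects domains 0, 1 and 3. For reference, a user-space rendering of the same parsing loop (MAX_OPROF_DOMAINS is assumed to be 25, per the Xen interface headers):

    #include <ctype.h>
    #include <stdlib.h>

    #define MAX_OPROF_DOMAINS 25  /* assumed value */

    /* Parse "0 1,3"-style input; returns the count, or -1 on
     * trailing junk or too many entries. */
    static int parse_domains(const char *s, int out[MAX_OPROF_DOMAINS + 1])
    {
        int i;

        /* Parse one more than the maximum, for easy error checking. */
        for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
            char *end;
            unsigned long val = strtoul(s, &end, 0);

            if (end == s)
                break;
            while (ispunct((unsigned char)*end) || isspace((unsigned char)*end))
                end++;
            out[i] = (int)val;
            s = end;
        }
        return (*s || i > MAX_OPROF_DOMAINS) ? -1 : i;
    }
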
+diff -r d894e36cfc30 -r 0aa021803deb drivers/pci/bus.c
+--- a/drivers/pci/bus.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/pci/bus.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -16,6 +16,8 @@
+ #include <linux/init.h>
+ 
+ #include "pci.h"
 +
-+	intsrc.mpc_type = MP_INTSRC;
-+	intsrc.mpc_irqtype = mp_INT;
-+	intsrc.mpc_irqflag = (trigger << 2) | polarity;
-+	intsrc.mpc_srcbus = MP_ISA_BUS;
-+	intsrc.mpc_srcbusirq = bus_irq;				       /* IRQ */
-+	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;	   /* APIC ID */
-+	intsrc.mpc_dstirq = pin;				    /* INTIN# */
++extern int pci_mem_align;
+ 
+ /**
+  * pci_bus_alloc_resource - allocate a resource from a parent bus
+@@ -43,6 +45,11 @@
+ 	int i, ret = -ENOMEM;
+ 
+ 	type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
++
++	/* If the boot parameter 'pci-mem-align' was specified then we need to
++	   align memory resources on page-size boundaries. */
++	if (pci_mem_align && (align < (PAGE_SIZE-1)))
++		align = PAGE_SIZE - 1;
+ 
+ 	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
+ 		struct resource *r = bus->resource[i];
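
Note the convention here: `align` is raised to PAGE_SIZE - 1, a mask-style value rather than the page size itself, which appears to be what this allocator's alignment parameter encodes. The corresponding round-up arithmetic, for orientation (sketch only):

    /* Round addr up to the boundary encoded by a mask-style alignment,
     * e.g. mask = PAGE_SIZE - 1 for page alignment. */
    static unsigned long align_up(unsigned long addr, unsigned long mask)
    {
        return (addr + mask) & ~mask;
    }
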
+diff -r d894e36cfc30 -r 0aa021803deb drivers/pci/msi-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/pci/msi-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,760 @@
++/*
++ * File:	msi.c
++ * Purpose:	PCI Message Signaled Interrupt (MSI)
++ *
++ * Copyright (C) 2003-2004 Intel
++ * Copyright (C) Tom Long Nguyen (tom.l.nguyen at intel.com)
++ */
 +
-+	Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n", 
-+		intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, 
-+		(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, 
-+		intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
++#include <linux/mm.h>
++#include <linux/irq.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/ioport.h>
++#include <linux/smp_lock.h>
++#include <linux/pci.h>
++#include <linux/proc_fs.h>
 +
-+	mp_irqs[mp_irq_entries] = intsrc;
-+	if (++mp_irq_entries == MAX_IRQ_SOURCES)
-+		panic("Max # of irq sources exceeded!\n");
++#include <xen/evtchn.h>
 +
-+	return;
-+}
++#include <asm/errno.h>
++#include <asm/io.h>
++#include <asm/smp.h>
 +
++#include "pci.h"
++#include "msi.h"
 +
-+void __init mp_config_acpi_legacy_irqs (void)
-+{
-+	struct mpc_config_intsrc intsrc;
-+	int			i = 0;
-+	int			ioapic = -1;
++static int pci_msi_enable = 1;
 +
-+	/* 
-+	 * Fabricate the legacy ISA bus (bus #31).
-+	 */
-+	mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
-+	Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
++static struct msi_ops *msi_ops;
 +
-+	/* 
-+	 * Locate the IOAPIC that manages the ISA IRQs (0-15). 
-+	 */
-+	ioapic = mp_find_ioapic(0);
-+	if (ioapic < 0)
-+		return;
++int msi_register(struct msi_ops *ops)
++{
++	msi_ops = ops;
++	return 0;
++}
 +
-+	intsrc.mpc_type = MP_INTSRC;
-+	intsrc.mpc_irqflag = 0;					/* Conforming */
-+	intsrc.mpc_srcbus = MP_ISA_BUS;
-+	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
++static LIST_HEAD(msi_dev_head);
++DEFINE_SPINLOCK(msi_dev_lock);
 +
-+	/* 
-+	 * Use the default configuration for the IRQs 0-15.  Unless
-+	 * overridden by (MADT) interrupt source override entries.
-+	 */
-+	for (i = 0; i < 16; i++) {
-+		int idx;
++struct msi_dev_list {
++	struct pci_dev *dev;
++	struct list_head list;
++	spinlock_t pirq_list_lock;
++	struct list_head pirq_list_head;
++};
 +
-+		for (idx = 0; idx < mp_irq_entries; idx++) {
-+			struct mpc_config_intsrc *irq = mp_irqs + idx;
++struct msi_pirq_entry {
++	struct list_head list;
++	int pirq;
++	int entry_nr;
++};
 +
-+			/* Do we already have a mapping for this ISA IRQ? */
-+			if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
-+				break;
++static struct msi_dev_list *get_msi_dev_pirq_list(struct pci_dev *dev)
++{
++	struct msi_dev_list *msi_dev_list, *ret = NULL;
++	unsigned long flags;
 +
-+			/* Do we already have a mapping for this IOAPIC pin */
-+			if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
-+				(irq->mpc_dstirq == i))
-+				break;
-+		}
++	spin_lock_irqsave(&msi_dev_lock, flags);
 +
-+		if (idx != mp_irq_entries) {
-+			printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
-+			continue;			/* IRQ already used */
-+		}
++	list_for_each_entry(msi_dev_list, &msi_dev_head, list)
++		if ( msi_dev_list->dev == dev )
++			ret = msi_dev_list;
 +
-+		intsrc.mpc_irqtype = mp_INT;
-+		intsrc.mpc_srcbusirq = i;		   /* Identity mapped */
-+		intsrc.mpc_dstirq = i;
++	if ( ret ) {
++		spin_unlock_irqrestore(&msi_dev_lock, flags);
++		return ret;
++	}
 +
-+		Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
-+			"%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, 
-+			(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, 
-+			intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, 
-+			intsrc.mpc_dstirq);
++	/* No msi_dev has been allocated for this device yet. */
++	ret = kmalloc(sizeof(struct msi_dev_list), GFP_ATOMIC);
 +
-+		mp_irqs[mp_irq_entries] = intsrc;
-+		if (++mp_irq_entries == MAX_IRQ_SOURCES)
-+			panic("Max # of irq sources exceeded!\n");
++	/* Failed to allocate msi_dev structure */
++	if ( !ret ) {
++		spin_unlock_irqrestore(&msi_dev_lock, flags);
++		return NULL;
 +	}
 +
-+	return;
++	spin_lock_init(&ret->pirq_list_lock);
++	INIT_LIST_HEAD(&ret->pirq_list_head);
++	list_add_tail(&ret->list, &msi_dev_head);
++	spin_unlock_irqrestore(&msi_dev_lock, flags);
++	return ret;
 +}
 +
-+#define MAX_GSI_NUM	4096
-+
-+int mp_register_gsi(u32 gsi, int triggering, int polarity)
++static int attach_pirq_entry(int pirq, int entry_nr,
++                             struct msi_dev_list *msi_dev_entry)
 +{
-+	int			ioapic = -1;
-+	int			ioapic_pin = 0;
-+	int			idx, bit = 0;
-+	static int		pci_irq = 16;
-+	/*
-+	 * Mapping between Global System Interrupts, which
-+	 * represent all possible interrupts, to the IRQs
-+	 * assigned to actual devices.
-+	 */
-+	static int		gsi_to_irq[MAX_GSI_NUM];
++	struct msi_pirq_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
++	unsigned long flags;
 +
-+	if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
-+		return gsi;
++	if (!entry)
++		return -ENOMEM;
++	entry->pirq = pirq;
++	entry->entry_nr = entry_nr;
++	spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
++	list_add_tail(&entry->list, &msi_dev_entry->pirq_list_head);
++	spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
++	return 0;
++}
 +
-+	/* Don't set up the ACPI SCI because it's already set up */
-+	if (acpi_fadt.sci_int == gsi)
-+		return gsi;
++static void detach_pirq_entry(int entry_nr,
++			      struct msi_dev_list *msi_dev_entry)
++{
++	unsigned long flags;
++	struct msi_pirq_entry *pirq_entry;
 +
-+	ioapic = mp_find_ioapic(gsi);
-+	if (ioapic < 0) {
-+		printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
-+		return gsi;
++	list_for_each_entry(pirq_entry, &msi_dev_entry->pirq_list_head, list) {
++		if (pirq_entry->entry_nr == entry_nr) {
++			spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
++			list_del(&pirq_entry->list);
++			spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
++			kfree(pirq_entry);
++			return;
++		}
 +	}
++}
 +
-+	ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
++/*
++ * pciback will provide the device's owner
++ */
++static int (*get_owner)(struct pci_dev *dev);
 +
-+	/* 
-+	 * Avoid pin reprogramming.  PRTs typically include entries  
-+	 * with redundant pin->gsi mappings (but unique PCI devices);
-+	 * we only program the IOAPIC on the first.
-+	 */
-+	bit = ioapic_pin % 32;
-+	idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
-+	if (idx > 3) {
-+		printk(KERN_ERR "Invalid reference to IOAPIC pin "
-+			"%d-%d\n", mp_ioapic_routing[ioapic].apic_id, 
-+			ioapic_pin);
-+		return gsi;
-+	}
-+	if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
-+		Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
-+			mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
-+		return gsi_to_irq[gsi];
++int register_msi_get_owner(int (*func)(struct pci_dev *dev))
++{
++	if (get_owner) {
++		printk(KERN_WARNING "register msi_get_owner again\n");
++		return -EEXIST;
 +	}
++	get_owner = func;
++	return 0;
++}
 +
-+	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
++int unregister_msi_get_owner(int (*func)(struct pci_dev *dev))
++{
++	if (get_owner != func)
++		return -EINVAL;
++	get_owner = NULL;
++	return 0;
++}
 +
-+	if (triggering == ACPI_LEVEL_SENSITIVE) {
-+		/*
-+		 * For PCI devices assign IRQs in order, avoiding gaps
-+		 * due to unused I/O APIC pins.
-+		 */
-+		int irq = gsi;
-+		if (gsi < MAX_GSI_NUM) {
-+			/*
-+			 * Retain the VIA chipset work-around (gsi > 15), but
-+			 * avoid a problem where the 8254 timer (IRQ0) is setup
-+			 * via an override (so it's not on pin 0 of the ioapic),
-+			 * and at the same time, the pin 0 interrupt is a PCI
-+			 * type.  The gsi > 15 test could cause these two pins
-+			 * to be shared as IRQ0, and they are not shareable.
-+			 * So test for this condition, and if necessary, avoid
-+			 * the pin collision.
-+			 */
-+			if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0))
-+				gsi = pci_irq++;
-+			/*
-+			 * Don't assign IRQ used by ACPI SCI
-+			 */
-+			if (gsi == acpi_fadt.sci_int)
-+				gsi = pci_irq++;
-+			gsi_to_irq[irq] = gsi;
-+		} else {
-+			printk(KERN_ERR "GSI %u is too high\n", gsi);
-+			return gsi;
-+		}
++static int msi_get_dev_owner(struct pci_dev *dev)
++{
++	int owner;
++
++	BUG_ON(!is_initial_xendomain());
++	if (get_owner && (owner = get_owner(dev)) >= 0) {
++		printk(KERN_INFO "get owner for dev %x: owner is %x\n",
++		       dev->devfn, owner);
++		return owner;
 +	}
 +
-+	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
-+		triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
-+		polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
-+	return gsi;
++	return DOMID_SELF;
 +}
 +
-+#endif /*CONFIG_X86_IO_APIC*/
-+#endif /*CONFIG_ACPI*/
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/pci-swiotlb-xen.c tmp-linux-2.6-xen.patch/arch/x86_64/kernel/pci-swiotlb-xen.c
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/pci-swiotlb-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/pci-swiotlb-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,55 @@
-+/* Glue code to lib/swiotlb.c */
++static int msi_unmap_pirq(struct pci_dev *dev, int pirq)
++{
++	struct physdev_unmap_pirq unmap;
++	int rc;
 +
-+#include <linux/pci.h>
-+#include <linux/cache.h>
-+#include <linux/module.h>
-+#include <asm/dma-mapping.h>
-+#include <asm/proto.h>
-+#include <asm/swiotlb.h>
-+#include <asm/dma.h>
++	unmap.domid = msi_get_dev_owner(dev);
++	unmap.pirq = evtchn_get_xen_pirq(pirq);
 +
-+#if 0
-+int swiotlb __read_mostly;
-+EXPORT_SYMBOL(swiotlb);
-+#endif
++	if ((rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap)))
++		printk(KERN_WARNING "unmap irq %x failed\n", pirq);
 +
-+struct dma_mapping_ops swiotlb_dma_ops = {
-+#if 0
-+	.mapping_error = swiotlb_dma_mapping_error,
-+	.alloc_coherent = swiotlb_alloc_coherent,
-+	.free_coherent = swiotlb_free_coherent,
-+	.map_single = swiotlb_map_single,
-+	.unmap_single = swiotlb_unmap_single,
-+	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
-+	.sync_single_for_device = swiotlb_sync_single_for_device,
-+	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
-+	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
-+	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
-+	.sync_sg_for_device = swiotlb_sync_sg_for_device,
-+	.map_sg = swiotlb_map_sg,
-+	.unmap_sg = swiotlb_unmap_sg,
-+	.dma_supported = NULL,
-+#endif
-+};
++	if (rc < 0)
++		return rc;
 +
-+void pci_swiotlb_init(void)
++	evtchn_map_pirq(pirq, 0);
++	return 0;
++}
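
A return-value convention used by this helper and by msi_map_pirq_to_vector below: a negative result from HYPERVISOR_physdev_op() is a hard hypercall failure, while a successful call whose reply carries a negative pirq means the hypervisor was built without MSI support. Condensed into a hypothetical helper for clarity (sketch only):

    /* Sketch: decoding a physdev map reply (hypothetical helper). */
    static int decode_physdev_reply(int rc, int xen_pirq)
    {
        if (rc < 0)
            return rc;        /* hypercall failed outright */
        if (xen_pirq < 0)
            return -ENOSYS;   /* Xen lacks MSI support */
        return xen_pirq;      /* usable Xen pirq */
    }
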
++
++static u64 find_table_base(struct pci_dev *dev, int pos)
 +{
-+#if 0
-+	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
-+	if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN)
-+	       swiotlb = 1;
-+	if (swiotlb_force)
-+		swiotlb = 1;
-+	if (swiotlb) {
-+		printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
-+		swiotlb_init();
-+		dma_ops = &swiotlb_dma_ops;
-+	}
-+#else
-+	swiotlb_init();
-+	if (swiotlb) {
-+		printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
-+		dma_ops = &swiotlb_dma_ops;
-+	}
-+#endif
++	u8 bar;
++	u32 reg;
++	unsigned long flags;
++
++ 	pci_read_config_dword(dev, msix_table_offset_reg(pos), &reg);
++	bar = reg & PCI_MSIX_FLAGS_BIRMASK;
++
++	flags = pci_resource_flags(dev, bar);
++	if (flags & (IORESOURCE_DISABLED | IORESOURCE_UNSET | IORESOURCE_BUSY))
++		return 0;
++
++	return pci_resource_start(dev, bar);
 +}
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/process-xen.c tmp-linux-2.6-xen.patch/arch/x86_64/kernel/process-xen.c
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/process-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/process-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,829 @@
-+/*
-+ *  linux/arch/x86-64/kernel/process.c
-+ *
-+ *  Copyright (C) 1995  Linus Torvalds
-+ *
-+ *  Pentium III FXSR, SSE support
-+ *	Gareth Hughes <gareth at valinux.com>, May 2000
-+ * 
-+ *  X86-64 port
-+ *	Andi Kleen.
-+ *
-+ *	CPU hotplug support - ashok.raj at intel.com
-+ * 
-+ *  Jun Nakajima <jun.nakajima at intel.com> 
-+ *     Modified for Xen
-+ */
 +
 +/*
-+ * This file handles the architecture-dependent parts of process handling..
++ * Protected by msi_lock
 + */
++static int msi_map_pirq_to_vector(struct pci_dev *dev, int pirq,
++				  int entry_nr, u64 table_base)
++{
++	struct physdev_map_pirq map_irq;
++	int rc;
++	domid_t domid = DOMID_SELF;
 +
-+#include <stdarg.h>
++	domid = msi_get_dev_owner(dev);
 +
-+#include <linux/cpu.h>
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/elfcore.h>
-+#include <linux/smp.h>
-+#include <linux/slab.h>
-+#include <linux/user.h>
-+#include <linux/module.h>
-+#include <linux/a.out.h>
-+#include <linux/interrupt.h>
-+#include <linux/delay.h>
-+#include <linux/ptrace.h>
-+#include <linux/utsname.h>
-+#include <linux/random.h>
-+#include <linux/notifier.h>
-+#include <linux/kprobes.h>
++	map_irq.domid = domid;
++	map_irq.type = MAP_PIRQ_TYPE_MSI;
++	map_irq.index = -1;
++	map_irq.pirq = pirq < 0 ? -1 : evtchn_get_xen_pirq(pirq);
++	map_irq.bus = dev->bus->number;
++	map_irq.devfn = dev->devfn;
++	map_irq.entry_nr = entry_nr;
++	map_irq.table_base = table_base;
 +
-+#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-+#include <asm/system.h>
-+#include <asm/io.h>
-+#include <asm/processor.h>
-+#include <asm/i387.h>
-+#include <asm/mmu_context.h>
-+#include <asm/pda.h>
-+#include <asm/prctl.h>
-+#include <asm/kdebug.h>
-+#include <xen/interface/platform.h>
-+#include <xen/interface/physdev.h>
-+#include <xen/interface/vcpu.h>
-+#include <asm/desc.h>
-+#include <asm/proto.h>
-+#include <asm/hardirq.h>
-+#include <asm/ia32.h>
-+#include <asm/idle.h>
++	if ((rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq)))
++		printk(KERN_WARNING "map irq failed\n");
 +
-+#include <xen/cpu_hotplug.h>
++	if (rc < 0)
++		return rc;
++	/* This happens when MSI support is not enabled in Xen. */
++	if (rc == 0 && map_irq.pirq < 0)
++		return -ENOSYS;
 +
-+asmlinkage extern void ret_from_fork(void);
++	BUG_ON(map_irq.pirq <= 0);
++	return evtchn_map_pirq(pirq, map_irq.pirq);
++}
 +
-+unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
++static int msi_map_vector(struct pci_dev *dev, int entry_nr, u64 table_base)
++{
++	return msi_map_pirq_to_vector(dev, -1, entry_nr, table_base);
++}
 +
-+unsigned long boot_option_idle_override = 0;
-+EXPORT_SYMBOL(boot_option_idle_override);
++static int msi_init(void)
++{
++	static int status = 0;
 +
-+/*
-+ * Powermanagement idle function, if any..
-+ */
-+void (*pm_idle)(void);
-+EXPORT_SYMBOL(pm_idle);
-+static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
++	if (pci_msi_quirk) {
++		pci_msi_enable = 0;
++		printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n");
++		status = -EINVAL;
++	}
 +
-+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
++	return status;
++}
 +
-+void idle_notifier_register(struct notifier_block *n)
++void pci_scan_msi_device(struct pci_dev *dev) { }
++
++void disable_msi_mode(struct pci_dev *dev, int pos, int type)
 +{
-+	atomic_notifier_chain_register(&idle_notifier, n);
++	u16 control;
++
++	pci_read_config_word(dev, msi_control_reg(pos), &control);
++	if (type == PCI_CAP_ID_MSI) {
++		/* Set enabled bits to single MSI & enable MSI_enable bit */
++		msi_disable(control);
++		pci_write_config_word(dev, msi_control_reg(pos), control);
++		dev->msi_enabled = 0;
++	} else {
++		msix_disable(control);
++		pci_write_config_word(dev, msi_control_reg(pos), control);
++		dev->msix_enabled = 0;
++	}
++    	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
++		/* PCI Express Endpoint device detected */
++		pci_intx(dev, 1);  /* enable intx */
++	}
 +}
-+EXPORT_SYMBOL_GPL(idle_notifier_register);
 +
-+void idle_notifier_unregister(struct notifier_block *n)
++static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
 +{
-+	atomic_notifier_chain_unregister(&idle_notifier, n);
++	u16 control;
++
++	pci_read_config_word(dev, msi_control_reg(pos), &control);
++	if (type == PCI_CAP_ID_MSI) {
++		/* Set enabled bits to single MSI & enable MSI_enable bit */
++		msi_enable(control, 1);
++		pci_write_config_word(dev, msi_control_reg(pos), control);
++		dev->msi_enabled = 1;
++	} else {
++		msix_enable(control);
++		pci_write_config_word(dev, msi_control_reg(pos), control);
++		dev->msix_enabled = 1;
++	}
++    	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
++		/* PCI Express Endpoint device detected */
++		pci_intx(dev, 0);  /* disable intx */
++	}
 +}
-+EXPORT_SYMBOL(idle_notifier_unregister);
 +
-+enum idle_state { CPU_IDLE, CPU_NOT_IDLE };
-+static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE;
++#ifdef CONFIG_PM
++int pci_save_msi_state(struct pci_dev *dev)
++{
++	int pos;
 +
-+void enter_idle(void)
++	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
++	if (pos <= 0 || dev->no_msi)
++		return 0;
++
++	if (!dev->msi_enabled)
++		return 0;
++
++	/* Restore dev->irq to its default pin-assertion vector */
++	msi_unmap_pirq(dev, dev->irq);
++	/* Disable MSI mode */
++	disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
++	/* Set the flags for use of restore */
++	dev->msi_enabled = 1;
++	return 0;
++}
++
++void pci_restore_msi_state(struct pci_dev *dev)
 +{
-+	__get_cpu_var(idle_state) = CPU_IDLE;
-+	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
++	int pos, pirq;
++
++	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
++	if (pos <= 0)
++		return;
++
++	if (!dev->msi_enabled)
++		return;
++
++	pirq = msi_map_pirq_to_vector(dev, dev->irq, 0, 0);
++	if (pirq < 0)
++		return;
++	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
 +}
 +
-+static void __exit_idle(void)
++int pci_save_msix_state(struct pci_dev *dev)
 +{
-+	__get_cpu_var(idle_state) = CPU_NOT_IDLE;
-+	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
++	int pos;
++	unsigned long flags;
++	struct msi_dev_list *msi_dev_entry;
++	struct msi_pirq_entry *pirq_entry, *tmp;
++
++	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
++	if (pos <= 0 || dev->no_msi)
++		return 0;
++
++	/* save the capability */
++	if (!dev->msix_enabled)
++		return 0;
++
++	msi_dev_entry = get_msi_dev_pirq_list(dev);
++
++	spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
++        list_for_each_entry_safe(pirq_entry, tmp,
++                                 &msi_dev_entry->pirq_list_head, list)
++		msi_unmap_pirq(dev, pirq_entry->pirq);
++	spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
++
++	disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
++	/* Set the flags for use of restore */
++	dev->msix_enabled = 1;
++
++	return 0;
 +}
 +
-+/* Called from interrupts to signify idle end */
-+void exit_idle(void)
++void pci_restore_msix_state(struct pci_dev *dev)
 +{
-+	if (current->pid | read_pda(irqcount))
++	int pos;
++	unsigned long flags;
++	u64 table_base;
++	struct msi_dev_list *msi_dev_entry;
++	struct msi_pirq_entry *pirq_entry, *tmp;
++
++	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
++	if (pos <= 0)
 +		return;
-+	__exit_idle();
++
++	if (!dev->msix_enabled)
++		return;
++
++	msi_dev_entry = get_msi_dev_pirq_list(dev);
++	table_base = find_table_base(dev, pos);
++	if (!table_base)
++		return;
++
++	spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
++	list_for_each_entry_safe(pirq_entry, tmp,
++				 &msi_dev_entry->pirq_list_head, list) {
++		int rc = msi_map_pirq_to_vector(dev, pirq_entry->pirq,
++						pirq_entry->entry_nr, table_base);
++		if (rc < 0)
++			printk(KERN_WARNING
++			       "%s: re-mapping irq #%d (pirq%d) failed: %d\n",
++			       pci_name(dev), pirq_entry->entry_nr,
++			       pirq_entry->pirq, rc);
++	}
++	spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
++
++	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
 +}
++#endif
 +
-+/*
-+ * On SMP it's slightly faster (but much more power-consuming!)
-+ * to poll the ->need_resched flag instead of waiting for the
-+ * cross-CPU IPI to arrive. Use this option with caution.
-+ */
-+static void poll_idle (void)
++/**
++ * msi_capability_init - configure device's MSI capability structure
++ * @dev: pointer to the pci_dev data structure of MSI device function
++ *
++ * Setup the MSI capability structure of device function with a single
++ * MSI vector, regardless of device function is capable of handling
++ * multiple messages. A return of zero indicates the successful setup
++ * of an entry zero with the new MSI vector or non-zero for otherwise.
++ **/
++static int msi_capability_init(struct pci_dev *dev)
 +{
-+	local_irq_enable();
++	int pos, pirq;
++	u16 control;
 +
-+	asm volatile(
-+		"2:"
-+		"testl %0,%1;"
-+		"rep; nop;"
-+		"je 2b;"
-+		: :
-+		"i" (_TIF_NEED_RESCHED),
-+		"m" (current_thread_info()->flags));
++	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
++	pci_read_config_word(dev, msi_control_reg(pos), &control);
++
++	pirq = msi_map_vector(dev, 0, 0);
++	if (pirq < 0)
++		return -EBUSY;
++
++	dev->irq = pirq;
++	/* Set MSI enabled bits	 */
++	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
++	dev->msi_enabled = 1;
++
++	return 0;
 +}
 +
-+static void xen_idle(void)
++/**
++ * msix_capability_init - configure device's MSI-X capability
++ * @dev: pointer to the pci_dev data structure of MSI-X device function
++ * @entries: pointer to an array of struct msix_entry entries
++ * @nvec: number of @entries
++ *
++ * Setup the MSI-X capability structure of device function with a
++ * single MSI-X vector. A return of zero indicates the successful setup of
++ * requested MSI-X entries with allocated vectors or non-zero for otherwise.
++ **/
++static int msix_capability_init(struct pci_dev *dev,
++				struct msix_entry *entries, int nvec)
 +{
-+	local_irq_disable();
++	u64 table_base;
++	int pirq, i, j, mapped, pos;
++	struct msi_dev_list *msi_dev_entry = get_msi_dev_pirq_list(dev);
++	struct msi_pirq_entry *pirq_entry;
 +
-+	if (need_resched())
-+		local_irq_enable();
-+	else {
-+		current_thread_info()->status &= ~TS_POLLING;
-+		smp_mb__after_clear_bit();
-+		safe_halt();
-+		current_thread_info()->status |= TS_POLLING;
++	if (!msi_dev_entry)
++		return -ENOMEM;
++
++	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
++	table_base = find_table_base(dev, pos);
++	if (!table_base)
++		return -ENODEV;
++
++	/* MSI-X Table Initialization */
++	for (i = 0; i < nvec; i++) {
++		mapped = 0;
++		list_for_each_entry(pirq_entry, &msi_dev_entry->pirq_list_head, list) {
++			if (pirq_entry->entry_nr == entries[i].entry) {
++				printk(KERN_WARNING "msix entry %d for dev %02x:%02x:%01x was \
++				       not freed before being acquired again.\n", entries[i].entry,
++					   dev->bus->number, PCI_SLOT(dev->devfn),
++					   PCI_FUNC(dev->devfn));
++				(entries + i)->vector = pirq_entry->pirq;
++				mapped = 1;
++				break;
++			}
++		}
++		if (mapped)
++			continue;
++		pirq = msi_map_vector(dev, entries[i].entry, table_base);
++		if (pirq < 0)
++			break;
++		attach_pirq_entry(pirq, entries[i].entry, msi_dev_entry);
++		(entries + i)->vector = pirq;
++	}
++
++	if (i != nvec) {
++		for (j = --i; j >= 0; j--) {
++			msi_unmap_pirq(dev, entries[j].vector);
++			detach_pirq_entry(entries[j].entry, msi_dev_entry);
++			entries[j].vector = 0;
++		}
++		return -EBUSY;
 +	}
++
++	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
++	dev->msix_enabled = 1;
++
++	return 0;
 +}
 +
-+#ifdef CONFIG_HOTPLUG_CPU
-+static inline void play_dead(void)
++/**
++ * pci_enable_msi - configure device's MSI capability structure
++ * @dev: pointer to the pci_dev data structure of MSI device function
++ *
++ * Setup the MSI capability structure of device function with
++ * a single MSI vector upon its software driver call to request for
++ * MSI mode enabled on its hardware device function. A return of zero
++ * indicates the successful setup of an entry zero with the new MSI
++ * vector or non-zero for otherwise.
++ **/
++extern int pci_frontend_enable_msi(struct pci_dev *dev);
++int pci_enable_msi(struct pci_dev* dev)
 +{
-+	idle_task_exit();
-+	local_irq_disable();
-+	cpu_clear(smp_processor_id(), cpu_initialized);
-+	preempt_enable_no_resched();
-+	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
-+	cpu_bringup();
++	struct pci_bus *bus;
++	int pos, temp, status = -EINVAL;
++
++	if (!pci_msi_enable || !dev)
++ 		return status;
++
++	if (dev->no_msi)
++		return status;
++
++	for (bus = dev->bus; bus; bus = bus->parent)
++		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
++			return -EINVAL;
++
++	status = msi_init();
++	if (status < 0)
++		return status;
++
++#ifdef CONFIG_XEN_PCIDEV_FRONTEND
++	if (!is_initial_xendomain())
++	{
++		int ret;
++
++		temp = dev->irq;
++		ret = pci_frontend_enable_msi(dev);
++		if (ret)
++			return ret;
++
++		dev->irq_old = temp;
++
++		return ret;
++	}
++#endif
++
++	temp = dev->irq;
++
++	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
++	if (!pos)
++		return -EINVAL;
++
++	/* Check whether driver already requested for MSI-X vectors */
++	if (dev->msix_enabled) {
++		printk(KERN_INFO "PCI: %s: Can't enable MSI.  "
++			   "Device already has MSI-X vectors assigned\n",
++			   pci_name(dev));
++		dev->irq = temp;
++		return -EINVAL;
++	}
++
++	status = msi_capability_init(dev);
++	if ( !status )
++		dev->irq_old = temp;
++	else
++		dev->irq = temp;
++
++	return status;
 +}
-+#else
-+static inline void play_dead(void)
++
++extern void pci_frontend_disable_msi(struct pci_dev* dev);
++void pci_disable_msi(struct pci_dev* dev)
 +{
-+	BUG();
++	int pos;
++	int pirq;
++
++	if (!pci_msi_enable)
++		return;
++	if (!dev)
++		return;
++
++#ifdef CONFIG_XEN_PCIDEV_FRONTEND
++	if (!is_initial_xendomain()) {
++		pci_frontend_disable_msi(dev);
++		dev->irq = dev->irq_old;
++		return;
++	}
++#endif
++
++	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
++	if (!pos)
++		return;
++
++	pirq = dev->irq;
++	/* Restore dev->irq to its default pin-assertion vector */
++	dev->irq = dev->irq_old;
++	msi_unmap_pirq(dev, pirq);
++
++	/* Disable MSI mode */
++	disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
 +}
-+#endif /* CONFIG_HOTPLUG_CPU */
 +
-+/*
-+ * The idle thread. There's no useful work to be
-+ * done, so just try to conserve power and have a
-+ * low exit latency (ie sit in a loop waiting for
-+ * somebody to say that they'd like to reschedule)
-+ */
-+void cpu_idle (void)
++/**
++ * pci_enable_msix - configure device's MSI-X capability structure
++ * @dev: pointer to the pci_dev data structure of MSI-X device function
++ * @entries: pointer to an array of MSI-X entries
++ * @nvec: number of MSI-X vectors requested for allocation by device driver
++ *
++ * Setup the MSI-X capability structure of device function with the number
++ * of requested vectors upon its software driver call to request for
++ * MSI-X mode enabled on its hardware device function. A return of zero
++ * indicates the successful configuration of MSI-X capability structure
++ * with new allocated MSI-X vectors. A return of < 0 indicates a failure.
++ * Or a return of > 0 indicates that driver request is exceeding the number
++ * of vectors available. Driver should use the returned value to re-send
++ * its request.
++ **/
++extern int pci_frontend_enable_msix(struct pci_dev *dev,
++		struct msix_entry *entries, int nvec);
++int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
 +{
-+	current_thread_info()->status |= TS_POLLING;
-+	/* endless idle loop with no priority at all */
-+	while (1) {
-+		while (!need_resched()) {
-+			void (*idle)(void);
++	struct pci_bus *bus;
++	int status, pos, nr_entries;
++	int i, j, temp;
++	u16 control;
 +
-+			if (__get_cpu_var(cpu_idle_state))
-+				__get_cpu_var(cpu_idle_state) = 0;
-+			rmb();
-+			idle = xen_idle; /* no alternatives */
-+			if (cpu_is_offline(smp_processor_id()))
-+				play_dead();
-+			enter_idle();
-+			idle();
-+			__exit_idle();
++	if (!pci_msi_enable || !dev || !entries)
++ 		return -EINVAL;
++
++	if (dev->no_msi)
++		return -EINVAL;
++
++	for (bus = dev->bus; bus; bus = bus->parent)
++		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
++			return -EINVAL;
++
++#ifdef CONFIG_XEN_PCIDEV_FRONTEND
++	if (!is_initial_xendomain()) {
++		int ret;
++
++		ret = pci_frontend_enable_msix(dev, entries, nvec);
++		if (ret) {
++			printk(KERN_WARNING "pci_frontend_enable_msix returned %x\n", ret);
++			return ret;
 +		}
 +
-+		preempt_enable_no_resched();
-+		schedule();
-+		preempt_disable();
++		return 0;
 +	}
-+}
++#endif
 +
-+void cpu_idle_wait(void)
-+{
-+	unsigned int cpu, this_cpu = get_cpu();
-+	cpumask_t map;
++	status = msi_init();
++	if (status < 0)
++		return status;
++
++	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
++	if (!pos)
++ 		return -EINVAL;
++
++	pci_read_config_word(dev, msi_control_reg(pos), &control);
++	nr_entries = multi_msix_capable(control);
++	if (nvec > nr_entries)
++		return -EINVAL;
 +
-+	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
-+	put_cpu();
++	/* Check for any invalid entries */
++	for (i = 0; i < nvec; i++) {
++		if (entries[i].entry >= nr_entries)
++			return -EINVAL;		/* invalid entry */
++		for (j = i + 1; j < nvec; j++) {
++			if (entries[i].entry == entries[j].entry)
++				return -EINVAL;	/* duplicate entry */
++		}
++	}
 +
-+ 	cpus_clear(map);
-+	for_each_online_cpu(cpu) {
-+		per_cpu(cpu_idle_state, cpu) = 1;
-+		cpu_set(cpu, map);
++	temp = dev->irq;
++	/* Check whether driver already requested for MSI vector */
++	if (dev->msi_enabled) {
++		printk(KERN_INFO "PCI: %s: Can't enable MSI-X.  "
++		       "Device already has an MSI vector assigned\n",
++		       pci_name(dev));
++		dev->irq = temp;
++		return -EINVAL;
 +	}
 +
-+	__get_cpu_var(cpu_idle_state) = 0;
++	status = msix_capability_init(dev, entries, nvec);
 +
-+	wmb();
-+	do {
-+		ssleep(1);
-+		for_each_online_cpu(cpu) {
-+			if (cpu_isset(cpu, map) &&
-+					!per_cpu(cpu_idle_state, cpu))
-+				cpu_clear(cpu, map);
-+		}
-+		cpus_and(map, map, cpu_online_map);
-+	} while (!cpus_empty(map));
-+}
-+EXPORT_SYMBOL_GPL(cpu_idle_wait);
++	if ( !status )
++		dev->irq_old = temp;
++	else
++		dev->irq = temp;
 +
-+void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) 
-+{
++	return status;
 +}
 +
-+static int __init idle_setup (char *str)
++extern void pci_frontend_disable_msix(struct pci_dev* dev);
++void pci_disable_msix(struct pci_dev* dev)
 +{
-+	if (!strncmp(str, "poll", 4)) {
-+		printk("using polling idle threads.\n");
-+		pm_idle = poll_idle;
-+	}
-+
-+	boot_option_idle_override = 1;
-+	return 1;
-+}
++	int pos;
++	u16 control;
 +
-+__setup("idle=", idle_setup);
 +
-+/* Prints also some state that isn't saved in the pt_regs */ 
-+void __show_regs(struct pt_regs * regs)
-+{
-+	unsigned long fs, gs, shadowgs;
-+	unsigned int fsindex,gsindex;
-+	unsigned int ds,cs,es; 
++	if (!pci_msi_enable)
++		return;
++	if (!dev)
++		return;
 +
-+	printk("\n");
-+	print_modules();
-+	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
-+		current->pid, current->comm, print_tainted(),
-+		system_utsname.release,
-+		(int)strcspn(system_utsname.version, " "),
-+		system_utsname.version);
-+	printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
-+	printk_address(regs->rip); 
-+	printk("RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss, regs->rsp,
-+		regs->eflags);
-+	printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
-+	       regs->rax, regs->rbx, regs->rcx);
-+	printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
-+	       regs->rdx, regs->rsi, regs->rdi); 
-+	printk("RBP: %016lx R08: %016lx R09: %016lx\n",
-+	       regs->rbp, regs->r8, regs->r9); 
-+	printk("R10: %016lx R11: %016lx R12: %016lx\n",
-+	       regs->r10, regs->r11, regs->r12); 
-+	printk("R13: %016lx R14: %016lx R15: %016lx\n",
-+	       regs->r13, regs->r14, regs->r15); 
++#ifdef CONFIG_XEN_PCIDEV_FRONTEND
++	if (!is_initial_xendomain()) {
++		pci_frontend_disable_msix(dev);
++		dev->irq = dev->irq_old;
++		return;
++	}
++#endif
 +
-+	asm("mov %%ds,%0" : "=r" (ds)); 
-+	asm("mov %%cs,%0" : "=r" (cs)); 
-+	asm("mov %%es,%0" : "=r" (es)); 
-+	asm("mov %%fs,%0" : "=r" (fsindex));
-+	asm("mov %%gs,%0" : "=r" (gsindex));
++	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
++	if (!pos)
++		return;
 +
-+	rdmsrl(MSR_FS_BASE, fs);
-+	rdmsrl(MSR_GS_BASE, gs); 
-+	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); 
++	pci_read_config_word(dev, msi_control_reg(pos), &control);
++	if (!(control & PCI_MSIX_FLAGS_ENABLE))
++		return;
 +
-+	printk("FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", 
-+	       fs,fsindex,gs,gsindex,shadowgs); 
-+	printk("CS:  %04x DS: %04x ES: %04x\n", cs, ds, es); 
++	msi_remove_pci_irq_vectors(dev);
 +
++	/* Disable MSI mode */
++	disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
 +}
 +
-+void show_regs(struct pt_regs *regs)
++/**
++ * msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state
++ * @dev: pointer to the pci_dev data structure of MSI(X) device function
++ *
++ * Being called during hotplug remove, from which the device function
++ * is hot-removed. All previous assigned MSI/MSI-X vectors, if
++ * allocated for this device function, are reclaimed to unused state,
++ * which may be used later on.
++ **/
++void msi_remove_pci_irq_vectors(struct pci_dev* dev)
 +{
-+	printk("CPU %d:", smp_processor_id());
-+	__show_regs(regs);
-+	show_trace(NULL, regs, (void *)(regs + 1));
++	unsigned long flags;
++	struct msi_dev_list *msi_dev_entry;
++	struct msi_pirq_entry *pirq_entry, *tmp;
++
++	if (!pci_msi_enable || !dev)
++		return;
++
++	msi_dev_entry = get_msi_dev_pirq_list(dev);
++
++	spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
++	if (!list_empty(&msi_dev_entry->pirq_list_head))
++	{
++		printk(KERN_WARNING "msix pirqs for dev %02x:%02x:%01x were not "
++		       "freed before being acquired again\n", dev->bus->number,
++		       PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
++		list_for_each_entry_safe(pirq_entry, tmp,
++		                         &msi_dev_entry->pirq_list_head, list) {
++			msi_unmap_pirq(dev, pirq_entry->pirq);
++			list_del(&pirq_entry->list);
++			kfree(pirq_entry);
++		}
++	}
++	spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
++	dev->irq = dev->irq_old;
 +}
 +
-+/*
-+ * Free current thread data structures etc..
-+ */
-+void exit_thread(void)
++void pci_no_msi(void)
 +{
-+	struct task_struct *me = current;
-+	struct thread_struct *t = &me->thread;
++	pci_msi_enable = 0;
++}
 +
-+	if (me->thread.io_bitmap_ptr) { 
-+#ifndef CONFIG_X86_NO_TSS
-+		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
-+#endif
++EXPORT_SYMBOL(pci_enable_msi);
++EXPORT_SYMBOL(pci_disable_msi);
++EXPORT_SYMBOL(pci_enable_msix);
++EXPORT_SYMBOL(pci_disable_msix);
 +#ifdef CONFIG_XEN
-+		struct physdev_set_iobitmap iobmp_op;
-+		memset(&iobmp_op, 0, sizeof(iobmp_op));
++EXPORT_SYMBOL(register_msi_get_owner);
++EXPORT_SYMBOL(unregister_msi_get_owner);
 +#endif
 +
-+		kfree(t->io_bitmap_ptr);
-+		t->io_bitmap_ptr = NULL;
-+		/*
-+		 * Careful, clear this in the TSS too:
-+		 */
-+#ifndef CONFIG_X86_NO_TSS
-+		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
-+		put_cpu();
-+#endif
-+#ifdef CONFIG_XEN
-+		HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &iobmp_op);
-+#endif
-+		t->io_bitmap_max = 0;
-+	}
-+}
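For reference, the entry validation that the new pci_enable_msix() performs above can be read in isolation. The following standalone sketch mirrors that logic; the function name check_msix_entries is illustrative and not part of the patch, and struct msix_entry follows this kernel's <linux/pci.h> layout:

/* Reject out-of-range and duplicate MSI-X table entries (an O(n^2)
 * scan, which is fine for the small vector counts involved). */
struct msix_entry {
	u16 vector;	/* filled in by the kernel on success */
	u16 entry;	/* requested entry number, driver-chosen */
};

static int check_msix_entries(const struct msix_entry *entries,
			      int nvec, int nr_entries)
{
	int i, j;

	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++)
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
	}
	return 0;
}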
+diff -r d894e36cfc30 -r 0aa021803deb drivers/pci/quirks.c
+--- a/drivers/pci/quirks.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/pci/quirks.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -22,6 +22,40 @@
+ #include <linux/delay.h>
+ #include <linux/acpi.h>
+ #include "pci.h"
 +
-+void load_gs_index(unsigned gs)
++/* A global flag which signals if we should page-align PCI mem windows. */
++int pci_mem_align = 0;
++
++static int __init set_pci_mem_align(char *str)
 +{
-+	HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gs);
++	pci_mem_align = 1;
++	return 1;
 +}
++__setup("pci-mem-align", set_pci_mem_align);
 +
-+void flush_thread(void)
++/* This quirk function lets us force all memory resources assigned to
++ * PCI devices to be page-aligned.
++ */
++static void __devinit quirk_align_mem_resources(struct pci_dev *dev)
 +{
-+	struct task_struct *tsk = current;
-+	struct thread_info *t = current_thread_info();
++	int i;
++	struct resource *r;
++	resource_size_t old_start;
 +
-+	if (t->flags & _TIF_ABI_PENDING) {
-+		t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
-+		if (t->flags & _TIF_IA32)
-+			current_thread_info()->status |= TS_COMPAT;
-+	}
++	if (!pci_mem_align)
++		return;
 +
-+	tsk->thread.debugreg0 = 0;
-+	tsk->thread.debugreg1 = 0;
-+	tsk->thread.debugreg2 = 0;
-+	tsk->thread.debugreg3 = 0;
-+	tsk->thread.debugreg6 = 0;
-+	tsk->thread.debugreg7 = 0;
-+	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));	
-+	/*
-+	 * Forget coprocessor state..
-+	 */
-+	clear_fpu(tsk);
-+	clear_used_math();
-+}
++	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
++		r = &dev->resource[i];
++		if ((r == NULL) || !(r->flags & IORESOURCE_MEM))
++			continue;
 +
-+void release_thread(struct task_struct *dead_task)
-+{
-+	if (dead_task->mm) {
-+		if (dead_task->mm->context.size) {
-+			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
-+					dead_task->comm,
-+					dead_task->mm->context.ldt,
-+					dead_task->mm->context.size);
-+			BUG();
-+		}
++		old_start = r->start;
++		r->start = (r->start + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
++		r->end = r->end - (old_start - r->start);
 +	}
 +}
++DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_align_mem_resources);
+ 
+ /* The Mellanox Tavor device gives false positive parity errors
+  * Mark this device with a broken_parity_status, to allow
+@@ -1512,10 +1546,11 @@
+ 
+ static void __devinit quirk_e100_interrupt(struct pci_dev *dev)
+ {
+-	u16 command;
++	u16 command, pmcsr;
+ 	u32 bar;
+ 	u8 __iomem *csr;
+ 	u8 cmd_hi;
++	int pm;
+ 
+ 	switch (dev->device) {
+ 	/* PCI IDs taken from drivers/net/e100.c */
+@@ -1549,6 +1584,17 @@
+ 
+ 	if (!(command & PCI_COMMAND_MEMORY) || !bar)
+ 		return;
 +
-+static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
-+{
-+	struct user_desc ud = { 
-+		.base_addr = addr,
-+		.limit = 0xfffff,
-+		.seg_32bit = 1,
-+		.limit_in_pages = 1,
-+		.useable = 1,
-+	};
-+	struct n_desc_struct *desc = (void *)t->thread.tls_array;
-+	desc += tls;
-+	desc->a = LDT_entry_a(&ud); 
-+	desc->b = LDT_entry_b(&ud); 
-+}
++	/*
++	 * Check that the device is in the D0 power state. If it's not,
++	 * there is no point in looking any further.
++	 */
++	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
++	if (pm) {
++		pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
++		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0)
++			return;
++	}
+ 
+ 	csr = ioremap(bar, 8);
+ 	if (!csr) {
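The pci-mem-align quirk above shifts each memory window up to the next page boundary while keeping its size. A minimal sketch of that arithmetic, with an assumed helper name and a worked value (neither is part of the patch):

/* Illustrative only -- same rounding as quirk_align_mem_resources().
 * With 4 KiB pages, [0xd0000800, 0xd00017ff] becomes
 * [0xd0001000, 0xd0001fff]: both ends move up by the same delta,
 * so the window size is preserved. */
static inline void page_align_window(resource_size_t *start,
				     resource_size_t *end)
{
	resource_size_t aligned =
		(*start + PAGE_SIZE - 1) & ~(resource_size_t)(PAGE_SIZE - 1);

	*end  += aligned - *start;
	*start = aligned;
}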
+diff -r d894e36cfc30 -r 0aa021803deb drivers/pnp/manager.c
+--- a/drivers/pnp/manager.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/pnp/manager.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -168,7 +168,7 @@
+ 	return 0;
+ }
+ 
+-static int pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
++static void pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
+ {
+ 	resource_size_t *start, *end;
+ 	unsigned long *flags;
+@@ -179,18 +179,14 @@
+ 		1, 3, 5, 6, 7, 0, 2, 4
+ 	};
+ 
+-	if (!dev || !rule)
+-		return -EINVAL;
+-
+ 	if (idx >= PNP_MAX_DMA) {
+ 		pnp_err("More than 2 dmas is incompatible with pnp specifications.");
+-		/* pretend we were successful so at least the manager won't try again */
+-		return 1;
++		return;
+ 	}
+ 
+ 	/* check if this resource has been manually set, if so skip */
+ 	if (!(dev->res.dma_resource[idx].flags & IORESOURCE_AUTO))
+-		return 1;
++		return;
+ 
+ 	start = &dev->res.dma_resource[idx].start;
+ 	end = &dev->res.dma_resource[idx].end;
+@@ -200,19 +196,17 @@
+ 	*flags |= rule->flags | IORESOURCE_DMA;
+ 	*flags &=  ~IORESOURCE_UNSET;
+ 
+-	if (!rule->map) {
+-		*flags |= IORESOURCE_DISABLED;
+-		return 1; /* skip disabled resource requests */
+-	}
+-
+ 	for (i = 0; i < 8; i++) {
+ 		if(rule->map & (1<<xtab[i])) {
+ 			*start = *end = xtab[i];
+ 			if(pnp_check_dma(dev, idx))
+-				return 1;
++				return;
+ 		}
+ 	}
+-	return 0;
++#ifdef MAX_DMA_CHANNELS
++	*start = *end = MAX_DMA_CHANNELS;
++#endif
++	*flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
+ }
+ 
+ /**
+@@ -331,8 +325,7 @@
+ 			irq = irq->next;
+ 		}
+ 		while (dma) {
+-			if (!pnp_assign_dma(dev, dma, ndma))
+-				goto fail;
++			pnp_assign_dma(dev, dma, ndma);
+ 			ndma++;
+ 			dma = dma->next;
+ 		}
+@@ -367,8 +360,7 @@
+ 			irq = irq->next;
+ 		}
+ 		while (dma) {
+-			if (!pnp_assign_dma(dev, dma, ndma))
+-				goto fail;
++			pnp_assign_dma(dev, dma, ndma);
+ 			ndma++;
+ 			dma = dma->next;
+ 		}
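The reworked pnp_assign_dma() above no longer fails the whole resource set when no DMA channel fits; it scans the preference table and otherwise parks the resource on an invalid channel and marks it disabled. A generic sketch of that control flow (pick_dma_channel and the usable callback are illustrative stand-ins for the driver's pnp_check_dma()):

#include <stdbool.h>

static int pick_dma_channel(unsigned int map,
			    bool (*usable)(int chan), int invalid)
{
	/* Same lowest-conflict preference order as the driver's xtab[]. */
	static const int xtab[8] = { 1, 3, 5, 6, 7, 0, 2, 4 };
	int i;

	for (i = 0; i < 8; i++)
		if ((map & (1 << xtab[i])) && usable(xtab[i]))
			return xtab[i];
	return invalid;	/* caller flags the resource UNSET|DISABLED */
}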
+diff -r d894e36cfc30 -r 0aa021803deb drivers/serial/Kconfig
+--- a/drivers/serial/Kconfig	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/serial/Kconfig	Wed Sep 10 10:54:08 2008 +0100
+@@ -11,6 +11,7 @@
+ config SERIAL_8250
+ 	tristate "8250/16550 and compatible serial support"
+ 	depends on (BROKEN || !SPARC)
++	depends on !XEN_DISABLE_SERIAL
+ 	select SERIAL_CORE
+ 	---help---
+ 	  This selects whether you want to include the driver for the standard
+diff -r d894e36cfc30 -r 0aa021803deb drivers/video/Kconfig
+--- a/drivers/video/Kconfig	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/video/Kconfig	Wed Sep 10 10:54:08 2008 +0100
+@@ -1254,7 +1254,7 @@
+ 	tristate "Cyberblade/i1 support"
+ 	depends on FB && PCI && X86_32 && !64BIT
+ 	select FB_CFB_IMAGEBLIT
+-	select VIDEO_SELECT
++	select VIDEO_SELECT if !XEN
+ 	---help---
+ 	  This driver is supposed to support the Trident Cyberblade/i1
+ 	  graphics core integrated in the VIA VT8601A North Bridge,
+diff -r d894e36cfc30 -r 0aa021803deb drivers/video/console/Kconfig
+--- a/drivers/video/console/Kconfig	Tue Sep 09 11:37:38 2008 +0200
++++ b/drivers/video/console/Kconfig	Wed Sep 10 10:54:08 2008 +0100
+@@ -53,6 +53,7 @@
+ config VIDEO_SELECT
+ 	bool "Video mode selection support"
+ 	depends on  X86 && VGA_CONSOLE
++	depends on !XEN
+ 	---help---
+ 	  This enables support for text mode selection on kernel startup. If
+ 	  you want to take advantage of some high-resolution text mode your
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/Kconfig
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/Kconfig	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,298 @@
++#
++# This Kconfig describes the Xen options
++#
 +
-+static inline u32 read_32bit_tls(struct task_struct *t, int tls)
-+{
-+	struct desc_struct *desc = (void *)t->thread.tls_array;
-+	desc += tls;
-+	return desc->base0 | 
-+		(((u32)desc->base1) << 16) | 
-+		(((u32)desc->base2) << 24);
-+}
++mainmenu "Xen Configuration"
 +
-+/*
-+ * This gets called before we allocate a new thread and copy
-+ * the current task into it.
-+ */
-+void prepare_to_copy(struct task_struct *tsk)
-+{
-+	unlazy_fpu(tsk);
-+}
++config XEN
++	bool
 +
-+int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp, 
-+		unsigned long unused,
-+	struct task_struct * p, struct pt_regs * regs)
-+{
-+	int err;
-+	struct pt_regs * childregs;
-+	struct task_struct *me = current;
++if XEN
++config XEN_INTERFACE_VERSION
++	hex
++	default 0x00030207
 +
-+	childregs = ((struct pt_regs *)
-+			(THREAD_SIZE + task_stack_page(p))) - 1;
-+	*childregs = *regs;
++menu "XEN"
 +
-+	childregs->rax = 0;
-+	childregs->rsp = rsp;
-+	if (rsp == ~0UL)
-+		childregs->rsp = (unsigned long)childregs;
++config XEN_PRIVILEGED_GUEST
++	bool "Privileged Guest (domain 0)"
++	help
++	  Support for privileged operation (domain 0)
++
++config XEN_UNPRIVILEGED_GUEST
++	def_bool !XEN_PRIVILEGED_GUEST
++
++config XEN_PRIVCMD
++	def_bool y
++	depends on PROC_FS
++
++config XEN_XENBUS_DEV
++	def_bool y
++	depends on PROC_FS
++
++config XEN_BACKEND
++        tristate "Backend driver support"
++        default XEN_PRIVILEGED_GUEST
++        help
++          Support for backend device drivers that provide I/O services
++          to other virtual machines.
++
++config XEN_BLKDEV_BACKEND
++	tristate "Block-device backend driver"
++	depends on XEN_BACKEND
++	default XEN_BACKEND
++	help
++	  The block-device backend driver allows the kernel to export its
++	  block devices to other guests via a high-performance shared-memory
++	  interface.
++
++config XEN_BLKDEV_TAP
++	tristate "Block-device tap backend driver"
++	depends on XEN_BACKEND
++	default XEN_BACKEND
++	help
++	  The block tap driver is an alternative to the block back driver
++	  and allows VM block requests to be redirected to userspace through
++	  a device interface.  The tap allows user-space development of
++	  high-performance block backends, where disk images may be implemented
++	  as files, in memory, or on other hosts across the network.  This
++	  driver can safely coexist with the existing blockback driver.
++
++config XEN_NETDEV_BACKEND
++	tristate "Network-device backend driver"
++        depends on XEN_BACKEND && NET
++	default XEN_BACKEND
++	help
++	  The network-device backend driver allows the kernel to export its
++	  network devices to other guests via a high-performance shared-memory
++	  interface.
++
++config XEN_NETDEV_PIPELINED_TRANSMITTER
++	bool "Pipelined transmitter (DANGEROUS)"
++	depends on XEN_NETDEV_BACKEND
++	help
++	  If the net backend is a dumb domain, such as a transparent Ethernet
++	  bridge with no local IP interface, it is safe to say Y here to get
++	  slightly lower network overhead.
++	  If the backend has a local IP interface; or may be doing smart things
++	  like reassembling packets to perform firewall filtering; or if you
++	  are unsure; or if you experience network hangs when this option is
++	  enabled; then you must say N here.
++
++config XEN_NETDEV_LOOPBACK
++	tristate "Network-device loopback driver"
++	depends on XEN_NETDEV_BACKEND
++	help
++	  A two-interface loopback device to emulate a local netfront-netback
++	  connection. If unsure, it is probably safe to say N here.
++
++config XEN_PCIDEV_BACKEND
++	tristate "PCI-device backend driver"
++	depends on PCI && XEN_BACKEND
++	default XEN_BACKEND
++	help
++	  The PCI device backend driver allows the kernel to export arbitrary
++	  PCI devices to other guests. If you select this to be a module, you
++	  will need to make sure no other driver has bound to the device(s)
++	  you want to make visible to other guests.
++
++choice
++	prompt "PCI Backend Mode"
++	depends on XEN_PCIDEV_BACKEND
++	default XEN_PCIDEV_BACKEND_VPCI if !IA64
++	default XEN_PCIDEV_BACKEND_CONTROLLER if IA64
++
++config XEN_PCIDEV_BACKEND_VPCI
++	bool "Virtual PCI"
++	---help---
++	  This PCI Backend hides the true PCI topology and makes the frontend
++	  think there is a single PCI bus with only the exported devices on it.
++	  For example, a device at 03:05.0 will be re-assigned to 00:00.0. A
++	  second device at 02:1a.1 will be re-assigned to 00:01.1.
++
++config XEN_PCIDEV_BACKEND_PASS
++	bool "Passthrough"
++	---help---
++	  This PCI Backend provides a real view of the PCI topology to the
++	  frontend (for example, a device at 06:01.b will still appear at
++	  06:01.b to the frontend). This is similar to how Xen 2.0.x exposed
++	  PCI devices to its driver domains. This may be required for drivers
++	  which depend on finding their hardware in certain bus/slot
++	  locations.
++
++config XEN_PCIDEV_BACKEND_SLOT
++	bool "Slot"
++	---help---
++	  This PCI Backend hides the true PCI topology and makes the frontend
++	  think there is a single PCI bus with only the exported devices on it.
++	  Contrary to the virtual PCI backend, a function becomes a new slot.
++	  For example, a device at 03:05.2 will be re-assigned to 00:00.0. A
++	  second device at 02:1a.1 will be re-assigned to 00:01.0.
++
++config XEN_PCIDEV_BACKEND_CONTROLLER
++	bool "Controller"
++	depends on IA64
++	---help---
++	  This PCI backend virtualizes the PCI bus topology by providing a
++	  virtual bus per PCI root device.  Devices which are physically under
++	  the same root bus will appear on the same virtual bus.  For systems
++	  with complex I/O addressing, this is the only backend which supports
++	  extended I/O port spaces and MMIO translation offsets.  This backend
++	  also supports slot virtualization.  For example, a device at
++	  0000:01:02.1 will be re-assigned to 0000:00:00.0.  A second device
++	  at 0000:02:05.0 (behind a P2P bridge on bus 0000:01) will be
++	  re-assigned to 0000:00:01.0.  A third device at 0000:16:05.0 (under
++	  a different PCI root bus) will be re-assigned to 0000:01:00.0.
++
++endchoice
++
++config XEN_PCIDEV_BE_DEBUG
++	bool "PCI Backend Debugging"
++	depends on XEN_PCIDEV_BACKEND
++
++config XEN_TPMDEV_BACKEND
++	tristate "TPM-device backend driver"
++	depends on XEN_BACKEND
++	help
++	  The TPM-device backend driver allows the kernel to export a
++	  virtual TPM interface to other guests.
++
++config XEN_SCSI_BACKEND
++	tristate "SCSI backend driver"
++	depends on SCSI && XEN_BACKEND
++	default m
++	help
++	  The SCSI backend driver allows the kernel to export its SCSI Devices
++	  to other guests via a high-performance shared-memory interface.
++
++config XEN_BLKDEV_FRONTEND
++	tristate "Block-device frontend driver"
++	default y
++	help
++	  The block-device frontend driver allows the kernel to access block
++	  devices mounted within another guest OS. Unless you are building a
++	  dedicated device-driver domain or your master control domain
++	  (domain 0), you almost certainly want to say Y here.
++
++config XEN_NETDEV_FRONTEND
++	tristate "Network-device frontend driver"
++	depends on NET
++	default y
++	help
++	  The network-device frontend driver allows the kernel to access
++	  network interfaces within another guest OS. Unless you are building a
++	  dedicated device-driver domain or your master control domain
++	  (domain 0), you almost certainly want to say Y here.
++
++config XEN_SCSI_FRONTEND
++	tristate "SCSI frontend driver"
++	depends on SCSI
++	default m
++	help
++	  The SCSI frontend driver allows the kernel to access SCSI Devices
++	  within another guest OS.
++
++config XEN_GRANT_DEV
++	tristate "User-space granted page access driver"
++	default XEN_PRIVILEGED_GUEST
++	help
++	  Device for accessing (in user-space) pages that have been granted
++	  by other domains.
++
++config XEN_FRAMEBUFFER
++	tristate "Framebuffer-device frontend driver"
++	depends on FB
++	select FB_CFB_FILLRECT
++	select FB_CFB_COPYAREA
++	select FB_CFB_IMAGEBLIT
++	default y
++	help
++	  The framebuffer-device frontend driver allows the kernel to create a
++	  virtual framebuffer.  This framebuffer can be viewed in another
++	  domain.  Unless this domain has access to a real video card, you
++	  probably want to say Y here.
++
++config XEN_KEYBOARD
++	tristate "Keyboard-device frontend driver"
++	depends on XEN_FRAMEBUFFER && INPUT
++	default y
++	help
++	  The keyboard-device frontend driver allows the kernel to create a
++	  virtual keyboard.  This keyboard can then be driven by another
++	  domain.  If you've said Y to CONFIG_XEN_FRAMEBUFFER, you probably
++	  want to say Y here.
++
++config XEN_SCRUB_PAGES
++	bool "Scrub memory before freeing it to Xen"
++	default y
++	help
++	  Erase memory contents before freeing it back to Xen's global
++	  pool. This ensures that any secrets contained within that
++	  memory (e.g., private keys) cannot be found by other guests that
++	  may be running on the machine. Most people will want to say Y here.
++	  If security is not a concern then you may increase performance by
++	  saying N.
++
++config XEN_DISABLE_SERIAL
++	bool "Disable serial port drivers"
++	default y
++	help
++	  Disable serial port drivers, allowing the Xen console driver
++	  to provide a serial console at ttyS0.
++
++config XEN_SYSFS
++	tristate "Export Xen attributes in sysfs"
++	depends on SYSFS
++	select SYS_HYPERVISOR
++	default y
++	help
++	  Xen hypervisor attributes will show up under /sys/hypervisor/.
++
++choice
++	prompt "Xen version compatibility"
++	default XEN_COMPAT_030002_AND_LATER
++
++	config XEN_COMPAT_030002_AND_LATER
++		bool "3.0.2 and later"
++
++	config XEN_COMPAT_030004_AND_LATER
++		bool "3.0.4 and later"
++
++	config XEN_COMPAT_030100_AND_LATER
++		bool "3.1.0 and later"
++
++	config XEN_COMPAT_LATEST_ONLY
++		bool "no compatibility code"
++
++endchoice
++
++config XEN_COMPAT
++	hex
++	default 0xffffff if XEN_COMPAT_LATEST_ONLY
++	default 0x030100 if XEN_COMPAT_030100_AND_LATER
++	default 0x030004 if XEN_COMPAT_030004_AND_LATER
++	default 0x030002 if XEN_COMPAT_030002_AND_LATER
++	default 0
++
++endmenu
 +
-+	p->thread.rsp = (unsigned long) childregs;
-+	p->thread.rsp0 = (unsigned long) (childregs+1);
-+	p->thread.userrsp = me->thread.userrsp; 
++config HAVE_IRQ_IGNORE_UNHANDLED
++	def_bool y
 +
-+	set_tsk_thread_flag(p, TIF_FORK);
++config NO_IDLE_HZ
++	def_bool y
 +
-+	p->thread.fs = me->thread.fs;
-+	p->thread.gs = me->thread.gs;
++config XEN_SMPBOOT
++	def_bool y
++	depends on SMP && !PPC_XEN
 +
-+	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
-+	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
-+	asm("mov %%es,%0" : "=m" (p->thread.es));
-+	asm("mov %%ds,%0" : "=m" (p->thread.ds));
++config XEN_BALLOON
++	def_bool y
++	depends on !PPC_XEN
 +
-+	if (unlikely(me->thread.io_bitmap_ptr != NULL)) { 
-+		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
-+		if (!p->thread.io_bitmap_ptr) {
-+			p->thread.io_bitmap_max = 0;
-+			return -ENOMEM;
-+		}
-+		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
-+				IO_BITMAP_BYTES);
-+	} 
++config XEN_XENCOMM
++	bool
 +
-+	/*
-+	 * Set a new TLS for the child thread?
-+	 */
-+	if (clone_flags & CLONE_SETTLS) {
-+#ifdef CONFIG_IA32_EMULATION
-+		if (test_thread_flag(TIF_IA32))
-+			err = ia32_child_tls(p, childregs); 
-+		else 			
-+#endif	 
-+			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); 
-+		if (err) 
-+			goto out;
-+	}
-+        p->thread.iopl = current->thread.iopl;
++config XEN_DEVMEM
++	def_bool y
 +
-+	err = 0;
-+out:
-+	if (err && p->thread.io_bitmap_ptr) {
-+		kfree(p->thread.io_bitmap_ptr);
-+		p->thread.io_bitmap_max = 0;
-+	}
-+	return err;
-+}
++endif
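The XEN_COMPAT values above pack the oldest supported Xen version into one byte per component, so 0x030002 means 3.0.2 and 0xffffff disables all compatibility code. A small self-contained example of the decoding (illustrative, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned int compat = 0x030002;	/* XEN_COMPAT_030002_AND_LATER */

	printf("oldest supported Xen: %u.%u.%u\n",
	       (compat >> 16) & 0xff, (compat >> 8) & 0xff, compat & 0xff);
	return 0;	/* prints "oldest supported Xen: 3.0.2" */
}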
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/Makefile
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,22 @@
++obj-y	+= core/
++obj-y	+= console/
++obj-y	+= evtchn/
++obj-y	+= xenbus/
++obj-y	+= char/
 +
-+static inline void __save_init_fpu( struct task_struct *tsk )
-+{
-+	asm volatile( "rex64 ; fxsave %0 ; fnclex"
-+		      : "=m" (tsk->thread.i387.fxsave));
-+	tsk->thread_info->status &= ~TS_USEDFPU;
-+}
++obj-y	+= util.o
++obj-$(CONFIG_XEN_BALLOON)		+= balloon/
++obj-$(CONFIG_XEN_BLKDEV_BACKEND)	+= blkback/
++obj-$(CONFIG_XEN_BLKDEV_TAP)		+= blktap/
++obj-$(CONFIG_XEN_NETDEV_BACKEND)	+= netback/
++obj-$(CONFIG_XEN_TPMDEV_BACKEND)	+= tpmback/
++obj-$(CONFIG_XEN_BLKDEV_FRONTEND)	+= blkfront/
++obj-$(CONFIG_XEN_NETDEV_FRONTEND)	+= netfront/
++obj-$(CONFIG_XEN_PCIDEV_BACKEND)	+= pciback/
++obj-$(CONFIG_XEN_PCIDEV_FRONTEND)	+= pcifront/
++obj-$(CONFIG_XEN_FRAMEBUFFER)		+= fbfront/
++obj-$(CONFIG_XEN_KEYBOARD)		+= fbfront/
++obj-$(CONFIG_XEN_SCSI_BACKEND)		+= scsiback/
++obj-$(CONFIG_XEN_SCSI_FRONTEND)		+= scsifront/
++obj-$(CONFIG_XEN_PRIVCMD)	+= privcmd/
++obj-$(CONFIG_XEN_GRANT_DEV)	+= gntdev/
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/balloon/Makefile
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/balloon/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,2 @@
 +
-+/*
-+ *	switch_to(x,y) should switch tasks from x to y.
++obj-y := balloon.o sysfs.o
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/balloon/balloon.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/balloon/balloon.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,724 @@
++/******************************************************************************
++ * balloon.c
 + *
-+ * This could still be optimized: 
-+ * - fold all the options into a flag word and test it with a single test.
-+ * - could test fs/gs bitsliced
++ * Xen balloon driver - enables returning/claiming memory to/from Xen.
 + *
-+ * Kprobes not supported here. Set the probe on schedule instead.
++ * Copyright (c) 2003, B Dragovic
++ * Copyright (c) 2003-2004, M Williamson, K Fraser
++ * Copyright (c) 2005 Dan M. Smith, IBM Corporation
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
 + */
-+__kprobes struct task_struct *
-+__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
-+{
-+	struct thread_struct *prev = &prev_p->thread,
-+				 *next = &next_p->thread;
-+	int cpu = smp_processor_id();  
-+#ifndef CONFIG_X86_NO_TSS
-+	struct tss_struct *tss = &per_cpu(init_tss, cpu);
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/errno.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/smp_lock.h>
++#include <linux/pagemap.h>
++#include <linux/bootmem.h>
++#include <linux/highmem.h>
++#include <linux/vmalloc.h>
++#include <linux/mutex.h>
++#include <xen/xen_proc.h>
++#include <asm/hypervisor.h>
++#include <xen/balloon.h>
++#include <xen/interface/memory.h>
++#include <asm/maddr.h>
++#include <asm/page.h>
++#include <asm/pgalloc.h>
++#include <asm/pgtable.h>
++#include <asm/uaccess.h>
++#include <asm/tlb.h>
++#include <linux/highmem.h>
++#include <linux/list.h>
++#include <xen/xenbus.h>
++#include "common.h"
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
 +#endif
-+	struct physdev_set_iopl iopl_op;
-+	struct physdev_set_iobitmap iobmp_op;
-+	multicall_entry_t _mcl[8], *mcl = _mcl;
 +
-+	/*
-+	 * This is basically '__unlazy_fpu', except that we queue a
-+	 * multicall to indicate FPU task switch, rather than
-+	 * synchronously trapping to Xen.
-+	 * The AMD workaround requires it to be after DS reload, or
-+	 * after DS has been cleared, which we do in __prepare_arch_switch.
-+	 */
-+	if (prev_p->thread_info->status & TS_USEDFPU) {
-+		__save_init_fpu(prev_p); /* _not_ save_init_fpu() */
-+		mcl->op      = __HYPERVISOR_fpu_taskswitch;
-+		mcl->args[0] = 1;
-+		mcl++;
-+	}
++#ifdef CONFIG_PROC_FS
++static struct proc_dir_entry *balloon_pde;
++#endif
 +
-+	/*
-+	 * Reload esp0, LDT and the page table pointer:
-+	 */
-+	mcl->op      = __HYPERVISOR_stack_switch;
-+	mcl->args[0] = __KERNEL_DS;
-+	mcl->args[1] = next->rsp0;
-+	mcl++;
++static DEFINE_MUTEX(balloon_mutex);
 +
-+	/*
-+	 * Load the per-thread Thread-Local Storage descriptor.
-+	 * This is load_TLS(next, cpu) with multicalls.
-+	 */
-+#define C(i) do {							\
-+	if (unlikely(next->tls_array[i] != prev->tls_array[i])) {	\
-+		mcl->op      = __HYPERVISOR_update_descriptor;		\
-+		mcl->args[0] = virt_to_machine(				\
-+			&cpu_gdt(cpu)[GDT_ENTRY_TLS_MIN + i]);		\
-+		mcl->args[1] = next->tls_array[i];			\
-+		mcl++;							\
-+	}								\
-+} while (0)
-+	C(0); C(1); C(2);
-+#undef C
++/*
++ * Protects atomic reservation decrease/increase against concurrent increases.
++ * Also protects non-atomic updates of current_pages and driver_pages, and
++ * balloon lists.
++ */
++DEFINE_SPINLOCK(balloon_lock);
 +
-+	if (unlikely(prev->iopl != next->iopl)) {
-+		iopl_op.iopl = (next->iopl == 0) ? 1 : next->iopl;
-+		mcl->op      = __HYPERVISOR_physdev_op;
-+		mcl->args[0] = PHYSDEVOP_set_iopl;
-+		mcl->args[1] = (unsigned long)&iopl_op;
-+		mcl++;
-+	}
++struct balloon_stats balloon_stats;
 +
-+	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
-+		set_xen_guest_handle(iobmp_op.bitmap,
-+				     (char *)next->io_bitmap_ptr);
-+		iobmp_op.nr_ports = next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
-+		mcl->op      = __HYPERVISOR_physdev_op;
-+		mcl->args[0] = PHYSDEVOP_set_iobitmap;
-+		mcl->args[1] = (unsigned long)&iobmp_op;
-+		mcl++;
-+	}
++/* We increase/decrease in batches which fit in a page */
++static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];
 +
-+	(void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
-+	/* 
-+	 * Switch DS and ES.
-+	 * This won't pick up thread selector changes, but I guess that is ok.
-+	 */
-+	if (unlikely(next->es))
-+		loadsegment(es, next->es); 
-+	
-+	if (unlikely(next->ds))
-+		loadsegment(ds, next->ds);
++/* VM /proc information for memory */
++extern unsigned long totalram_pages;
 +
-+	/* 
-+	 * Switch FS and GS.
-+	 */
-+	if (unlikely(next->fsindex))
-+		loadsegment(fs, next->fsindex);
++#ifndef MODULE
++extern unsigned long totalhigh_pages;
++#define inc_totalhigh_pages() (totalhigh_pages++)
++#define dec_totalhigh_pages() (totalhigh_pages--)
++#else
++#define inc_totalhigh_pages() ((void)0)
++#define dec_totalhigh_pages() ((void)0)
++#endif
 +
-+	if (next->fs)
-+		HYPERVISOR_set_segment_base(SEGBASE_FS, next->fs); 
-+	
-+	if (unlikely(next->gsindex))
-+		load_gs_index(next->gsindex);
++/* List of ballooned pages, threaded through the mem_map array. */
++static LIST_HEAD(ballooned_pages);
 +
-+	if (next->gs)
-+		HYPERVISOR_set_segment_base(SEGBASE_GS_USER, next->gs); 
++/* Main work function, always executed in process context. */
++static void balloon_process(void *unused);
++static DECLARE_WORK(balloon_worker, balloon_process, NULL);
++static struct timer_list balloon_timer;
 +
-+	/* 
-+	 * Switch the PDA context.
-+	 */
-+	prev->userrsp = read_pda(oldrsp); 
-+	write_pda(oldrsp, next->userrsp); 
-+	write_pda(pcurrent, next_p); 
-+	write_pda(kernelstack,
-+		  task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
++/* When ballooning out (allocating memory to return to Xen) we don't really 
++   want the kernel to try too hard since that can trigger the oom killer. */
++#define GFP_BALLOON \
++	(GFP_HIGHUSER|__GFP_NOWARN|__GFP_NORETRY|__GFP_NOMEMALLOC|__GFP_COLD)
 +
-+	/*
-+	 * Now maybe reload the debug registers
-+	 */
-+	if (unlikely(next->debugreg7)) {
-+		set_debugreg(next->debugreg0, 0);
-+		set_debugreg(next->debugreg1, 1);
-+		set_debugreg(next->debugreg2, 2);
-+		set_debugreg(next->debugreg3, 3);
-+		/* no 4 and 5 */
-+		set_debugreg(next->debugreg6, 6);
-+		set_debugreg(next->debugreg7, 7);
-+	}
++#define PAGE_TO_LIST(p) (&(p)->lru)
++#define LIST_TO_PAGE(l) list_entry((l), struct page, lru)
++#define UNLIST_PAGE(p)				\
++	do {					\
++		list_del(PAGE_TO_LIST(p));	\
++		PAGE_TO_LIST(p)->next = NULL;	\
++		PAGE_TO_LIST(p)->prev = NULL;	\
++	} while(0)
 +
-+	return prev_p;
++#define IPRINTK(fmt, args...) \
++	printk(KERN_INFO "xen_mem: " fmt, ##args)
++#define WPRINTK(fmt, args...) \
++	printk(KERN_WARNING "xen_mem: " fmt, ##args)
++
++/* balloon_append: add the given page to the balloon. */
++static void balloon_append(struct page *page)
++{
++	/* Lowmem is re-populated first, so highmem pages go at list tail. */
++	if (PageHighMem(page)) {
++		list_add_tail(PAGE_TO_LIST(page), &ballooned_pages);
++		bs.balloon_high++;
++		dec_totalhigh_pages();
++	} else {
++		list_add(PAGE_TO_LIST(page), &ballooned_pages);
++		bs.balloon_low++;
++	}
 +}
 +
-+/*
-+ * sys_execve() executes a new program.
-+ */
-+asmlinkage 
-+long sys_execve(char __user *name, char __user * __user *argv,
-+		char __user * __user *envp, struct pt_regs regs)
++/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
++static struct page *balloon_retrieve(void)
 +{
-+	long error;
-+	char * filename;
++	struct page *page;
 +
-+	filename = getname(name);
-+	error = PTR_ERR(filename);
-+	if (IS_ERR(filename)) 
-+		return error;
-+	error = do_execve(filename, argv, envp, &regs); 
-+	if (error == 0) {
-+		task_lock(current);
-+		current->ptrace &= ~PT_DTRACE;
-+		task_unlock(current);
++	if (list_empty(&ballooned_pages))
++		return NULL;
++
++	page = LIST_TO_PAGE(ballooned_pages.next);
++	UNLIST_PAGE(page);
++
++	if (PageHighMem(page)) {
++		bs.balloon_high--;
++		inc_totalhigh_pages();
 +	}
-+	putname(filename);
-+	return error;
++	else
++		bs.balloon_low--;
++
++	return page;
 +}
 +
-+void set_personality_64bit(void)
++static struct page *balloon_first_page(void)
 +{
-+	/* inherit personality from parent */
-+
-+	/* Make sure to be in 64bit mode */
-+	clear_thread_flag(TIF_IA32); 
++	if (list_empty(&ballooned_pages))
++		return NULL;
++	return LIST_TO_PAGE(ballooned_pages.next);
++}
 +
-+	/* TBD: overwrites user setup. Should have two bits.
-+	   But 64bit processes have always behaved this way,
-+	   so it's not too bad. The main problem is just that
-+   	   32bit childs are affected again. */
-+	current->personality &= ~READ_IMPLIES_EXEC;
++static struct page *balloon_next_page(struct page *page)
++{
++	struct list_head *next = PAGE_TO_LIST(page)->next;
++	if (next == &ballooned_pages)
++		return NULL;
++	return LIST_TO_PAGE(next);
 +}
 +
-+asmlinkage long sys_fork(struct pt_regs *regs)
++static inline void balloon_free_page(struct page *page)
 +{
-+	return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
++#ifndef MODULE
++	if (put_page_testzero(page))
++		free_cold_page(page);
++#else
++	/* free_cold_page() is not being exported. */
++	__free_page(page);
++#endif
 +}
 +
-+asmlinkage long
-+sys_clone(unsigned long clone_flags, unsigned long newsp,
-+	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
++static void balloon_alarm(unsigned long unused)
 +{
-+	if (!newsp)
-+		newsp = regs->rsp;
-+	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
++	schedule_work(&balloon_worker);
 +}
 +
-+/*
-+ * This is trivial, and on the face of it looks like it
-+ * could equally well be done in user mode.
-+ *
-+ * Not so, for quite unobvious reasons - register pressure.
-+ * In user mode vfork() cannot have a stack frame, and if
-+ * done by calling the "clone()" system call directly, you
-+ * do not have enough call-clobbered registers to hold all
-+ * the information you need.
-+ */
-+asmlinkage long sys_vfork(struct pt_regs *regs)
++static unsigned long current_target(void)
 +{
-+	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
-+		    NULL, NULL);
++	unsigned long target = min(bs.target_pages, bs.hard_limit);
++	if (target > (bs.current_pages + bs.balloon_low + bs.balloon_high))
++		target = bs.current_pages + bs.balloon_low + bs.balloon_high;
++	return target;
 +}
 +
-+unsigned long get_wchan(struct task_struct *p)
++static unsigned long minimum_target(void)
 +{
-+	unsigned long stack;
-+	u64 fp,rip;
-+	int count = 0;
++#ifndef CONFIG_XEN
++#define max_pfn num_physpages
++#endif
++	unsigned long min_pages, curr_pages = current_target();
 +
-+	if (!p || p == current || p->state==TASK_RUNNING)
-+		return 0; 
-+	stack = (unsigned long)task_stack_page(p);
-+	if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
-+		return 0;
-+	fp = *(u64 *)(p->thread.rsp);
-+	do { 
-+		if (fp < (unsigned long)stack ||
-+		    fp > (unsigned long)stack+THREAD_SIZE)
-+			return 0; 
-+		rip = *(u64 *)(fp+8); 
-+		if (!in_sched_functions(rip))
-+			return rip; 
-+		fp = *(u64 *)fp; 
-+	} while (count++ < 16); 
-+	return 0;
++#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
++	/* Simple continuous piecewise linear function:
++	 *  max MiB -> min MiB	gradient
++	 *       0	   0
++	 *      16	  16
++	 *      32	  24
++	 *     128	  72	(1/2)
++	 *     512	 168	(1/4)
++	 *    2048	 360	(1/8)
++	 *    8192	 552	(1/32)
++	 *   32768	1320
++	 *  131072	4392
++	 */
++	if (max_pfn < MB2PAGES(128))
++		min_pages = MB2PAGES(8) + (max_pfn >> 1);
++	else if (max_pfn < MB2PAGES(512))
++		min_pages = MB2PAGES(40) + (max_pfn >> 2);
++	else if (max_pfn < MB2PAGES(2048))
++		min_pages = MB2PAGES(104) + (max_pfn >> 3);
++	else
++		min_pages = MB2PAGES(296) + (max_pfn >> 5);
++#undef MB2PAGES
++
++	/* Don't enforce growth */
++	return min(min_pages, curr_pages);
++#ifndef CONFIG_XEN
++#undef max_pfn
++#endif
 +}
 +
-+long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
-+{ 
-+	int ret = 0; 
-+	int doit = task == current;
-+	int cpu;
++static int increase_reservation(unsigned long nr_pages)
++{
++	unsigned long  pfn, i, flags;
++	struct page   *page;
++	long           rc;
++	struct xen_memory_reservation reservation = {
++		.address_bits = 0,
++		.extent_order = 0,
++		.domid        = DOMID_SELF
++	};
 +
-+	switch (code) { 
-+	case ARCH_SET_GS:
-+		if (addr >= TASK_SIZE_OF(task))
-+			return -EPERM; 
-+		cpu = get_cpu();
-+		/* handle small bases via the GDT because that's faster to 
-+		   switch. */
-+		if (addr <= 0xffffffff) {  
-+			set_32bit_tls(task, GS_TLS, addr); 
-+			if (doit) { 
-+				load_TLS(&task->thread, cpu);
-+				load_gs_index(GS_TLS_SEL); 
-+			}
-+			task->thread.gsindex = GS_TLS_SEL; 
-+			task->thread.gs = 0;
-+		} else { 
-+			task->thread.gsindex = 0;
-+			task->thread.gs = addr;
-+			if (doit) {
-+				load_gs_index(0);
-+				ret = HYPERVISOR_set_segment_base(
-+					SEGBASE_GS_USER, addr);
-+			} 
-+		}
-+		put_cpu();
-+		break;
-+	case ARCH_SET_FS:
-+		/* Not strictly needed for fs, but do it for symmetry
-+		   with gs */
-+		if (addr >= TASK_SIZE_OF(task))
-+			return -EPERM; 
-+		cpu = get_cpu();
-+		/* handle small bases via the GDT because that's faster to 
-+		   switch. */
-+		if (addr <= 0xffffffff) { 
-+			set_32bit_tls(task, FS_TLS, addr);
-+			if (doit) { 
-+				load_TLS(&task->thread, cpu); 
-+				asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
-+			}
-+			task->thread.fsindex = FS_TLS_SEL;
-+			task->thread.fs = 0;
-+		} else { 
-+			task->thread.fsindex = 0;
-+			task->thread.fs = addr;
-+			if (doit) {
-+				/* set the selector to 0 to not confuse
-+				   __switch_to */
-+				asm volatile("movl %0,%%fs" :: "r" (0));
-+                                ret = HYPERVISOR_set_segment_base(SEGBASE_FS,
-+								  addr);
-+			}
-+		}
-+		put_cpu();
-+		break;
-+	case ARCH_GET_FS: { 
-+		unsigned long base; 
-+		if (task->thread.fsindex == FS_TLS_SEL)
-+			base = read_32bit_tls(task, FS_TLS);
-+		else if (doit)
-+			rdmsrl(MSR_FS_BASE, base);
-+		else
-+			base = task->thread.fs;
-+		ret = put_user(base, (unsigned long __user *)addr); 
-+		break; 
++	if (nr_pages > ARRAY_SIZE(frame_list))
++		nr_pages = ARRAY_SIZE(frame_list);
++
++	balloon_lock(flags);
++
++	page = balloon_first_page();
++	for (i = 0; i < nr_pages; i++) {
++		BUG_ON(page == NULL);
++		frame_list[i] = page_to_pfn(page);
++		page = balloon_next_page(page);
 +	}
-+	case ARCH_GET_GS: { 
-+		unsigned long base;
-+		unsigned gsindex;
-+		if (task->thread.gsindex == GS_TLS_SEL)
-+			base = read_32bit_tls(task, GS_TLS);
-+		else if (doit) {
-+ 			asm("movl %%gs,%0" : "=r" (gsindex));
-+			if (gsindex)
-+				rdmsrl(MSR_KERNEL_GS_BASE, base);
-+			else
-+				base = task->thread.gs;
++
++	set_xen_guest_handle(reservation.extent_start, frame_list);
++	reservation.nr_extents   = nr_pages;
++	rc = HYPERVISOR_memory_op(
++		XENMEM_populate_physmap, &reservation);
++	if (rc < nr_pages) {
++		if (rc > 0) {
++			int ret;
++
++			/* We hit the Xen hard limit: reprobe. */
++			reservation.nr_extents = rc;
++			ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++					&reservation);
++			BUG_ON(ret != rc);
 +		}
-+		else
-+			base = task->thread.gs;
-+		ret = put_user(base, (unsigned long __user *)addr); 
-+		break;
++		if (rc >= 0)
++			bs.hard_limit = (bs.current_pages + rc -
++					 bs.driver_pages);
++		goto out;
 +	}
 +
-+	default:
-+		ret = -EINVAL;
-+		break;
-+	} 
++	for (i = 0; i < nr_pages; i++) {
++		page = balloon_retrieve();
++		BUG_ON(page == NULL);
 +
-+	return ret;	
-+} 
++		pfn = page_to_pfn(page);
++		BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) &&
++		       phys_to_machine_mapping_valid(pfn));
 +
-+long sys_arch_prctl(int code, unsigned long addr)
-+{
-+	return do_arch_prctl(current, code, addr);
-+} 
++		set_phys_to_machine(pfn, frame_list[i]);
 +
-+/* 
-+ * Capture the user space registers if the task is not running (in user space)
-+ */
-+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
-+{
-+	struct pt_regs *pp, ptregs;
++#ifdef CONFIG_XEN
++		/* Link back into the page tables if not highmem. */
++		if (pfn < max_low_pfn) {
++			int ret;
++			ret = HYPERVISOR_update_va_mapping(
++				(unsigned long)__va(pfn << PAGE_SHIFT),
++				pfn_pte_ma(frame_list[i], PAGE_KERNEL),
++				0);
++			BUG_ON(ret);
++		}
++#endif
 +
-+	pp = task_pt_regs(tsk);
++		/* Relinquish the page back to the allocator. */
++		ClearPageReserved(page);
++		init_page_count(page);
++		balloon_free_page(page);
++	}
 +
-+	ptregs = *pp; 
-+	ptregs.cs &= 0xffff;
-+	ptregs.ss &= 0xffff;
++	bs.current_pages += nr_pages;
++	totalram_pages = bs.current_pages;
 +
-+	elf_core_copy_regs(regs, &ptregs);
-+ 
-+        boot_option_idle_override = 1;
-+	return 1;
-+}
++ out:
++	balloon_unlock(flags);
 +
-+unsigned long arch_align_stack(unsigned long sp)
-+{
-+	if (randomize_va_space)
-+		sp -= get_random_int() % 8192;
-+	return sp & ~0xf;
++	return 0;
 +}
 +
-+#ifndef CONFIG_SMP
-+void _restore_vcpu(void)
++static int decrease_reservation(unsigned long nr_pages)
 +{
-+}
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/relocate_kernel.S tmp-linux-2.6-xen.patch/arch/x86_64/kernel/relocate_kernel.S
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/relocate_kernel.S	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/relocate_kernel.S	2007-10-14 01:51:15.000000000 +0200
-@@ -7,31 +7,195 @@
-  */
- 
- #include <linux/linkage.h>
-+#include <asm/page.h>
-+#include <asm/kexec.h>
- 
--	/*
--	 * Must be relocatable PIC code callable as a C function, that once
--	 * it starts can not use the previous processes stack.
--	 */
--	.globl relocate_new_kernel
-+/*
-+ * Must be relocatable PIC code callable as a C function
-+ */
++	unsigned long  pfn, i, flags;
++	struct page   *page;
++	void          *v;
++	int            need_sleep = 0;
++	int ret;
++	struct xen_memory_reservation reservation = {
++		.address_bits = 0,
++		.extent_order = 0,
++		.domid        = DOMID_SELF
++	};
 +
-+#define PTR(x) (x << 3)
-+#define PAGE_ALIGNED (1 << PAGE_SHIFT)
-+#define PAGE_ATTR 0x63 /* _PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY */
++	if (nr_pages > ARRAY_SIZE(frame_list))
++		nr_pages = ARRAY_SIZE(frame_list);
 +
-+	.text
-+	.align PAGE_ALIGNED
- 	.code64
-+	.globl relocate_kernel
-+relocate_kernel:
-+	/* %rdi indirection_page
-+	 * %rsi page_list
-+	 * %rdx start address
-+	 */
++	for (i = 0; i < nr_pages; i++) {
++		if ((page = alloc_page(GFP_BALLOON)) == NULL) {
++			nr_pages = i;
++			need_sleep = 1;
++			break;
++		}
 +
-+	/* map the control page at its virtual address */
++		pfn = page_to_pfn(page);
++		frame_list[i] = pfn_to_mfn(pfn);
 +
-+	movq	$0x0000ff8000000000, %r10        /* mask */
-+	mov	$(39 - 3), %cl                   /* bits to shift */
-+	movq	PTR(VA_CONTROL_PAGE)(%rsi), %r11 /* address to map */
++		if (!PageHighMem(page)) {
++			v = phys_to_virt(pfn << PAGE_SHIFT);
++			scrub_pages(v, 1);
++#ifdef CONFIG_XEN
++			ret = HYPERVISOR_update_va_mapping(
++				(unsigned long)v, __pte_ma(0), 0);
++			BUG_ON(ret);
++#endif
++		}
++#ifdef CONFIG_XEN_SCRUB_PAGES
++		else {
++			v = kmap(page);
++			scrub_pages(v, 1);
++			kunmap(page);
++		}
++#endif
++	}
 +
-+	movq	%r11, %r9
-+	andq	%r10, %r9
-+	shrq	%cl, %r9
++#ifdef CONFIG_XEN
++	/* Ensure that ballooned highmem pages don't have kmaps. */
++	kmap_flush_unused();
++	flush_tlb_all();
++#endif
 +
-+	movq	PTR(VA_PGD)(%rsi), %r8
-+	addq	%r8, %r9
-+	movq	PTR(PA_PUD_0)(%rsi), %r8
-+	orq	$PAGE_ATTR, %r8
-+	movq	%r8, (%r9)
++	balloon_lock(flags);
 +
-+	shrq	$9, %r10
-+	sub	$9, %cl
++	/* No more mappings: invalidate P2M and add to balloon. */
++	for (i = 0; i < nr_pages; i++) {
++		pfn = mfn_to_pfn(frame_list[i]);
++		set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
++		balloon_append(pfn_to_page(pfn));
++	}
 +
-+	movq	%r11, %r9
-+	andq	%r10, %r9
-+	shrq	%cl, %r9
++	set_xen_guest_handle(reservation.extent_start, frame_list);
++	reservation.nr_extents   = nr_pages;
++	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
++	BUG_ON(ret != nr_pages);
 +
-+	movq	PTR(VA_PUD_0)(%rsi), %r8
-+	addq	%r8, %r9
-+	movq	PTR(PA_PMD_0)(%rsi), %r8
-+	orq	$PAGE_ATTR, %r8
-+	movq	%r8, (%r9)
++	bs.current_pages -= nr_pages;
++	totalram_pages = bs.current_pages;
 +
-+	shrq	$9, %r10
-+	sub	$9, %cl
++	balloon_unlock(flags);
 +
-+	movq	%r11, %r9
-+	andq	%r10, %r9
-+	shrq	%cl, %r9
++	return need_sleep;
++}
 +
-+	movq	PTR(VA_PMD_0)(%rsi), %r8
-+	addq	%r8, %r9
-+	movq	PTR(PA_PTE_0)(%rsi), %r8
-+	orq	$PAGE_ATTR, %r8
-+	movq	%r8, (%r9)
++/*
++ * We avoid multiple worker processes conflicting via the balloon mutex.
++ * We may of course race updates of the target counts (which are protected
++ * by the balloon lock), or with changes to the Xen hard limit, but we will
++ * recover from these in time.
++ */
++static void balloon_process(void *unused)
++{
++	int need_sleep = 0;
++	long credit;
 +
-+	shrq	$9, %r10
-+	sub	$9, %cl
++	mutex_lock(&balloon_mutex);
 +
-+	movq	%r11, %r9
-+	andq	%r10, %r9
-+	shrq	%cl, %r9
++	do {
++		credit = current_target() - bs.current_pages;
++		if (credit > 0)
++			need_sleep = (increase_reservation(credit) != 0);
++		if (credit < 0)
++			need_sleep = (decrease_reservation(-credit) != 0);
 +
-+	movq	PTR(VA_PTE_0)(%rsi), %r8
-+	addq	%r8, %r9
-+	movq	PTR(PA_CONTROL_PAGE)(%rsi), %r8
-+	orq	$PAGE_ATTR, %r8
-+	movq	%r8, (%r9)
++#ifndef CONFIG_PREEMPT
++		if (need_resched())
++			schedule();
++#endif
++	} while ((credit != 0) && !need_sleep);
 +
-+	/* identity map the control page at its physical address */
++	/* Schedule more work if there is some still to be done. */
++	if (current_target() != bs.current_pages)
++		mod_timer(&balloon_timer, jiffies + HZ);
 +
-+	movq	$0x0000ff8000000000, %r10        /* mask */
-+	mov	$(39 - 3), %cl                   /* bits to shift */
-+	movq	PTR(PA_CONTROL_PAGE)(%rsi), %r11 /* address to map */
++	mutex_unlock(&balloon_mutex);
++}
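/* Illustrative trace of balloon_process() above (not from the patch):
 * with bs.current_pages = 131072 (512 MiB at 4 KiB pages) and a new
 * target of 98304 pages (384 MiB), credit = 98304 - 131072 = -32768,
 * so decrease_reservation(32768) runs; frame_list holds
 * PAGE_SIZE / sizeof(unsigned long) = 512 frames per batch on 64-bit,
 * so roughly 64 batches hand the memory back to Xen. */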
 +
-+	movq	%r11, %r9
-+	andq	%r10, %r9
-+	shrq	%cl, %r9
++/* Resets the Xen limit, sets new target, and kicks off processing. */
++void balloon_set_new_target(unsigned long target)
++{
++	/* No need for lock. Not read-modify-write updates. */
++	bs.hard_limit   = ~0UL;
++	bs.target_pages = max(target, minimum_target());
++	schedule_work(&balloon_worker);
++}
 +
-+	movq	PTR(VA_PGD)(%rsi), %r8
-+	addq	%r8, %r9
-+	movq	PTR(PA_PUD_1)(%rsi), %r8
-+	orq	$PAGE_ATTR, %r8
-+	movq	%r8, (%r9)
++static struct xenbus_watch target_watch =
++{
++	.node = "memory/target"
++};
 +
-+	shrq	$9, %r10
-+	sub	$9, %cl
++/* React to a change in the target key */
++static void watch_target(struct xenbus_watch *watch,
++			 const char **vec, unsigned int len)
++{
++	unsigned long long new_target;
++	int err;
 +
-+	movq	%r11, %r9
-+	andq	%r10, %r9
-+	shrq	%cl, %r9
++	err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
++	if (err != 1) {
++		/* This is ok (for domain0 at least) - so just return */
++		return;
++	}
 +
-+	movq	PTR(VA_PUD_1)(%rsi), %r8
-+	addq	%r8, %r9
-+	movq	PTR(PA_PMD_1)(%rsi), %r8
-+	orq	$PAGE_ATTR, %r8
-+	movq	%r8, (%r9)
++	/* The given memory/target value is in KiB, so it needs converting to
++	 * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
++	 */
++	balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
++}
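/* Worked example for the conversion above (illustrative): with 4 KiB
 * pages PAGE_SHIFT is 12, so the shift is 12 - 10 = 2.  A xenstore
 * memory/target of 524288 KiB (512 MiB) becomes 524288 >> 2 = 131072
 * pages. */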
 +
-+	shrq	$9, %r10
-+	sub	$9, %cl
++static int balloon_init_watcher(struct notifier_block *notifier,
++				unsigned long event,
++				void *data)
++{
++	int err;
 +
-+	movq	%r11, %r9
-+	andq	%r10, %r9
-+	shrq	%cl, %r9
++	err = register_xenbus_watch(&target_watch);
++	if (err)
++		printk(KERN_ERR "Failed to set balloon watcher\n");
 +
-+	movq	PTR(VA_PMD_1)(%rsi), %r8
-+	addq	%r8, %r9
-+	movq	PTR(PA_PTE_1)(%rsi), %r8
-+	orq	$PAGE_ATTR, %r8
-+	movq	%r8, (%r9)
++	return NOTIFY_DONE;
++}
 +
-+	shrq	$9, %r10
-+	sub	$9, %cl
++#ifdef CONFIG_PROC_FS
++static int balloon_write(struct file *file, const char __user *buffer,
++			 unsigned long count, void *data)
++{
++	char memstring[64], *endchar;
++	unsigned long long target_bytes;
 +
-+	movq	%r11, %r9
-+	andq	%r10, %r9
-+	shrq	%cl, %r9
++	if (!capable(CAP_SYS_ADMIN))
++		return -EPERM;
 +
-+	movq	PTR(VA_PTE_1)(%rsi), %r8
-+	addq	%r8, %r9
-+	movq	PTR(PA_CONTROL_PAGE)(%rsi), %r8
-+	orq	$PAGE_ATTR, %r8
-+	movq	%r8, (%r9)
++	if (count <= 1)
++		return -EBADMSG; /* runt */
++	if (count > sizeof(memstring))
++		return -EFBIG;   /* too long */
 +
- relocate_new_kernel:
--	/* %rdi page_list
--	 * %rsi reboot_code_buffer
-+	/* %rdi indirection_page
-+	 * %rsi page_list
- 	 * %rdx start address
--	 * %rcx page_table
--	 * %r8  arg5
--	 * %r9  arg6
- 	 */
- 
- 	/* zero out flags, and disable interrupts */
- 	pushq $0
- 	popfq
- 
--	/* set a new stack at the bottom of our page... */
--	lea   4096(%rsi), %rsp
--
--	/* store the parameters back on the stack */
--	pushq	%rdx /* store the start address */
-+	/* get physical address of control page now */
-+	/* this is impossible after page table switch */
-+	movq	PTR(PA_CONTROL_PAGE)(%rsi), %r8
++	if (copy_from_user(memstring, buffer, count))
++		return -EFAULT;
++	memstring[sizeof(memstring)-1] = '\0';
 +
-+	/* get physical address of page table now too */
-+	movq	PTR(PA_TABLE_PAGE)(%rsi), %rcx
++	target_bytes = memparse(memstring, &endchar);
++	balloon_set_new_target(target_bytes >> PAGE_SHIFT);
 +
-+	/* switch to new set of page tables */
-+	movq	PTR(PA_PGD)(%rsi), %r9
-+	movq	%r9, %cr3
++	return count;
++}
 +
-+	/* setup idt */
-+	movq    %r8, %rax
-+	addq    $(idt_80 - relocate_kernel), %rax
-+	lidtq   (%rax)
++static int balloon_read(char *page, char **start, off_t off,
++			int count, int *eof, void *data)
++{
++	int len;
 +
-+	/* setup gdt */
-+	movq    %r8, %rax
-+	addq    $(gdt - relocate_kernel), %rax
-+	movq    %r8, %r9
-+	addq    $((gdt_80 - relocate_kernel) + 2), %r9
-+	movq    %rax, (%r9)
++	len = sprintf(
++		page,
++		"Current allocation: %8lu kB\n"
++		"Requested target:   %8lu kB\n"
++		"Low-mem balloon:    %8lu kB\n"
++		"High-mem balloon:   %8lu kB\n"
++		"Driver pages:       %8lu kB\n"
++		"Xen hard limit:     ",
++		PAGES2KB(bs.current_pages), PAGES2KB(bs.target_pages), 
++		PAGES2KB(bs.balloon_low), PAGES2KB(bs.balloon_high),
++		PAGES2KB(bs.driver_pages));
 +
-+	movq    %r8, %rax
-+	addq    $(gdt_80 - relocate_kernel), %rax
-+	lgdtq   (%rax)
++	if (bs.hard_limit != ~0UL)
++		len += sprintf(page + len, "%8lu kB\n",
++			       PAGES2KB(bs.hard_limit));
++	else
++		len += sprintf(page + len, "     ??? kB\n");
 +
-+	/* setup data segment registers */
-+	xorl	%eax, %eax
-+	movl    %eax, %ds
-+	movl    %eax, %es
-+	movl    %eax, %fs
-+	movl    %eax, %gs
-+	movl    %eax, %ss
-+	
-+	/* setup a new stack at the end of the physical control page */
-+	lea	4096(%r8), %rsp
++	*eof = 1;
++	return len;
++}
++#endif
 +
-+	/* load new code segment and jump to identity mapped page */
-+	movq	%r8, %rax
-+	addq    $(identity_mapped - relocate_kernel), %rax
-+	pushq	$(gdt_cs - gdt)
-+	pushq	%rax
-+	lretq
++static struct notifier_block xenstore_notifier;
 +
-+identity_mapped:
-+	/* store the start address on the stack */
-+	pushq   %rdx
- 
- 	/* Set cr0 to a known state:
- 	 * 31 1 == Paging enabled
-@@ -134,10 +298,19 @@ relocate_new_kernel:
- 	xorq	%r13, %r13
- 	xorq	%r14, %r14
- 	xorq	%r15, %r15
--
- 	ret
--relocate_new_kernel_end:
- 
--	.globl relocate_new_kernel_size
--relocate_new_kernel_size:
--	.quad relocate_new_kernel_end - relocate_new_kernel
-+	.align  16
-+gdt:
-+	.quad	0x0000000000000000	/* NULL descriptor */
-+gdt_cs:
-+	.quad   0x00af9a000000ffff
-+gdt_end:
++static int __init balloon_init(void)
++{
++#if defined(CONFIG_X86) && defined(CONFIG_XEN) 
++	unsigned long pfn;
++	struct page *page;
++#endif
 +
-+gdt_80:
-+	.word	gdt_end - gdt - 1	/* limit */
-+	.quad	0			/* base - filled in by code above */
++	if (!is_running_on_xen())
++		return -ENODEV;
++
++	IPRINTK("Initialising balloon driver.\n");
 +
-+idt_80:
-+	.word	0			/* limit */
-+	.quad	0			/* base */
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/setup64-xen.c tmp-linux-2.6-xen.patch/arch/x86_64/kernel/setup64-xen.c
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/setup64-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/setup64-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,361 @@
-+/* 
-+ * X86-64 specific CPU setup.
-+ * Copyright (C) 1995  Linus Torvalds
-+ * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
-+ * See setup.c for older changelog.
-+ *
-+ * Jun Nakajima <jun.nakajima at intel.com> 
-+ *   Modified for Xen
-+ *
-+ */ 
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/string.h>
-+#include <linux/bootmem.h>
-+#include <linux/bitops.h>
-+#include <linux/module.h>
-+#include <asm/bootsetup.h>
-+#include <asm/pda.h>
-+#include <asm/pgtable.h>
-+#include <asm/processor.h>
-+#include <asm/desc.h>
-+#include <asm/atomic.h>
-+#include <asm/mmu_context.h>
-+#include <asm/smp.h>
-+#include <asm/i387.h>
-+#include <asm/percpu.h>
-+#include <asm/proto.h>
-+#include <asm/sections.h>
 +#ifdef CONFIG_XEN
-+#include <asm/hypervisor.h>
++	bs.current_pages = min(xen_start_info->nr_pages, max_pfn);
++	totalram_pages   = bs.current_pages;
++#else 
++	bs.current_pages = totalram_pages; 
++#endif
++	bs.target_pages  = bs.current_pages;
++	bs.balloon_low   = 0;
++	bs.balloon_high  = 0;
++	bs.driver_pages  = 0UL;
++	bs.hard_limit    = ~0UL;
++
++	init_timer(&balloon_timer);
++	balloon_timer.data = 0;
++	balloon_timer.function = balloon_alarm;
++
++#ifdef CONFIG_PROC_FS
++	if ((balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL) {
++		WPRINTK("Unable to create /proc/xen/balloon.\n");
++		return -1;
++	}
++
++	balloon_pde->read_proc  = balloon_read;
++	balloon_pde->write_proc = balloon_write;
++#endif
++	balloon_sysfs_init();
++
++#if defined(CONFIG_X86) && defined(CONFIG_XEN) 
++	/* Initialise the balloon with excess memory space. */
++	for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
++		page = pfn_to_page(pfn);
++		if (!PageReserved(page))
++			balloon_append(page);
++	}
 +#endif
 +
-+char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,};
++	target_watch.callback = watch_target;
++	xenstore_notifier.notifier_call = balloon_init_watcher;
++
++	register_xenstore_notifier(&xenstore_notifier);
++
++	return 0;
++}
 +
-+cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
++subsys_initcall(balloon_init);
 +
-+struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly;
-+EXPORT_SYMBOL(_cpu_pda);
-+struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned;
++static void __exit balloon_exit(void)
++{
++    /* XXX - release balloon here */
++    return; 
++}
 +
-+#ifndef CONFIG_X86_NO_IDT
-+struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; 
-+#endif
++module_exit(balloon_exit); 
 +
-+char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
++void balloon_update_driver_allowance(long delta)
++{
++	unsigned long flags;
 +
-+unsigned long __supported_pte_mask __read_mostly = ~0UL;
-+EXPORT_SYMBOL(__supported_pte_mask);
-+static int do_not_nx __cpuinitdata = 0;
++	balloon_lock(flags);
++	bs.driver_pages += delta;
++	balloon_unlock(flags);
++}
 +
-+/* noexec=on|off
-+Control non executable mappings for 64bit processes.
++#ifdef CONFIG_XEN
++static int dealloc_pte_fn(
++	pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
++{
++	unsigned long mfn = pte_mfn(*pte);
++	int ret;
++	struct xen_memory_reservation reservation = {
++		.nr_extents   = 1,
++		.extent_order = 0,
++		.domid        = DOMID_SELF
++	};
++	set_xen_guest_handle(reservation.extent_start, &mfn);
++	set_pte_at(&init_mm, addr, pte, __pte_ma(0));
++	set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY);
++	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
++	BUG_ON(ret != 1);
++	return 0;
++}
++#endif
 +
-+on	Enable(default)
-+off	Disable
-+*/ 
-+int __init nonx_setup(char *str)
++struct page **alloc_empty_pages_and_pagevec(int nr_pages)
 +{
-+	if (!strncmp(str, "on", 2)) {
-+                __supported_pte_mask |= _PAGE_NX; 
-+ 		do_not_nx = 0; 
-+	} else if (!strncmp(str, "off", 3)) {
-+		do_not_nx = 1;
-+		__supported_pte_mask &= ~_PAGE_NX;
-+        }
-+	return 1;
-+} 
-+__setup("noexec=", nonx_setup);	/* parsed early actually */
++	unsigned long flags;
++	void *v;
++	struct page *page, **pagevec;
++	int i, ret;
 +
-+int force_personality32 = 0; 
++	pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL);
++	if (pagevec == NULL)
++		return NULL;
 +
-+/* noexec32=on|off
-+Control non executable heap for 32bit processes.
-+To control the stack too use noexec=off
++	for (i = 0; i < nr_pages; i++) {
++		page = pagevec[i] = alloc_page(GFP_KERNEL|__GFP_COLD);
++		if (page == NULL)
++			goto err;
 +
-+on	PROT_READ does not imply PROT_EXEC for 32bit processes
-+off	PROT_READ implies PROT_EXEC (default)
-+*/
-+static int __init nonx32_setup(char *str)
-+{
-+	if (!strcmp(str, "on"))
-+		force_personality32 &= ~READ_IMPLIES_EXEC;
-+	else if (!strcmp(str, "off"))
-+		force_personality32 |= READ_IMPLIES_EXEC;
-+	return 1;
-+}
-+__setup("noexec32=", nonx32_setup);
++		v = page_address(page);
++		scrub_pages(v, 1);
 +
-+/*
-+ * Great future plan:
-+ * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
-+ * Always point %gs to its beginning
-+ */
-+void __init setup_per_cpu_areas(void)
-+{ 
-+	int i;
-+	unsigned long size;
++		balloon_lock(flags);
 +
-+#ifdef CONFIG_HOTPLUG_CPU
-+	prefill_possible_map();
++		if (xen_feature(XENFEAT_auto_translated_physmap)) {
++			unsigned long gmfn = page_to_pfn(page);
++			struct xen_memory_reservation reservation = {
++				.nr_extents   = 1,
++				.extent_order = 0,
++				.domid        = DOMID_SELF
++			};
++			set_xen_guest_handle(reservation.extent_start, &gmfn);
++			ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++						   &reservation);
++			if (ret == 1)
++				ret = 0; /* success */
++		} else {
++#ifdef CONFIG_XEN
++			ret = apply_to_page_range(&init_mm, (unsigned long)v,
++						  PAGE_SIZE, dealloc_pte_fn,
++						  NULL);
++#else
++			/* Cannot handle non-auto translate mode. */
++			ret = 1;
 +#endif
++		}
 +
-+	/* Copy section for each CPU (we discard the original) */
-+	size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
-+#ifdef CONFIG_MODULES
-+	if (size < PERCPU_ENOUGH_ROOM)
-+		size = PERCPU_ENOUGH_ROOM;
-+#endif
++		if (ret != 0) {
++			balloon_unlock(flags);
++			balloon_free_page(page);
++			goto err;
++		}
 +
-+	for_each_cpu_mask (i, cpu_possible_map) {
-+		char *ptr;
++		totalram_pages = --bs.current_pages;
 +
-+		if (!NODE_DATA(cpu_to_node(i))) {
-+			printk("cpu with no node %d, num_online_nodes %d\n",
-+			       i, num_online_nodes());
-+			ptr = alloc_bootmem(size);
-+		} else { 
-+			ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
-+		}
-+		if (!ptr)
-+			panic("Cannot allocate cpu data for CPU %d\n", i);
-+		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
-+		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
++		balloon_unlock(flags);
 +	}
-+} 
 +
++ out:
++	schedule_work(&balloon_worker);
 +#ifdef CONFIG_XEN
-+static void switch_pt(void)
-+{
-+	xen_pt_switch(__pa(init_level4_pgt));
-+        xen_new_user_pt(__pa(init_level4_user_pgt));
++	flush_tlb_all();
++#endif
++	return pagevec;
++
++ err:
++	balloon_lock(flags);
++	while (--i >= 0)
++		balloon_append(pagevec[i]);
++	balloon_unlock(flags);
++	kfree(pagevec);
++	pagevec = NULL;
++	goto out;
 +}
 +
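
alloc_empty_pages_and_pagevec above is all-or-nothing: on the first failure it unwinds exactly the pages already taken (while (--i >= 0)) and funnels back through the common out: exit so the balloon worker still gets kicked. The same goto-unwind idiom, reduced to plain malloc so it compiles and runs anywhere:

#include <stdio.h>
#include <stdlib.h>

/* Allocate nr buffers or none, unwinding on first failure, as in
 * alloc_empty_pages_and_pagevec(). */
static void **alloc_all(int nr, size_t sz)
{
	void **vec = malloc(sizeof(vec[0]) * nr);
	int i;

	if (vec == NULL)
		return NULL;

	for (i = 0; i < nr; i++) {
		vec[i] = malloc(sz);
		if (vec[i] == NULL)
			goto err;
	}
	return vec;

 err:
	while (--i >= 0)	/* free only what was actually allocated */
		free(vec[i]);
	free(vec);
	return NULL;
}

int main(void)
{
	void **v = alloc_all(8, 4096);
	printf("%s\n", v ? "all-or-nothing allocation succeeded" : "failed");
	return 0;
}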
-+void __cpuinit cpu_gdt_init(struct desc_ptr *gdt_descr)
++void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
 +{
-+	unsigned long frames[16];
-+	unsigned long va;
-+	int f;
++	unsigned long flags;
++	int i;
 +
-+	for (va = gdt_descr->address, f = 0;
-+	     va < gdt_descr->address + gdt_descr->size;
-+	     va += PAGE_SIZE, f++) {
-+		frames[f] = virt_to_mfn(va);
-+		make_page_readonly(
-+			(void *)va, XENFEAT_writable_descriptor_tables);
++	if (pagevec == NULL)
++		return;
++
++	balloon_lock(flags);
++	for (i = 0; i < nr_pages; i++) {
++		BUG_ON(page_count(pagevec[i]) != 1);
++		balloon_append(pagevec[i]);
 +	}
-+	if (HYPERVISOR_set_gdt(frames, gdt_descr->size /
-+                               sizeof (struct desc_struct)))
-+		BUG();
-+}
-+#else
-+static void switch_pt(void)
-+{
-+	asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
++	balloon_unlock(flags);
++
++	kfree(pagevec);
++
++	schedule_work(&balloon_worker);
 +}
 +
-+void __init cpu_gdt_init(struct desc_ptr *gdt_descr)
++void balloon_release_driver_page(struct page *page)
 +{
-+	asm volatile("lgdt %0" :: "m" (*gdt_descr));
-+	asm volatile("lidt %0" :: "m" (idt_descr));
++	unsigned long flags;
++
++	balloon_lock(flags);
++	balloon_append(page);
++	bs.driver_pages--;
++	balloon_unlock(flags);
++
++	schedule_work(&balloon_worker);
 +}
-+#endif
 +
-+void pda_init(int cpu)
-+{ 
-+	struct x8664_pda *pda = cpu_pda(cpu);
++EXPORT_SYMBOL_GPL(balloon_update_driver_allowance);
++EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec);
++EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec);
++EXPORT_SYMBOL_GPL(balloon_release_driver_page);
 +
-+	/* Setup up data that may be needed in __get_free_pages early */
-+	asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0)); 
-+#ifndef CONFIG_XEN
-+	wrmsrl(MSR_GS_BASE, pda);
-+#else
-+	HYPERVISOR_set_segment_base(SEGBASE_GS_KERNEL, (unsigned long)pda);
-+#endif
-+	pda->cpunumber = cpu; 
-+	pda->irqcount = -1;
-+	pda->kernelstack = 
-+		(unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE; 
-+	pda->active_mm = &init_mm;
-+	pda->mmu_state = 0;
++MODULE_LICENSE("Dual BSD/GPL");
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/balloon/common.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/balloon/common.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,58 @@
++/******************************************************************************
++ * balloon/common.h
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
 +
-+	if (cpu == 0) {
-+#ifdef CONFIG_XEN
-+		xen_init_pt();
-+#endif
-+		/* others are initialized in smpboot.c */
-+		pda->pcurrent = &init_task;
-+		pda->irqstackptr = boot_cpu_stack; 
-+	} else {
-+		pda->irqstackptr = (char *)
-+			__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
-+		if (!pda->irqstackptr)
-+			panic("cannot allocate irqstack for cpu %d", cpu); 
-+	}
++#ifndef __XEN_BALLOON_COMMON_H__
++#define __XEN_BALLOON_COMMON_H__
 +
-+	switch_pt();
++#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
 +
-+	pda->irqstackptr += IRQSTACKSIZE-64;
-+} 
++struct balloon_stats {
++	/* We aim for 'current allocation' == 'target allocation'. */
++	unsigned long current_pages;
++	unsigned long target_pages;
++	/* We may hit the hard limit in Xen. If we do then we remember it. */
++	unsigned long hard_limit;
++	/*
++	 * Drivers may alter the memory reservation independently, but they
++	 * must inform the balloon driver so we avoid hitting the hard limit.
++	 */
++	unsigned long driver_pages;
++	/* Number of pages in high- and low-memory balloons. */
++	unsigned long balloon_low;
++	unsigned long balloon_high;
++};
 +
-+#ifndef CONFIG_X86_NO_TSS
-+char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]
-+__attribute__((section(".bss.page_aligned")));
-+#endif
++extern struct balloon_stats balloon_stats;
++#define bs balloon_stats
 +
-+/* May not be marked __init: used by software suspend */
-+void syscall_init(void)
-+{
-+#ifndef CONFIG_XEN
-+	/* 
-+	 * LSTAR and STAR live in a bit strange symbiosis.
-+	 * They both write to the same internal register. STAR allows to set CS/DS
-+	 * but only a 32bit target. LSTAR sets the 64bit rip. 	 
-+	 */ 
-+	wrmsrl(MSR_STAR,  ((u64)__USER32_CS)<<48  | ((u64)__KERNEL_CS)<<32); 
-+	wrmsrl(MSR_LSTAR, system_call); 
++int balloon_sysfs_init(void);
++void balloon_sysfs_exit(void);
 +
-+	/* Flags to clear on syscall */
-+	wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000); 
-+#endif
-+#ifdef CONFIG_IA32_EMULATION   		
-+	syscall32_cpu_init ();
++void balloon_set_new_target(unsigned long target);
++
++#endif /* __XEN_BALLOON_COMMON_H__ */
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/balloon/sysfs.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/balloon/sysfs.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,170 @@
++/******************************************************************************
++ * balloon/sysfs.c
++ *
++ * Xen balloon driver - sysfs interfaces.
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/capability.h>
++#include <linux/errno.h>
++#include <linux/stat.h>
++#include <linux/string.h>
++#include <linux/sysdev.h>
++#include "common.h"
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
 +#endif
-+}
 +
-+void __cpuinit check_efer(void)
-+{
-+	unsigned long efer;
++#define BALLOON_CLASS_NAME "xen_memory"
 +
-+	rdmsrl(MSR_EFER, efer); 
-+        if (!(efer & EFER_NX) || do_not_nx) { 
-+                __supported_pte_mask &= ~_PAGE_NX; 
-+        }       
++#define BALLOON_SHOW(name, format, args...)			\
++	static ssize_t show_##name(struct sys_device *dev,	\
++				   char *buf)			\
++	{							\
++		return sprintf(buf, format, ##args);		\
++	}							\
++	static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)
++
++BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(bs.current_pages));
++BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(bs.balloon_low));
++BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(bs.balloon_high));
++BALLOON_SHOW(hard_limit_kb,
++	     (bs.hard_limit!=~0UL) ? "%lu\n" : "???\n",
++	     (bs.hard_limit!=~0UL) ? PAGES2KB(bs.hard_limit) : 0);
++BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(bs.driver_pages));
++
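
For readers not fluent in ## token pasting: the first invocation above expands, after preprocessing, to an ordinary show routine plus its attribute declaration, roughly:

static ssize_t show_current_kb(struct sys_device *dev, char *buf)
{
	return sprintf(buf, "%lu\n", PAGES2KB(bs.current_pages));
}
static SYSDEV_ATTR(current_kb, S_IRUGO, show_current_kb, NULL);

Since bs.current_pages is re-read inside the generated function, every sysfs read reflects the balloon's size at that moment.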
++static ssize_t show_target_kb(struct sys_device *dev, char *buf)
++{
++	return sprintf(buf, "%lu\n", PAGES2KB(bs.target_pages));
 +}
 +
-+/*
-+ * cpu_init() initializes state that is per-CPU. Some data is already
-+ * initialized (naturally) in the bootstrap process, such as the GDT
-+ * and IDT. We reload them nevertheless, this function acts as a
-+ * 'CPU state barrier', nothing should get across.
-+ * A lot of state is already set up in PDA init.
-+ */
-+void __cpuinit cpu_init (void)
++static ssize_t store_target_kb(struct sys_device *dev,
++			       const char *buf,
++			       size_t count)
 +{
-+	int cpu = stack_smp_processor_id();
-+#ifndef CONFIG_X86_NO_TSS
-+	struct tss_struct *t = &per_cpu(init_tss, cpu);
-+	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
-+	unsigned long v; 
-+	char *estacks = NULL; 
-+	unsigned i;
-+#endif
-+	struct task_struct *me;
++	char memstring[64], *endchar;
++	unsigned long long target_bytes;
 +
-+	/* CPU 0 is initialised in head64.c */
-+	if (cpu != 0) {
-+		pda_init(cpu);
-+		zap_low_mappings(cpu);
-+	}
-+#ifndef CONFIG_X86_NO_TSS
-+	else
-+		estacks = boot_exception_stacks; 
-+#endif
++	if (!capable(CAP_SYS_ADMIN))
++		return -EPERM;
++	
++	if (count <= 1)
++		return -EBADMSG; /* runt */
++	if (count >= sizeof(memstring))
++		return -EFBIG;   /* too long */
++	strcpy(memstring, buf);
++	
++	target_bytes = memparse(memstring, &endchar);
++	balloon_set_new_target(target_bytes >> PAGE_SHIFT);
++	
++	return count;
++}
 +
-+	me = current;
++static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR,
++		   show_target_kb, store_target_kb);
 +
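
Assuming the usual sysdev layout, the attribute should surface as /sys/devices/system/xen_memory/xen_memory0/target_kb, derived from the class name ("xen_memory") and id (0) registered later in this file; treat that path as an assumption. memparse() accepts K/M/G suffixes, and note the asymmetry: a bare number written here is parsed as bytes, not KiB, despite the file name, because store_target_kb feeds memparse's result directly into target_bytes >> PAGE_SHIFT. A minimal userspace writer:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Path assumed from the sysdev class/id; verify on a real system. */
	const char *path =
		"/sys/devices/system/xen_memory/xen_memory0/target_kb";
	const char *target = "512M\n";	/* memparse() understands K/M/G */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, target, strlen(target)) < 0)
		perror("write");
	close(fd);
	return 0;
}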
-+	if (cpu_test_and_set(cpu, cpu_initialized))
-+		panic("CPU#%d already initialized!\n", cpu);
++static struct sysdev_attribute *balloon_attrs[] = {
++	&attr_target_kb,
++};
 +
-+	printk("Initializing CPU#%d\n", cpu);
++static struct attribute *balloon_info_attrs[] = {
++	&attr_current_kb.attr,
++	&attr_low_kb.attr,
++	&attr_high_kb.attr,
++	&attr_hard_limit_kb.attr,
++	&attr_driver_kb.attr,
++	NULL
++};
 +
-+	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
++static struct attribute_group balloon_info_group = {
++	.name = "info",
++	.attrs = balloon_info_attrs,
++};
 +
-+	/*
-+	 * Initialize the per-CPU GDT with the boot GDT,
-+	 * and set up the GDT descriptor:
-+	 */
-+#ifndef CONFIG_XEN 
-+	if (cpu)
-+ 		memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE);
-+#endif
++static struct sysdev_class balloon_sysdev_class = {
++	set_kset_name(BALLOON_CLASS_NAME),
++};
 +
-+	cpu_gdt_descr[cpu].size = GDT_SIZE;
-+	cpu_gdt_init(&cpu_gdt_descr[cpu]);
++static struct sys_device balloon_sysdev;
 +
-+	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
-+	syscall_init();
++static int register_balloon(struct sys_device *sysdev)
++{
++	int i, error;
 +
-+	wrmsrl(MSR_FS_BASE, 0);
-+	wrmsrl(MSR_KERNEL_GS_BASE, 0);
-+	barrier(); 
++	error = sysdev_class_register(&balloon_sysdev_class);
++	if (error)
++		return error;
 +
-+	check_efer();
++	sysdev->id = 0;
++	sysdev->cls = &balloon_sysdev_class;
 +
-+#ifndef CONFIG_X86_NO_TSS
-+	/*
-+	 * set up and load the per-CPU TSS
-+	 */
-+	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
-+		if (cpu) {
-+			static const unsigned int order[N_EXCEPTION_STACKS] = {
-+				[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
-+				[DEBUG_STACK - 1] = DEBUG_STACK_ORDER
-+			};
++	error = sysdev_register(sysdev);
++	if (error) {
++		sysdev_class_unregister(&balloon_sysdev_class);
++		return error;
++	}
 +
-+			estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
-+			if (!estacks)
-+				panic("Cannot allocate exception stack %ld %d\n",
-+				      v, cpu); 
-+		}
-+		switch (v + 1) {
-+#if DEBUG_STKSZ > EXCEPTION_STKSZ
-+		case DEBUG_STACK:
-+			cpu_pda(cpu)->debugstack = (unsigned long)estacks;
-+			estacks += DEBUG_STKSZ;
-+			break;
-+#endif
-+		default:
-+			estacks += EXCEPTION_STKSZ;
-+			break;
-+		}
-+		orig_ist->ist[v] = t->ist[v] = (unsigned long)estacks;
++	for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) {
++		error = sysdev_create_file(sysdev, balloon_attrs[i]);
++		if (error)
++			goto fail;
 +	}
 +
-+	t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
-+	/*
-+	 * <= is required because the CPU will access up to
-+	 * 8 bits beyond the end of the IO permission bitmap.
-+	 */
-+	for (i = 0; i <= IO_BITMAP_LONGS; i++)
-+		t->io_bitmap[i] = ~0UL;
-+#endif
++	error = sysfs_create_group(&sysdev->kobj, &balloon_info_group);
++	if (error)
++		goto fail;
++	
++	return 0;
 +
-+	atomic_inc(&init_mm.mm_count);
-+	me->active_mm = &init_mm;
-+	if (me->mm)
-+		BUG();
-+	enter_lazy_tlb(&init_mm, me);
++ fail:
++	while (--i >= 0)
++		sysdev_remove_file(sysdev, balloon_attrs[i]);
++	sysdev_unregister(sysdev);
++	sysdev_class_unregister(&balloon_sysdev_class);
++	return error;
++}
 +
-+#ifndef CONFIG_X86_NO_TSS
-+	set_tss_desc(cpu, t);
-+#endif
-+#ifndef CONFIG_XEN
-+	load_TR_desc();
-+#endif
-+	load_LDT(&init_mm.context);
++static void unregister_balloon(struct sys_device *sysdev)
++{
++	int i;
 +
-+	/*
-+	 * Clear all 6 debug registers:
-+	 */
++	sysfs_remove_group(&sysdev->kobj, &balloon_info_group);
++	for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++)
++		sysdev_remove_file(sysdev, balloon_attrs[i]);
++	sysdev_unregister(sysdev);
++	sysdev_class_unregister(&balloon_sysdev_class);
++}
 +
-+	set_debugreg(0UL, 0);
-+	set_debugreg(0UL, 1);
-+	set_debugreg(0UL, 2);
-+	set_debugreg(0UL, 3);
-+	set_debugreg(0UL, 6);
-+	set_debugreg(0UL, 7);
++int balloon_sysfs_init(void)
++{
++	return register_balloon(&balloon_sysdev);
++}
 +
-+	fpu_init(); 
++void balloon_sysfs_exit(void)
++{
++	unregister_balloon(&balloon_sysdev);
 +}
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/setup-xen.c tmp-linux-2.6-xen.patch/arch/x86_64/kernel/setup-xen.c
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/setup-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/setup-xen.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,1677 @@
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/blkback/Makefile
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/blkback/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,3 @@
++obj-$(CONFIG_XEN_BLKDEV_BACKEND) := blkbk.o
++
++blkbk-y	:= blkback.o xenbus.o interface.o vbd.o
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/blkback/blkback.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/blkback/blkback.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,646 @@
++/******************************************************************************
++ * arch/xen/drivers/blkif/backend/main.c
++ * 
++ * Back-end of the driver for virtual block devices. This portion of the
++ * driver exports a 'unified' block-device interface that can be accessed
++ * by any operating system that implements a compatible front end. A 
++ * reference front-end implementation can be found in:
++ *  arch/xen/drivers/blkif/frontend
++ * 
++ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
++ * Copyright (c) 2005, Christopher Clark
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/spinlock.h>
++#include <linux/kthread.h>
++#include <linux/list.h>
++#include <linux/delay.h>
++#include <xen/balloon.h>
++#include <asm/hypervisor.h>
++#include "common.h"
++
 +/*
-+ *  linux/arch/x86-64/kernel/setup.c
-+ *
-+ *  Copyright (C) 1995  Linus Torvalds
-+ *
-+ *  Nov 2001 Dave Jones <davej at suse.de>
-+ *  Forked from i386 setup code.
++ * These are rather arbitrary. They are fairly large because adjacent requests
++ * pulled from a communication ring are quite likely to end up being part of
++ * the same scatter/gather request at the disc.
++ * 
++ * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
++ * 
++ * This will increase the chances of being able to write whole tracks.
++ * 64 should be enough to keep us competitive with Linux.
 + */
++static int blkif_reqs = 64;
++module_param_named(reqs, blkif_reqs, int, 0);
++MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
++
++/* Run-time switchable: /sys/module/blkback/parameters/ */
++static unsigned int log_stats = 0;
++static unsigned int debug_lvl = 0;
++module_param(log_stats, int, 0644);
++module_param(debug_lvl, int, 0644);
 +
 +/*
-+ * This file handles the architecture-dependent parts of initialization
++ * Each outstanding request that we've passed to the lower device layers has a 
++ * 'pending_req' allocated to it. Each bio that completes decrements
++ * the pendcnt towards zero. When it hits zero, the specified domain has a 
++ * response queued for it, with the saved 'id' passed back.
 + */
++typedef struct {
++	blkif_t       *blkif;
++	u64            id;
++	int            nr_pages;
++	atomic_t       pendcnt;
++	unsigned short operation;
++	int            status;
++	struct list_head free_list;
++} pending_req_t;
 +
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/stddef.h>
-+#include <linux/unistd.h>
-+#include <linux/ptrace.h>
-+#include <linux/slab.h>
-+#include <linux/user.h>
-+#include <linux/a.out.h>
-+#include <linux/screen_info.h>
-+#include <linux/ioport.h>
-+#include <linux/delay.h>
-+#include <linux/init.h>
-+#include <linux/initrd.h>
-+#include <linux/highmem.h>
-+#include <linux/bootmem.h>
-+#include <linux/module.h>
-+#include <asm/processor.h>
-+#include <linux/console.h>
-+#include <linux/seq_file.h>
-+#include <linux/crash_dump.h>
-+#include <linux/root_dev.h>
-+#include <linux/pci.h>
-+#include <linux/acpi.h>
-+#include <linux/kallsyms.h>
-+#include <linux/edd.h>
-+#include <linux/mmzone.h>
-+#include <linux/kexec.h>
-+#include <linux/cpufreq.h>
-+#include <linux/dmi.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/ctype.h>
++static pending_req_t *pending_reqs;
++static struct list_head pending_free;
++static DEFINE_SPINLOCK(pending_free_lock);
++static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
 +
-+#include <asm/mtrr.h>
-+#include <asm/uaccess.h>
-+#include <asm/system.h>
-+#include <asm/io.h>
-+#include <asm/smp.h>
-+#include <asm/msr.h>
-+#include <asm/desc.h>
-+#include <video/edid.h>
-+#include <asm/e820.h>
-+#include <asm/dma.h>
-+#include <asm/mpspec.h>
-+#include <asm/mmu_context.h>
-+#include <asm/bootsetup.h>
-+#include <asm/proto.h>
-+#include <asm/setup.h>
-+#include <asm/mach_apic.h>
-+#include <asm/numa.h>
-+#include <asm/sections.h>
-+#include <asm/dmi.h>
-+#ifdef CONFIG_XEN
-+#include <linux/percpu.h>
-+#include <xen/interface/physdev.h>
-+#include "setup_arch_pre.h"
-+#include <asm/hypervisor.h>
-+#include <xen/interface/nmi.h>
-+#include <xen/features.h>
-+#include <xen/xencons.h>
-+#define PFN_UP(x)       (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
-+#define PFN_PHYS(x)     ((x) << PAGE_SHIFT)
-+#include <asm/mach-xen/setup_arch_post.h>
-+#include <xen/interface/memory.h>
++#define BLKBACK_INVALID_HANDLE (~0)
 +
-+#ifdef CONFIG_XEN
-+#include <xen/interface/kexec.h>
-+#endif
++static struct page **pending_pages;
++static grant_handle_t *pending_grant_handles;
 +
-+extern unsigned long start_pfn;
-+extern struct edid_info edid_info;
++static inline int vaddr_pagenr(pending_req_t *req, int seg)
++{
++	return (req - pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
++}
 +
-+shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
-+EXPORT_SYMBOL(HYPERVISOR_shared_info);
++static inline unsigned long vaddr(pending_req_t *req, int seg)
++{
++	unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
++	return (unsigned long)pfn_to_kaddr(pfn);
++}
 +
-+extern char hypercall_page[PAGE_SIZE];
-+EXPORT_SYMBOL(hypercall_page);
++#define pending_handle(_req, _seg) \
++	(pending_grant_handles[vaddr_pagenr(_req, _seg)])
 +
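
The pending-page bookkeeping is one flat array: request r, segment s lives in slot r * BLKIF_MAX_SEGMENTS_PER_REQUEST + s, plain row-major indexing, and pending_handle() tracks the grant handle for the same slot. A toy version (11 segments per request is the classic blkif protocol value, assumed here):

#include <stdio.h>

#define MAX_SEGS 11	/* assumed BLKIF_MAX_SEGMENTS_PER_REQUEST */

/* Flat slot index for (request, segment), as in vaddr_pagenr(). */
static int pagenr(int req, int seg)
{
	return req * MAX_SEGS + seg;
}

int main(void)
{
	/* Request 3, segment 2 -> slot 35 in pending_pages[]. */
	printf("req 3, seg 2 -> slot %d\n", pagenr(3, 2));
	return 0;
}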
-+static int xen_panic_event(struct notifier_block *, unsigned long, void *);
-+static struct notifier_block xen_panic_block = {
-+	xen_panic_event, NULL, 0 /* try to go last */
-+};
 +
-+unsigned long *phys_to_machine_mapping;
-+unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[512];
++static int do_block_io_op(blkif_t *blkif);
++static void dispatch_rw_block_io(blkif_t *blkif,
++				 blkif_request_t *req,
++				 pending_req_t *pending_req);
++static void make_response(blkif_t *blkif, u64 id,
++			  unsigned short op, int st);
 +
-+EXPORT_SYMBOL(phys_to_machine_mapping);
++/******************************************************************
++ * misc small helpers
++ */
++static pending_req_t* alloc_req(void)
++{
++	pending_req_t *req = NULL;
++	unsigned long flags;
 +
-+DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
-+DEFINE_PER_CPU(int, nr_multicall_ents);
++	spin_lock_irqsave(&pending_free_lock, flags);
++	if (!list_empty(&pending_free)) {
++		req = list_entry(pending_free.next, pending_req_t, free_list);
++		list_del(&req->free_list);
++	}
++	spin_unlock_irqrestore(&pending_free_lock, flags);
++	return req;
++}
 +
-+/* Raw start-of-day parameters from the hypervisor. */
-+start_info_t *xen_start_info;
-+EXPORT_SYMBOL(xen_start_info);
-+#endif
++static void free_req(pending_req_t *req)
++{
++	unsigned long flags;
++	int was_empty;
 +
-+/*
-+ * Machine setup..
-+ */
++	spin_lock_irqsave(&pending_free_lock, flags);
++	was_empty = list_empty(&pending_free);
++	list_add(&req->free_list, &pending_free);
++	spin_unlock_irqrestore(&pending_free_lock, flags);
++	if (was_empty)
++		wake_up(&pending_free_wq);
++}
 +
-+struct cpuinfo_x86 boot_cpu_data __read_mostly;
-+EXPORT_SYMBOL(boot_cpu_data);
++static void unplug_queue(blkif_t *blkif)
++{
++	if (blkif->plug == NULL)
++		return;
++	if (blkif->plug->unplug_fn)
++		blkif->plug->unplug_fn(blkif->plug);
++	blk_put_queue(blkif->plug);
++	blkif->plug = NULL;
++}
 +
-+unsigned long mmu_cr4_features;
++static void plug_queue(blkif_t *blkif, struct bio *bio)
++{
++	request_queue_t *q = bdev_get_queue(bio->bi_bdev);
 +
-+int acpi_disabled;
-+EXPORT_SYMBOL(acpi_disabled);
-+#ifdef	CONFIG_ACPI
-+extern int __initdata acpi_ht;
-+extern acpi_interrupt_flags	acpi_sci_flags;
-+int __initdata acpi_force = 0;
-+#endif
++	if (q == blkif->plug)
++		return;
++	unplug_queue(blkif);
++	blk_get_queue(q);
++	blkif->plug = q;
++}
 +
-+int acpi_numa __initdata;
++static void fast_flush_area(pending_req_t *req)
++{
++	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++	unsigned int i, invcount = 0;
++	grant_handle_t handle;
++	int ret;
 +
-+/* Boot loader ID as an integer, for the benefit of proc_dointvec */
-+int bootloader_type;
++	for (i = 0; i < req->nr_pages; i++) {
++		handle = pending_handle(req, i);
++		if (handle == BLKBACK_INVALID_HANDLE)
++			continue;
++		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
++				    GNTMAP_host_map, handle);
++		pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
++		invcount++;
++	}
 +
-+unsigned long saved_video_mode;
++	ret = HYPERVISOR_grant_table_op(
++		GNTTABOP_unmap_grant_ref, unmap, invcount);
++	BUG_ON(ret);
++}
 +
-+/* 
-+ * Early DMI memory
++/******************************************************************
++ * SCHEDULER FUNCTIONS
 + */
-+int dmi_alloc_index;
-+char dmi_alloc_data[DMI_MAX_DATA];
 +
-+/*
-+ * Setup options
-+ */
-+struct screen_info screen_info;
-+EXPORT_SYMBOL(screen_info);
-+struct sys_desc_table_struct {
-+	unsigned short length;
-+	unsigned char table[0];
-+};
++static void print_stats(blkif_t *blkif)
++{
++	printk(KERN_DEBUG "%s: oo %3d  |  rd %4d  |  wr %4d  |  br %4d\n",
++	       current->comm, blkif->st_oo_req,
++	       blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
++	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
++	blkif->st_rd_req = 0;
++	blkif->st_wr_req = 0;
++	blkif->st_oo_req = 0;
++}
 +
-+struct edid_info edid_info;
-+EXPORT_SYMBOL_GPL(edid_info);
-+struct e820map e820;
-+#ifdef CONFIG_XEN
-+struct e820map machine_e820;
-+#endif
++int blkif_schedule(void *arg)
++{
++	blkif_t *blkif = arg;
 +
-+extern int root_mountflags;
++	blkif_get(blkif);
 +
-+char command_line[COMMAND_LINE_SIZE];
++	if (debug_lvl)
++		printk(KERN_DEBUG "%s: started\n", current->comm);
 +
-+struct resource standard_io_resources[] = {
-+	{ .name = "dma1", .start = 0x00, .end = 0x1f,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+	{ .name = "pic1", .start = 0x20, .end = 0x21,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+	{ .name = "timer0", .start = 0x40, .end = 0x43,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+	{ .name = "timer1", .start = 0x50, .end = 0x53,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+	{ .name = "keyboard", .start = 0x60, .end = 0x6f,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+	{ .name = "fpu", .start = 0xf0, .end = 0xff,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
-+};
++	while (!kthread_should_stop()) {
++		if (try_to_freeze())
++			continue;
 +
-+#define STANDARD_IO_RESOURCES \
-+	(sizeof standard_io_resources / sizeof standard_io_resources[0])
++		wait_event_interruptible(
++			blkif->wq,
++			blkif->waiting_reqs || kthread_should_stop());
++		wait_event_interruptible(
++			pending_free_wq,
++			!list_empty(&pending_free) || kthread_should_stop());
 +
-+#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
++		blkif->waiting_reqs = 0;
++		smp_mb(); /* clear flag *before* checking for work */
 +
-+struct resource data_resource = {
-+	.name = "Kernel data",
-+	.start = 0,
-+	.end = 0,
-+	.flags = IORESOURCE_RAM,
-+};
-+struct resource code_resource = {
-+	.name = "Kernel code",
-+	.start = 0,
-+	.end = 0,
-+	.flags = IORESOURCE_RAM,
-+};
++		if (do_block_io_op(blkif))
++			blkif->waiting_reqs = 1;
++		unplug_queue(blkif);
 +
-+#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
++		if (log_stats && time_after(jiffies, blkif->st_print))
++			print_stats(blkif);
++	}
 +
-+static struct resource system_rom_resource = {
-+	.name = "System ROM",
-+	.start = 0xf0000,
-+	.end = 0xfffff,
-+	.flags = IORESOURCE_ROM,
-+};
++	if (log_stats)
++		print_stats(blkif);
++	if (debug_lvl)
++		printk(KERN_DEBUG "%s: exiting\n", current->comm);
 +
-+static struct resource extension_rom_resource = {
-+	.name = "Extension ROM",
-+	.start = 0xe0000,
-+	.end = 0xeffff,
-+	.flags = IORESOURCE_ROM,
-+};
++	blkif->xenblkd = NULL;
++	blkif_put(blkif);
 +
-+static struct resource adapter_rom_resources[] = {
-+	{ .name = "Adapter ROM", .start = 0xc8000, .end = 0,
-+		.flags = IORESOURCE_ROM },
-+	{ .name = "Adapter ROM", .start = 0, .end = 0,
-+		.flags = IORESOURCE_ROM },
-+	{ .name = "Adapter ROM", .start = 0, .end = 0,
-+		.flags = IORESOURCE_ROM },
-+	{ .name = "Adapter ROM", .start = 0, .end = 0,
-+		.flags = IORESOURCE_ROM },
-+	{ .name = "Adapter ROM", .start = 0, .end = 0,
-+		.flags = IORESOURCE_ROM },
-+	{ .name = "Adapter ROM", .start = 0, .end = 0,
-+		.flags = IORESOURCE_ROM }
-+};
++	return 0;
++}
 +
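
The ordering in blkif_schedule is deliberate: waiting_reqs is cleared before the ring is drained, with smp_mb() in between, while the waker (blkif_notify_work, below) sets the flag before calling wake_up(). A request arriving mid-drain is therefore either seen by do_block_io_op or leaves the flag set for the next loop iteration, so no wakeup is lost. A condensed userspace model of that handshake, with C11 seq_cst stores standing in for the explicit barrier:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic bool waiting_reqs;

/* Waker side (cf. blkif_notify_work): publish work, then wake. */
static void notify_work(void)
{
	atomic_store(&waiting_reqs, true);
	/* wake_up(&blkif->wq) would follow here */
}

/* One service pass (cf. the loop body of blkif_schedule). */
static bool service_once(bool (*drain_ring)(void))
{
	/* Clear the flag *before* draining; the seq_cst store orders the
	 * clear ahead of the ring reads, playing the role of smp_mb(). */
	atomic_store(&waiting_reqs, false);
	if (drain_ring())	/* more requests than free slots? */
		atomic_store(&waiting_reqs, true);
	return atomic_load(&waiting_reqs);	/* true -> don't sleep yet */
}

static bool no_work(void) { return false; }

int main(void)
{
	notify_work();
	printf("more work pending: %d\n", service_once(no_work));
	return 0;
}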
-+#define ADAPTER_ROM_RESOURCES \
-+	(sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
++/******************************************************************
++ * COMPLETION CALLBACK -- Called as bio->bi_end_io()
++ */
 +
-+static struct resource video_rom_resource = {
-+	.name = "Video ROM",
-+	.start = 0xc0000,
-+	.end = 0xc7fff,
-+	.flags = IORESOURCE_ROM,
-+};
++static void __end_block_io_op(pending_req_t *pending_req, int error)
++{
++	/* An error fails the entire request. */
++	if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
++	    (error == -EOPNOTSUPP)) {
++		DPRINTK("blkback: write barrier op failed, not supported\n");
++		blkback_barrier(XBT_NIL, pending_req->blkif->be, 0);
++		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
++	} else if (error) {
++		DPRINTK("Buffer not up-to-date at end of operation, "
++			"error=%d\n", error);
++		pending_req->status = BLKIF_RSP_ERROR;
++	}
 +
-+static struct resource video_ram_resource = {
-+	.name = "Video RAM area",
-+	.start = 0xa0000,
-+	.end = 0xbffff,
-+	.flags = IORESOURCE_RAM,
-+};
++	if (atomic_dec_and_test(&pending_req->pendcnt)) {
++		fast_flush_area(pending_req);
++		make_response(pending_req->blkif, pending_req->id,
++			      pending_req->operation, pending_req->status);
++		blkif_put(pending_req->blkif);
++		free_req(pending_req);
++	}
++}
 +
-+#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
++static int end_block_io_op(struct bio *bio, unsigned int done, int error)
++{
++	if (bio->bi_size != 0)
++		return 1;
++	__end_block_io_op(bio->bi_private, error);
++	bio_put(bio);
++	return error;
++}
++
++
++/******************************************************************************
++ * NOTIFICATION FROM GUEST OS.
++ */
 +
-+static int __init romchecksum(unsigned char *rom, unsigned long length)
++static void blkif_notify_work(blkif_t *blkif)
 +{
-+	unsigned char *p, sum = 0;
-+
-+	for (p = rom; p < rom + length; p++)
-+		sum += *p;
-+	return sum == 0;
++	blkif->waiting_reqs = 1;
++	wake_up(&blkif->wq);
 +}
 +
-+static void __init probe_roms(void)
++irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
 +{
-+	unsigned long start, length, upper;
-+	unsigned char *rom;
-+	int	      i;
-+
-+#ifdef CONFIG_XEN
-+	/* Nothing to do if not running in dom0. */
-+	if (!is_initial_xendomain())
-+		return;
-+#endif
++	blkif_notify_work(dev_id);
++	return IRQ_HANDLED;
++}
 +
-+	/* video rom */
-+	upper = adapter_rom_resources[0].start;
-+	for (start = video_rom_resource.start; start < upper; start += 2048) {
-+		rom = isa_bus_to_virt(start);
-+		if (!romsignature(rom))
-+			continue;
 +
-+		video_rom_resource.start = start;
 +
-+		/* 0 < length <= 0x7f * 512, historically */
-+		length = rom[2] * 512;
++/******************************************************************
++ * DOWNWARD CALLS -- These interface with the block-device layer proper.
++ */
 +
-+		/* if checksum okay, trust length byte */
-+		if (length && romchecksum(rom, length))
-+			video_rom_resource.end = start + length - 1;
++static int do_block_io_op(blkif_t *blkif)
++{
++	blkif_back_rings_t *blk_rings = &blkif->blk_rings;
++	blkif_request_t req;
++	pending_req_t *pending_req;
++	RING_IDX rc, rp;
++	int more_to_do = 0;
 +
-+		request_resource(&iomem_resource, &video_rom_resource);
-+		break;
-+			}
++	rc = blk_rings->common.req_cons;
++	rp = blk_rings->common.sring->req_prod;
++	rmb(); /* Ensure we see queued requests up to 'rp'. */
 +
-+	start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
-+	if (start < upper)
-+		start = upper;
++	while (rc != rp) {
 +
-+	/* system rom */
-+	request_resource(&iomem_resource, &system_rom_resource);
-+	upper = system_rom_resource.start;
++		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
++			break;
 +
-+	/* check for extension rom (ignore length byte!) */
-+	rom = isa_bus_to_virt(extension_rom_resource.start);
-+	if (romsignature(rom)) {
-+		length = extension_rom_resource.end - extension_rom_resource.start + 1;
-+		if (romchecksum(rom, length)) {
-+			request_resource(&iomem_resource, &extension_rom_resource);
-+			upper = extension_rom_resource.start;
++		pending_req = alloc_req();
++		if (NULL == pending_req) {
++			blkif->st_oo_req++;
++			more_to_do = 1;
++			break;
 +		}
-+	}
 +
-+	/* check for adapter roms on 2k boundaries */
-+	for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
-+		rom = isa_bus_to_virt(start);
-+		if (!romsignature(rom))
-+			continue;
++		if (kthread_should_stop()) {
++			more_to_do = 1;
++			break;
++		}
 +
-+		/* 0 < length <= 0x7f * 512, historically */
-+		length = rom[2] * 512;
++		switch (blkif->blk_protocol) {
++		case BLKIF_PROTOCOL_NATIVE:
++			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
++			break;
++		case BLKIF_PROTOCOL_X86_32:
++			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
++			break;
++		case BLKIF_PROTOCOL_X86_64:
++			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
++			break;
++		default:
++			BUG();
++		}
++		blk_rings->common.req_cons = ++rc; /* before make_response() */
 +
-+		/* but accept any length that fits if checksum okay */
-+		if (!length || start + length > upper || !romchecksum(rom, length))
-+			continue;
++		/* Apply all sanity checks to /private copy/ of request. */
++		barrier();
 +
-+		adapter_rom_resources[i].start = start;
-+		adapter_rom_resources[i].end = start + length - 1;
-+		request_resource(&iomem_resource, &adapter_rom_resources[i]);
++		switch (req.operation) {
++		case BLKIF_OP_READ:
++			blkif->st_rd_req++;
++			dispatch_rw_block_io(blkif, &req, pending_req);
++			break;
++		case BLKIF_OP_WRITE_BARRIER:
++			blkif->st_br_req++;
++			/* fall through */
++		case BLKIF_OP_WRITE:
++			blkif->st_wr_req++;
++			dispatch_rw_block_io(blkif, &req, pending_req);
++			break;
++		default:
++			/* A good sign something is wrong: sleep for a while to
++			 * avoid excessive CPU consumption by a bad guest. */
++			msleep(1);
++			DPRINTK("error: unknown block io operation [%d]\n",
++				req.operation);
++			make_response(blkif, req.id, req.operation,
++				      BLKIF_RSP_ERROR);
++			free_req(pending_req);
++			break;
++		}
 +
-+		start = adapter_rom_resources[i++].end & ~2047UL;
++		/* Yield point for this unbounded loop. */
++		cond_resched();
 +	}
-+}
 +
-+/* Check for full argument with no trailing characters */
-+static int fullarg(char *p, char *arg)
-+{
-+	int l = strlen(arg);
-+	return !memcmp(p, arg, l) && (p[l] == 0 || isspace(p[l]));
++	return more_to_do;
 +}
 +
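
do_block_io_op follows the standard shared-ring consumer discipline: read req_prod once, rmb() so the request payloads are observed no later than the index, then copy each request into a private buffer and validate only the copy, since the frontend can rewrite the shared slot at any moment (a classic TOCTOU hazard). A compilable single-producer model of that discipline:

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 64	/* power of two, like the Xen shared rings */

struct req { unsigned op, len; };

struct ring {
	_Atomic unsigned prod;		/* advanced by the (untrusted) guest */
	unsigned cons;			/* private to the backend */
	struct req slots[RING_SIZE];
};

/* Drain the ring; sanity-check a *private copy* of each request so the
 * producer cannot change fields between the check and their use. */
static int consume(struct ring *r)
{
	unsigned rp = atomic_load_explicit(&r->prod, memory_order_acquire);
	int handled = 0;			/* acquire load ~ rmb() */

	while (r->cons != rp) {
		struct req local;

		memcpy(&local, &r->slots[r->cons % RING_SIZE], sizeof(local));
		atomic_signal_fence(memory_order_seq_cst);	/* cf. barrier() */
		if (local.len <= 4096)		/* validate the copy only */
			handled++;
		r->cons++;
	}
	return handled;
}

int main(void)
{
	static struct ring r;

	r.slots[0] = (struct req){ .op = 0, .len = 512 };
	atomic_store(&r.prod, 1);
	printf("handled %d request(s)\n", consume(&r));
	return 0;
}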
-+static __init void parse_cmdline_early (char ** cmdline_p)
++static void dispatch_rw_block_io(blkif_t *blkif,
++				 blkif_request_t *req,
++				 pending_req_t *pending_req)
 +{
-+	char c = ' ', *to = command_line, *from = COMMAND_LINE;
-+	int len = 0;
-+	int userdef = 0;
++	extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
++	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++	struct phys_req preq;
++	struct { 
++		unsigned long buf; unsigned int nsec;
++	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++	unsigned int nseg;
++	struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++	int ret, i, nbio = 0;
++	int operation;
 +
-+	for (;;) {
-+		if (c != ' ') 
-+			goto next_char; 
++	switch (req->operation) {
++	case BLKIF_OP_READ:
++		operation = READ;
++		break;
++	case BLKIF_OP_WRITE:
++		operation = WRITE;
++		break;
++	case BLKIF_OP_WRITE_BARRIER:
++		operation = WRITE_BARRIER;
++		break;
++	default:
++		operation = 0; /* make gcc happy */
++		BUG();
++	}
 +
-+#ifdef  CONFIG_SMP
-+		/*
-+		 * If the BIOS enumerates physical processors before logical,
-+		 * maxcpus=N at enumeration-time can be used to disable HT.
-+		 */
-+		else if (!memcmp(from, "maxcpus=", 8)) {
-+			extern unsigned int maxcpus;
++	/* Check that number of segments is sane. */
++	nseg = req->nr_segments;
++	if (unlikely(nseg == 0 && operation != WRITE_BARRIER) || 
++	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
++		DPRINTK("Bad number of segments in request (%d)\n", nseg);
++		goto fail_response;
++	}
 +
-+			maxcpus = simple_strtoul(from + 8, NULL, 0);
-+		}
-+#endif
-+#ifdef CONFIG_ACPI
-+		/* "acpi=off" disables both ACPI table parsing and interpreter init */
-+		if (fullarg(from,"acpi=off"))
-+			disable_acpi();
++	preq.dev           = req->handle;
++	preq.sector_number = req->sector_number;
++	preq.nr_sects      = 0;
 +
-+		if (fullarg(from, "acpi=force")) { 
-+			/* add later when we do DMI horrors: */
-+			acpi_force = 1;
-+			acpi_disabled = 0;
-+		}
++	pending_req->blkif     = blkif;
++	pending_req->id        = req->id;
++	pending_req->operation = req->operation;
++	pending_req->status    = BLKIF_RSP_OKAY;
++	pending_req->nr_pages  = nseg;
 +
-+		/* acpi=ht just means: do ACPI MADT parsing 
-+		   at bootup, but don't enable the full ACPI interpreter */
-+		if (fullarg(from, "acpi=ht")) { 
-+			if (!acpi_force)
-+				disable_acpi();
-+			acpi_ht = 1; 
-+		}
-+                else if (fullarg(from, "pci=noacpi")) 
-+			acpi_disable_pci();
-+		else if (fullarg(from, "acpi=noirq"))
-+			acpi_noirq_set();
++	for (i = 0; i < nseg; i++) {
++		uint32_t flags;
 +
-+		else if (fullarg(from, "acpi_sci=edge"))
-+			acpi_sci_flags.trigger =  1;
-+		else if (fullarg(from, "acpi_sci=level"))
-+			acpi_sci_flags.trigger = 3;
-+		else if (fullarg(from, "acpi_sci=high"))
-+			acpi_sci_flags.polarity = 1;
-+		else if (fullarg(from, "acpi_sci=low"))
-+			acpi_sci_flags.polarity = 3;
++		seg[i].nsec = req->seg[i].last_sect -
++			req->seg[i].first_sect + 1;
 +
-+		/* acpi=strict disables out-of-spec workarounds */
-+		else if (fullarg(from, "acpi=strict")) {
-+			acpi_strict = 1;
-+		}
-+#ifdef CONFIG_X86_IO_APIC
-+		else if (fullarg(from, "acpi_skip_timer_override"))
-+			acpi_skip_timer_override = 1;
-+#endif
-+#endif
++		if ((req->seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
++		    (req->seg[i].last_sect < req->seg[i].first_sect))
++			goto fail_response;
++		preq.nr_sects += seg[i].nsec;
 +
-+#ifndef CONFIG_XEN
-+		if (fullarg(from, "nolapic") || fullarg(from, "disableapic")) {
-+			clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
-+			disable_apic = 1;
-+		}
++		flags = GNTMAP_host_map;
++		if (operation != READ)
++			flags |= GNTMAP_readonly;
++		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
++				  req->seg[i].gref, blkif->domid);
++	}
 +
-+		if (fullarg(from, "noapic"))
-+			skip_ioapic_setup = 1;
++	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
++	BUG_ON(ret);
 +
-+		if (fullarg(from,"apic")) {
-+			skip_ioapic_setup = 0;
-+			ioapic_force = 1;
++	for (i = 0; i < nseg; i++) {
++		if (unlikely(map[i].status != 0)) {
++			DPRINTK("invalid buffer -- could not remap it\n");
++			map[i].handle = BLKBACK_INVALID_HANDLE;
++			ret |= 1;
 +		}
-+#endif
-+			
-+		if (!memcmp(from, "mem=", 4))
-+			parse_memopt(from+4, &from); 
 +
-+		if (!memcmp(from, "memmap=", 7)) {
-+			/* exactmap option is for used defined memory */
-+			if (!memcmp(from+7, "exactmap", 8)) {
-+#ifdef CONFIG_CRASH_DUMP
-+				/* If we are doing a crash dump, we
-+				 * still need to know the real mem
-+				 * size before original memory map is
-+				 * reset.
-+				 */
-+				saved_max_pfn = e820_end_of_ram();
-+#endif
-+				from += 8+7;
-+				end_pfn_map = 0;
-+				e820.nr_map = 0;
-+				userdef = 1;
-+			}
-+			else {
-+				parse_memmapopt(from+7, &from);
-+				userdef = 1;
-+			}
-+		}
++		pending_handle(pending_req, i) = map[i].handle;
 +
-+#ifdef CONFIG_NUMA
-+		if (!memcmp(from, "numa=", 5))
-+			numa_setup(from+5); 
-+#endif
++		if (ret)
++			continue;
 +
-+		if (!memcmp(from,"iommu=",6)) { 
-+			iommu_setup(from+6); 
-+		}
++		set_phys_to_machine(__pa(vaddr(
++			pending_req, i)) >> PAGE_SHIFT,
++			FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
++		seg[i].buf  = map[i].dev_bus_addr | 
++			(req->seg[i].first_sect << 9);
++	}
 +
-+		if (fullarg(from,"oops=panic"))
-+			panic_on_oops = 1;
++	if (ret)
++		goto fail_flush;
 +
-+		if (!memcmp(from, "noexec=", 7))
-+			nonx_setup(from + 7);
++	if (vbd_translate(&preq, blkif, operation) != 0) {
++		DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n", 
++			operation == READ ? "read" : "write",
++			preq.sector_number,
++			preq.sector_number + preq.nr_sects, preq.dev);
++		goto fail_flush;
++	}
 +
-+#ifdef CONFIG_KEXEC
-+		/* crashkernel=size at addr specifies the location to reserve for
-+		 * a crash kernel.  By reserving this memory we guarantee
-+		 * that linux never set's it up as a DMA target.
-+		 * Useful for holding code to do something appropriate
-+		 * after a kernel panic.
-+		 */
-+		else if (!memcmp(from, "crashkernel=", 12)) {
-+#ifndef CONFIG_XEN
-+			unsigned long size, base;
-+			size = memparse(from+12, &from);
-+			if (*from == '@') {
-+				base = memparse(from+1, &from);
-+				/* FIXME: Do I want a sanity check
-+				 * to validate the memory range?
-+				 */
-+				crashk_res.start = base;
-+				crashk_res.end   = base + size - 1;
-+			}
-+#else
-+			printk("Ignoring crashkernel command line, "
-+			       "parameter will be supplied by xen\n");
-+#endif
++	for (i = 0; i < nseg; i++) {
++		if (((int)preq.sector_number|(int)seg[i].nsec) &
++		    ((bdev_hardsect_size(preq.bdev) >> 9) - 1)) {
++			DPRINTK("Misaligned I/O request from domain %d",
++				blkif->domid);
++			goto fail_put_bio;
 +		}
-+#endif
 +
-+#ifdef CONFIG_PROC_VMCORE
-+		/* elfcorehdr= specifies the location of elf core header
-+		 * stored by the crashed kernel. This option will be passed
-+		 * by kexec loader to the capture kernel.
-+		 */
-+		else if(!memcmp(from, "elfcorehdr=", 11))
-+			elfcorehdr_addr = memparse(from+11, &from);
-+#endif
++		while ((bio == NULL) ||
++		       (bio_add_page(bio,
++				     virt_to_page(vaddr(pending_req, i)),
++				     seg[i].nsec << 9,
++				     seg[i].buf & ~PAGE_MASK) == 0)) {
++			bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
++			if (unlikely(bio == NULL))
++				goto fail_put_bio;
 +
-+#if defined(CONFIG_HOTPLUG_CPU) && !defined(CONFIG_XEN)
-+		else if (!memcmp(from, "additional_cpus=", 16))
-+			setup_additional_cpus(from+16);
-+#endif
++			bio->bi_bdev    = preq.bdev;
++			bio->bi_private = pending_req;
++			bio->bi_end_io  = end_block_io_op;
++			bio->bi_sector  = preq.sector_number;
++		}
 +
-+	next_char:
-+		c = *(from++);
-+		if (!c)
-+			break;
-+		if (COMMAND_LINE_SIZE <= ++len)
-+			break;
-+		*(to++) = c;
++		preq.sector_number += seg[i].nsec;
 +	}
-+	if (userdef) {
-+		printk(KERN_INFO "user-defined physical RAM map:\n");
-+		e820_print_map("user");
++
++	if (!bio) {
++		BUG_ON(operation != WRITE_BARRIER);
++		bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, 0);
++		if (unlikely(bio == NULL))
++			goto fail_put_bio;
++
++		bio->bi_bdev    = preq.bdev;
++		bio->bi_private = pending_req;
++		bio->bi_end_io  = end_block_io_op;
++		bio->bi_sector  = -1;
 +	}
-+	*to = '\0';
-+	*cmdline_p = command_line;
-+}
 +
-+#ifndef CONFIG_NUMA
-+static void __init
-+contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
-+{
-+	unsigned long bootmap_size, bootmap;
++	plug_queue(blkif, bio);
++	atomic_set(&pending_req->pendcnt, nbio);
++	blkif_get(blkif);
 +
-+	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
-+	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
-+	if (bootmap == -1L)
-+		panic("Cannot find bootmem map of size %ld\n",bootmap_size);
-+	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
-+#ifdef CONFIG_XEN
-+	e820_bootmem_free(NODE_DATA(0), 0, xen_start_info->nr_pages<<PAGE_SHIFT);
-+#else
-+	e820_bootmem_free(NODE_DATA(0), 0, end_pfn << PAGE_SHIFT);
-+#endif
-+	reserve_bootmem(bootmap, bootmap_size);
-+} 
-+#endif
++	for (i = 0; i < nbio; i++)
++		submit_bio(operation, biolist[i]);
 +
-+#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
-+struct edd edd;
-+#ifdef CONFIG_EDD_MODULE
-+EXPORT_SYMBOL(edd);
-+#endif
-+/**
-+ * copy_edd() - Copy the BIOS EDD information
-+ *              from boot_params into a safe place.
-+ *
-+ */
-+static inline void copy_edd(void)
-+{
-+     memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
-+     memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
-+     edd.mbr_signature_nr = EDD_MBR_SIG_NR;
-+     edd.edd_info_nr = EDD_NR;
-+}
-+#else
-+static inline void copy_edd(void)
-+{
-+}
-+#endif
++	if (operation == READ)
++		blkif->st_rd_sect += preq.nr_sects;
++	else if (operation == WRITE || operation == WRITE_BARRIER)
++		blkif->st_wr_sect += preq.nr_sects;
 +
-+#ifndef CONFIG_XEN
-+#define EBDA_ADDR_POINTER 0x40E
++	return;
 +
-+unsigned __initdata ebda_addr;
-+unsigned __initdata ebda_size;
++ fail_put_bio:
++	for (i = 0; i < (nbio-1); i++)
++		bio_put(biolist[i]);
++ fail_flush:
++	fast_flush_area(pending_req);
++ fail_response:
++	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
++	free_req(pending_req);
++	msleep(1); /* back off a bit */
++} 
 +
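
Segment descriptors address 512-byte sectors within a single page, which is why last_sect is rejected at or above PAGE_SIZE >> 9 (so 7 is the largest valid value with 4 KiB pages), and why seg[i].buf carries first_sect << 9 as the byte offset into the mapped frame. Worked through for a sample segment:

#include <stdio.h>

int main(void)
{
	unsigned first_sect = 2, last_sect = 7;	/* sample in-page segment */
	unsigned nsec  = last_sect - first_sect + 1;
	unsigned bytes = nsec << 9;		/* sectors are 512 B */
	unsigned off   = first_sect << 9;	/* offset within the page */

	printf("nsec=%u bytes=%u page-offset=%u\n", nsec, bytes, off);
	return 0;
}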
-+static void discover_ebda(void)
-+{
-+	/*
-+	 * there is a real-mode segmented pointer pointing to the 
-+	 * 4K EBDA area at 0x40E
-+	 */
-+	ebda_addr = *(unsigned short *)EBDA_ADDR_POINTER;
-+	ebda_addr <<= 4;
 +
-+	ebda_size = *(unsigned short *)(unsigned long)ebda_addr;
 +
-+	/* Round EBDA up to pages */
-+	if (ebda_size == 0)
-+		ebda_size = 1;
-+	ebda_size <<= 10;
-+	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
-+	if (ebda_size > 64*1024)
-+		ebda_size = 64*1024;
-+}
-+#else
-+#define discover_ebda() ((void)0)
-+#endif
++/******************************************************************
++ * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
++ */
 +
-+void __init setup_arch(char **cmdline_p)
-+{
-+#ifdef CONFIG_XEN
-+	/* Register a call for panic conditions. */
-+	atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
 +
-+ 	ROOT_DEV = MKDEV(RAMDISK_MAJOR,0); 
-+ 	screen_info = SCREEN_INFO;
++static void make_response(blkif_t *blkif, u64 id,
++			  unsigned short op, int st)
++{
++	blkif_response_t  resp;
++	unsigned long     flags;
++	blkif_back_rings_t *blk_rings = &blkif->blk_rings;
++	int more_to_do = 0;
++	int notify;
 +
-+	if (is_initial_xendomain()) {
-+		/* This is drawn from a dump from vgacon:startup in
-+		 * standard Linux. */
-+		screen_info.orig_video_mode = 3;
-+		screen_info.orig_video_isVGA = 1;
-+		screen_info.orig_video_lines = 25;
-+		screen_info.orig_video_cols = 80;
-+		screen_info.orig_video_ega_bx = 3;
-+		screen_info.orig_video_points = 16;
-+		screen_info.orig_y = screen_info.orig_video_lines - 1;
-+		if (xen_start_info->console.dom0.info_size >=
-+		    sizeof(struct dom0_vga_console_info)) {
-+			const struct dom0_vga_console_info *info =
-+				(struct dom0_vga_console_info *)(
-+					(char *)xen_start_info +
-+					xen_start_info->console.dom0.info_off);
-+			dom0_init_screen_info(info);
-+		}
-+		xen_start_info->console.domU.mfn = 0;
-+		xen_start_info->console.domU.evtchn = 0;
-+	} else
-+		screen_info.orig_video_isVGA = 0;
++	resp.id        = id;
++	resp.operation = op;
++	resp.status    = st;
 +
-+	edid_info = EDID_INFO;
-+	saved_video_mode = SAVED_VIDEO_MODE;
-+	bootloader_type = LOADER_TYPE;
++	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
++	/* Place on the response ring for the relevant domain. */
++	switch (blkif->blk_protocol) {
++	case BLKIF_PROTOCOL_NATIVE:
++		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
++		       &resp, sizeof(resp));
++		break;
++	case BLKIF_PROTOCOL_X86_32:
++		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
++		       &resp, sizeof(resp));
++		break;
++	case BLKIF_PROTOCOL_X86_64:
++		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
++		       &resp, sizeof(resp));
++		break;
++	default:
++		BUG();
++	}
++	blk_rings->common.rsp_prod_pvt++;
++	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
++	if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
++		/*
++		 * Tail check for pending requests. Allows frontend to avoid
++		 * notifications if requests are already in flight (lower
++		 * overheads and promotes batching).
++		 */
++		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
 +
-+#ifdef CONFIG_BLK_DEV_RAM
-+	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
-+	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
-+	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
++	} else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
++		more_to_do = 1;
++	}
 +
++	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
 +
-+#endif
++	if (more_to_do)
++		blkif_notify_work(blkif);
++	if (notify)
++		notify_remote_via_irq(blkif->irq);
++}
 +
-+	HYPERVISOR_vm_assist(VMASST_CMD_enable,
-+			     VMASST_TYPE_writable_pagetables);
++static int __init blkif_init(void)
++{
++	int i, mmap_pages;
 +
-+	ARCH_SETUP
-+#else
-+ 	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
-+ 	screen_info = SCREEN_INFO;
-+	edid_info = EDID_INFO;
-+	saved_video_mode = SAVED_VIDEO_MODE;
-+	bootloader_type = LOADER_TYPE;
++	if (!is_running_on_xen())
++		return -ENODEV;
 +
-+#ifdef CONFIG_BLK_DEV_RAM
-+	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
-+	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
-+	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
-+#endif
-+#endif	/* !CONFIG_XEN */
-+	setup_memory_region();
-+	copy_edd();
++	mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
 +
-+	if (!MOUNT_ROOT_RDONLY)
-+		root_mountflags &= ~MS_RDONLY;
-+	init_mm.start_code = (unsigned long) &_text;
-+	init_mm.end_code = (unsigned long) &_etext;
-+	init_mm.end_data = (unsigned long) &_edata;
-+	init_mm.brk = (unsigned long) &_end;
++	pending_reqs          = kmalloc(sizeof(pending_reqs[0]) *
++					blkif_reqs, GFP_KERNEL);
++	pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
++					mmap_pages, GFP_KERNEL);
++	pending_pages         = alloc_empty_pages_and_pagevec(mmap_pages);
 +
-+	code_resource.start = virt_to_phys(&_text);
-+	code_resource.end = virt_to_phys(&_etext)-1;
-+	data_resource.start = virt_to_phys(&_etext);
-+	data_resource.end = virt_to_phys(&_edata)-1;
++	if (!pending_reqs || !pending_grant_handles || !pending_pages)
++		goto out_of_memory;
 +
-+	parse_cmdline_early(cmdline_p);
++	for (i = 0; i < mmap_pages; i++)
++		pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
 +
-+	early_identify_cpu(&boot_cpu_data);
++	blkif_interface_init();
 +
-+	/*
-+	 * partially used pages are not usable - thus
-+	 * we are rounding upwards:
-+	 */
-+	end_pfn = e820_end_of_ram();
-+	num_physpages = end_pfn;		/* for pfn_valid */
++	memset(pending_reqs, 0, sizeof(pending_reqs[0]) * blkif_reqs);
++	INIT_LIST_HEAD(&pending_free);
 +
-+	check_efer();
++	for (i = 0; i < blkif_reqs; i++)
++		list_add_tail(&pending_reqs[i].free_list, &pending_free);
 +
-+	discover_ebda();
++	blkif_xenbus_init();
 +
-+	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
++	return 0;
 +
-+	if (is_initial_xendomain())
-+		dmi_scan_machine();
++ out_of_memory:
++	kfree(pending_reqs);
++	kfree(pending_grant_handles);
++	free_empty_pages_and_pagevec(pending_pages, mmap_pages);
++	printk("%s: out of memory\n", __FUNCTION__);
++	return -ENOMEM;
++}
 +
-+#ifdef CONFIG_ACPI_NUMA
-+	/*
-+	 * Parse SRAT to discover nodes.
-+	 */
-+	acpi_numa_init();
-+#endif
++module_init(blkif_init);
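
For a sense of scale: with the default blkif_reqs = 64 above and the classic protocol's 11 segments per request (BLKIF_MAX_SEGMENTS_PER_REQUEST, an assumption here), blkif_init pre-reserves 704 empty pages via alloc_empty_pages_and_pagevec, about 2.75 MiB with 4 KiB pages, plus a grant-handle slot per page:

#include <stdio.h>

int main(void)
{
	int blkif_reqs = 64;	/* module default above */
	int max_segs   = 11;	/* assumed BLKIF_MAX_SEGMENTS_PER_REQUEST */
	int mmap_pages = blkif_reqs * max_segs;

	printf("%d pages = %d KiB pre-reserved\n", mmap_pages, mmap_pages * 4);
	return 0;
}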
 +
-+#ifdef CONFIG_NUMA
-+	numa_initmem_init(0, end_pfn); 
-+#else
-+	contig_initmem_init(0, end_pfn);
-+#endif
++MODULE_LICENSE("Dual BSD/GPL");
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/blkback/common.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/blkback/common.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,139 @@
++/* 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
 +
-+#ifdef CONFIG_XEN
-+	/*
-+	 * Reserve kernel, physmap, start info, initial page tables, and
-+	 * direct mapping.
-+	 */
-+	reserve_bootmem_generic(__pa_symbol(&_text),
-+	                        (table_end << PAGE_SHIFT) - __pa_symbol(&_text));
-+#else
-+	/* Reserve direct mapping */
-+	reserve_bootmem_generic(table_start << PAGE_SHIFT, 
-+				(table_end - table_start) << PAGE_SHIFT);
++#ifndef __BLKIF__BACKEND__COMMON_H__
++#define __BLKIF__BACKEND__COMMON_H__
 +
-+	/* reserve kernel */
-+	reserve_bootmem_generic(__pa_symbol(&_text),
-+				__pa_symbol(&_end) - __pa_symbol(&_text));
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/blkdev.h>
++#include <linux/vmalloc.h>
++#include <linux/wait.h>
++#include <asm/io.h>
++#include <asm/setup.h>
++#include <asm/pgalloc.h>
++#include <xen/evtchn.h>
++#include <asm/hypervisor.h>
++#include <xen/blkif.h>
++#include <xen/gnttab.h>
++#include <xen/driver_util.h>
++#include <xen/xenbus.h>
 +
-+	/*
-+	 * reserve physical page 0 - it's a special BIOS page on many boxes,
-+	 * enabling clean reboots, SMP operation, laptop functions.
-+	 */
-+	reserve_bootmem_generic(0, PAGE_SIZE);
++#define DPRINTK(_f, _a...)			\
++	pr_debug("(file=%s, line=%d) " _f,	\
++		 __FILE__ , __LINE__ , ## _a )
 +
-+	/* reserve ebda region */
-+	if (ebda_addr)
-+		reserve_bootmem_generic(ebda_addr, ebda_size);
++struct vbd {
++	blkif_vdev_t   handle;      /* what the domain refers to this vbd as */
++	unsigned char  readonly;    /* Non-zero -> read-only */
++	unsigned char  type;        /* VDISK_xxx */
++	u32            pdevice;     /* phys device that this vbd maps to */
++	struct block_device *bdev;
++};
 +
-+#ifdef CONFIG_SMP
-+	/*
-+	 * But first pinch a few for the stack/trampoline stuff
-+	 * FIXME: Don't need the extra page at 4K, but need to fix
-+	 * trampoline before removing it. (see the GDT stuff)
-+	 */
-+	reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
++struct backend_info;
 +
-+	/* Reserve SMP trampoline */
-+	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
-+#endif
-+#endif
++typedef struct blkif_st {
++	/* Unique identifier for this interface. */
++	domid_t           domid;
++	unsigned int      handle;
++	/* Physical parameters of the comms window. */
++	unsigned int      irq;
++	/* Comms information. */
++	enum blkif_protocol blk_protocol;
++	blkif_back_rings_t blk_rings;
++	struct vm_struct *blk_ring_area;
++	/* The VBD attached to this interface. */
++	struct vbd        vbd;
++	/* Back pointer to the backend_info. */
++	struct backend_info *be;
++	/* Private fields. */
++	spinlock_t       blk_ring_lock;
++	atomic_t         refcnt;
 +
-+#ifdef CONFIG_ACPI_SLEEP
-+       /*
-+        * Reserve low memory region for sleep support.
-+        */
-+       acpi_reserve_bootmem();
-+#endif
-+#ifdef CONFIG_XEN
-+#ifdef CONFIG_BLK_DEV_INITRD
-+	if (xen_start_info->mod_start) {
-+		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
-+			/*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
-+			initrd_start = INITRD_START + PAGE_OFFSET;
-+			initrd_end = initrd_start+INITRD_SIZE;
-+			initrd_below_start_ok = 1;
-+		} else {
-+			printk(KERN_ERR "initrd extends beyond end of memory "
-+				"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
-+				(unsigned long)(INITRD_START + INITRD_SIZE),
-+				(unsigned long)(end_pfn << PAGE_SHIFT));
-+			initrd_start = 0;
-+		}
-+	}
-+#endif
-+#else	/* CONFIG_XEN */
-+#ifdef CONFIG_BLK_DEV_INITRD
-+	if (LOADER_TYPE && INITRD_START) {
-+		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
-+			reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
-+			initrd_start =
-+				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
-+			initrd_end = initrd_start+INITRD_SIZE;
-+		}
-+		else {
-+			printk(KERN_ERR "initrd extends beyond end of memory "
-+			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
-+			    (unsigned long)(INITRD_START + INITRD_SIZE),
-+			    (unsigned long)(end_pfn << PAGE_SHIFT));
-+			initrd_start = 0;
-+		}
-+	}
-+#endif
-+#endif	/* !CONFIG_XEN */
-+#ifdef CONFIG_KEXEC
-+#ifdef CONFIG_XEN
-+	xen_machine_kexec_setup_resources();
-+#else
-+	if (crashk_res.start != crashk_res.end) {
-+		reserve_bootmem_generic(crashk_res.start,
-+			crashk_res.end - crashk_res.start + 1);
-+	}
-+#endif
-+#endif
++	wait_queue_head_t   wq;
++	struct task_struct  *xenblkd;
++	unsigned int        waiting_reqs;
++	request_queue_t     *plug;
 +
-+	paging_init();
-+#ifdef CONFIG_X86_LOCAL_APIC
-+	/*
-+	 * Find and reserve possible boot-time SMP configuration:
-+	 */
-+	find_smp_config();
-+#endif
-+#ifdef CONFIG_XEN
-+	{
-+		int i, j, k, fpp;
-+		unsigned long p2m_pages;
++	/* statistics */
++	unsigned long       st_print;
++	int                 st_rd_req;
++	int                 st_wr_req;
++	int                 st_oo_req;
++	int                 st_br_req;
++	int                 st_rd_sect;
++	int                 st_wr_sect;
 +
-+		p2m_pages = end_pfn;
-+		if (xen_start_info->nr_pages > end_pfn) {
-+			/*
-+			 * the end_pfn was shrunk (probably by mem= or highmem=
-+			 * kernel parameter); shrink reservation with the HV
-+			 */
-+			struct xen_memory_reservation reservation = {
-+				.address_bits = 0,
-+				.extent_order = 0,
-+				.domid = DOMID_SELF
-+			};
-+			unsigned int difference;
-+			int ret;
-+			
-+			difference = xen_start_info->nr_pages - end_pfn;
-+			
-+			set_xen_guest_handle(reservation.extent_start,
-+					     ((unsigned long *)xen_start_info->mfn_list) + end_pfn);
-+			reservation.nr_extents = difference;
-+			ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-+						   &reservation);
-+			BUG_ON (ret != difference);
-+		}
-+		else if (end_pfn > xen_start_info->nr_pages)
-+			p2m_pages = xen_start_info->nr_pages;
++	wait_queue_head_t waiting_to_free;
 +
-+		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+			/* Make sure we have a large enough P->M table. */
-+			phys_to_machine_mapping = alloc_bootmem_pages(
-+				end_pfn * sizeof(unsigned long));
-+			memset(phys_to_machine_mapping, ~0,
-+			       end_pfn * sizeof(unsigned long));
-+			memcpy(phys_to_machine_mapping,
-+			       (unsigned long *)xen_start_info->mfn_list,
-+			       p2m_pages * sizeof(unsigned long));
-+			free_bootmem(
-+				__pa(xen_start_info->mfn_list),
-+				PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
-+						sizeof(unsigned long))));
++	grant_handle_t shmem_handle;
++	grant_ref_t    shmem_ref;
++} blkif_t;
 +
-+			/*
-+			 * Initialise the list of the frames that specify the
-+			 * list of frames that make up the p2m table. Used by
-+                         * save/restore.
-+			 */
-+			pfn_to_mfn_frame_list_list = alloc_bootmem_pages(PAGE_SIZE);
++blkif_t *blkif_alloc(domid_t domid);
++void blkif_disconnect(blkif_t *blkif);
++void blkif_free(blkif_t *blkif);
++int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn);
 +
-+			fpp = PAGE_SIZE/sizeof(unsigned long);
-+			for (i=0, j=0, k=-1; i< end_pfn; i+=fpp, j++) {
-+				if ((j % fpp) == 0) {
-+					k++;
-+					BUG_ON(k>=fpp);
-+					pfn_to_mfn_frame_list[k] =
-+						alloc_bootmem_pages(PAGE_SIZE);
-+					pfn_to_mfn_frame_list_list[k] =
-+						virt_to_mfn(pfn_to_mfn_frame_list[k]);
-+					j=0;
-+				}
-+				pfn_to_mfn_frame_list[k][j] =
-+					virt_to_mfn(&phys_to_machine_mapping[i]);
-+			}
-+			HYPERVISOR_shared_info->arch.max_pfn = end_pfn;
-+			HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
-+				virt_to_mfn(pfn_to_mfn_frame_list_list);
-+		}
++#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
++#define blkif_put(_b)					\
++	do {						\
++		if (atomic_dec_and_test(&(_b)->refcnt))	\
++			wake_up(&(_b)->waiting_to_free);\
++	} while (0)
 +
-+	}
++/* Create a vbd. */
++int vbd_create(blkif_t *blkif, blkif_vdev_t vdevice, unsigned major,
++	       unsigned minor, int readonly, int cdrom);
++void vbd_free(struct vbd *vbd);
 +
-+	if (!is_initial_xendomain()) {
-+		acpi_disabled = 1;
-+#ifdef  CONFIG_ACPI
-+		acpi_ht = 0;
-+#endif
-+	}
-+#endif
++unsigned long long vbd_size(struct vbd *vbd);
++unsigned int vbd_info(struct vbd *vbd);
++unsigned long vbd_secsize(struct vbd *vbd);
 +
-+#ifndef CONFIG_XEN
-+	check_ioapic();
-+#endif
++struct phys_req {
++	unsigned short       dev;
++	unsigned short       nr_sects;
++	struct block_device *bdev;
++	blkif_sector_t       sector_number;
++};
 +
-+	zap_low_mappings(0);
++int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation);
 +
-+	/*
-+	 * set this early, so we dont allocate cpu0
-+	 * if MADT list doesnt list BSP first
-+	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
-+	 */
-+	cpu_set(0, cpu_present_map);
-+#ifdef CONFIG_ACPI
-+	/*
-+	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
-+	 * Call this early for SRAT node setup.
-+	 */
-+	acpi_boot_table_init();
++void blkif_interface_init(void);
 +
-+	/*
-+	 * Read APIC and some other early information from ACPI tables.
-+	 */
-+	acpi_boot_init();
-+#endif
++void blkif_xenbus_init(void);
 +
-+	init_cpu_to_node();
++irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++int blkif_schedule(void *arg);
 +
-+#ifdef CONFIG_X86_LOCAL_APIC
-+	/*
-+	 * get boot-time SMP configuration:
-+	 */
-+	if (smp_found_config)
-+		get_smp_config();
-+#ifndef CONFIG_XEN
-+	init_apic_mappings();
-+#endif
-+#endif
-+#if defined(CONFIG_XEN) && defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
-+	prefill_possible_map();
-+#endif
++int blkback_barrier(struct xenbus_transaction xbt,
++		    struct backend_info *be, int state);
 +
-+	/*
-+	 * Request address space for all standard RAM and ROM resources
-+	 * and also for regions reported as reserved by the e820.
-+	 */
-+	probe_roms();
-+#ifdef CONFIG_XEN
-+	if (is_initial_xendomain()) {
-+		struct xen_memory_map memmap;
++#endif /* __BLKIF__BACKEND__COMMON_H__ */
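The blkif_get()/blkif_put() pair declared above implements a wake-on-last-put reference count: each in-flight request pins the interface, and the final put wakes whoever is sleeping on waiting_to_free so teardown can proceed. A sketch of the intended usage (illustrative only; the real request path lives in blkback.c):

	static void start_request(blkif_t *blkif)
	{
		blkif_get(blkif);	/* one reference per in-flight request */
		/* ... map grants and submit the bio ... */
	}

	static void end_request(blkif_t *blkif)
	{
		/* ... write the ring response ... */
		blkif_put(blkif);	/* the last put wakes waiting_to_free */
	}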
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/blkback/interface.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/blkback/interface.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,181 @@
++/******************************************************************************
++ * arch/xen/drivers/blkif/backend/interface.c
++ * 
++ * Block-device interface management.
++ * 
++ * Copyright (c) 2004, Keir Fraser
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
 +
-+		memmap.nr_entries = E820MAX;
-+		set_xen_guest_handle(memmap.buffer, machine_e820.map);
++#include "common.h"
++#include <xen/evtchn.h>
++#include <linux/kthread.h>
 +
-+		if (HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
-+			BUG();
-+		machine_e820.nr_map = memmap.nr_entries;
++static kmem_cache_t *blkif_cachep;
 +
-+		e820_reserve_resources(machine_e820.map, machine_e820.nr_map);
-+	}
-+#else
-+	e820_reserve_resources(e820.map, e820.nr_map);
-+#endif
++blkif_t *blkif_alloc(domid_t domid)
++{
++	blkif_t *blkif;
 +
-+	request_resource(&iomem_resource, &video_ram_resource);
++	blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
++	if (!blkif)
++		return ERR_PTR(-ENOMEM);
 +
-+	{
-+	unsigned i;
-+	/* request I/O space for devices used on all i[345]86 PCs */
-+	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
-+		request_resource(&ioport_resource, &standard_io_resources[i]);
-+	}
++	memset(blkif, 0, sizeof(*blkif));
++	blkif->domid = domid;
++	spin_lock_init(&blkif->blk_ring_lock);
++	atomic_set(&blkif->refcnt, 1);
++	init_waitqueue_head(&blkif->wq);
++	blkif->st_print = jiffies;
++	init_waitqueue_head(&blkif->waiting_to_free);
 +
-+#ifdef CONFIG_XEN
-+	if (is_initial_xendomain())
-+		e820_setup_gap(machine_e820.map, machine_e820.nr_map);
-+#else
-+	e820_setup_gap(e820.map, e820.nr_map);
-+#endif
++	return blkif;
++}
 +
-+#ifdef CONFIG_XEN
-+	{
-+		struct physdev_set_iopl set_iopl;
++static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
++{
++	struct gnttab_map_grant_ref op;
 +
-+		set_iopl.iopl = 1;
-+		HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
++	gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
++			  GNTMAP_host_map, shared_page, blkif->domid);
 +
-+		if (is_initial_xendomain()) {
-+#ifdef CONFIG_VT
-+#if defined(CONFIG_VGA_CONSOLE)
-+			conswitchp = &vga_con;
-+#elif defined(CONFIG_DUMMY_CONSOLE)
-+			conswitchp = &dummy_con;
-+#endif
-+#endif
-+		} else {
-+#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
-+			conswitchp = &dummy_con;
-+#endif
-+                }
++	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++		BUG();
++
++	if (op.status) {
++		DPRINTK(" Grant table operation failure !\n");
++		return op.status;
 +	}
-+	xencons_early_setup();
-+#else	/* CONFIG_XEN */
 +
-+#ifdef CONFIG_VT
-+#if defined(CONFIG_VGA_CONSOLE)
-+	conswitchp = &vga_con;
-+#elif defined(CONFIG_DUMMY_CONSOLE)
-+	conswitchp = &dummy_con;
-+#endif
-+#endif
++	blkif->shmem_ref = shared_page;
++	blkif->shmem_handle = op.handle;
 +
-+#endif /* !CONFIG_XEN */
++	return 0;
 +}
 +
-+#ifdef CONFIG_XEN
-+static int
-+xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
++static void unmap_frontend_page(blkif_t *blkif)
 +{
-+	HYPERVISOR_shutdown(SHUTDOWN_crash);
-+	/* we're never actually going to get here... */
-+	return NOTIFY_DONE;
-+}
-+#endif /* !CONFIG_XEN */
++	struct gnttab_unmap_grant_ref op;
 +
++	gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
++			    GNTMAP_host_map, blkif->shmem_handle);
 +
-+static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
++	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++		BUG();
++}
++
++int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
 +{
-+	unsigned int *v;
++	int err;
 +
-+	if (c->extended_cpuid_level < 0x80000004)
++	/* Already connected through? */
++	if (blkif->irq)
 +		return 0;
 +
-+	v = (unsigned int *) c->x86_model_id;
-+	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
-+	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
-+	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
-+	c->x86_model_id[48] = 0;
-+	return 1;
-+}
++	if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
++		return -ENOMEM;
 +
++	err = map_frontend_page(blkif, shared_page);
++	if (err) {
++		free_vm_area(blkif->blk_ring_area);
++		return err;
++	}
 +
-+static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
-+{
-+	unsigned int n, dummy, eax, ebx, ecx, edx;
++	switch (blkif->blk_protocol) {
++	case BLKIF_PROTOCOL_NATIVE:
++	{
++		blkif_sring_t *sring;
++		sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
++		BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
++		break;
++	}
++	case BLKIF_PROTOCOL_X86_32:
++	{
++		blkif_x86_32_sring_t *sring_x86_32;
++		sring_x86_32 = (blkif_x86_32_sring_t *)blkif->blk_ring_area->addr;
++		BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
++		break;
++	}
++	case BLKIF_PROTOCOL_X86_64:
++	{
++		blkif_x86_64_sring_t *sring_x86_64;
++		sring_x86_64 = (blkif_x86_64_sring_t *)blkif->blk_ring_area->addr;
++		BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
++		break;
++	}
++	default:
++		BUG();
++	}
 +
-+	n = c->extended_cpuid_level;
++	err = bind_interdomain_evtchn_to_irqhandler(
++		blkif->domid, evtchn, blkif_be_int, 0, "blkif-backend", blkif);
++	if (err < 0)
++	{
++		unmap_frontend_page(blkif);
++		free_vm_area(blkif->blk_ring_area);
++		blkif->blk_rings.common.sring = NULL;
++		return err;
++	}
++	blkif->irq = err;
 +
-+	if (n >= 0x80000005) {
-+		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
-+		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
-+			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-+		c->x86_cache_size=(ecx>>24)+(edx>>24);
-+		/* On K8 L1 TLB is inclusive, so don't count it */
-+		c->x86_tlbsize = 0;
++	return 0;
++}
++
++void blkif_disconnect(blkif_t *blkif)
++{
++	if (blkif->xenblkd) {
++		kthread_stop(blkif->xenblkd);
++		blkif->xenblkd = NULL;
 +	}
 +
-+	if (n >= 0x80000006) {
-+		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
-+		ecx = cpuid_ecx(0x80000006);
-+		c->x86_cache_size = ecx >> 16;
-+		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
++	atomic_dec(&blkif->refcnt);
++	wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
++	atomic_inc(&blkif->refcnt);
 +
-+		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
-+		c->x86_cache_size, ecx & 0xFF);
++	if (blkif->irq) {
++		unbind_from_irqhandler(blkif->irq, blkif);
++		blkif->irq = 0;
 +	}
 +
-+	if (n >= 0x80000007)
-+		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power); 
-+	if (n >= 0x80000008) {
-+		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy); 
-+		c->x86_virt_bits = (eax >> 8) & 0xff;
-+		c->x86_phys_bits = eax & 0xff;
++	if (blkif->blk_rings.common.sring) {
++		unmap_frontend_page(blkif);
++		free_vm_area(blkif->blk_ring_area);
++		blkif->blk_rings.common.sring = NULL;
 +	}
 +}
 +
-+#ifdef CONFIG_NUMA
-+static int nearby_node(int apicid)
++void blkif_free(blkif_t *blkif)
 +{
-+	int i;
-+	for (i = apicid - 1; i >= 0; i--) {
-+		int node = apicid_to_node[i];
-+		if (node != NUMA_NO_NODE && node_online(node))
-+			return node;
-+	}
-+	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
-+		int node = apicid_to_node[i];
-+		if (node != NUMA_NO_NODE && node_online(node))
-+			return node;
-+	}
-+	return first_node(node_online_map); /* Shouldn't happen */
++	if (!atomic_dec_and_test(&blkif->refcnt))
++		BUG();
++	kmem_cache_free(blkif_cachep, blkif);
 +}
-+#endif
 +
-+/*
-+ * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
-+ * Assumes number of cores is a power of two.
-+ */
-+static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
++void __init blkif_interface_init(void)
 +{
-+#ifdef CONFIG_SMP
-+	unsigned bits;
-+#ifdef CONFIG_NUMA
-+	int cpu = smp_processor_id();
-+	int node = 0;
-+	unsigned apicid = hard_smp_processor_id();
-+#endif
-+	unsigned ecx = cpuid_ecx(0x80000008);
-+
-+	c->x86_max_cores = (ecx & 0xff) + 1;
++	blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t), 
++					 0, 0, NULL, NULL);
++}
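blkif_map() above wires the granted page into one of three ring layouts; once BACK_RING_INIT() has run, the backend drains requests with the standard Xen ring macros. A simplified sketch for the native protocol only, with overflow checks and dispatch omitted (assumes the usual io/ring.h macros):

	static void drain_ring(blkif_t *blkif)
	{
		blkif_back_ring_t *ring = &blkif->blk_rings.native;
		RING_IDX rc = ring->req_cons;
		RING_IDX rp = ring->sring->req_prod;

		rmb();		/* read req_prod before the requests it covers */

		while (rc != rp) {
			blkif_request_t req = *RING_GET_REQUEST(ring, rc);
			ring->req_cons = ++rc;
			/* ... validate and dispatch req ... */
		}
	}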
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/blkback/vbd.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/blkback/vbd.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,118 @@
++/******************************************************************************
++ * blkback/vbd.c
++ * 
++ * Routines for managing virtual block devices (VBDs).
++ * 
++ * Copyright (c) 2003-2005, Keir Fraser & Steve Hand
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
 +
-+	/* CPU telling us the core id bits shift? */
-+	bits = (ecx >> 12) & 0xF;
++#include "common.h"
 +
-+	/* Otherwise recompute */
-+	if (bits == 0) {
-+		while ((1 << bits) < c->x86_max_cores)
-+			bits++;
-+	}
++#define vbd_sz(_v)   ((_v)->bdev->bd_part ?				\
++	(_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity)
 +
-+	/* Low order bits define the core id (index of core in socket) */
-+	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
-+	/* Convert the APIC ID into the socket ID */
-+	c->phys_proc_id = phys_pkg_id(bits);
++unsigned long long vbd_size(struct vbd *vbd)
++{
++	return vbd_sz(vbd);
++}
 +
-+#ifdef CONFIG_NUMA
-+  	node = c->phys_proc_id;
-+ 	if (apicid_to_node[apicid] != NUMA_NO_NODE)
-+ 		node = apicid_to_node[apicid];
-+ 	if (!node_online(node)) {
-+ 		/* Two possibilities here:
-+ 		   - The CPU is missing memory and no node was created.
-+ 		   In that case try picking one from a nearby CPU
-+ 		   - The APIC IDs differ from the HyperTransport node IDs
-+ 		   which the K8 northbridge parsing fills in.
-+ 		   Assume they are all increased by a constant offset,
-+ 		   but in the same order as the HT nodeids.
-+ 		   If that doesn't result in a usable node fall back to the
-+ 		   path for the previous case.  */
-+ 		int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
-+ 		if (ht_nodeid >= 0 &&
-+ 		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
-+ 			node = apicid_to_node[ht_nodeid];
-+ 		/* Pick a nearby node */
-+ 		if (!node_online(node))
-+ 			node = nearby_node(apicid);
-+ 	}
-+	numa_set_node(cpu, node);
++unsigned int vbd_info(struct vbd *vbd)
++{
++	return vbd->type | (vbd->readonly?VDISK_READONLY:0);
++}
 +
-+	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
-+#endif
-+#endif
++unsigned long vbd_secsize(struct vbd *vbd)
++{
++	return bdev_hardsect_size(vbd->bdev);
 +}
 +
-+static void __init init_amd(struct cpuinfo_x86 *c)
++int vbd_create(blkif_t *blkif, blkif_vdev_t handle, unsigned major,
++	       unsigned minor, int readonly, int cdrom)
 +{
-+	unsigned level;
++	struct vbd *vbd;
++	struct block_device *bdev;
 +
-+#ifdef CONFIG_SMP
-+	unsigned long value;
++	vbd = &blkif->vbd;
++	vbd->handle   = handle; 
++	vbd->readonly = readonly;
++	vbd->type     = 0;
 +
-+	/*
-+	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
-+	 * bit 6 of msr C001_0015
-+ 	 *
-+	 * Errata 63 for SH-B3 steppings
-+	 * Errata 122 for all steppings (F+ have it disabled by default)
-+	 */
-+	if (c->x86 == 15) {
-+		rdmsrl(MSR_K8_HWCR, value);
-+		value |= 1 << 6;
-+		wrmsrl(MSR_K8_HWCR, value);
-+	}
-+#endif
++	vbd->pdevice  = MKDEV(major, minor);
 +
-+	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-+	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
-+	clear_bit(0*32+31, &c->x86_capability);
-+	
-+	/* On C+ stepping K8 rep microcode works well for copy/memset */
-+	level = cpuid_eax(1);
-+	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
-+		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
++	bdev = open_by_devnum(vbd->pdevice,
++			      vbd->readonly ? FMODE_READ : FMODE_WRITE);
 +
-+	/* Enable workaround for FXSAVE leak */
-+	if (c->x86 >= 6)
-+		set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);
++	if (IS_ERR(bdev)) {
++		DPRINTK("vbd_creat: device %08x could not be opened.\n",
++			vbd->pdevice);
++		return -ENOENT;
++	}
 +
-+	level = get_model_name(c);
-+	if (!level) {
-+		switch (c->x86) { 
-+		case 15:
-+			/* Should distinguish Models here, but this is only
-+			   a fallback anyways. */
-+			strcpy(c->x86_model_id, "Hammer");
-+			break; 
-+		} 
-+	} 
-+	display_cacheinfo(c);
++	vbd->bdev = bdev;
 +
-+	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
-+	if (c->x86_power & (1<<8))
-+		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
++	if (vbd->bdev->bd_disk == NULL) {
++		DPRINTK("vbd_creat: device %08x doesn't exist.\n",
++			vbd->pdevice);
++		vbd_free(vbd);
++		return -ENOENT;
++	}
 +
-+	/* Multi core CPU? */
-+	if (c->extended_cpuid_level >= 0x80000008)
-+		amd_detect_cmp(c);
++	if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
++		vbd->type |= VDISK_CDROM;
++	if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
++		vbd->type |= VDISK_REMOVABLE;
 +
-+	/* Fix cpuid4 emulation for more */
-+	num_cache_leaves = 3;
++	DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
++		handle, blkif->domid);
++	return 0;
 +}
 +
-+static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
++void vbd_free(struct vbd *vbd)
 +{
-+#ifdef CONFIG_SMP
-+	u32 	eax, ebx, ecx, edx;
-+	int 	index_msb, core_bits;
++	if (vbd->bdev)
++		blkdev_put(vbd->bdev);
++	vbd->bdev = NULL;
++}
 +
-+	cpuid(1, &eax, &ebx, &ecx, &edx);
++int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation)
++{
++	struct vbd *vbd = &blkif->vbd;
++	int rc = -EACCES;
 +
++	if ((operation != READ) && vbd->readonly)
++		goto out;
 +
-+	if (!cpu_has(c, X86_FEATURE_HT))
-+		return;
-+ 	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
++	if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
 +		goto out;
 +
-+	smp_num_siblings = (ebx & 0xff0000) >> 16;
++	req->dev  = vbd->pdevice;
++	req->bdev = vbd->bdev;
++	rc = 0;
 +
-+	if (smp_num_siblings == 1) {
-+		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
-+	} else if (smp_num_siblings > 1 ) {
++ out:
++	return rc;
++}
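vbd_translate() above is the single gate for access control and bounds checking: it rejects writes to a read-only VBD and any request running past the end of the device (both as -EACCES), and on success fills in the physical dev/bdev. A hypothetical caller, sketched for illustration:

	static int check_request(blkif_t *blkif, blkif_sector_t sector,
				 unsigned short nsects, int operation)
	{
		struct phys_req preq = {
			.sector_number = sector,
			.nr_sects      = nsects,
		};
		int err;

		err = vbd_translate(&preq, blkif, operation);
		if (err)
			return err;	/* -EACCES: read-only or out of range */

		/* preq.bdev now names the backing block device. */
		return 0;
	}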
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/blkback/xenbus.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/blkback/xenbus.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,541 @@
++/*  Xenbus code for blkif backend
++    Copyright (C) 2005 Rusty Russell <rusty at rustcorp.com.au>
++    Copyright (C) 2005 XenSource Ltd
 +
-+		if (smp_num_siblings > NR_CPUS) {
-+			printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
-+			smp_num_siblings = 1;
-+			return;
-+		}
++    This program is free software; you can redistribute it and/or modify
++    it under the terms of the GNU General Public License as published by
++    the Free Software Foundation; either version 2 of the License, or
++    (at your option) any later version.
 +
-+		index_msb = get_count_order(smp_num_siblings);
-+		c->phys_proc_id = phys_pkg_id(index_msb);
++    This program is distributed in the hope that it will be useful,
++    but WITHOUT ANY WARRANTY; without even the implied warranty of
++    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++    GNU General Public License for more details.
 +
-+		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
++    You should have received a copy of the GNU General Public License
++    along with this program; if not, write to the Free Software
++    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++*/
 +
-+		index_msb = get_count_order(smp_num_siblings) ;
++#include <stdarg.h>
++#include <linux/module.h>
++#include <linux/kthread.h>
++#include "common.h"
 +
-+		core_bits = get_count_order(c->x86_max_cores);
++#undef DPRINTK
++#define DPRINTK(fmt, args...)				\
++	pr_debug("blkback/xenbus (%s:%d) " fmt ".\n",	\
++		 __FUNCTION__, __LINE__, ##args)
 +
-+		c->cpu_core_id = phys_pkg_id(index_msb) &
-+					       ((1 << core_bits) - 1);
-+	}
-+out:
-+	if ((c->x86_max_cores * smp_num_siblings) > 1) {
-+		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
-+		printk(KERN_INFO  "CPU: Processor Core ID: %d\n", c->cpu_core_id);
-+	}
++struct backend_info
++{
++	struct xenbus_device *dev;
++	blkif_t *blkif;
++	struct xenbus_watch backend_watch;
++	unsigned major;
++	unsigned minor;
++	char *mode;
++};
 +
-+#endif
-+}
++static void connect(struct backend_info *);
++static int connect_ring(struct backend_info *);
++static void backend_changed(struct xenbus_watch *, const char **,
++			    unsigned int);
 +
-+/*
-+ * find out the number of processor cores on the die
-+ */
-+static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
++static int blkback_name(blkif_t *blkif, char *buf)
 +{
-+	unsigned int eax, t;
++	char *devpath, *devname;
++	struct xenbus_device *dev = blkif->be->dev;
 +
-+	if (c->cpuid_level < 4)
-+		return 1;
++	devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
++	if (IS_ERR(devpath)) 
++		return PTR_ERR(devpath);
++	
++	if ((devname = strstr(devpath, "/dev/")) != NULL)
++		devname += strlen("/dev/");
++	else
++		devname  = devpath;
 +
-+	cpuid_count(4, 0, &eax, &t, &t, &t);
++	snprintf(buf, TASK_COMM_LEN, "blkback.%d.%s", blkif->domid, devname);
++	kfree(devpath);
++	
++	return 0;
++}
 +
-+	if (eax & 0x1f)
-+		return ((eax >> 26) + 1);
-+	else
-+		return 1;
++static void update_blkif_status(blkif_t *blkif)
++{ 
++	int err;
++	char name[TASK_COMM_LEN];
++
++	/* Not ready to connect? */
++	if (!blkif->irq || !blkif->vbd.bdev)
++		return;
++
++	/* Already connected? */
++	if (blkif->be->dev->state == XenbusStateConnected)
++		return;
++
++	/* Attempt to connect: exit if we fail to. */
++	connect(blkif->be);
++	if (blkif->be->dev->state != XenbusStateConnected)
++		return;
++
++	err = blkback_name(blkif, name);
++	if (err) {
++		xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
++		return;
++	}
++
++	blkif->xenblkd = kthread_run(blkif_schedule, blkif, name);
++	if (IS_ERR(blkif->xenblkd)) {
++		err = PTR_ERR(blkif->xenblkd);
++		blkif->xenblkd = NULL;
++		xenbus_dev_error(blkif->be->dev, err, "start xenblkd");
++	}
 +}
 +
-+static void srat_detect_node(void)
-+{
-+#ifdef CONFIG_NUMA
-+	unsigned node;
-+	int cpu = smp_processor_id();
-+	int apicid = hard_smp_processor_id();
 +
-+	/* Don't do the funky fallback heuristics the AMD version employs
-+	   for now. */
-+	node = apicid_to_node[apicid];
-+	if (node == NUMA_NO_NODE)
-+		node = first_node(node_online_map);
-+	numa_set_node(cpu, node);
++/****************************************************************
++ *  sysfs interface for VBD I/O requests
++ */
++
++#define VBD_SHOW(name, format, args...)					\
++	static ssize_t show_##name(struct device *_dev,			\
++				   struct device_attribute *attr,	\
++				   char *buf)				\
++	{								\
++		struct xenbus_device *dev = to_xenbus_device(_dev);	\
++		struct backend_info *be = dev->dev.driver_data;		\
++									\
++		return sprintf(buf, format, ##args);			\
++	}								\
++	static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
++
++VBD_SHOW(oo_req,  "%d\n", be->blkif->st_oo_req);
++VBD_SHOW(rd_req,  "%d\n", be->blkif->st_rd_req);
++VBD_SHOW(wr_req,  "%d\n", be->blkif->st_wr_req);
++VBD_SHOW(br_req,  "%d\n", be->blkif->st_br_req);
++VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
++VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);
++
++static struct attribute *vbdstat_attrs[] = {
++	&dev_attr_oo_req.attr,
++	&dev_attr_rd_req.attr,
++	&dev_attr_wr_req.attr,
++	&dev_attr_br_req.attr,
++	&dev_attr_rd_sect.attr,
++	&dev_attr_wr_sect.attr,
++	NULL
++};
++
++static struct attribute_group vbdstat_group = {
++	.name = "statistics",
++	.attrs = vbdstat_attrs,
++};
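VBD_SHOW() above stamps out one show routine plus a read-only DEVICE_ATTR per exported value; expanded by hand for oo_req it is roughly:

	static ssize_t show_oo_req(struct device *_dev,
				   struct device_attribute *attr, char *buf)
	{
		struct xenbus_device *dev = to_xenbus_device(_dev);
		struct backend_info *be = dev->dev.driver_data;

		return sprintf(buf, "%d\n", be->blkif->st_oo_req);
	}
	static DEVICE_ATTR(oo_req, S_IRUGO, show_oo_req, NULL);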
 +
-+	if (acpi_numa > 0)
-+		printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
-+#endif
-+}
++VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
++VBD_SHOW(mode, "%s\n", be->mode);
 +
-+static void __cpuinit init_intel(struct cpuinfo_x86 *c)
++int xenvbd_sysfs_addif(struct xenbus_device *dev)
 +{
-+	/* Cache sizes */
-+	unsigned n;
++	int error;
++	
++	error = device_create_file(&dev->dev, &dev_attr_physical_device);
++	if (error)
++		goto fail1;
 +
-+	init_intel_cacheinfo(c);
-+	if (c->cpuid_level > 9 ) {
-+		unsigned eax = cpuid_eax(10);
-+		/* Check for version and the number of counters */
-+		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
-+			set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
-+	}
++	error = device_create_file(&dev->dev, &dev_attr_mode);
++	if (error)
++		goto fail2;
 +
-+	n = c->extended_cpuid_level;
-+	if (n >= 0x80000008) {
-+		unsigned eax = cpuid_eax(0x80000008);
-+		c->x86_virt_bits = (eax >> 8) & 0xff;
-+		c->x86_phys_bits = eax & 0xff;
-+		/* CPUID workaround for Intel 0F34 CPU */
-+		if (c->x86_vendor == X86_VENDOR_INTEL &&
-+		    c->x86 == 0xF && c->x86_model == 0x3 &&
-+		    c->x86_mask == 0x4)
-+			c->x86_phys_bits = 36;
-+	}
++	error = sysfs_create_group(&dev->dev.kobj, &vbdstat_group);
++	if (error)
++		goto fail3;
 +
-+	if (c->x86 == 15)
-+		c->x86_cache_alignment = c->x86_clflush_size * 2;
-+	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
-+	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
-+		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
-+	set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
-+ 	c->x86_max_cores = intel_num_cpu_cores(c);
++	return 0;
 +
-+	srat_detect_node();
++fail3:	sysfs_remove_group(&dev->dev.kobj, &vbdstat_group);
++fail2:	device_remove_file(&dev->dev, &dev_attr_mode);
++fail1:	device_remove_file(&dev->dev, &dev_attr_physical_device);
++	return error;
 +}
 +
-+static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
++void xenvbd_sysfs_delif(struct xenbus_device *dev)
 +{
-+	char *v = c->x86_vendor_id;
-+
-+	if (!strcmp(v, "AuthenticAMD"))
-+		c->x86_vendor = X86_VENDOR_AMD;
-+	else if (!strcmp(v, "GenuineIntel"))
-+		c->x86_vendor = X86_VENDOR_INTEL;
-+	else
-+		c->x86_vendor = X86_VENDOR_UNKNOWN;
++	sysfs_remove_group(&dev->dev.kobj, &vbdstat_group);
++	device_remove_file(&dev->dev, &dev_attr_mode);
++	device_remove_file(&dev->dev, &dev_attr_physical_device);
 +}
 +
-+struct cpu_model_info {
-+	int vendor;
-+	int family;
-+	char *model_names[16];
-+};
-+
-+/* Do some early cpuid on the boot CPU to get some parameter that are
-+   needed before check_bugs. Everything advanced is in identify_cpu
-+   below. */
-+void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
++static int blkback_remove(struct xenbus_device *dev)
 +{
-+	u32 tfms;
++	struct backend_info *be = dev->dev.driver_data;
 +
-+	c->loops_per_jiffy = loops_per_jiffy;
-+	c->x86_cache_size = -1;
-+	c->x86_vendor = X86_VENDOR_UNKNOWN;
-+	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
-+	c->x86_vendor_id[0] = '\0'; /* Unset */
-+	c->x86_model_id[0] = '\0';  /* Unset */
-+	c->x86_clflush_size = 64;
-+	c->x86_cache_alignment = c->x86_clflush_size;
-+	c->x86_max_cores = 1;
-+	c->extended_cpuid_level = 0;
-+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
++	DPRINTK("");
 +
-+	/* Get vendor name */
-+	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
-+	      (unsigned int *)&c->x86_vendor_id[0],
-+	      (unsigned int *)&c->x86_vendor_id[8],
-+	      (unsigned int *)&c->x86_vendor_id[4]);
-+		
-+	get_cpu_vendor(c);
++	if (be->major || be->minor)
++		xenvbd_sysfs_delif(dev);
 +
-+	/* Initialize the standard set of capabilities */
-+	/* Note that the vendor-specific code below might override */
++	if (be->backend_watch.node) {
++		unregister_xenbus_watch(&be->backend_watch);
++		kfree(be->backend_watch.node);
++		be->backend_watch.node = NULL;
++	}
 +
-+	/* Intel-defined flags: level 0x00000001 */
-+	if (c->cpuid_level >= 0x00000001) {
-+		__u32 misc;
-+		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
-+		      &c->x86_capability[0]);
-+		c->x86 = (tfms >> 8) & 0xf;
-+		c->x86_model = (tfms >> 4) & 0xf;
-+		c->x86_mask = tfms & 0xf;
-+		if (c->x86 == 0xf)
-+			c->x86 += (tfms >> 20) & 0xff;
-+		if (c->x86 >= 0x6)
-+			c->x86_model += ((tfms >> 16) & 0xF) << 4;
-+		if (c->x86_capability[0] & (1<<19)) 
-+			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
-+	} else {
-+		/* Have CPUID level 0 only - unheard of */
-+		c->x86 = 4;
++	if (be->blkif) {
++		blkif_disconnect(be->blkif);
++		vbd_free(&be->blkif->vbd);
++		blkif_free(be->blkif);
++		be->blkif = NULL;
 +	}
 +
-+#ifdef CONFIG_SMP
-+	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
-+#endif
++	kfree(be);
++	dev->dev.driver_data = NULL;
++	return 0;
 +}
 +
-+/*
-+ * This does the hard work of actually picking apart the CPU stuff...
-+ */
-+void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
++int blkback_barrier(struct xenbus_transaction xbt,
++		    struct backend_info *be, int state)
 +{
-+	int i;
-+	u32 xlvl;
++	struct xenbus_device *dev = be->dev;
++	int err;
 +
-+	early_identify_cpu(c);
++	err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
++			    "%d", state);
++	if (err)
++		xenbus_dev_fatal(dev, err, "writing feature-barrier");
 +
-+	/* AMD-defined flags: level 0x80000001 */
-+	xlvl = cpuid_eax(0x80000000);
-+	c->extended_cpuid_level = xlvl;
-+	if ((xlvl & 0xffff0000) == 0x80000000) {
-+		if (xlvl >= 0x80000001) {
-+			c->x86_capability[1] = cpuid_edx(0x80000001);
-+			c->x86_capability[6] = cpuid_ecx(0x80000001);
-+		}
-+		if (xlvl >= 0x80000004)
-+			get_model_name(c); /* Default name */
-+	}
++	return err;
++}
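blkback_barrier() above advertises barrier support as a single xenstore key written inside the caller's transaction. The frontend's half of the negotiation is the matching read; a sketch of that side (the frontend code is not part of this patch):

	static int frontend_sees_barrier(struct xenbus_device *dev)
	{
		int feature_barrier = 0;

		/* A missing key simply means an older backend
		 * without barrier support. */
		xenbus_scanf(XBT_NIL, dev->otherend, "feature-barrier",
			     "%d", &feature_barrier);
		return feature_barrier;
	}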
 +
-+	/* Transmeta-defined flags: level 0x80860001 */
-+	xlvl = cpuid_eax(0x80860000);
-+	if ((xlvl & 0xffff0000) == 0x80860000) {
-+		/* Don't set x86_cpuid_level here for now to not confuse. */
-+		if (xlvl >= 0x80860001)
-+			c->x86_capability[2] = cpuid_edx(0x80860001);
++/**
++ * Entry point to this code when a new device is created.  Allocate the basic
++ * structures, and watch the store waiting for the hotplug scripts to tell us
++ * the device's physical major and minor numbers.  Switch to InitWait.
++ */
++static int blkback_probe(struct xenbus_device *dev,
++			 const struct xenbus_device_id *id)
++{
++	int err;
++	struct backend_info *be = kzalloc(sizeof(struct backend_info),
++					  GFP_KERNEL);
++	if (!be) {
++		xenbus_dev_fatal(dev, -ENOMEM,
++				 "allocating backend structure");
++		return -ENOMEM;
 +	}
++	be->dev = dev;
++	dev->dev.driver_data = be;
 +
-+	c->apicid = phys_pkg_id(0);
-+
-+	/*
-+	 * Vendor-specific initialization.  In this section we
-+	 * canonicalize the feature flags, meaning if there are
-+	 * features a certain CPU supports which CPUID doesn't
-+	 * tell us, CPUID claiming incorrect flags, or other bugs,
-+	 * we handle them here.
-+	 *
-+	 * At the end of this section, c->x86_capability better
-+	 * indicate the features this CPU genuinely supports!
-+	 */
-+	switch (c->x86_vendor) {
-+	case X86_VENDOR_AMD:
-+		init_amd(c);
-+		break;
-+
-+	case X86_VENDOR_INTEL:
-+		init_intel(c);
-+		break;
-+
-+	case X86_VENDOR_UNKNOWN:
-+	default:
-+		display_cacheinfo(c);
-+		break;
++	be->blkif = blkif_alloc(dev->otherend_id);
++	if (IS_ERR(be->blkif)) {
++		err = PTR_ERR(be->blkif);
++		be->blkif = NULL;
++		xenbus_dev_fatal(dev, err, "creating block interface");
++		goto fail;
 +	}
 +
-+	select_idle_routine(c);
-+	detect_ht(c); 
++	/* setup back pointer */
++	be->blkif->be = be;
 +
-+	/*
-+	 * On SMP, boot_cpu_data holds the common feature set between
-+	 * all CPUs; so make sure that we indicate which features are
-+	 * common between the CPUs.  The first time this routine gets
-+	 * executed, c == &boot_cpu_data.
-+	 */
-+	if (c != &boot_cpu_data) {
-+		/* AND the already accumulated flags with these */
-+		for (i = 0 ; i < NCAPINTS ; i++)
-+			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
-+	}
++	err = xenbus_watch_path2(dev, dev->nodename, "physical-device",
++				 &be->backend_watch, backend_changed);
++	if (err)
++		goto fail;
 +
-+#ifdef CONFIG_X86_MCE
-+	mcheck_init(c);
-+#endif
-+	if (c == &boot_cpu_data)
-+		mtrr_bp_init();
-+	else
-+		mtrr_ap_init();
-+#ifdef CONFIG_NUMA
-+	numa_add_cpu(smp_processor_id());
-+#endif
-+}
-+ 
++	err = xenbus_switch_state(dev, XenbusStateInitWait);
++	if (err)
++		goto fail;
 +
-+void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
-+{
-+	if (c->x86_model_id[0])
-+		printk("%s", c->x86_model_id);
++	return 0;
 +
-+	if (c->x86_mask || c->cpuid_level >= 0) 
-+		printk(" stepping %02x\n", c->x86_mask);
-+	else
-+		printk("\n");
++fail:
++	DPRINTK("failed");
++	blkback_remove(dev);
++	return err;
 +}
 +
-+/*
-+ *	Get CPU information for use by the procfs.
-+ */
 +
-+static int show_cpuinfo(struct seq_file *m, void *v)
++/**
++ * Callback received when the hotplug scripts have placed the physical-device
++ * node.  Read it and the mode node, and create a vbd.  If the frontend is
++ * ready, connect.
++ */
++static void backend_changed(struct xenbus_watch *watch,
++			    const char **vec, unsigned int len)
 +{
-+	struct cpuinfo_x86 *c = v;
++	int err;
++	unsigned major;
++	unsigned minor;
++	struct backend_info *be
++		= container_of(watch, struct backend_info, backend_watch);
++	struct xenbus_device *dev = be->dev;
++	int cdrom = 0;
++	char *device_type;
 +
-+	/* 
-+	 * These flag bits must match the definitions in <asm/cpufeature.h>.
-+	 * NULL means this bit is undefined or reserved; either way it doesn't
-+	 * have meaning as far as Linux is concerned.  Note that it's important
-+	 * to realize there is a difference between this table and CPUID -- if
-+	 * applications want to get the raw CPUID data, they should access
-+	 * /dev/cpu/<cpu_nr>/cpuid instead.
-+	 */
-+	static char *x86_cap_flags[] = {
-+		/* Intel-defined */
-+	        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
-+	        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
-+	        "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
-+	        "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
++	DPRINTK("");
 +
-+		/* AMD-defined */
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
-+		NULL, "fxsr_opt", NULL, "rdtscp", NULL, "lm", "3dnowext", "3dnow",
++	err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
++			   &major, &minor);
++	if (XENBUS_EXIST_ERR(err)) {
++		/* Since this watch will fire once immediately after it is
++		   registered, we expect this.  Ignore it, and wait for the
++		   hotplug scripts. */
++		return;
++	}
++	if (err != 2) {
++		xenbus_dev_fatal(dev, err, "reading physical-device");
++		return;
++	}
 +
-+		/* Transmeta-defined */
-+		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++	if ((be->major || be->minor) &&
++	    ((be->major != major) || (be->minor != minor))) {
++		printk(KERN_WARNING
++		       "blkback: changing physical device (from %x:%x to "
++		       "%x:%x) not supported.\n", be->major, be->minor,
++		       major, minor);
++		return;
++	}
 +
-+		/* Other (Linux-defined) */
-+		"cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
-+		"constant_tsc", NULL, NULL,
-+		"up", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++	be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL);
++	if (IS_ERR(be->mode)) {
++		err = PTR_ERR(be->mode);
++		be->mode = NULL;
++		xenbus_dev_fatal(dev, err, "reading mode");
++		return;
++	}
 +
-+		/* Intel-defined (#2) */
-+		"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
-+		"tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++	device_type = xenbus_read(XBT_NIL, dev->otherend, "device-type", NULL);
++	if (!IS_ERR(device_type)) {
++		cdrom = strcmp(device_type, "cdrom") == 0;
++		kfree(device_type);
++	}
 +
-+		/* VIA/Cyrix/Centaur-defined */
-+		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++	if (be->major == 0 && be->minor == 0) {
++		/* Front end dir is a number, which is used as the handle. */
 +
-+		/* AMD-defined (#2) */
-+		"lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+	};
-+	static char *x86_power_flags[] = { 
-+		"ts",	/* temperature sensor */
-+		"fid",  /* frequency id control */
-+		"vid",  /* voltage id control */
-+		"ttp",  /* thermal trip */
-+		"tm",
-+		"stc",
-+		NULL,
-+		/* nothing */	/* constant_tsc - moved to flags */
-+	};
++		char *p = strrchr(dev->otherend, '/') + 1;
++		long handle = simple_strtoul(p, NULL, 0);
 +
++		be->major = major;
++		be->minor = minor;
 +
-+#ifdef CONFIG_SMP
-+	if (!cpu_online(c-cpu_data))
-+		return 0;
-+#endif
++		err = vbd_create(be->blkif, handle, major, minor,
++				 (NULL == strchr(be->mode, 'w')), cdrom);
++		if (err) {
++			be->major = be->minor = 0;
++			xenbus_dev_fatal(dev, err, "creating vbd structure");
++			return;
++		}
 +
-+	seq_printf(m,"processor\t: %u\n"
-+		     "vendor_id\t: %s\n"
-+		     "cpu family\t: %d\n"
-+		     "model\t\t: %d\n"
-+		     "model name\t: %s\n",
-+		     (unsigned)(c-cpu_data),
-+		     c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
-+		     c->x86,
-+		     (int)c->x86_model,
-+		     c->x86_model_id[0] ? c->x86_model_id : "unknown");
-+	
-+	if (c->x86_mask || c->cpuid_level >= 0)
-+		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
-+	else
-+		seq_printf(m, "stepping\t: unknown\n");
-+	
-+	if (cpu_has(c,X86_FEATURE_TSC)) {
-+		unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
-+		if (!freq)
-+			freq = cpu_khz;
-+		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
-+			     freq / 1000, (freq % 1000));
-+	}
++		err = xenvbd_sysfs_addif(dev);
++		if (err) {
++			vbd_free(&be->blkif->vbd);
++			be->major = be->minor = 0;
++			xenbus_dev_fatal(dev, err, "creating sysfs entries");
++			return;
++		}
 +
-+	/* Cache size */
-+	if (c->x86_cache_size >= 0) 
-+		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
-+	
-+#ifdef CONFIG_SMP
-+	if (smp_num_siblings * c->x86_max_cores > 1) {
-+		int cpu = c - cpu_data;
-+		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
-+		seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
-+		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
-+		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
++		/* We're potentially connected now */
++		update_blkif_status(be->blkif);
 +	}
-+#endif	
++}
 +
-+	seq_printf(m,
-+	        "fpu\t\t: yes\n"
-+	        "fpu_exception\t: yes\n"
-+	        "cpuid level\t: %d\n"
-+	        "wp\t\t: yes\n"
-+	        "flags\t\t:",
-+		   c->cpuid_level);
 +
-+	{ 
-+		int i; 
-+		for ( i = 0 ; i < 32*NCAPINTS ; i++ )
-+			if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
-+				seq_printf(m, " %s", x86_cap_flags[i]);
-+	}
-+		
-+	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
-+		   c->loops_per_jiffy/(500000/HZ),
-+		   (c->loops_per_jiffy/(5000/HZ)) % 100);
++/**
++ * Callback received when the frontend's state changes.
++ */
++static void frontend_changed(struct xenbus_device *dev,
++			     enum xenbus_state frontend_state)
++{
++	struct backend_info *be = dev->dev.driver_data;
++	int err;
 +
-+	if (c->x86_tlbsize > 0) 
-+		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
-+	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
-+	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
++	DPRINTK("%s", xenbus_strstate(frontend_state));
 +
-+	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", 
-+		   c->x86_phys_bits, c->x86_virt_bits);
++	switch (frontend_state) {
++	case XenbusStateInitialising:
++		if (dev->state == XenbusStateClosed) {
++			printk(KERN_INFO "%s: %s: prepare for reconnect\n",
++			       __FUNCTION__, dev->nodename);
++			xenbus_switch_state(dev, XenbusStateInitWait);
++		}
++		break;
 +
-+	seq_printf(m, "power management:");
-+	{
-+		unsigned i;
-+		for (i = 0; i < 32; i++) 
-+			if (c->x86_power & (1 << i)) {
-+				if (i < ARRAY_SIZE(x86_power_flags) &&
-+					x86_power_flags[i])
-+					seq_printf(m, "%s%s",
-+						x86_power_flags[i][0]?" ":"",
-+						x86_power_flags[i]);
-+				else
-+					seq_printf(m, " [%d]", i);
-+			}
-+	}
++	case XenbusStateInitialised:
++	case XenbusStateConnected:
++		/* Ensure we connect even when two watches fire in
++		   close succession and we miss the intermediate value
++		   of frontend_state. */
++		if (dev->state == XenbusStateConnected)
++			break;
 +
-+	seq_printf(m, "\n\n");
++		err = connect_ring(be);
++		if (err)
++			break;
++		update_blkif_status(be->blkif);
++		break;
 +
-+	return 0;
-+}
++	case XenbusStateClosing:
++		blkif_disconnect(be->blkif);
++		xenbus_switch_state(dev, XenbusStateClosing);
++		break;
 +
-+static void *c_start(struct seq_file *m, loff_t *pos)
-+{
-+	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
-+}
++	case XenbusStateClosed:
++		xenbus_switch_state(dev, XenbusStateClosed);
++		if (xenbus_dev_is_online(dev))
++			break;
++		/* fall through if not online */
++	case XenbusStateUnknown:
++		device_unregister(&dev->dev);
++		break;
 +
-+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
-+{
-+	++*pos;
-+	return c_start(m, pos);
++	default:
++		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
++				 frontend_state);
++		break;
++	}
 +}
 +
-+static void c_stop(struct seq_file *m, void *v)
-+{
-+}
 +
-+struct seq_operations cpuinfo_op = {
-+	.start =c_start,
-+	.next =	c_next,
-+	.stop =	c_stop,
-+	.show =	show_cpuinfo,
-+};
++/* ** Connection ** */
 +
-+#if defined(CONFIG_INPUT_PCSPKR) || defined(CONFIG_INPUT_PCSPKR_MODULE)
-+#include <linux/platform_device.h>
-+static __init int add_pcspkr(void)
-+{
-+	struct platform_device *pd;
-+	int ret;
 +
-+	if (!is_initial_xendomain())
-+		return 0;
++/**
++ * Write the physical details regarding the block device to the store, and
++ * switch to Connected state.
++ */
++static void connect(struct backend_info *be)
++{
++	struct xenbus_transaction xbt;
++	int err;
++	struct xenbus_device *dev = be->dev;
 +
-+	pd = platform_device_alloc("pcspkr", -1);
-+	if (!pd)
-+		return -ENOMEM;
++	DPRINTK("%s", dev->otherend);
 +
-+	ret = platform_device_add(pd);
-+	if (ret)
-+		platform_device_put(pd);
++	/* Supply the information about the device the frontend needs */
++again:
++	err = xenbus_transaction_start(&xbt);
++	if (err) {
++		xenbus_dev_fatal(dev, err, "starting transaction");
++		return;
++	}
 +
-+	return ret;
-+}
-+device_initcall(add_pcspkr);
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/smp-xen.c tmp-linux-2.6-xen.patch/arch/x86_64/kernel/smp-xen.c
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/smp-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/smp-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,600 @@
-+/*
-+ *	Intel SMP support routines.
-+ *
-+ *	(c) 1995 Alan Cox, Building #3 <alan at redhat.com>
-+ *	(c) 1998-99, 2000 Ingo Molnar <mingo at redhat.com>
-+ *      (c) 2002,2003 Andi Kleen, SuSE Labs.
-+ *
-+ *	This code is released under the GNU General Public License version 2 or
-+ *	later.
-+ */
++	err = blkback_barrier(xbt, be, 1);
++	if (err)
++		goto abort;
 +
-+#include <linux/init.h>
++	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
++			    vbd_size(&be->blkif->vbd));
++	if (err) {
++		xenbus_dev_fatal(dev, err, "writing %s/sectors",
++				 dev->nodename);
++		goto abort;
++	}
 +
-+#include <linux/mm.h>
-+#include <linux/delay.h>
-+#include <linux/spinlock.h>
-+#include <linux/smp_lock.h>
-+#include <linux/smp.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/interrupt.h>
++	/* FIXME: use a typename instead */
++	err = xenbus_printf(xbt, dev->nodename, "info", "%u",
++			    vbd_info(&be->blkif->vbd));
++	if (err) {
++		xenbus_dev_fatal(dev, err, "writing %s/info",
++				 dev->nodename);
++		goto abort;
++	}
++	err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
++			    vbd_secsize(&be->blkif->vbd));
++	if (err) {
++		xenbus_dev_fatal(dev, err, "writing %s/sector-size",
++				 dev->nodename);
++		goto abort;
++	}
 +
-+#include <asm/mtrr.h>
-+#include <asm/pgalloc.h>
-+#include <asm/tlbflush.h>
-+#include <asm/mach_apic.h>
-+#include <asm/mmu_context.h>
-+#include <asm/proto.h>
-+#include <asm/apicdef.h>
-+#include <asm/idle.h>
-+#ifdef CONFIG_XEN
-+#include <xen/evtchn.h>
-+#endif
++	err = xenbus_transaction_end(xbt, 0);
++	if (err == -EAGAIN)
++		goto again;
++	if (err)
++		xenbus_dev_fatal(dev, err, "ending transaction");
 +
-+#ifndef CONFIG_XEN
-+/*
-+ *	Smarter SMP flushing macros. 
-+ *		c/o Linus Torvalds.
-+ *
-+ *	These mean you can really definitely utterly forget about
-+ *	writing to user space from interrupts. (Its not allowed anyway).
-+ *
-+ *	Optimizations Manfred Spraul <manfred at colorfullife.com>
-+ *
-+ * 	More scalable flush, from Andi Kleen
-+ *
-+ * 	To avoid global state use 8 different call vectors.
-+ * 	Each CPU uses a specific vector to trigger flushes on other
-+ * 	CPUs. Depending on the received vector the target CPUs look into
-+ *	the right per cpu variable for the flush data.
-+ *
-+ * 	With more than 8 CPUs they are hashed to the 8 available
-+ * 	vectors. The limited global vector space forces us to this right now.
-+ *	In future when interrupts are split into per CPU domains this could be
-+ *	fixed, at the cost of triggering multiple IPIs in some cases.
-+ */
++	err = xenbus_switch_state(dev, XenbusStateConnected);
++	if (err)
++		xenbus_dev_fatal(dev, err, "switching to Connected state",
++				 dev->nodename);
 +
-+union smp_flush_state {
-+	struct {
-+		cpumask_t flush_cpumask;
-+		struct mm_struct *flush_mm;
-+		unsigned long flush_va;
-+#define FLUSH_ALL	-1ULL
-+		spinlock_t tlbstate_lock;
-+	};
-+	char pad[SMP_CACHE_BYTES];
-+} ____cacheline_aligned;
++	return;
++ abort:
++	xenbus_transaction_end(xbt, 1);
++}
 +
-+/* State is put into the per CPU data section, but padded
-+   to a full cache line because other CPUs can access it and we don't
-+   want false sharing in the per cpu data segment. */
-+static DEFINE_PER_CPU(union smp_flush_state, flush_state);
-+#endif
 +
-+/*
-+ * We cannot call mmdrop() because we are in interrupt context, 
-+ * instead update mm->cpu_vm_mask.
-+ */
-+static inline void leave_mm(unsigned long cpu)
++static int connect_ring(struct backend_info *be)
 +{
-+	if (read_pda(mmu_state) == TLBSTATE_OK)
-+		BUG();
-+	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
-+	load_cr3(swapper_pg_dir);
-+}
-+
-+#ifndef CONFIG_XEN
-+/*
-+ *
-+ * The flush IPI assumes that a thread switch happens in this order:
-+ * [cpu0: the cpu that switches]
-+ * 1) switch_mm() either 1a) or 1b)
-+ * 1a) thread switch to a different mm
-+ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
-+ * 	Stop ipi delivery for the old mm. This is not synchronized with
-+ * 	the other cpus, but smp_invalidate_interrupt ignore flush ipis
-+ * 	for the wrong mm, and in the worst case we perform a superfluous
-+ * 	tlb flush.
-+ * 1a2) set cpu mmu_state to TLBSTATE_OK
-+ * 	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
-+ *	was in lazy tlb mode.
-+ * 1a3) update cpu active_mm
-+ * 	Now cpu0 accepts tlb flushes for the new mm.
-+ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
-+ * 	Now the other cpus will send tlb flush ipis.
-+ * 1a4) change cr3.
-+ * 1b) thread switch without mm change
-+ *	cpu active_mm is correct, cpu0 already handles
-+ *	flush ipis.
-+ * 1b1) set cpu mmu_state to TLBSTATE_OK
-+ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
-+ * 	Atomically set the bit [other cpus will start sending flush ipis],
-+ * 	and test the bit.
-+ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
-+ * 2) switch %%esp, ie current
-+ *
-+ * The interrupt must handle 2 special cases:
-+ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
-+ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
-+ *   runs in kernel space, the cpu could load tlb entries for user space
-+ *   pages.
-+ *
-+ * The good news is that cpu mmu_state is local to each cpu, no
-+ * write/read ordering problems.
-+ */
++	struct xenbus_device *dev = be->dev;
++	unsigned long ring_ref;
++	unsigned int evtchn;
++	char protocol[64] = "";
++	int err;
 +
-+/*
-+ * TLB flush IPI:
-+ *
-+ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
-+ * 2) Leave the mm if we are in the lazy tlb mode.
-+ *
-+ * Interrupts are disabled.
-+ */
++	DPRINTK("%s", dev->otherend);
 +
-+asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
-+{
-+	int cpu;
-+	int sender;
-+	union smp_flush_state *f;
++	err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu", &ring_ref,
++			    "event-channel", "%u", &evtchn, NULL);
++	if (err) {
++		xenbus_dev_fatal(dev, err,
++				 "reading %s/ring-ref and event-channel",
++				 dev->otherend);
++		return err;
++	}
 +
-+	cpu = smp_processor_id();
-+	/*
-+	 * orig_rax contains the negated interrupt vector.
-+	 * Use that to determine where the sender put the data.
-+	 */
-+	sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START;
-+	f = &per_cpu(flush_state, sender);
++	be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
++	err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
++			    "%63s", protocol, NULL);
++	if (err)
++		strcpy(protocol, "unspecified, assuming native");
++	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
++		be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
++	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
++		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
++	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
++		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
++	else {
++		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
++		return -1;
++	}
++	printk(KERN_INFO
++	       "blkback: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
++	       ring_ref, evtchn, be->blkif->blk_protocol, protocol);
 +
-+	if (!cpu_isset(cpu, f->flush_cpumask))
-+		goto out;
-+		/* 
-+		 * This was a BUG() but until someone can quote me the
-+		 * line from the intel manual that guarantees an IPI to
-+		 * multiple CPUs is retried _only_ on the erroring CPUs
-+		 * its staying as a return
-+		 *
-+		 * BUG();
-+		 */
-+		 
-+	if (f->flush_mm == read_pda(active_mm)) {
-+		if (read_pda(mmu_state) == TLBSTATE_OK) {
-+			if (f->flush_va == FLUSH_ALL)
-+				local_flush_tlb();
-+			else
-+				__flush_tlb_one(f->flush_va);
-+		} else
-+			leave_mm(cpu);
++	/* Map the shared frame, irq etc. */
++	err = blkif_map(be->blkif, ring_ref, evtchn);
++	if (err) {
++		xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
++				 ring_ref, evtchn);
++		return err;
 +	}
-+out:
-+	ack_APIC_irq();
-+	cpu_clear(cpu, f->flush_cpumask);
++
++	return 0;
 +}
 +
-+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
-+						unsigned long va)
-+{
-+	int sender;
-+	union smp_flush_state *f;
 +
-+	/* Caller has disabled preemption */
-+	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
-+	f = &per_cpu(flush_state, sender);
++/* ** Driver Registration ** */
 +
-+	/* Could avoid this lock when
-+	   num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
-+	   probably not worth checking this for a cache-hot lock. */
-+	spin_lock(&f->tlbstate_lock);
 +
-+	f->flush_mm = mm;
-+	f->flush_va = va;
-+	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
++static const struct xenbus_device_id blkback_ids[] = {
++	{ "vbd" },
++	{ "" }
++};
 +
-+	/*
-+	 * We have to send the IPI only to
-+	 * CPUs affected.
-+	 */
-+	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);
 +
-+	while (!cpus_empty(f->flush_cpumask))
-+		cpu_relax();
++static struct xenbus_driver blkback = {
++	.name = "vbd",
++	.owner = THIS_MODULE,
++	.ids = blkback_ids,
++	.probe = blkback_probe,
++	.remove = blkback_remove,
++	.otherend_changed = frontend_changed
++};
 +
-+	f->flush_mm = NULL;
-+	f->flush_va = 0;
-+	spin_unlock(&f->tlbstate_lock);
-+}
 +
-+int __cpuinit init_smp_flush(void)
++void blkif_xenbus_init(void)
 +{
-+	int i;
-+	for_each_cpu_mask(i, cpu_possible_map) {
-+		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
-+	}
-+	return 0;
++	xenbus_register_backend(&blkback);
 +}
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/blkfront/Makefile
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/blkfront/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,5 @@
 +
-+core_initcall(init_smp_flush);
-+	
-+void flush_tlb_current_task(void)
-+{
-+	struct mm_struct *mm = current->mm;
-+	cpumask_t cpu_mask;
++obj-$(CONFIG_XEN_BLKDEV_FRONTEND)	:= xenblk.o
 +
-+	preempt_disable();
-+	cpu_mask = mm->cpu_vm_mask;
-+	cpu_clear(smp_processor_id(), cpu_mask);
++xenblk-objs := blkfront.o vbd.o
 +
-+	local_flush_tlb();
-+	if (!cpus_empty(cpu_mask))
-+		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-+	preempt_enable();
-+}
-+EXPORT_SYMBOL(flush_tlb_current_task);
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/blkfront/blkfront.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/blkfront/blkfront.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,936 @@
++/******************************************************************************
++ * blkfront.c
++ * 
++ * XenLinux virtual block-device driver.
++ * 
++ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
++ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
++ * Copyright (c) 2004, Christian Limpach
++ * Copyright (c) 2004, Andrew Warfield
++ * Copyright (c) 2005, Christopher Clark
++ * Copyright (c) 2005, XenSource Ltd
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
 +
-+void flush_tlb_mm (struct mm_struct * mm)
-+{
-+	cpumask_t cpu_mask;
++#include <linux/version.h>
++#include "block.h"
++#include <linux/cdrom.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <scsi/scsi.h>
++#include <xen/evtchn.h>
++#include <xen/xenbus.h>
++#include <xen/interface/grant_table.h>
++#include <xen/interface/io/protocols.h>
++#include <xen/gnttab.h>
++#include <asm/hypervisor.h>
++#include <asm/maddr.h>
 +
-+	preempt_disable();
-+	cpu_mask = mm->cpu_vm_mask;
-+	cpu_clear(smp_processor_id(), cpu_mask);
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
 +
-+	if (current->active_mm == mm) {
-+		if (current->mm)
-+			local_flush_tlb();
-+		else
-+			leave_mm(smp_processor_id());
-+	}
-+	if (!cpus_empty(cpu_mask))
-+		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++#define BLKIF_STATE_DISCONNECTED 0
++#define BLKIF_STATE_CONNECTED    1
++#define BLKIF_STATE_SUSPENDED    2
 +
-+	preempt_enable();
-+}
-+EXPORT_SYMBOL(flush_tlb_mm);
++#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
++    (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
++#define GRANT_INVALID_REF	0
 +
-+void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
++static void connect(struct blkfront_info *);
++static void blkfront_closing(struct xenbus_device *);
++static int blkfront_remove(struct xenbus_device *);
++static int talk_to_backend(struct xenbus_device *, struct blkfront_info *);
++static int setup_blkring(struct xenbus_device *, struct blkfront_info *);
++
++static void kick_pending_request_queues(struct blkfront_info *);
++
++static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs);
++static void blkif_restart_queue(void *arg);
++static void blkif_recover(struct blkfront_info *);
++static void blkif_completion(struct blk_shadow *);
++static void blkif_free(struct blkfront_info *, int);
++
++
++/**
++ * Entry point to this code when a new device is created.  Allocate the basic
++ * structures and the ring buffer for communication with the backend, and
++ * inform the backend of the appropriate details for those.  Switch to
++ * Initialised state.
++ */
++static int blkfront_probe(struct xenbus_device *dev,
++			  const struct xenbus_device_id *id)
 +{
-+	struct mm_struct *mm = vma->vm_mm;
-+	cpumask_t cpu_mask;
++	int err, vdevice, i;
++	struct blkfront_info *info;
 +
-+	preempt_disable();
-+	cpu_mask = mm->cpu_vm_mask;
-+	cpu_clear(smp_processor_id(), cpu_mask);
++	/* FIXME: Use dynamic device id if this is not set. */
++	err = xenbus_scanf(XBT_NIL, dev->nodename,
++			   "virtual-device", "%i", &vdevice);
++	if (err != 1) {
++		/* go looking in the extended area instead */
++		err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
++			"%i", &vdevice);
++		if (err != 1) {
++			xenbus_dev_fatal(dev, err, "reading virtual-device");
++			return err;
++		}
++	}
 +
-+	if (current->active_mm == mm) {
-+		if(current->mm)
-+			__flush_tlb_one(va);
-+		 else
-+		 	leave_mm(smp_processor_id());
++	info = kzalloc(sizeof(*info), GFP_KERNEL);
++	if (!info) {
++		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
++		return -ENOMEM;
 +	}
 +
-+	if (!cpus_empty(cpu_mask))
-+		flush_tlb_others(cpu_mask, mm, va);
++	info->xbdev = dev;
++	info->vdevice = vdevice;
++	info->connected = BLKIF_STATE_DISCONNECTED;
++	INIT_WORK(&info->work, blkif_restart_queue, (void *)info);
++
++	for (i = 0; i < BLK_RING_SIZE; i++)
++		info->shadow[i].req.id = i+1;
++	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
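++	/*
++	 * Editorial note (illustration, not in the original patch): the
++	 * shadow entries double as a free list threaded through req.id:
++	 * shadow[0].req.id = 1, shadow[1].req.id = 2, ..., terminated by
++	 * the 0x0fffffff sentinel.  GET_ID_FROM_FREELIST() below pops an
++	 * index off this chain and ADD_ID_TO_FREELIST() pushes one back.
++	 */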
 +
-+	preempt_enable();
-+}
-+EXPORT_SYMBOL(flush_tlb_page);
++	/* Front end dir is a number, which is used as the id. */
++	info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0);
++	dev->dev.driver_data = info;
 +
-+static void do_flush_tlb_all(void* info)
-+{
-+	unsigned long cpu = smp_processor_id();
++	err = talk_to_backend(dev, info);
++	if (err) {
++		kfree(info);
++		dev->dev.driver_data = NULL;
++		return err;
++	}
 +
-+	__flush_tlb_all();
-+	if (read_pda(mmu_state) == TLBSTATE_LAZY)
-+		leave_mm(cpu);
++	return 0;
 +}
 +
-+void flush_tlb_all(void)
-+{
-+	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
-+}
-+#else
-+asmlinkage void smp_invalidate_interrupt (void)
-+{ return; }
-+void flush_tlb_current_task(void)
-+{ xen_tlb_flush_mask(&current->mm->cpu_vm_mask); }
-+void flush_tlb_mm (struct mm_struct * mm)
-+{ xen_tlb_flush_mask(&mm->cpu_vm_mask); }
-+void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
-+{ xen_invlpg_mask(&vma->vm_mm->cpu_vm_mask, va); }
-+void flush_tlb_all(void)
-+{ xen_tlb_flush_all(); }
-+#endif /* Xen */
 +
-+/*
-+ * this function sends a 'reschedule' IPI to another CPU.
-+ * it goes straight through and wastes no time serializing
-+ * anything. Worst case is that we lose a reschedule ...
++/**
++ * We are reconnecting to the backend, due to a suspend/resume, or a backend
++ * driver restart.  We tear down our blkif structure and recreate it, but
++ * leave the device-layer structures intact so that this is transparent to the
++ * rest of the kernel.
 + */
-+
-+void smp_send_reschedule(int cpu)
++static int blkfront_resume(struct xenbus_device *dev)
 +{
-+	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
-+}
++	struct blkfront_info *info = dev->dev.driver_data;
++	int err;
 +
-+/*
-+ * Structure and data for smp_call_function(). This is designed to minimise
-+ * static memory requirements. It also looks cleaner.
-+ */
-+static DEFINE_SPINLOCK(call_lock);
++	DPRINTK("blkfront_resume: %s\n", dev->nodename);
 +
-+struct call_data_struct {
-+	void (*func) (void *info);
-+	void *info;
-+	atomic_t started;
-+	atomic_t finished;
-+	int wait;
-+};
++	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
 +
-+static struct call_data_struct * call_data;
++	err = talk_to_backend(dev, info);
++	if (info->connected == BLKIF_STATE_SUSPENDED && !err)
++		blkif_recover(info);
 +
-+void lock_ipi_call_lock(void)
-+{
-+	spin_lock_irq(&call_lock);
++	return err;
 +}
 +
-+void unlock_ipi_call_lock(void)
-+{
-+	spin_unlock_irq(&call_lock);
-+}
 +
-+/*
-+ * this function sends a 'generic call function' IPI to one other CPU
-+ * in the system.
-+ *
-+ * cpu is a standard Linux logical CPU number.
-+ */
-+static void
-+__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-+				int nonatomic, int wait)
++/* Common code used when first setting up, and when resuming. */
++static int talk_to_backend(struct xenbus_device *dev,
++			   struct blkfront_info *info)
 +{
-+	struct call_data_struct data;
-+	int cpus = 1;
++	const char *message = NULL;
++	struct xenbus_transaction xbt;
++	int err;
 +
-+	data.func = func;
-+	data.info = info;
-+	atomic_set(&data.started, 0);
-+	data.wait = wait;
-+	if (wait)
-+		atomic_set(&data.finished, 0);
++	/* Create shared ring, alloc event channel. */
++	err = setup_blkring(dev, info);
++	if (err)
++		goto out;
 +
-+	call_data = &data;
-+	wmb();
-+	/* Send a message to all other CPUs and wait for them to respond */
-+	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
++again:
++	err = xenbus_transaction_start(&xbt);
++	if (err) {
++		xenbus_dev_fatal(dev, err, "starting transaction");
++		goto destroy_blkring;
++	}
 +
-+	/* Wait for response */
-+	while (atomic_read(&data.started) != cpus)
-+		cpu_relax();
++	err = xenbus_printf(xbt, dev->nodename,
++			    "ring-ref", "%u", info->ring_ref);
++	if (err) {
++		message = "writing ring-ref";
++		goto abort_transaction;
++	}
++	err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
++			    irq_to_evtchn_port(info->irq));
++	if (err) {
++		message = "writing event-channel";
++		goto abort_transaction;
++	}
++	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
++			    XEN_IO_PROTO_ABI_NATIVE);
++	if (err) {
++		message = "writing protocol";
++		goto abort_transaction;
++	}
 +
-+	if (!wait)
-+		return;
++	err = xenbus_transaction_end(xbt, 0);
++	if (err) {
++		if (err == -EAGAIN)
++			goto again;
++		xenbus_dev_fatal(dev, err, "completing transaction");
++		goto destroy_blkring;
++	}
 +
-+	while (atomic_read(&data.finished) != cpus)
-+		cpu_relax();
++	xenbus_switch_state(dev, XenbusStateInitialised);
++
++	return 0;
++
++ abort_transaction:
++	xenbus_transaction_end(xbt, 1);
++	if (message)
++		xenbus_dev_fatal(dev, err, "%s", message);
++ destroy_blkring:
++	blkif_free(info, 0);
++ out:
++	return err;
 +}
 +
-+/*
-+ * smp_call_function_single - Run a function on another CPU
-+ * @func: The function to run. This must be fast and non-blocking.
-+ * @info: An arbitrary pointer to pass to the function.
-+ * @nonatomic: Currently unused.
-+ * @wait: If true, wait until function has completed on other CPUs.
-+ *
-+ * Retrurns 0 on success, else a negative status code.
-+ *
-+ * Does not return until the remote CPU is nearly ready to execute <func>
-+ * or is or has executed.
-+ */
 +
-+int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
-+	int nonatomic, int wait)
++static int setup_blkring(struct xenbus_device *dev,
++			 struct blkfront_info *info)
 +{
-+	/* prevent preemption and reschedule on another processor */
-+	int me = get_cpu();
-+	if (cpu == me) {
-+		WARN_ON(1);
-+		put_cpu();
-+		return -EBUSY;
++	blkif_sring_t *sring;
++	int err;
++
++	info->ring_ref = GRANT_INVALID_REF;
++
++	sring = (blkif_sring_t *)__get_free_page(GFP_NOIO | __GFP_HIGH);
++	if (!sring) {
++		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
++		return -ENOMEM;
 +	}
-+	spin_lock_bh(&call_lock);
-+	__smp_call_function_single(cpu, func, info, nonatomic, wait);
-+	spin_unlock_bh(&call_lock);
-+	put_cpu();
++	SHARED_RING_INIT(sring);
++	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
++
++	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
++	if (err < 0) {
++		free_page((unsigned long)sring);
++		info->ring.sring = NULL;
++		goto fail;
++	}
++	info->ring_ref = err;
++
++	err = bind_listening_port_to_irqhandler(
++		dev->otherend_id, blkif_int, SA_SAMPLE_RANDOM, "blkif", info);
++	if (err <= 0) {
++		xenbus_dev_fatal(dev, err,
++				 "bind_listening_port_to_irqhandler");
++		goto fail;
++	}
++	info->irq = err;
++
 +	return 0;
++fail:
++	blkif_free(info, 0);
++	return err;
 +}
 +
-+/*
-+ * this function sends a 'generic call function' IPI to all other CPUs
-+ * in the system.
++
++/**
++ * Callback received when the backend's state changes.
 + */
-+static void __smp_call_function (void (*func) (void *info), void *info,
-+				int nonatomic, int wait)
++static void backend_changed(struct xenbus_device *dev,
++			    enum xenbus_state backend_state)
 +{
-+	struct call_data_struct data;
-+	int cpus = num_online_cpus()-1;
++	struct blkfront_info *info = dev->dev.driver_data;
++	struct block_device *bd;
 +
-+	if (!cpus)
-+		return;
++	DPRINTK("blkfront:backend_changed.\n");
 +
-+	data.func = func;
-+	data.info = info;
-+	atomic_set(&data.started, 0);
-+	data.wait = wait;
-+	if (wait)
-+		atomic_set(&data.finished, 0);
++	switch (backend_state) {
++	case XenbusStateInitialising:
++	case XenbusStateInitWait:
++	case XenbusStateInitialised:
++	case XenbusStateReconfiguring:
++	case XenbusStateReconfigured:
++	case XenbusStateUnknown:
++	case XenbusStateClosed:
++		break;
 +
-+	call_data = &data;
-+	wmb();
-+	/* Send a message to all other CPUs and wait for them to respond */
-+	send_IPI_allbutself(CALL_FUNCTION_VECTOR);
++	case XenbusStateConnected:
++		connect(info);
++		break;
 +
-+	/* Wait for response */
-+	while (atomic_read(&data.started) != cpus)
-+#ifndef CONFIG_XEN
-+		cpu_relax();
++	case XenbusStateClosing:
++		bd = bdget(info->dev);
++		if (bd == NULL)
++			xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++		down(&bd->bd_sem);
 +#else
-+		barrier();
++		mutex_lock(&bd->bd_mutex);
 +#endif
-+
-+	if (!wait)
-+		return;
-+
-+	while (atomic_read(&data.finished) != cpus)
-+#ifndef CONFIG_XEN
-+		cpu_relax();
++		if (info->users > 0)
++			xenbus_dev_error(dev, -EBUSY,
++					 "Device in use; refusing to close");
++		else
++			blkfront_closing(dev);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++		up(&bd->bd_sem);
 +#else
-+		barrier();
++		mutex_unlock(&bd->bd_mutex);
 +#endif
++		bdput(bd);
++		break;
++	}
 +}
 +
++
++/* ** Connection ** */
++
++
 +/*
-+ * smp_call_function - run a function on all other CPUs.
-+ * @func: The function to run. This must be fast and non-blocking.
-+ * @info: An arbitrary pointer to pass to the function.
-+ * @nonatomic: currently unused.
-+ * @wait: If true, wait (atomically) until function has completed on other
-+ *        CPUs.
-+ *
-+ * Returns 0 on success, else a negative status code. Does not return until
-+ * remote CPUs are nearly ready to execute func or are or have executed.
-+ *
-+ * You must not call this function with disabled interrupts or from a
-+ * hardware interrupt handler or from a bottom half handler.
-+ * Actually there are a few legal cases, like panic.
++ * Invoked when the backend is finally 'ready' (and has produced the
++ * details about the physical device - #sectors, size, etc).
 + */
-+int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
-+			int wait)
++static void connect(struct blkfront_info *info)
 +{
-+	spin_lock(&call_lock);
-+	__smp_call_function(func,info,nonatomic,wait);
-+	spin_unlock(&call_lock);
-+	return 0;
-+}
-+EXPORT_SYMBOL(smp_call_function);
++	unsigned long long sectors;
++	unsigned long sector_size;
++	unsigned int binfo;
++	int err;
 +
-+void smp_stop_cpu(void)
-+{
-+	unsigned long flags;
-+	/*
-+	 * Remove this CPU:
-+	 */
-+	cpu_clear(smp_processor_id(), cpu_online_map);
-+	local_irq_save(flags);
-+#ifndef CONFIG_XEN
-+	disable_local_APIC();
-+#endif
-+	local_irq_restore(flags); 
-+}
++	if ((info->connected == BLKIF_STATE_CONNECTED) ||
++	    (info->connected == BLKIF_STATE_SUSPENDED) )
++		return;
 +
-+static void smp_really_stop_cpu(void *dummy)
-+{
-+	smp_stop_cpu(); 
-+	for (;;) 
-+		halt();
-+} 
++	DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend);
 +
-+void smp_send_stop(void)
-+{
-+	int nolock = 0;
-+#ifndef CONFIG_XEN
-+	if (reboot_force)
++	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
++			    "sectors", "%Lu", &sectors,
++			    "info", "%u", &binfo,
++			    "sector-size", "%lu", &sector_size,
++			    NULL);
++	if (err) {
++		xenbus_dev_fatal(info->xbdev, err,
++				 "reading backend fields at %s",
++				 info->xbdev->otherend);
 +		return;
-+#endif
-+	/* Don't deadlock on the call lock in panic */
-+	if (!spin_trylock(&call_lock)) {
-+		/* ignore locking because we have panicked anyways */
-+		nolock = 1;
 +	}
-+	__smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
-+	if (!nolock)
-+		spin_unlock(&call_lock);
-+
-+	local_irq_disable();
-+#ifndef CONFIG_XEN
-+	disable_local_APIC();
-+#endif
-+	local_irq_enable();
-+}
-+
-+/*
-+ * Reschedule call back. Nothing to do,
-+ * all the work is done automatically when
-+ * we return from the interrupt.
-+ */
-+#ifndef CONFIG_XEN
-+asmlinkage void smp_reschedule_interrupt(void)
-+#else
-+asmlinkage irqreturn_t smp_reschedule_interrupt(void)
-+#endif
-+{
-+#ifndef CONFIG_XEN
-+	ack_APIC_irq();
-+#else
-+	return IRQ_HANDLED;
-+#endif
-+}
 +
-+#ifndef CONFIG_XEN
-+asmlinkage void smp_call_function_interrupt(void)
-+#else
-+asmlinkage irqreturn_t smp_call_function_interrupt(void)
-+#endif
-+{
-+	void (*func) (void *info) = call_data->func;
-+	void *info = call_data->info;
-+	int wait = call_data->wait;
++	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
++			    "feature-barrier", "%lu", &info->feature_barrier,
++			    NULL);
++	if (err)
++		info->feature_barrier = 0;
 +
-+#ifndef CONFIG_XEN
-+	ack_APIC_irq();
-+#endif
-+	/*
-+	 * Notify initiating CPU that I've grabbed the data and am
-+	 * about to execute the function
-+	 */
-+	mb();
-+	atomic_inc(&call_data->started);
-+	/*
-+	 * At this point the info structure may be out of scope unless wait==1
-+	 */
-+	exit_idle();
-+	irq_enter();
-+	(*func)(info);
-+	irq_exit();
-+	if (wait) {
-+		mb();
-+		atomic_inc(&call_data->finished);
++	err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info);
++	if (err) {
++		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
++				 info->xbdev->otherend);
++		return;
 +	}
-+#ifdef CONFIG_XEN
-+	return IRQ_HANDLED;
-+#endif
-+}
-+
-+int safe_smp_processor_id(void)
-+{
-+#ifdef CONFIG_XEN
-+	return smp_processor_id();
-+#else
-+	unsigned apicid, i;
 +
-+	if (disable_apic)
-+		return 0;
++	err = xlvbd_sysfs_addif(info);
++	if (err) {
++		xenbus_dev_fatal(info->xbdev, err, "xlvbd_sysfs_addif at %s",
++				 info->xbdev->otherend);
++		return;
++	}
 +
-+	apicid = hard_smp_processor_id();
-+	if (apicid < NR_CPUS && x86_cpu_to_apicid[apicid] == apicid)
-+		return apicid;
++	(void)xenbus_switch_state(info->xbdev, XenbusStateConnected);
 +
-+	for (i = 0; i < NR_CPUS; ++i) {
-+		if (x86_cpu_to_apicid[i] == apicid)
-+			return i;
-+	}
++	/* Kick pending requests. */
++	spin_lock_irq(&blkif_io_lock);
++	info->connected = BLKIF_STATE_CONNECTED;
++	kick_pending_request_queues(info);
++	spin_unlock_irq(&blkif_io_lock);
 +
-+	/* No entries in x86_cpu_to_apicid?  Either no MPS|ACPI,
-+	 * or called too early.  Either way, we must be CPU 0. */
-+      	if (x86_cpu_to_apicid[0] == BAD_APICID)
-+		return 0;
++	add_disk(info->gd);
 +
-+	return 0; /* Should not happen */
-+#endif
++	info->is_ready = 1;
 +}
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/traps-xen.c tmp-linux-2.6-xen.patch/arch/x86_64/kernel/traps-xen.c
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/traps-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/traps-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,1175 @@
-+/*
-+ *  linux/arch/x86-64/traps.c
-+ *
-+ *  Copyright (C) 1991, 1992  Linus Torvalds
-+ *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
-+ *
-+ *  Pentium III FXSR, SSE support
-+ *	Gareth Hughes <gareth at valinux.com>, May 2000
-+ */
 +
-+/*
-+ * 'Traps.c' handles hardware traps and faults after we have saved some
-+ * state in 'entry.S'.
++/**
++ * Handle the change of state of the backend to Closing.  We must delete our
++ * device-layer structures now, to ensure that writes are flushed through to
++ * the backend.  Once this is done, we can switch to Closed in
++ * acknowledgement.
 + */
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/ptrace.h>
-+#include <linux/timer.h>
-+#include <linux/mm.h>
-+#include <linux/init.h>
-+#include <linux/delay.h>
-+#include <linux/spinlock.h>
-+#include <linux/interrupt.h>
-+#include <linux/module.h>
-+#include <linux/moduleparam.h>
-+#include <linux/nmi.h>
-+#include <linux/kprobes.h>
-+#include <linux/kexec.h>
-+#include <linux/unwind.h>
++static void blkfront_closing(struct xenbus_device *dev)
++{
++	struct blkfront_info *info = dev->dev.driver_data;
++	unsigned long flags;
 +
-+#include <asm/system.h>
-+#include <asm/uaccess.h>
-+#include <asm/io.h>
-+#include <asm/atomic.h>
-+#include <asm/debugreg.h>
-+#include <asm/desc.h>
-+#include <asm/i387.h>
-+#include <asm/kdebug.h>
-+#include <asm/processor.h>
-+#include <asm/unwind.h>
-+#include <asm/smp.h>
-+#include <asm/pgalloc.h>
-+#include <asm/pda.h>
-+#include <asm/proto.h>
-+#include <asm/nmi.h>
++	DPRINTK("blkfront_closing: %s removed\n", dev->nodename);
++
++	if (info->rq == NULL)
++		goto out;
 +
-+asmlinkage void divide_error(void);
-+asmlinkage void debug(void);
-+asmlinkage void nmi(void);
-+asmlinkage void int3(void);
-+asmlinkage void overflow(void);
-+asmlinkage void bounds(void);
-+asmlinkage void invalid_op(void);
-+asmlinkage void device_not_available(void);
-+asmlinkage void double_fault(void);
-+asmlinkage void coprocessor_segment_overrun(void);
-+asmlinkage void invalid_TSS(void);
-+asmlinkage void segment_not_present(void);
-+asmlinkage void stack_segment(void);
-+asmlinkage void general_protection(void);
-+asmlinkage void page_fault(void);
-+asmlinkage void coprocessor_error(void);
-+asmlinkage void simd_coprocessor_error(void);
-+asmlinkage void reserved(void);
-+asmlinkage void alignment_check(void);
-+asmlinkage void machine_check(void);
-+asmlinkage void spurious_interrupt_bug(void);
++	spin_lock_irqsave(&blkif_io_lock, flags);
++	/* No more blkif_request(). */
++	blk_stop_queue(info->rq);
++	/* No more gnttab callback work. */
++	gnttab_cancel_free_callback(&info->callback);
++	spin_unlock_irqrestore(&blkif_io_lock, flags);
 +
-+ATOMIC_NOTIFIER_HEAD(die_chain);
-+EXPORT_SYMBOL(die_chain);
++	/* Flush gnttab callback work. Must be done with no locks held. */
++	flush_scheduled_work();
 +
-+int register_die_notifier(struct notifier_block *nb)
++	xlvbd_sysfs_delif(info);
++
++	xlvbd_del(info);
++
++ out:
++	xenbus_frontend_closed(dev);
++}
++
++
++static int blkfront_remove(struct xenbus_device *dev)
 +{
-+	vmalloc_sync_all();
-+	return atomic_notifier_chain_register(&die_chain, nb);
++	struct blkfront_info *info = dev->dev.driver_data;
++
++	DPRINTK("blkfront_remove: %s removed\n", dev->nodename);
++
++	blkif_free(info, 0);
++
++	kfree(info);
++
++	return 0;
 +}
-+EXPORT_SYMBOL(register_die_notifier); /* used modular by kdb */
 +
-+int unregister_die_notifier(struct notifier_block *nb)
++
++static inline int GET_ID_FROM_FREELIST(
++	struct blkfront_info *info)
 +{
-+	return atomic_notifier_chain_unregister(&die_chain, nb);
++	unsigned long free = info->shadow_free;
++	BUG_ON(free > BLK_RING_SIZE);
++	BUG_ON(free >= BLK_RING_SIZE);
++	info->shadow[free].req.id = 0x0fffffee; /* debug */
++	return free;
 +}
-+EXPORT_SYMBOL(unregister_die_notifier); /* used modular by kdb */
 +
-+static inline void conditional_sti(struct pt_regs *regs)
++static inline void ADD_ID_TO_FREELIST(
++	struct blkfront_info *info, unsigned long id)
 +{
-+	if (regs->eflags & X86_EFLAGS_IF)
-+		local_irq_enable();
++	info->shadow[id].req.id  = info->shadow_free;
++	info->shadow[id].request = 0;
++	info->shadow_free = id;
 +}
 +
-+static inline void preempt_conditional_sti(struct pt_regs *regs)
++static inline void flush_requests(struct blkfront_info *info)
 +{
-+	preempt_disable();
-+	if (regs->eflags & X86_EFLAGS_IF)
-+		local_irq_enable();
++	int notify;
++
++	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);
++
++	if (notify)
++		notify_remote_via_irq(info->irq);
 +}
 +
-+static inline void preempt_conditional_cli(struct pt_regs *regs)
++static void kick_pending_request_queues(struct blkfront_info *info)
 +{
-+	if (regs->eflags & X86_EFLAGS_IF)
-+		local_irq_disable();
-+	/* Make sure to not schedule here because we could be running
-+	   on an exception stack. */
-+	preempt_enable_no_resched();
++	if (!RING_FULL(&info->ring)) {
++		/* Re-enable calldowns. */
++		blk_start_queue(info->rq);
++		/* Kick things off immediately. */
++		do_blkif_request(info->rq);
++	}
 +}
 +
-+static int kstack_depth_to_print = 12;
-+#ifdef CONFIG_STACK_UNWIND
-+static int call_trace = 1;
-+#else
-+#define call_trace (-1)
-+#endif
++static void blkif_restart_queue(void *arg)
++{
++	struct blkfront_info *info = (struct blkfront_info *)arg;
++	spin_lock_irq(&blkif_io_lock);
++	if (info->connected == BLKIF_STATE_CONNECTED)
++		kick_pending_request_queues(info);
++	spin_unlock_irq(&blkif_io_lock);
++}
 +
-+#ifdef CONFIG_KALLSYMS
-+# include <linux/kallsyms.h>
-+void printk_address(unsigned long address)
++static void blkif_restart_queue_callback(void *arg)
 +{
-+	unsigned long offset = 0, symsize;
-+	const char *symname;
-+	char *modname;
-+	char *delim = ":";
-+	char namebuf[128];
++	struct blkfront_info *info = (struct blkfront_info *)arg;
++	schedule_work(&info->work);
++}
 +
-+	symname = kallsyms_lookup(address, &symsize, &offset,
-+					&modname, namebuf);
-+	if (!symname) {
-+		printk(" [<%016lx>]\n", address);
-+		return;
-+	}
-+	if (!modname)
-+		modname = delim = ""; 		
-+	printk(" [<%016lx>] %s%s%s%s+0x%lx/0x%lx\n",
-+		address, delim, modname, delim, symname, offset, symsize);
++int blkif_open(struct inode *inode, struct file *filep)
++{
++	struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
++	info->users++;
++	return 0;
 +}
-+#else
-+void printk_address(unsigned long address)
++
++
++int blkif_release(struct inode *inode, struct file *filep)
 +{
-+	printk(" [<%016lx>]\n", address);
++	struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
++	info->users--;
++	if (info->users == 0) {
++		/* Check whether we have been instructed to close.  We will
++		   have ignored this request initially, as the device was
++		   still mounted. */
++		struct xenbus_device * dev = info->xbdev;
++		enum xenbus_state state = xenbus_read_driver_state(dev->otherend);
++
++		if (state == XenbusStateClosing && info->is_ready)
++			blkfront_closing(dev);
++	}
++	return 0;
 +}
-+#endif
 +
-+static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
-+					unsigned *usedp, const char **idp)
++
++int blkif_ioctl(struct inode *inode, struct file *filep,
++		unsigned command, unsigned long argument)
 +{
-+#ifndef CONFIG_X86_NO_TSS
-+	static char ids[][8] = {
-+		[DEBUG_STACK - 1] = "#DB",
-+		[NMI_STACK - 1] = "NMI",
-+		[DOUBLEFAULT_STACK - 1] = "#DF",
-+		[STACKFAULT_STACK - 1] = "#SS",
-+		[MCE_STACK - 1] = "#MC",
-+#if DEBUG_STKSZ > EXCEPTION_STKSZ
-+		[N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
-+#endif
-+	};
-+	unsigned k;
++	int i;
 +
-+	/*
-+	 * Iterate over all exception stacks, and figure out whether
-+	 * 'stack' is in one of them:
-+	 */
-+	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
-+		unsigned long end;
++	DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
++		      command, (long)argument, inode->i_rdev);
 +
-+		/*
-+		 * set 'end' to the end of the exception stack.
-+		 */
-+		switch (k + 1) {
-+		/*
-+		 * TODO: this block is not needed i think, because
-+		 * setup64.c:cpu_init() sets up t->ist[DEBUG_STACK]
-+		 * properly too.
-+		 */
-+#if DEBUG_STKSZ > EXCEPTION_STKSZ
-+		case DEBUG_STACK:
-+			end = cpu_pda(cpu)->debugstack + DEBUG_STKSZ;
-+			break;
-+#endif
-+		default:
-+			end = per_cpu(orig_ist, cpu).ist[k];
-+			break;
-+		}
-+		/*
-+		 * Is 'stack' above this exception frame's end?
-+		 * If yes then skip to the next frame.
-+		 */
-+		if (stack >= end)
-+			continue;
-+		/*
-+		 * Is 'stack' above this exception frame's start address?
-+		 * If yes then we found the right frame.
-+		 */
-+		if (stack >= end - EXCEPTION_STKSZ) {
-+			/*
-+			 * Make sure we only iterate through an exception
-+			 * stack once. If it comes up for the second time
-+			 * then there's something wrong going on - just
-+			 * break out and return NULL:
-+			 */
-+			if (*usedp & (1U << k))
-+				break;
-+			*usedp |= 1U << k;
-+			*idp = ids[k];
-+			return (unsigned long *)end;
-+		}
-+		/*
-+		 * If this is a debug stack, and if it has a larger size than
-+		 * the usual exception stacks, then 'stack' might still
-+		 * be within the lower portion of the debug stack:
-+		 */
-+#if DEBUG_STKSZ > EXCEPTION_STKSZ
-+		if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
-+			unsigned j = N_EXCEPTION_STACKS - 1;
++	switch (command) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
++	case HDIO_GETGEO: {
++		struct block_device *bd = inode->i_bdev;
++		struct hd_geometry geo;
++		int ret;
 +
-+			/*
-+			 * Black magic. A large debug stack is composed of
-+			 * multiple exception stack entries, which we
-+			 * iterate through now. Dont look:
-+			 */
-+			do {
-+				++j;
-+				end -= EXCEPTION_STKSZ;
-+				ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
-+			} while (stack < end - EXCEPTION_STKSZ);
-+			if (*usedp & (1U << j))
-+				break;
-+			*usedp |= 1U << j;
-+			*idp = ids[j];
-+			return (unsigned long *)end;
-+		}
-+#endif
++		if (!argument)
++			return -EINVAL;
++
++		geo.start = get_start_sect(bd);
++		ret = blkif_getgeo(bd, &geo);
++		if (ret)
++			return ret;
++
++		if (copy_to_user((struct hd_geometry __user *)argument, &geo,
++				 sizeof(geo)))
++			return -EFAULT;
++
++		return 0;
 +	}
 +#endif
-+	return NULL;
++	case CDROMMULTISESSION:
++		DPRINTK("FIXME: support multisession CDs later\n");
++		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
++			if (put_user(0, (char __user *)(argument + i)))
++				return -EFAULT;
++		return 0;
++
++	case CDROM_GET_CAPABILITY: {
++		struct blkfront_info *info =
++			inode->i_bdev->bd_disk->private_data;
++		struct gendisk *gd = info->gd;
++		if (gd->flags & GENHD_FL_CD)
++			return 0;
++		return -EINVAL;
++	}
++	default:
++		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
++		  command);*/
++		return -EINVAL; /* same return as native Linux */
++	}
++
++	return 0;
 +}
 +
-+static int show_trace_unwind(struct unwind_frame_info *info, void *context)
++
++int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
 +{
-+	int n = 0;
++	/* We don't have real geometry info, but let's at least return
++	   values consistent with the size of the device */
++	sector_t nsect = get_capacity(bd->bd_disk);
++	sector_t cylinders = nsect;
 +
-+	while (unwind(info) == 0 && UNW_PC(info)) {
-+		n++;
-+		printk_address(UNW_PC(info));
-+		if (arch_unw_user_mode(info))
-+			break;
-+	}
-+	return n;
++	hg->heads = 0xff;
++	hg->sectors = 0x3f;
++	sector_div(cylinders, hg->heads * hg->sectors);
++	hg->cylinders = cylinders;
++	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
++		hg->cylinders = 0xffff;
++	return 0;
 +}
 +
++
 +/*
-+ * x86-64 can have upto three kernel stacks: 
-+ * process stack
-+ * interrupt stack
-+ * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
++ * blkif_queue_request
++ *
++ * request block io
++ *
++ * id: for guest use only.
++ * operation: BLKIF_OP_{READ,WRITE,PROBE}
++ * buffer: buffer to read/write into. this should be a
++ *   virtual address in the guest os.
 + */
-+
-+void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * stack)
++static int blkif_queue_request(struct request *req)
 +{
-+	const unsigned cpu = safe_smp_processor_id();
-+	unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
-+	unsigned used = 0;
++	struct blkfront_info *info = req->rq_disk->private_data;
++	unsigned long buffer_mfn;
++	blkif_request_t *ring_req;
++	struct bio *bio;
++	struct bio_vec *bvec;
++	int idx;
++	unsigned long id;
++	unsigned int fsect, lsect;
++	int ref;
++	grant_ref_t gref_head;
 +
-+	printk("\nCall Trace:\n");
++	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
++		return 1;
 +
-+	if (!tsk)
-+		tsk = current;
++	if (gnttab_alloc_grant_references(
++		BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
++		gnttab_request_free_callback(
++			&info->callback,
++			blkif_restart_queue_callback,
++			info,
++			BLKIF_MAX_SEGMENTS_PER_REQUEST);
++		return 1;
++	}
 +
-+	if (call_trace >= 0) {
-+		int unw_ret = 0;
-+		struct unwind_frame_info info;
++	/* Fill out a communications ring structure. */
++	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
++	id = GET_ID_FROM_FREELIST(info);
++	info->shadow[id].request = (unsigned long)req;
 +
-+		if (regs) {
-+			if (unwind_init_frame_info(&info, tsk, regs) == 0)
-+				unw_ret = show_trace_unwind(&info, NULL);
-+		} else if (tsk == current)
-+			unw_ret = unwind_init_running(&info, show_trace_unwind, NULL);
-+		else {
-+			if (unwind_init_blocked(&info, tsk) == 0)
-+				unw_ret = show_trace_unwind(&info, NULL);
++	ring_req->id = id;
++	ring_req->sector_number = (blkif_sector_t)req->sector;
++	ring_req->handle = info->handle;
++
++	ring_req->operation = rq_data_dir(req) ?
++		BLKIF_OP_WRITE : BLKIF_OP_READ;
++	if (blk_barrier_rq(req))
++		ring_req->operation = BLKIF_OP_WRITE_BARRIER;
++
++	ring_req->nr_segments = 0;
++	rq_for_each_bio (bio, req) {
++		bio_for_each_segment (bvec, bio, idx) {
++			BUG_ON(ring_req->nr_segments
++			       == BLKIF_MAX_SEGMENTS_PER_REQUEST);
++			buffer_mfn = page_to_phys(bvec->bv_page) >> PAGE_SHIFT;
++			fsect = bvec->bv_offset >> 9;
++			lsect = fsect + (bvec->bv_len >> 9) - 1;
++			/* install a grant reference. */
++			ref = gnttab_claim_grant_reference(&gref_head);
++			BUG_ON(ref == -ENOSPC);
++
++			gnttab_grant_foreign_access_ref(
++				ref,
++				info->xbdev->otherend_id,
++				buffer_mfn,
++				rq_data_dir(req) ? GTF_readonly : 0 );
++
++			info->shadow[id].frame[ring_req->nr_segments] =
++				mfn_to_pfn(buffer_mfn);
++
++			ring_req->seg[ring_req->nr_segments] =
++				(struct blkif_request_segment) {
++					.gref       = ref,
++					.first_sect = fsect,
++					.last_sect  = lsect };
++
++			ring_req->nr_segments++;
 +		}
-+		if (unw_ret > 0) {
-+			if (call_trace == 1 && !arch_unw_user_mode(&info)) {
-+				print_symbol("DWARF2 unwinder stuck at %s\n",
-+					     UNW_PC(&info));
-+				if ((long)UNW_SP(&info) < 0) {
-+					printk("Leftover inexact backtrace:\n");
-+					stack = (unsigned long *)UNW_SP(&info);
-+				} else
-+					printk("Full inexact backtrace again:\n");
-+			} else if (call_trace >= 1)
-+				return;
-+			else
-+				printk("Full inexact backtrace again:\n");
-+		} else
-+			printk("Inexact backtrace:\n");
 +	}
 +
-+	/*
-+	 * Print function call entries within a stack. 'cond' is the
-+	 * "end of stackframe" condition, that the 'stack++'
-+	 * iteration will eventually trigger.
-+	 */
-+#define HANDLE_STACK(cond) \
-+	do while (cond) { \
-+		unsigned long addr = *stack++; \
-+		if (kernel_text_address(addr)) { \
-+			/* \
-+			 * If the address is either in the text segment of the \
-+			 * kernel, or in the region which contains vmalloc'ed \
-+			 * memory, it *may* be the address of a calling \
-+			 * routine; if so, print it so that someone tracing \
-+			 * down the cause of the crash will be able to figure \
-+			 * out the call path that was taken. \
-+			 */ \
-+			printk_address(addr); \
-+		} \
-+	} while (0)
++	info->ring.req_prod_pvt++;
 +
-+	/*
-+	 * Print function call entries in all stacks, starting at the
-+	 * current stack address. If the stacks consist of nested
-+	 * exceptions
-+	 */
-+	for ( ; ; ) {
-+		const char *id;
-+		unsigned long *estack_end;
-+		estack_end = in_exception_stack(cpu, (unsigned long)stack,
-+						&used, &id);
++	/* Keep a private copy so we can reissue requests when recovering. */
++	info->shadow[id].req = *ring_req;
 +
-+		if (estack_end) {
-+			printk(" <%s>", id);
-+			HANDLE_STACK (stack < estack_end);
-+			printk(" <EOE>");
-+			/*
-+			 * We link to the next stack via the
-+			 * second-to-last pointer (index -2 to end) in the
-+			 * exception stack:
-+			 */
-+			stack = (unsigned long *) estack_end[-2];
++	gnttab_free_grant_references(gref_head);
++
++	return 0;
++}
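++/*
++ * Editorial example (illustrative values, not from the original patch):
++ * fsect/lsect above are 512-byte sector offsets within one page, so a
++ * bio_vec with bv_offset = 1024 and bv_len = 3072 maps to
++ * fsect = 1024 >> 9 = 2 and lsect = 2 + (3072 >> 9) - 1 = 7,
++ * i.e. sectors 2..7 of the granted frame.
++ */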
++
++/*
++ * do_blkif_request
++ *  read a block; request is in a request queue
++ */
++void do_blkif_request(request_queue_t *rq)
++{
++	struct blkfront_info *info = NULL;
++	struct request *req;
++	int queued;
++
++	DPRINTK("Entered do_blkif_request\n");
++
++	queued = 0;
++
++	while ((req = elv_next_request(rq)) != NULL) {
++		info = req->rq_disk->private_data;
++		if (!blk_fs_request(req)) {
++			end_request(req, 0);
 +			continue;
 +		}
-+		if (irqstack_end) {
-+			unsigned long *irqstack;
-+			irqstack = irqstack_end -
-+				(IRQSTACKSIZE - 64) / sizeof(*irqstack);
 +
-+			if (stack >= irqstack && stack < irqstack_end) {
-+				printk(" <IRQ>");
-+				HANDLE_STACK (stack < irqstack_end);
-+				/*
-+				 * We link to the next stack (which would be
-+				 * the process stack normally) the last
-+				 * pointer (index -1 to end) in the IRQ stack:
-+				 */
-+				stack = (unsigned long *) (irqstack_end[-1]);
-+				irqstack_end = NULL;
-+				printk(" <EOI>");
-+				continue;
-+			}
++		if (RING_FULL(&info->ring))
++			goto wait;
++
++		DPRINTK("do_blk_req %p: cmd %p, sec %llx, "
++			"(%u/%li) buffer:%p [%s]\n",
++			req, req->cmd, (long long)req->sector,
++			req->current_nr_sectors,
++			req->nr_sectors, req->buffer,
++			rq_data_dir(req) ? "write" : "read");
++
++
++		blkdev_dequeue_request(req);
++		if (blkif_queue_request(req)) {
++			blk_requeue_request(rq, req);
++		wait:
++			/* Avoid pointless unplugs. */
++			blk_stop_queue(rq);
++			break;
 +		}
-+		break;
-+	}
 +
-+	/*
-+	 * This prints the process stack:
-+	 */
-+	HANDLE_STACK (((long) stack & (THREAD_SIZE-1)) != 0);
-+#undef HANDLE_STACK
++		queued++;
++	}
 +
-+	printk("\n");
++	if (queued != 0)
++		flush_requests(info);
 +}
 +
-+static void _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long * rsp)
++
++static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
 +{
-+	unsigned long *stack;
-+	int i;
-+	const int cpu = safe_smp_processor_id();
-+	unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
-+	unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
++	struct request *req;
++	blkif_response_t *bret;
++	RING_IDX i, rp;
++	unsigned long flags;
++	struct blkfront_info *info = (struct blkfront_info *)dev_id;
++	int uptodate;
 +
-+	// debugging aid: "show_stack(NULL, NULL);" prints the
-+	// back trace for this cpu.
++	spin_lock_irqsave(&blkif_io_lock, flags);
 +
-+	if (rsp == NULL) {
-+		if (tsk)
-+			rsp = (unsigned long *)tsk->thread.rsp;
-+		else
-+			rsp = (unsigned long *)&rsp;
++	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
++		spin_unlock_irqrestore(&blkif_io_lock, flags);
++		return IRQ_HANDLED;
 +	}
 +
-+	stack = rsp;
-+	for(i=0; i < kstack_depth_to_print; i++) {
-+		if (stack >= irqstack && stack <= irqstack_end) {
-+			if (stack == irqstack_end) {
-+				stack = (unsigned long *) (irqstack_end[-1]);
-+				printk(" <EOI> ");
++ again:
++	rp = info->ring.sring->rsp_prod;
++	rmb(); /* Ensure we see queued responses up to 'rp'. */
++
++	for (i = info->ring.rsp_cons; i != rp; i++) {
++		unsigned long id;
++		int ret;
++
++		bret = RING_GET_RESPONSE(&info->ring, i);
++		id   = bret->id;
++		req  = (struct request *)info->shadow[id].request;
++
++		blkif_completion(&info->shadow[id]);
++
++		ADD_ID_TO_FREELIST(info, id);
++
++		uptodate = (bret->status == BLKIF_RSP_OKAY);
++		switch (bret->operation) {
++		case BLKIF_OP_WRITE_BARRIER:
++			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
++				printk("blkfront: %s: write barrier op failed\n",
++				       info->gd->disk_name);
++				uptodate = -EOPNOTSUPP;
++				info->feature_barrier = 0;
++				xlvbd_barrier(info);
 +			}
-+		} else {
-+		if (((long) stack & (THREAD_SIZE-1)) == 0)
++			/* fall through */
++		case BLKIF_OP_READ:
++		case BLKIF_OP_WRITE:
++			if (unlikely(bret->status != BLKIF_RSP_OKAY))
++				DPRINTK("Bad return from blkdev data "
++					"request: %x\n", bret->status);
++
++			ret = end_that_request_first(req, uptodate,
++				req->hard_nr_sectors);
++			BUG_ON(ret);
++			end_that_request_last(req, uptodate);
 +			break;
++		default:
++			BUG();
 +		}
-+		if (i && ((i % 4) == 0))
-+			printk("\n");
-+		printk(" %016lx", *stack++);
-+		touch_nmi_watchdog();
 +	}
-+	show_trace(tsk, regs, rsp);
++
++	info->ring.rsp_cons = i;
++
++	if (i != info->ring.req_prod_pvt) {
++		int more_to_do;
++		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
++		if (more_to_do)
++			goto again;
++	} else
++		info->ring.sring->rsp_event = i + 1;
++
++	kick_pending_request_queues(info);
++
++	spin_unlock_irqrestore(&blkif_io_lock, flags);
++
++	return IRQ_HANDLED;
 +}
 +
-+void show_stack(struct task_struct *tsk, unsigned long * rsp)
++static void blkif_free(struct blkfront_info *info, int suspend)
 +{
-+	_show_stack(tsk, NULL, rsp);
++	/* Prevent new requests being issued until we fix things up. */
++	spin_lock_irq(&blkif_io_lock);
++	info->connected = suspend ?
++		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
++	/* No more blkif_request(). */
++	if (info->rq)
++		blk_stop_queue(info->rq);
++	/* No more gnttab callback work. */
++	gnttab_cancel_free_callback(&info->callback);
++	spin_unlock_irq(&blkif_io_lock);
++
++	/* Flush gnttab callback work. Must be done with no locks held. */
++	flush_scheduled_work();
++
++	/* Free resources associated with old device channel. */
++	if (info->ring_ref != GRANT_INVALID_REF) {
++		gnttab_end_foreign_access(info->ring_ref, 
++					  (unsigned long)info->ring.sring);
++		info->ring_ref = GRANT_INVALID_REF;
++		info->ring.sring = NULL;
++	}
++	if (info->irq)
++		unbind_from_irqhandler(info->irq, info);
++	info->irq = 0;
 +}
 +
-+/*
-+ * The architecture-independent dump_stack generator
-+ */
-+void dump_stack(void)
++static void blkif_completion(struct blk_shadow *s)
 +{
-+	unsigned long dummy;
-+	show_trace(NULL, NULL, &dummy);
++	int i;
++	for (i = 0; i < s->req.nr_segments; i++)
++		gnttab_end_foreign_access(s->req.seg[i].gref, 0UL);
 +}
 +
-+EXPORT_SYMBOL(dump_stack);
-+
-+void show_registers(struct pt_regs *regs)
++static void blkif_recover(struct blkfront_info *info)
 +{
 +	int i;
-+	int in_kernel = !user_mode(regs);
-+	unsigned long rsp;
-+	const int cpu = safe_smp_processor_id(); 
-+	struct task_struct *cur = cpu_pda(cpu)->pcurrent;
-+
-+		rsp = regs->rsp;
++	blkif_request_t *req;
++	struct blk_shadow *copy;
++	int j;
 +
-+	printk("CPU %d ", cpu);
-+	__show_regs(regs);
-+	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
-+		cur->comm, cur->pid, task_thread_info(cur), cur);
++	/* Stage 1: Make a safe copy of the shadow state. */
++	copy = kmalloc(sizeof(info->shadow), GFP_NOIO | __GFP_NOFAIL | __GFP_HIGH);
++	memcpy(copy, info->shadow, sizeof(info->shadow));
 +
-+	/*
-+	 * When in-kernel, we also print out the stack and code at the
-+	 * time of the fault..
-+	 */
-+	if (in_kernel) {
++	/* Stage 2: Set up free list. */
++	memset(&info->shadow, 0, sizeof(info->shadow));
++	for (i = 0; i < BLK_RING_SIZE; i++)
++		info->shadow[i].req.id = i+1;
++	info->shadow_free = info->ring.req_prod_pvt;
++	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
 +
-+		printk("Stack: ");
-+		_show_stack(NULL, regs, (unsigned long*)rsp);
++	/* Stage 3: Find pending requests and requeue them. */
++	for (i = 0; i < BLK_RING_SIZE; i++) {
++		/* Not in use? */
++		if (copy[i].request == 0)
++			continue;
 +
-+		printk("\nCode: ");
-+		if (regs->rip < PAGE_OFFSET)
-+			goto bad;
++		/* Grab a request slot and copy shadow state into it. */
++		req = RING_GET_REQUEST(
++			&info->ring, info->ring.req_prod_pvt);
++		*req = copy[i].req;
 +
-+		for (i=0; i<20; i++) {
-+			unsigned char c;
-+			if (__get_user(c, &((unsigned char*)regs->rip)[i])) {
-+bad:
-+				printk(" Bad RIP value.");
-+				break;
-+			}
-+			printk("%02x ", c);
-+		}
-+	}
-+	printk("\n");
-+}	
++		/* We get a new request id, and must reset the shadow state. */
++		req->id = GET_ID_FROM_FREELIST(info);
++		memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));
 +
-+void handle_BUG(struct pt_regs *regs)
-+{ 
-+	struct bug_frame f;
-+	long len;
-+	const char *prefix = "";
++		/* Rewrite any grant references invalidated by susp/resume. */
++		for (j = 0; j < req->nr_segments; j++)
++			gnttab_grant_foreign_access_ref(
++				req->seg[j].gref,
++				info->xbdev->otherend_id,
++				pfn_to_mfn(info->shadow[req->id].frame[j]),
++				rq_data_dir((struct request *)
++					    info->shadow[req->id].request) ?
++				GTF_readonly : 0);
++		info->shadow[req->id].req = *req;
 +
-+	if (user_mode(regs))
-+		return; 
-+	if (__copy_from_user(&f, (const void __user *) regs->rip,
-+			     sizeof(struct bug_frame)))
-+		return; 
-+	if (f.filename >= 0 ||
-+	    f.ud2[0] != 0x0f || f.ud2[1] != 0x0b) 
-+		return;
-+	len = __strnlen_user((char *)(long)f.filename, PATH_MAX) - 1;
-+	if (len < 0 || len >= PATH_MAX)
-+		f.filename = (int)(long)"unmapped filename";
-+	else if (len > 50) {
-+		f.filename += len - 50;
-+		prefix = "...";
++		info->ring.req_prod_pvt++;
 +	}
-+	printk("----------- [cut here ] --------- [please bite here ] ---------\n");
-+	printk(KERN_ALERT "Kernel BUG at %s%.50s:%d\n", prefix, (char *)(long)f.filename, f.line);
-+} 
-+
-+#ifdef CONFIG_BUG
-+void out_of_line_bug(void)
-+{ 
-+	BUG(); 
-+} 
-+EXPORT_SYMBOL(out_of_line_bug);
-+#endif
 +
-+static DEFINE_SPINLOCK(die_lock);
-+static int die_owner = -1;
-+static unsigned int die_nest_count;
++	kfree(copy);
 +
-+unsigned __kprobes long oops_begin(void)
-+{
-+	int cpu = safe_smp_processor_id();
-+	unsigned long flags;
++	(void)xenbus_switch_state(info->xbdev, XenbusStateConnected);
 +
-+	/* racy, but better than risking deadlock. */
-+	local_irq_save(flags);
-+	if (!spin_trylock(&die_lock)) { 
-+		if (cpu == die_owner) 
-+			/* nested oops. should stop eventually */;
-+		else
-+			spin_lock(&die_lock);
-+	}
-+	die_nest_count++;
-+	die_owner = cpu;
-+	console_verbose();
-+	bust_spinlocks(1);
-+	return flags;
-+}
++	spin_lock_irq(&blkif_io_lock);
 +
-+void __kprobes oops_end(unsigned long flags)
-+{ 
-+	die_owner = -1;
-+	bust_spinlocks(0);
-+	die_nest_count--;
-+	if (die_nest_count)
-+		/* We still own the lock */
-+		local_irq_restore(flags);
-+	else
-+		/* Nest count reaches zero, release the lock. */
-+		spin_unlock_irqrestore(&die_lock, flags);
-+	if (panic_on_oops)
-+		panic("Fatal exception");
-+}
++	/* Now safe for us to use the shared ring */
++	info->connected = BLKIF_STATE_CONNECTED;
 +
-+void __kprobes __die(const char * str, struct pt_regs * regs, long err)
-+{
-+	static int die_counter;
-+	printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff,++die_counter);
-+#ifdef CONFIG_PREEMPT
-+	printk("PREEMPT ");
-+#endif
-+#ifdef CONFIG_SMP
-+	printk("SMP ");
-+#endif
-+#ifdef CONFIG_DEBUG_PAGEALLOC
-+	printk("DEBUG_PAGEALLOC");
-+#endif
-+	printk("\n");
-+	notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
-+	show_registers(regs);
-+	/* Executive summary in case the oops scrolled away */
-+	printk(KERN_ALERT "RIP ");
-+	printk_address(regs->rip); 
-+	printk(" RSP <%016lx>\n", regs->rsp); 
-+	if (kexec_should_crash(current))
-+		crash_kexec(regs);
-+}
++	/* Send off requeued requests */
++	flush_requests(info);
 +
-+void die(const char * str, struct pt_regs * regs, long err)
-+{
-+	unsigned long flags = oops_begin();
++	/* Kick any other new requests queued since we resumed */
++	kick_pending_request_queues(info);
 +
-+	handle_BUG(regs);
-+	__die(str, regs, err);
-+	oops_end(flags);
-+	do_exit(SIGSEGV); 
++	spin_unlock_irq(&blkif_io_lock);
 +}
 +
-+#ifdef CONFIG_X86_LOCAL_APIC
-+void __kprobes die_nmi(char *str, struct pt_regs *regs)
++int blkfront_is_ready(struct xenbus_device *dev)
 +{
-+	unsigned long flags = oops_begin();
++	struct blkfront_info *info = dev->dev.driver_data;
 +
-+	/*
-+	 * We are in trouble anyway, lets at least try
-+	 * to get a message out.
-+	 */
-+	printk(str, safe_smp_processor_id());
-+	show_registers(regs);
-+	if (kexec_should_crash(current))
-+		crash_kexec(regs);
-+	if (panic_on_timeout || panic_on_oops)
-+		panic("nmi watchdog");
-+	printk("console shuts up ...\n");
-+	oops_end(flags);
-+	nmi_exit();
-+	local_irq_enable();
-+	do_exit(SIGSEGV);
++	return info->is_ready;
 +}
-+#endif
 +
-+static void __kprobes do_trap(int trapnr, int signr, char *str,
-+			      struct pt_regs * regs, long error_code,
-+			      siginfo_t *info)
-+{
-+	struct task_struct *tsk = current;
 +
-+	tsk->thread.error_code = error_code;
-+	tsk->thread.trap_no = trapnr;
++/* ** Driver Registration ** */
 +
-+	if (user_mode(regs)) {
-+		if (exception_trace && unhandled_signal(tsk, signr))
-+			printk(KERN_INFO
-+			       "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
-+			       tsk->comm, tsk->pid, str,
-+			       regs->rip, regs->rsp, error_code); 
 +
-+		if (info)
-+			force_sig_info(signr, info, tsk);
-+		else
-+			force_sig(signr, tsk);
-+		return;
-+	}
++static const struct xenbus_device_id blkfront_ids[] = {
++	{ "vbd" },
++	{ "" }
++};
++MODULE_ALIAS("xen:vbd");
 +
++static struct xenbus_driver blkfront = {
++	.name = "vbd",
++	.owner = THIS_MODULE,
++	.ids = blkfront_ids,
++	.probe = blkfront_probe,
++	.remove = blkfront_remove,
++	.resume = blkfront_resume,
++	.otherend_changed = backend_changed,
++	.is_ready = blkfront_is_ready,
++};
 +
-+	/* kernel trap */ 
-+	{	     
-+		const struct exception_table_entry *fixup;
-+		fixup = search_exception_tables(regs->rip);
-+		if (fixup)
-+			regs->rip = fixup->fixup;
-+		else	
-+			die(str, regs, error_code);
-+		return;
-+	}
-+}
 +
-+#define DO_ERROR(trapnr, signr, str, name) \
-+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+							== NOTIFY_STOP) \
-+		return; \
-+	conditional_sti(regs);						\
-+	do_trap(trapnr, signr, str, regs, error_code, NULL); \
-+}
++static int __init xlblk_init(void)
++{
++	if (!is_running_on_xen())
++		return -ENODEV;
 +
-+#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
-+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+	siginfo_t info; \
-+	info.si_signo = signr; \
-+	info.si_errno = 0; \
-+	info.si_code = sicode; \
-+	info.si_addr = (void __user *)siaddr; \
-+	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+							== NOTIFY_STOP) \
-+		return; \
-+	conditional_sti(regs);						\
-+	do_trap(trapnr, signr, str, regs, error_code, &info); \
++	return xenbus_register_frontend(&blkfront);
 +}
++module_init(xlblk_init);
 +
-+DO_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->rip)
-+DO_ERROR( 4, SIGSEGV, "overflow", overflow)
-+DO_ERROR( 5, SIGSEGV, "bounds", bounds)
-+DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->rip)
-+DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
-+DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
-+DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
-+DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
-+DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
-+DO_ERROR(18, SIGSEGV, "reserved", reserved)
 +
-+/* Runs on IST stack */
-+asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
++static void __exit xlblk_exit(void)
 +{
-+	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
-+			12, SIGBUS) == NOTIFY_STOP)
-+		return;
-+	preempt_conditional_sti(regs);
-+	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
-+	preempt_conditional_cli(regs);
++	return xenbus_unregister_driver(&blkfront);
 +}
++module_exit(xlblk_exit);
 +
-+asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
++MODULE_LICENSE("Dual BSD/GPL");
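As a quick orientation to the registration flow in xlblk_init() above, here is a minimal userspace sketch; is_running_on_xen() and xenbus_register_frontend() are stubs that only mimic the shape of the kernel symbols, not the real xenbus API:

    #include <stdio.h>

    struct xenbus_driver { const char *name; };

    /* Stubs standing in for the Xen-patched kernel's symbols. */
    static int is_running_on_xen(void) { return 1; }
    static int xenbus_register_frontend(struct xenbus_driver *d)
    {
        printf("registering frontend '%s'\n", d->name);
        return 0;
    }

    static struct xenbus_driver blkfront = { .name = "vbd" };

    int main(void)
    {
        /* Mirrors xlblk_init(): refuse to load when not on Xen,
         * otherwise hand the driver to xenbus. */
        if (!is_running_on_xen())
            return 1;
        return xenbus_register_frontend(&blkfront);
    }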
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/blkfront/block.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/blkfront/block.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,158 @@
++/******************************************************************************
++ * block.h
++ * 
++ * Shared definitions between all levels of XenLinux Virtual block devices.
++ * 
++ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
++ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
++ * Copyright (c) 2004-2005, Christian Limpach
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_DRIVERS_BLOCK_H__
++#define __XEN_DRIVERS_BLOCK_H__
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/fs.h>
++#include <linux/hdreg.h>
++#include <linux/blkdev.h>
++#include <linux/major.h>
++#include <asm/hypervisor.h>
++#include <xen/xenbus.h>
++#include <xen/gnttab.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/io/blkif.h>
++#include <xen/interface/io/ring.h>
++#include <asm/io.h>
++#include <asm/atomic.h>
++#include <asm/uaccess.h>
++
++#define DPRINTK(_f, _a...) pr_debug(_f, ## _a)
++
++#if 0
++#define DPRINTK_IOCTL(_f, _a...) printk(KERN_ALERT _f, ## _a)
++#else
++#define DPRINTK_IOCTL(_f, _a...) ((void)0)
++#endif
++
++struct xlbd_type_info
 +{
-+	static const char str[] = "double fault";
-+	struct task_struct *tsk = current;
++	int partn_shift;
++	int disks_per_major;
++	char *devname;
++	char *diskname;
++};
 +
-+	/* Return not checked because double check cannot be ignored */
-+	notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);
++struct xlbd_major_info
++{
++	int major;
++	int index;
++	int usage;
++	struct xlbd_type_info *type;
++};
 +
-+	tsk->thread.error_code = error_code;
-+	tsk->thread.trap_no = 8;
++struct blk_shadow {
++	blkif_request_t req;
++	unsigned long request;
++	unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++};
 +
-+	/* This is always a kernel trap and never fixable (and thus must
-+	   never return). */
-+	for (;;)
-+		die(str, regs, error_code);
-+}
++#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
 +
-+asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
-+						long error_code)
++/*
++ * We have one of these per vbd, whether ide, scsi or 'other'.  They
++ * hang in private_data off the gendisk structure. We may end up
++ * putting all kinds of interesting stuff here :-)
++ */
++struct blkfront_info
 +{
-+	struct task_struct *tsk = current;
-+
-+	conditional_sti(regs);
++	struct xenbus_device *xbdev;
++	dev_t dev;
++ 	struct gendisk *gd;
++	int vdevice;
++	blkif_vdev_t handle;
++	int connected;
++	int ring_ref;
++	blkif_front_ring_t ring;
++	unsigned int irq;
++	struct xlbd_major_info *mi;
++	request_queue_t *rq;
++	struct work_struct work;
++	struct gnttab_free_callback callback;
++	struct blk_shadow shadow[BLK_RING_SIZE];
++	unsigned long shadow_free;
++	int feature_barrier;
++	int is_ready;
 +
-+	tsk->thread.error_code = error_code;
-+	tsk->thread.trap_no = 13;
++	/**
++	 * The number of people holding this device open.  We won't allow a
++	 * hot-unplug unless this is 0.
++	 */
++	int users;
++};
 +
-+	if (user_mode(regs)) {
-+		if (exception_trace && unhandled_signal(tsk, SIGSEGV))
-+			printk(KERN_INFO
-+		       "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
-+			       tsk->comm, tsk->pid,
-+			       regs->rip, regs->rsp, error_code); 
++extern spinlock_t blkif_io_lock;
 +
-+		force_sig(SIGSEGV, tsk);
-+		return;
-+	} 
++extern int blkif_open(struct inode *inode, struct file *filep);
++extern int blkif_release(struct inode *inode, struct file *filep);
++extern int blkif_ioctl(struct inode *inode, struct file *filep,
++		       unsigned command, unsigned long argument);
++extern int blkif_getgeo(struct block_device *, struct hd_geometry *);
++extern int blkif_check(dev_t dev);
++extern int blkif_revalidate(dev_t dev);
++extern void do_blkif_request (request_queue_t *rq);
 +
-+	/* kernel gp */
-+	{
-+		const struct exception_table_entry *fixup;
-+		fixup = search_exception_tables(regs->rip);
-+		if (fixup) {
-+			regs->rip = fixup->fixup;
-+			return;
-+		}
-+		if (notify_die(DIE_GPF, "general protection fault", regs,
-+					error_code, 13, SIGSEGV) == NOTIFY_STOP)
-+			return;
-+		die("general protection fault", regs, error_code);
-+	}
-+}
++/* Virtual block-device subsystem. */
++/* Note that xlvbd_add doesn't call add_disk for you: you're expected
++   to call add_disk on info->gd once the disk is properly connected
++   up. */
++int xlvbd_add(blkif_sector_t capacity, int device,
++	      u16 vdisk_info, u16 sector_size, struct blkfront_info *info);
++void xlvbd_del(struct blkfront_info *info);
++int xlvbd_barrier(struct blkfront_info *info);
 +
-+static __kprobes void
-+mem_parity_error(unsigned char reason, struct pt_regs * regs)
++#ifdef CONFIG_SYSFS
++int xlvbd_sysfs_addif(struct blkfront_info *info);
++void xlvbd_sysfs_delif(struct blkfront_info *info);
++#else
++static inline int xlvbd_sysfs_addif(struct blkfront_info *info)
 +{
-+	printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
-+	printk("You probably have a hardware problem with your RAM chips\n");
-+
-+#if 0 /* XEN */
-+	/* Clear and disable the memory parity error line. */
-+	reason = (reason & 0xf) | 4;
-+	outb(reason, 0x61);
-+#endif /* XEN */
++	return 0;
 +}
 +
-+static __kprobes void
-+io_check_error(unsigned char reason, struct pt_regs * regs)
++static inline void xlvbd_sysfs_delif(struct blkfront_info *info)
 +{
-+	printk("NMI: IOCK error (debug interrupt?)\n");
-+	show_registers(regs);
-+
-+#if 0 /* XEN */
-+	/* Re-enable the IOCK line, wait for a few seconds */
-+	reason = (reason & 0xf) | 8;
-+	outb(reason, 0x61);
-+	mdelay(2000);
-+	reason &= ~8;
-+	outb(reason, 0x61);
-+#endif /* XEN */
++	;
 +}
++#endif
 +
-+static __kprobes void
-+unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
-+{	printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
-+	printk("Dazed and confused, but trying to continue\n");
-+	printk("Do you have a strange power saving mode enabled?\n");
-+}
++#endif /* __XEN_DRIVERS_BLOCK_H__ */
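BLK_RING_SIZE above comes from __RING_SIZE(), which rounds the number of entries that fit in a page down to a power of two so producer/consumer indices can be wrapped with a cheap mask. A standalone sketch of that calculation; the struct sizes are illustrative, not the real blkif_sring_t layout:

    #include <stdio.h>

    #define PAGE_SIZE 4096

    struct ring_hdr   { unsigned req_prod, rsp_prod; }; /* assumed header */
    struct ring_entry { char payload[112]; };           /* assumed entry  */

    static unsigned rounddown_pow2(unsigned n)
    {
        unsigned p = 1;
        while (p * 2 <= n)
            p *= 2;
        return p;
    }

    int main(void)
    {
        unsigned raw = (PAGE_SIZE - sizeof(struct ring_hdr))
                       / sizeof(struct ring_entry);
        printf("entries per page: %u -> ring size: %u\n",
               raw, rounddown_pow2(raw));
        return 0;
    }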
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/blkfront/vbd.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/blkfront/vbd.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,460 @@
++/******************************************************************************
++ * vbd.c
++ * 
++ * XenLinux virtual block-device driver (xvd).
++ * 
++ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
++ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
++ * Copyright (c) 2004-2005, Christian Limpach
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
 +
-+/* Runs on IST stack. This code must keep interrupts off all the time.
-+   Nested NMIs are prevented by the CPU. */
-+asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
-+{
-+	unsigned char reason = 0;
-+	int cpu;
++#include "block.h"
++#include <linux/blkdev.h>
++#include <linux/list.h>
 +
-+	cpu = smp_processor_id();
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
 +
-+	/* Only the BSP gets external NMIs from the system.  */
-+	if (!cpu)
-+		reason = get_nmi_reason();
++#define BLKIF_MAJOR(dev) ((dev)>>8)
++#define BLKIF_MINOR(dev) ((dev) & 0xff)
 +
-+	if (!(reason & 0xc0)) {
-+		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
-+								== NOTIFY_STOP)
-+			return;
-+#ifdef CONFIG_X86_LOCAL_APIC
-+		/*
-+		 * Ok, so this is none of the documented NMI sources,
-+		 * so it must be the NMI watchdog.
-+		 */
-+		if (nmi_watchdog > 0) {
-+			nmi_watchdog_tick(regs,reason);
-+			return;
-+		}
-+#endif
-+		unknown_nmi_error(reason, regs);
-+		return;
-+	}
-+	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
-+		return; 
++#define EXT_SHIFT 28
++#define EXTENDED (1<<EXT_SHIFT)
++#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
++#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
 +
-+	/* AK: following checks seem to be broken on modern chipsets. FIXME */
++/*
++ * For convenience we distinguish between ide, scsi and 'other' (i.e.,
++ * potentially combinations of the two) in the naming scheme and in a few other
++ * places.
++ */
 +
-+	if (reason & 0x80)
-+		mem_parity_error(reason, regs);
-+	if (reason & 0x40)
-+		io_check_error(reason, regs);
-+}
++#define NUM_IDE_MAJORS 10
++#define NUM_SCSI_MAJORS 17
++#define NUM_VBD_MAJORS 2
 +
-+/* runs on IST stack. */
-+asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
-+{
-+	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
-+		return;
-+	}
-+	preempt_conditional_sti(regs);
-+	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
-+	preempt_conditional_cli(regs);
-+}
++static struct xlbd_type_info xlbd_ide_type = {
++	.partn_shift = 6,
++	.disks_per_major = 2,
++	.devname = "ide",
++	.diskname = "hd",
++};
 +
-+/* Help handler running on IST stack to switch back to user stack
-+   for scheduling or signal handling. The actual stack switch is done in
-+   entry.S */
-+asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
-+{
-+	struct pt_regs *regs = eregs;
-+	/* Did already sync */
-+	if (eregs == (struct pt_regs *)eregs->rsp)
-+		;
-+	/* Exception from user space */
-+	else if (user_mode(eregs))
-+		regs = task_pt_regs(current);
-+	/* Exception from kernel and interrupts are enabled. Move to
-+ 	   kernel process stack. */
-+	else if (eregs->eflags & X86_EFLAGS_IF)
-+		regs = (struct pt_regs *)(eregs->rsp -= sizeof(struct pt_regs));
-+	if (eregs != regs)
-+		*regs = *eregs;
-+	return regs;
-+}
++static struct xlbd_type_info xlbd_scsi_type = {
++	.partn_shift = 4,
++	.disks_per_major = 16,
++	.devname = "sd",
++	.diskname = "sd",
++};
 +
-+/* runs on IST stack. */
-+asmlinkage void __kprobes do_debug(struct pt_regs * regs,
-+				   unsigned long error_code)
-+{
-+	unsigned long condition;
-+	struct task_struct *tsk = current;
-+	siginfo_t info;
++static struct xlbd_type_info xlbd_vbd_type = {
++	.partn_shift = 4,
++	.disks_per_major = 16,
++	.devname = "xvd",
++	.diskname = "xvd",
++};
 +
-+	get_debugreg(condition, 6);
++static struct xlbd_type_info xlbd_vbd_type_ext = {
++	.partn_shift = 8,
++	.disks_per_major = 256,
++	.devname = "xvd",
++	.diskname = "xvd",
++};
 +
-+	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
-+						SIGTRAP) == NOTIFY_STOP)
-+		return;
++static struct xlbd_major_info *major_info[NUM_IDE_MAJORS + NUM_SCSI_MAJORS +
++					 NUM_VBD_MAJORS];
 +
-+	preempt_conditional_sti(regs);
++#define XLBD_MAJOR_IDE_START	0
++#define XLBD_MAJOR_SCSI_START	(NUM_IDE_MAJORS)
++#define XLBD_MAJOR_VBD_START	(NUM_IDE_MAJORS + NUM_SCSI_MAJORS)
 +
-+	/* Mask out spurious debug traps due to lazy DR7 setting */
-+	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
-+		if (!tsk->thread.debugreg7) { 
-+			goto clear_dr7;
-+		}
-+	}
++#define XLBD_MAJOR_IDE_RANGE	XLBD_MAJOR_IDE_START ... XLBD_MAJOR_SCSI_START - 1
++#define XLBD_MAJOR_SCSI_RANGE	XLBD_MAJOR_SCSI_START ... XLBD_MAJOR_VBD_START - 1
++#define XLBD_MAJOR_VBD_RANGE	XLBD_MAJOR_VBD_START ... XLBD_MAJOR_VBD_START + NUM_VBD_MAJORS - 1
 +
-+	tsk->thread.debugreg6 = condition;
++static struct block_device_operations xlvbd_block_fops =
++{
++	.owner = THIS_MODULE,
++	.open = blkif_open,
++	.release = blkif_release,
++	.ioctl  = blkif_ioctl,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++	.getgeo = blkif_getgeo
++#endif
++};
 +
-+	/* Mask out spurious TF errors due to lazy TF clearing */
-+	if (condition & DR_STEP) {
-+		/*
-+		 * The TF error should be masked out only if the current
-+		 * process is not traced and if the TRAP flag has been set
-+		 * previously by a tracing process (condition detected by
-+		 * the PT_DTRACE flag); remember that the i386 TRAP flag
-+		 * can be modified by the process itself in user mode,
-+		 * allowing programs to debug themselves without the ptrace()
-+		 * interface.
-+		 */
-+                if (!user_mode(regs))
-+                       goto clear_TF_reenable;
-+		/*
-+		 * Was the TF flag set by a debugger? If so, clear it now,
-+		 * so that register information is correct.
-+		 */
-+		if (tsk->ptrace & PT_DTRACE) {
-+			regs->eflags &= ~TF_MASK;
-+			tsk->ptrace &= ~PT_DTRACE;
-+		}
-+	}
++DEFINE_SPINLOCK(blkif_io_lock);
 +
-+	/* Ok, finally something we can handle */
-+	tsk->thread.trap_no = 1;
-+	tsk->thread.error_code = error_code;
-+	info.si_signo = SIGTRAP;
-+	info.si_errno = 0;
-+	info.si_code = TRAP_BRKPT;
-+	info.si_addr = user_mode(regs) ? (void __user *)regs->rip : NULL;
-+	force_sig_info(SIGTRAP, &info, tsk);
++static struct xlbd_major_info *
++xlbd_alloc_major_info(int major, int minor, int index)
++{
++	struct xlbd_major_info *ptr;
++	int do_register;
 +
-+clear_dr7:
-+	set_debugreg(0UL, 7);
-+	preempt_conditional_cli(regs);
-+	return;
++	ptr = kzalloc(sizeof(struct xlbd_major_info), GFP_KERNEL);
++	if (ptr == NULL)
++		return NULL;
 +
-+clear_TF_reenable:
-+	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
-+	regs->eflags &= ~TF_MASK;
-+	preempt_conditional_cli(regs);
-+}
++	ptr->major = major;
++	do_register = 1;
 +
-+static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
-+{
-+	const struct exception_table_entry *fixup;
-+	fixup = search_exception_tables(regs->rip);
-+	if (fixup) {
-+		regs->rip = fixup->fixup;
-+		return 1;
-+	}
-+	notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
-+	/* Illegal floating point operation in the kernel */
-+	current->thread.trap_no = trapnr;
-+	die(str, regs, 0);
-+	return 0;
-+}
++	switch (index) {
++	case XLBD_MAJOR_IDE_RANGE:
++		ptr->type = &xlbd_ide_type;
++		ptr->index = index - XLBD_MAJOR_IDE_START;
++		break;
++	case XLBD_MAJOR_SCSI_RANGE:
++		ptr->type = &xlbd_scsi_type;
++		ptr->index = index - XLBD_MAJOR_SCSI_START;
++		break;
++	case XLBD_MAJOR_VBD_RANGE:
++		ptr->index = 0;
++		if ((index - XLBD_MAJOR_VBD_START) == 0)
++			ptr->type = &xlbd_vbd_type;
++		else
++			ptr->type = &xlbd_vbd_type_ext;
 +
-+/*
-+ * Note that we play around with the 'TS' bit in an attempt to get
-+ * the correct behaviour even in the presence of the asynchronous
-+ * IRQ13 behaviour
-+ */
-+asmlinkage void do_coprocessor_error(struct pt_regs *regs)
-+{
-+	void __user *rip = (void __user *)(regs->rip);
-+	struct task_struct * task;
-+	siginfo_t info;
-+	unsigned short cwd, swd;
++		/* 
++		 * if someone already registered block major 202,
++		 * don't try to register it again
++		 */
++		if (major_info[XLBD_MAJOR_VBD_START] != NULL)
++			do_register = 0;
++		break;
++	}
 +
-+	conditional_sti(regs);
-+	if (!user_mode(regs) &&
-+	    kernel_math_error(regs, "kernel x87 math error", 16))
-+		return;
++	if (do_register) {
++		if (register_blkdev(ptr->major, ptr->type->devname)) {
++			kfree(ptr);
++			return NULL;
++		}
 +
-+	/*
-+	 * Save the info for the exception handler and clear the error.
-+	 */
-+	task = current;
-+	save_init_fpu(task);
-+	task->thread.trap_no = 16;
-+	task->thread.error_code = 0;
-+	info.si_signo = SIGFPE;
-+	info.si_errno = 0;
-+	info.si_code = __SI_FAULT;
-+	info.si_addr = rip;
-+	/*
-+	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
-+	 * status.  0x3f is the exception bits in these regs, 0x200 is the
-+	 * C1 reg you need in case of a stack fault, 0x040 is the stack
-+	 * fault bit.  We should only be taking one exception at a time,
-+	 * so if this combination doesn't produce any single exception,
-+	 * then we have a bad program that isn't synchronizing its FPU usage
-+	 * and it will suffer the consequences since we won't be able to
-+	 * fully reproduce the context of the exception
-+	 */
-+	cwd = get_fpu_cwd(task);
-+	swd = get_fpu_swd(task);
-+	switch (swd & ~cwd & 0x3f) {
-+		case 0x000:
-+		default:
-+			break;
-+		case 0x001: /* Invalid Op */
-+			/*
-+			 * swd & 0x240 == 0x040: Stack Underflow
-+			 * swd & 0x240 == 0x240: Stack Overflow
-+			 * User must clear the SF bit (0x40) if set
-+			 */
-+			info.si_code = FPE_FLTINV;
-+			break;
-+		case 0x002: /* Denormalize */
-+		case 0x010: /* Underflow */
-+			info.si_code = FPE_FLTUND;
-+			break;
-+		case 0x004: /* Zero Divide */
-+			info.si_code = FPE_FLTDIV;
-+			break;
-+		case 0x008: /* Overflow */
-+			info.si_code = FPE_FLTOVF;
-+			break;
-+		case 0x020: /* Precision */
-+			info.si_code = FPE_FLTRES;
-+			break;
++		printk("xen-vbd: registered block device major %i\n", ptr->major);
 +	}
-+	force_sig_info(SIGFPE, &info, task);
-+}
 +
-+asmlinkage void bad_intr(void)
-+{
-+	printk("bad interrupt"); 
++	major_info[index] = ptr;
++	return ptr;
 +}
 +
-+asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
++static struct xlbd_major_info *
++xlbd_get_major_info(int major, int minor, int vdevice)
 +{
-+	void __user *rip = (void __user *)(regs->rip);
-+	struct task_struct * task;
-+	siginfo_t info;
-+	unsigned short mxcsr;
-+
-+	conditional_sti(regs);
-+	if (!user_mode(regs) &&
-+        	kernel_math_error(regs, "kernel simd math error", 19))
-+		return;
++	struct xlbd_major_info *mi;
++	int index;
 +
-+	/*
-+	 * Save the info for the exception handler and clear the error.
-+	 */
-+	task = current;
-+	save_init_fpu(task);
-+	task->thread.trap_no = 19;
-+	task->thread.error_code = 0;
-+	info.si_signo = SIGFPE;
-+	info.si_errno = 0;
-+	info.si_code = __SI_FAULT;
-+	info.si_addr = rip;
-+	/*
-+	 * The SIMD FPU exceptions are handled a little differently, as there
-+	 * is only a single status/control register.  Thus, to determine which
-+	 * unmasked exception was caught we must mask the exception mask bits
-+	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
-+	 */
-+	mxcsr = get_fpu_mxcsr(task);
-+	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
-+		case 0x000:
-+		default:
-+			break;
-+		case 0x001: /* Invalid Op */
-+			info.si_code = FPE_FLTINV;
-+			break;
-+		case 0x002: /* Denormalize */
-+		case 0x010: /* Underflow */
-+			info.si_code = FPE_FLTUND;
-+			break;
-+		case 0x004: /* Zero Divide */
-+			info.si_code = FPE_FLTDIV;
-+			break;
-+		case 0x008: /* Overflow */
-+			info.si_code = FPE_FLTOVF;
-+			break;
-+		case 0x020: /* Precision */
-+			info.si_code = FPE_FLTRES;
-+			break;
++	switch (major) {
++	case IDE0_MAJOR: index = 0; break;
++	case IDE1_MAJOR: index = 1; break;
++	case IDE2_MAJOR: index = 2; break;
++	case IDE3_MAJOR: index = 3; break;
++	case IDE4_MAJOR: index = 4; break;
++	case IDE5_MAJOR: index = 5; break;
++	case IDE6_MAJOR: index = 6; break;
++	case IDE7_MAJOR: index = 7; break;
++	case IDE8_MAJOR: index = 8; break;
++	case IDE9_MAJOR: index = 9; break;
++	case SCSI_DISK0_MAJOR: index = 10; break;
++	case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR:
++		index = 11 + major - SCSI_DISK1_MAJOR;
++		break;
++	case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR:
++		index = 18 + major - SCSI_DISK8_MAJOR;
++		break;
++	case SCSI_CDROM_MAJOR: index = 26; break;
++	default:
++		if (!VDEV_IS_EXTENDED(vdevice))
++			index = 27;
++		else
++			index = 28;
++		break;
 +	}
-+	force_sig_info(SIGFPE, &info, task);
-+}
 +
-+asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs)
-+{
++	mi = ((major_info[index] != NULL) ? major_info[index] :
++	      xlbd_alloc_major_info(major, minor, index));
++	if (mi)
++		mi->usage++;
++	return mi;
 +}
 +
-+#if 0
-+asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
++static void
++xlbd_put_major_info(struct xlbd_major_info *mi)
 +{
++	mi->usage--;
++	/* XXX: release major if 0 */
 +}
-+#endif
 +
-+asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
++static int
++xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
 +{
-+}
++	request_queue_t *rq;
 +
-+/*
-+ *  'math_state_restore()' saves the current math information in the
-+ * old math state array, and gets the new ones from the current task
-+ *
-+ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
-+ * Don't touch unless you *really* know how it works.
-+ */
-+asmlinkage void math_state_restore(void)
-+{
-+	struct task_struct *me = current;
-+        /* clts(); */ /* 'clts' is done for us by Xen during virtual trap. */
++	rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
++	if (rq == NULL)
++		return -1;
 +
-+	if (!used_math())
-+		init_fpu(me);
-+	restore_fpu_checking(&me->thread.i387.fxsave);
-+	task_thread_info(me)->status |= TS_USEDFPU;
-+}
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
++	elevator_init(rq, "noop");
++#else
++	elevator_init(rq, &elevator_noop);
++#endif
 +
++	/* Hard sector size and max sectors impersonate the equiv. hardware. */
++	blk_queue_hardsect_size(rq, sector_size);
++	blk_queue_max_sectors(rq, 512);
 +
-+/*
-+ * NB. All these are "interrupt gates" (i.e. events_mask is set) because we
-+ * specify <dpl>|4 in the second field.
-+ */
-+static trap_info_t trap_table[] = {
-+        {  0, 0|4, __KERNEL_CS, (unsigned long)divide_error               },
-+        {  1, 0|4, __KERNEL_CS, (unsigned long)debug                      },
-+        {  3, 3|4, __KERNEL_CS, (unsigned long)int3                       },
-+        {  4, 3|4, __KERNEL_CS, (unsigned long)overflow                   },
-+        {  5, 0|4, __KERNEL_CS, (unsigned long)bounds                     },
-+        {  6, 0|4, __KERNEL_CS, (unsigned long)invalid_op                 },
-+        {  7, 0|4, __KERNEL_CS, (unsigned long)device_not_available       },
-+        {  9, 0|4, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun},
-+        { 10, 0|4, __KERNEL_CS, (unsigned long)invalid_TSS                },
-+        { 11, 0|4, __KERNEL_CS, (unsigned long)segment_not_present        },
-+        { 12, 0|4, __KERNEL_CS, (unsigned long)stack_segment              },
-+        { 13, 0|4, __KERNEL_CS, (unsigned long)general_protection         },
-+        { 14, 0|4, __KERNEL_CS, (unsigned long)page_fault                 },
-+        { 15, 0|4, __KERNEL_CS, (unsigned long)spurious_interrupt_bug     },
-+        { 16, 0|4, __KERNEL_CS, (unsigned long)coprocessor_error          },
-+        { 17, 0|4, __KERNEL_CS, (unsigned long)alignment_check            },
-+#ifdef CONFIG_X86_MCE
-+        { 18, 0|4, __KERNEL_CS, (unsigned long)machine_check              },
-+#endif
-+        { 19, 0|4, __KERNEL_CS, (unsigned long)simd_coprocessor_error     },
-+#ifdef CONFIG_IA32_EMULATION
-+	{ IA32_SYSCALL_VECTOR, 3|4, __KERNEL_CS, (unsigned long)ia32_syscall},
-+#endif
-+        {  0, 0,           0, 0                                              }
-+};
++	/* Each segment in a request is up to an aligned page in size. */
++	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
++	blk_queue_max_segment_size(rq, PAGE_SIZE);
 +
-+void __init trap_init(void)
-+{
-+        int ret;
++	/* Ensure a merged request will fit in a single I/O ring slot. */
++	blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
++	blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
 +
-+        ret = HYPERVISOR_set_trap_table(trap_table);
-+        
-+        if (ret) 
-+                printk("HYPERVISOR_set_trap_table failed: error %d\n",
-+                       ret);
++	/* Make sure buffer addresses are sector-aligned. */
++	blk_queue_dma_alignment(rq, 511);
 +
-+	/*
-+	 * Should be a barrier for any external CPU state.
-+	 */
-+	cpu_init();
++	/* Make sure we don't use bounce buffers. */
++	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
++
++	gd->queue = rq;
++
++	return 0;
 +}
 +
-+void smp_trap_init(trap_info_t *trap_ctxt)
++static int
++xlvbd_alloc_gendisk(int major, int minor, blkif_sector_t capacity, int vdevice,
++		    u16 vdisk_info, u16 sector_size,
++		    struct blkfront_info *info)
 +{
-+	trap_info_t *t = trap_table;
++	struct gendisk *gd;
++	struct xlbd_major_info *mi;
++	int nr_minors = 1;
++	int err = -ENODEV;
++	unsigned int offset;
 +
-+	for (t = trap_table; t->address; t++) {
-+		trap_ctxt[t->vector].flags = t->flags;
-+		trap_ctxt[t->vector].cs = t->cs;
-+		trap_ctxt[t->vector].address = t->address;
-+	}
-+}
++	BUG_ON(info->gd != NULL);
++	BUG_ON(info->mi != NULL);
++	BUG_ON(info->rq != NULL);
 +
++	mi = xlbd_get_major_info(major, minor, vdevice);
++	if (mi == NULL)
++		goto out;
++	info->mi = mi;
 +
-+/* Actual parsing is done early in setup.c. */
-+static int __init oops_dummy(char *s)
-+{ 
-+	panic_on_oops = 1;
-+	return 1;
-+} 
-+__setup("oops=", oops_dummy); 
++	if ((minor & ((1 << mi->type->partn_shift) - 1)) == 0)
++		nr_minors = 1 << mi->type->partn_shift;
 +
-+static int __init kstack_setup(char *s)
-+{
-+	kstack_depth_to_print = simple_strtoul(s,NULL,0);
-+	return 1;
-+}
-+__setup("kstack=", kstack_setup);
++	gd = alloc_disk(nr_minors);
++	if (gd == NULL)
++		goto out;
 +
-+#ifdef CONFIG_STACK_UNWIND
-+static int __init call_trace_setup(char *s)
-+{
-+	if (strcmp(s, "old") == 0)
-+		call_trace = -1;
-+	else if (strcmp(s, "both") == 0)
-+		call_trace = 0;
-+	else if (strcmp(s, "newfallback") == 0)
-+		call_trace = 1;
-+	else if (strcmp(s, "new") == 0)
-+		call_trace = 2;
-+	return 1;
-+}
-+__setup("call_trace=", call_trace_setup);
-+#endif
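smp_trap_init() above walks the NULL-terminated trap_table and scatters each entry into a dense per-vector array for the hypervisor. The same walk as a compilable userspace sketch, with trap_info_t reduced to an illustrative struct and made-up handler addresses:

    #include <stdio.h>

    struct trap_info { int vector; int flags; unsigned long address; };

    static struct trap_info trap_table[] = {
        {  0, 0|4, 0x1000 },   /* divide_error (illustrative address) */
        { 13, 0|4, 0x2000 },   /* general_protection */
        {  0, 0,   0      }    /* terminator: address == 0 */
    };

    int main(void)
    {
        struct trap_info ctxt[256] = { { 0 } };
        struct trap_info *t;

        /* Same loop shape as smp_trap_init(): stop at the entry
         * whose handler address is zero. */
        for (t = trap_table; t->address; t++)
            ctxt[t->vector] = *t;

        printf("vector 13 -> handler %#lx\n", ctxt[13].address);
        return 0;
    }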
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/vmlinux.lds.S tmp-linux-2.6-xen.patch/arch/x86_64/kernel/vmlinux.lds.S
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/vmlinux.lds.S	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/vmlinux.lds.S	2007-10-14 01:51:15.000000000 +0200
-@@ -13,6 +13,13 @@ OUTPUT_FORMAT("elf64-x86-64", "elf64-x86
- OUTPUT_ARCH(i386:x86-64)
- ENTRY(phys_startup_64)
- jiffies_64 = jiffies;
-+PHDRS {
-+	text PT_LOAD FLAGS(5);	/* R_E */
-+	data PT_LOAD FLAGS(7);	/* RWE */
-+	user PT_LOAD FLAGS(7);	/* RWE */
-+	data.init PT_LOAD FLAGS(7);	/* RWE */
-+	note PT_NOTE FLAGS(4);	/* R__ */
-+}
- SECTIONS
- {
-   . = __START_KERNEL;
-@@ -31,7 +38,7 @@ SECTIONS
- 	KPROBES_TEXT
- 	*(.fixup)
- 	*(.gnu.warning)
--	} = 0x9090
-+	} :text = 0x9090
-   				/* out-of-line lock text */
-   .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET) { *(.text.lock) }
- 
-@@ -57,17 +64,10 @@ SECTIONS
-   .data : AT(ADDR(.data) - LOAD_OFFSET) {
- 	*(.data)
- 	CONSTRUCTORS
--	}
-+	} :data
- 
-   _edata = .;			/* End of data section */
- 
--  __bss_start = .;		/* BSS */
--  .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
--	*(.bss.page_aligned)	
--	*(.bss)
--	}
--  __bss_stop = .;
--
-   . = ALIGN(PAGE_SIZE);
-   . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
-   .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
-@@ -89,7 +89,7 @@ SECTIONS
- #define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
- 
-   . = VSYSCALL_ADDR;
--  .vsyscall_0 :	 AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) }
-+  .vsyscall_0 :	 AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) } :user
-   __vsyscall_0 = VSYSCALL_VIRT_ADDR;
- 
-   . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
-@@ -132,7 +132,7 @@ SECTIONS
-   . = ALIGN(8192);		/* init_task */
-   .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
- 	*(.data.init_task)
--  }
-+  }:data.init
- 
-   . = ALIGN(4096);
-   .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
-@@ -222,6 +222,14 @@ SECTIONS
-   . = ALIGN(4096);
-   __nosave_end = .;
- 
-+  __bss_start = .;		/* BSS */
-+  . = ALIGN(4096);
-+  .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
-+	*(.bss.page_aligned)
-+	*(.bss)
++	offset =  mi->index * mi->type->disks_per_major +
++			(minor >> mi->type->partn_shift);
++	if (nr_minors > 1) {
++		if (offset < 26) {
++			sprintf(gd->disk_name, "%s%c",
++				 mi->type->diskname, 'a' + offset );
++		}
++		else {
++			sprintf(gd->disk_name, "%s%c%c",
++				mi->type->diskname,
++				'a' + ((offset/26)-1), 'a' + (offset%26) );
++		}
++	}
++	else {
++		if (offset < 26) {
++			sprintf(gd->disk_name, "%s%c%d",
++				mi->type->diskname,
++				'a' + offset,
++				minor & ((1 << mi->type->partn_shift) - 1));
++		}
++		else {
++			sprintf(gd->disk_name, "%s%c%c%d",
++				mi->type->diskname,
++				'a' + ((offset/26)-1), 'a' + (offset%26),
++				minor & ((1 << mi->type->partn_shift) - 1));
++		}
 +	}
-+  __bss_stop = .;
 +
-   _end = . ;
- 
-   /* Sections to be discarded */
-@@ -235,4 +243,6 @@ SECTIONS
-   STABS_DEBUG
- 
-   DWARF_DEBUG
++	gd->major = mi->major;
++	gd->first_minor = minor;
++	gd->fops = &xlvbd_block_fops;
++	gd->private_data = info;
++	gd->driverfs_dev = &(info->xbdev->dev);
++	set_capacity(gd, capacity);
 +
-+  NOTES
- }
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/vsyscall-xen.c tmp-linux-2.6-xen.patch/arch/x86_64/kernel/vsyscall-xen.c
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/vsyscall-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/vsyscall-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,239 @@
-+/*
-+ *  linux/arch/x86_64/kernel/vsyscall.c
-+ *
-+ *  Copyright (C) 2001 Andrea Arcangeli <andrea at suse.de> SuSE
-+ *  Copyright 2003 Andi Kleen, SuSE Labs.
-+ *
-+ *  Thanks to hpa at transmeta.com for some useful hint.
-+ *  Special thanks to Ingo Molnar for his early experience with
-+ *  a different vsyscall implementation for Linux/IA32 and for the name.
-+ *
-+ *  vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
-+ *  at virtual address -10Mbyte+1024bytes etc... There are at max 4
-+ *  vsyscalls. One vsyscall can reserve more than 1 slot to avoid
-+ *  jumping out of line if necessary. We cannot add more with this
-+ *  mechanism because older kernels won't return -ENOSYS.
-+ *  If we want more than four we need a vDSO.
-+ *
-+ *  Note: the concept clashes with user mode linux. If you use UML and
-+ *  want per guest time just set the kernel.vsyscall64 sysctl to 0.
-+ */
++	if (xlvbd_init_blk_queue(gd, sector_size)) {
++		del_gendisk(gd);
++		goto out;
++	}
 +
-+#include <linux/time.h>
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/timer.h>
-+#include <linux/seqlock.h>
-+#include <linux/jiffies.h>
-+#include <linux/sysctl.h>
++	info->rq = gd->queue;
++	info->gd = gd;
 +
-+#include <asm/vsyscall.h>
-+#include <asm/pgtable.h>
-+#include <asm/page.h>
-+#include <asm/fixmap.h>
-+#include <asm/errno.h>
-+#include <asm/io.h>
++	if (info->feature_barrier)
++		xlvbd_barrier(info);
 +
-+#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
++	if (vdisk_info & VDISK_READONLY)
++		set_disk_ro(gd, 1);
 +
-+int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
-+seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
++	if (vdisk_info & VDISK_REMOVABLE)
++		gd->flags |= GENHD_FL_REMOVABLE;
 +
-+#include <asm/unistd.h>
++	if (vdisk_info & VDISK_CDROM)
++		gd->flags |= GENHD_FL_CD;
 +
-+static __always_inline void timeval_normalize(struct timeval * tv)
-+{
-+	time_t __sec;
++	return 0;
 +
-+	__sec = tv->tv_usec / 1000000;
-+	if (__sec) {
-+		tv->tv_usec %= 1000000;
-+		tv->tv_sec += __sec;
-+	}
++ out:
++	if (mi)
++		xlbd_put_major_info(mi);
++	info->mi = NULL;
++	return err;
 +}
 +
-+static __always_inline void do_vgettimeofday(struct timeval * tv)
++int
++xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info,
++	  u16 sector_size, struct blkfront_info *info)
 +{
-+	long sequence, t;
-+	unsigned long sec, usec;
++	struct block_device *bd;
++	int err = 0;
++	int major, minor;
 +
-+	do {
-+		sequence = read_seqbegin(&__xtime_lock);
-+		
-+		sec = __xtime.tv_sec;
-+		usec = (__xtime.tv_nsec / 1000) +
-+			(__jiffies - __wall_jiffies) * (1000000 / HZ);
++	if ((vdevice>>EXT_SHIFT) > 1) {
++		/* this is above the extended range; something is wrong */
++		printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", vdevice);
++		return -ENODEV;
++	}
 +
-+		if (__vxtime.mode != VXTIME_HPET) {
-+			t = get_cycles_sync();
-+			if (t < __vxtime.last_tsc)
-+				t = __vxtime.last_tsc;
-+			usec += ((t - __vxtime.last_tsc) *
-+				 __vxtime.tsc_quot) >> 32;
-+			/* See comment in x86_64 do_gettimeofday. */
-+		} else {
-+			usec += ((readl((void *)fix_to_virt(VSYSCALL_HPET) + 0xf0) -
-+				  __vxtime.last) * __vxtime.quot) >> 32;
-+		}
-+	} while (read_seqretry(&__xtime_lock, sequence));
++	if (!VDEV_IS_EXTENDED(vdevice)) {
++		major = BLKIF_MAJOR(vdevice);
++		minor = BLKIF_MINOR(vdevice);
++	}
++	else {
++		major = 202;
++		minor = BLKIF_MINOR_EXT(vdevice);
++	}
 +
-+	tv->tv_sec = sec + usec / 1000000;
-+	tv->tv_usec = usec % 1000000;
-+}
++	info->dev = MKDEV(major, minor);
++	bd = bdget(info->dev);
++	if (bd == NULL)
++		return -ENODEV;
 +
-+/* RED-PEN may want to readd seq locking, but then the variable should be write-once. */
-+static __always_inline void do_get_tz(struct timezone * tz)
-+{
-+	*tz = __sys_tz;
-+}
++	err = xlvbd_alloc_gendisk(major, minor, capacity, vdevice, vdisk_info,
++				  sector_size, info);
 +
-+static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
-+{
-+	int ret;
-+	asm volatile("vsysc2: syscall"
-+		: "=a" (ret)
-+		: "0" (__NR_gettimeofday),"D" (tv),"S" (tz) : __syscall_clobber );
-+	return ret;
++	bdput(bd);
++	return err;
 +}
 +
-+static __always_inline long time_syscall(long *t)
++void
++xlvbd_del(struct blkfront_info *info)
 +{
-+	long secs;
-+	asm volatile("vsysc1: syscall"
-+		: "=a" (secs)
-+		: "0" (__NR_time),"D" (t) : __syscall_clobber);
-+	return secs;
-+}
++	if (info->mi == NULL)
++		return;
 +
-+int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
-+{
-+	if (!__sysctl_vsyscall)
-+		return gettimeofday(tv,tz);
-+	if (tv)
-+		do_vgettimeofday(tv);
-+	if (tz)
-+		do_get_tz(tz);
-+	return 0;
-+}
++	BUG_ON(info->gd == NULL);
++	del_gendisk(info->gd);
++	put_disk(info->gd);
++	info->gd = NULL;
 +
-+/* This will break when the xtime seconds get inaccurate, but that is
-+ * unlikely */
-+time_t __vsyscall(1) vtime(time_t *t)
-+{
-+	if (!__sysctl_vsyscall)
-+		return time_syscall(t);
-+	else if (t)
-+		*t = __xtime.tv_sec;		
-+	return __xtime.tv_sec;
++	xlbd_put_major_info(info->mi);
++	info->mi = NULL;
++
++	BUG_ON(info->rq == NULL);
++	blk_cleanup_queue(info->rq);
++	info->rq = NULL;
 +}
 +
-+long __vsyscall(2) venosys_0(void)
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++int
++xlvbd_barrier(struct blkfront_info *info)
 +{
-+	return -ENOSYS;
-+}
++	int err;
 +
-+long __vsyscall(3) venosys_1(void)
++	err = blk_queue_ordered(info->rq,
++		info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE, NULL);
++	if (err)
++		return err;
++	printk(KERN_INFO "blkfront: %s: barriers %s\n",
++	       info->gd->disk_name, info->feature_barrier ? "enabled" : "disabled");
++	return 0;
++}
++#else
++int
++xlvbd_barrier(struct blkfront_info *info)
 +{
++	printk(KERN_INFO "blkfront: %s: barriers disabled\n", info->gd->disk_name);
 +	return -ENOSYS;
 +}
++#endif
 +
-+#ifdef CONFIG_SYSCTL
-+
-+#define SYSCALL 0x050f
-+#define NOP2    0x9090
-+
-+/*
-+ * NOP out syscall in vsyscall page when not needed.
-+ */
-+static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
-+                        void __user *buffer, size_t *lenp, loff_t *ppos)
++#ifdef CONFIG_SYSFS
++static ssize_t show_media(struct device *dev,
++		                  struct device_attribute *attr, char *buf)
 +{
-+	extern u16 vsysc1, vsysc2;
-+	u16 *map1, *map2;
-+	int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
-+	if (!write)
-+		return ret;
-+	/* gcc has some trouble with __va(__pa()), so just do it this
-+	   way. */
-+	map1 = ioremap(__pa_symbol(&vsysc1), 2);
-+	if (!map1)
-+		return -ENOMEM;
-+	map2 = ioremap(__pa_symbol(&vsysc2), 2);
-+	if (!map2) {
-+		ret = -ENOMEM;
-+		goto out;
-+	}
-+	if (!sysctl_vsyscall) {
-+		*map1 = SYSCALL;
-+		*map2 = SYSCALL;
-+	} else {
-+		*map1 = NOP2;
-+		*map2 = NOP2;
-+	}
-+	iounmap(map2);
-+out:
-+	iounmap(map1);
-+	return ret;
-+}
++	struct xenbus_device *xendev = to_xenbus_device(dev);
++	struct blkfront_info *info = xendev->dev.driver_data;
 +
-+static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
-+				void __user *oldval, size_t __user *oldlenp,
-+				void __user *newval, size_t newlen,
-+				void **context)
-+{
-+	return -ENOSYS;
++	if (info->gd->flags & GENHD_FL_CD)
++		return sprintf(buf, "cdrom\n");
++	return sprintf(buf, "disk\n");
 +}
 +
-+static ctl_table kernel_table2[] = {
-+	{ .ctl_name = 99, .procname = "vsyscall64",
-+	  .data = &sysctl_vsyscall, .maxlen = sizeof(int), .mode = 0644,
-+	  .strategy = vsyscall_sysctl_nostrat,
-+	  .proc_handler = vsyscall_sysctl_change },
-+	{ 0, }
-+};
-+
-+static ctl_table kernel_root_table2[] = {
-+	{ .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
-+	  .child = kernel_table2 },
-+	{ 0 },
++static struct device_attribute xlvbd_attrs[] = {
++	__ATTR(media, S_IRUGO, show_media, NULL),
 +};
 +
-+#endif
-+
-+static void __init map_vsyscall(void)
++int xlvbd_sysfs_addif(struct blkfront_info *info)
 +{
-+	extern char __vsyscall_0;
-+	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
++	int i;
++	int error = 0;
 +
-+	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
++	for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) {
++		error = device_create_file(info->gd->driverfs_dev,
++				&xlvbd_attrs[i]);
++		if (error)
++			goto fail;
++	}
++	return 0;
++
++fail:
++	while (--i >= 0)
++		device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]);
++	return error;
 +}
 +
-+#ifdef CONFIG_XEN
-+static void __init map_vsyscall_user(void)
++void xlvbd_sysfs_delif(struct blkfront_info *info)
 +{
-+	extern void __set_fixmap_user(enum fixed_addresses, unsigned long, pgprot_t);
-+	extern char __vsyscall_0;
-+	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
++	int i;
 +
-+	__set_fixmap_user(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
++	for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++)
++		device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]);
 +}
-+#endif
 +
-+static int __init vsyscall_init(void)
-+{
-+	BUG_ON(((unsigned long) &vgettimeofday !=
-+			VSYSCALL_ADDR(__NR_vgettimeofday)));
-+	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
-+	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
-+	map_vsyscall();
-+#ifdef CONFIG_XEN
-+	map_vsyscall_user();
-+	sysctl_vsyscall = 0; /* disable vgettimeofay() */
-+#endif
-+#ifdef CONFIG_SYSCTL
-+	register_sysctl_table(kernel_root_table2, 0);
-+#endif
-+	return 0;
-+}
++#endif /* CONFIG_SYSFS */
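The sprintf chains in xlvbd_alloc_gendisk() above encode the usual one-then-two letter disk suffixes (xvda..xvdz, then xvdaa..xvdzz). Just that mapping, extracted into a self-contained sketch:

    #include <stdio.h>

    /* offsets 0..25 -> xvda..xvdz; 26..701 -> xvdaa..xvdzz */
    static void disk_name(unsigned offset, char buf[8])
    {
        if (offset < 26)
            sprintf(buf, "xvd%c", 'a' + offset);
        else
            sprintf(buf, "xvd%c%c",
                    'a' + (offset / 26) - 1, 'a' + (offset % 26));
    }

    int main(void)
    {
        unsigned offs[] = { 0, 25, 26, 701 };
        unsigned i;
        char name[8];

        for (i = 0; i < sizeof(offs) / sizeof(offs[0]); i++) {
            disk_name(offs[i], name);
            printf("offset %3u -> %s\n", offs[i], name);
        }
        return 0;
    }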
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/blktap/Makefile
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/blktap/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,5 @@
++LINUXINCLUDE += -I../xen/include/public/io
++
++obj-$(CONFIG_XEN_BLKDEV_TAP) := xenblktap.o
++
++xenblktap-y := xenbus.o interface.o blktap.o 
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/blktap/blktap.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/blktap/blktap.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1681 @@
++/******************************************************************************
++ * drivers/xen/blktap/blktap.c
++ * 
++ * Back-end driver for user level virtual block devices. This portion of the
++ * driver exports a 'unified' block-device interface that can be accessed
++ * by any operating system that implements a compatible front end. Requests
++ * are remapped to a user-space memory region.
++ *
++ * Based on the blkback driver code.
++ * 
++ * Copyright (c) 2004-2005, Andrew Warfield and Julian Chesterfield
++ *
++ * Clean ups and fix ups:
++ *    Copyright (c) 2006, Steven Rostedt - Red Hat, Inc.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/spinlock.h>
++#include <linux/kthread.h>
++#include <linux/list.h>
++#include <asm/hypervisor.h>
++#include "common.h"
++#include <xen/balloon.h>
++#include <xen/driver_util.h>
++#include <linux/kernel.h>
++#include <linux/fs.h>
++#include <linux/mm.h>
++#include <linux/errno.h>
++#include <linux/major.h>
++#include <linux/gfp.h>
++#include <linux/poll.h>
++#include <linux/delay.h>
++#include <asm/tlbflush.h>
++
++#define MAX_TAP_DEV 256     /*the maximum number of tapdisk ring devices    */
++#define MAX_DEV_NAME 100    /*the max tapdisk ring device name e.g. blktap0 */
 +
-+__initcall(vsyscall_init);
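The read loop in do_vgettimeofday() above is the classic seqlock consumer: snapshot the time variables, then retry if the sequence count was odd (writer active) or changed underneath. A simplified userspace rendering, with a C11 atomic standing in for __xtime_lock; these helpers only mimic the kernel's read_seqbegin/read_seqretry, they are not the real implementation:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned seq;                 /* stands in for __xtime_lock   */
    static unsigned long xtime_sec = 1224340492; /* stands in for __xtime.tv_sec */

    static unsigned read_begin(void) { return atomic_load(&seq); }
    static int read_retry(unsigned s)
    {
        /* Odd count: a writer is mid-update.  Changed count: a
         * writer completed while we were reading. */
        return (s & 1) || atomic_load(&seq) != s;
    }

    int main(void)
    {
        unsigned long sec;
        unsigned s;

        do {
            s = read_begin();
            sec = xtime_sec;        /* snapshot the shared state */
        } while (read_retry(s));

        printf("sec = %lu\n", sec);
        return 0;
    }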
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/kernel/xen_entry.S tmp-linux-2.6-xen.patch/arch/x86_64/kernel/xen_entry.S
---- pristine-linux-2.6.18.2/arch/x86_64/kernel/xen_entry.S	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/kernel/xen_entry.S	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,40 @@
 +/*
-+ * Copied from arch/xen/i386/kernel/entry.S
-+ */                        
-+/* Offsets into shared_info_t. */                
-+#define evtchn_upcall_pending		/* 0 */
-+#define evtchn_upcall_mask		1
++ * The maximum number of requests that can be outstanding at any time
++ * is determined by 
++ *
++ *   [mmap_alloc * MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST] 
++ *
++ * where mmap_alloc < MAX_DYNAMIC_MEM.
++ *
++ * TODO:
++ * mmap_alloc is initialised to 2 and should be adjustable on the fly via
++ * sysfs.
++ */
++#define BLK_RING_SIZE		__RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
++#define MAX_DYNAMIC_MEM		BLK_RING_SIZE
++#define MAX_PENDING_REQS	BLK_RING_SIZE
++#define MMAP_PAGES (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
++#define MMAP_VADDR(_start, _req,_seg)                                   \
++        (_start +                                                       \
++         ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +        \
++         ((_seg) * PAGE_SIZE))
++static int blkif_reqs = MAX_PENDING_REQS;
++static int mmap_pages = MMAP_PAGES;
 +
-+#define sizeof_vcpu_shift		6
++#define RING_PAGES 1 /* BLKTAP - immediately before the mmap area, we
++		      * have a bunch of pages reserved for shared
++		      * memory rings.
++		      */
 +
-+#ifdef CONFIG_SMP
-+//#define preempt_disable(reg)	incl threadinfo_preempt_count(reg)
-+//#define preempt_enable(reg)	decl threadinfo_preempt_count(reg)
-+#define preempt_disable(reg)
-+#define preempt_enable(reg)
-+#define XEN_GET_VCPU_INFO(reg)	preempt_disable(%rbp)			; \
-+				movq %gs:pda_cpunumber,reg		; \
-+				shl  $32, reg				; \
-+				shr  $32-sizeof_vcpu_shift,reg		; \
-+				addq HYPERVISOR_shared_info,reg
-+#define XEN_PUT_VCPU_INFO(reg)	preempt_enable(%rbp)			; \
-+#define XEN_PUT_VCPU_INFO_fixup .byte 0xff,0xff,0xff
-+#else
-+#define XEN_GET_VCPU_INFO(reg)	movq HYPERVISOR_shared_info,reg
-+#define XEN_PUT_VCPU_INFO(reg)
-+#define XEN_PUT_VCPU_INFO_fixup
-+#endif
++/*Data struct handed back to userspace for tapdisk device to VBD mapping*/
++typedef struct domid_translate {
++	unsigned short domid;
++	unsigned short busid;
++} domid_translate_t ;
 +
-+#define XEN_LOCKED_BLOCK_EVENTS(reg)	movb $1,evtchn_upcall_mask(reg)
-+#define XEN_LOCKED_UNBLOCK_EVENTS(reg)	movb $0,evtchn_upcall_mask(reg)
-+#define XEN_BLOCK_EVENTS(reg)	XEN_GET_VCPU_INFO(reg)			; \
-+				XEN_LOCKED_BLOCK_EVENTS(reg)		; \
-+    				XEN_PUT_VCPU_INFO(reg)
-+#define XEN_UNBLOCK_EVENTS(reg)	XEN_GET_VCPU_INFO(reg)			; \
-+				XEN_LOCKED_UNBLOCK_EVENTS(reg)		; \
-+    				XEN_PUT_VCPU_INFO(reg)
-+#define XEN_TEST_PENDING(reg)	testb $0xFF,evtchn_upcall_pending(reg)
++typedef struct domid_translate_ext {
++	unsigned short domid;
++	u32 busid;
++} domid_translate_ext_t ;
 +
-+VGCF_IN_SYSCALL = (1<<8)
-+        
-+	
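The XEN_BLOCK_EVENTS/XEN_UNBLOCK_EVENTS macros above gate event delivery through a single mask byte in the shared vcpu info; the two #defines at the top pin evtchn_upcall_pending to offset 0 and evtchn_upcall_mask to offset 1. The same logic rendered as plain C, with an illustrative struct matching those offsets:

    #include <stdio.h>

    struct vcpu_info {
        unsigned char evtchn_upcall_pending;   /* offset 0 */
        unsigned char evtchn_upcall_mask;      /* offset 1 */
    };

    static void block_events(struct vcpu_info *v)   { v->evtchn_upcall_mask = 1; }
    static void unblock_events(struct vcpu_info *v) { v->evtchn_upcall_mask = 0; }
    static int  test_pending(struct vcpu_info *v)   { return v->evtchn_upcall_pending != 0; }

    int main(void)
    {
        struct vcpu_info v = { 1, 0 };

        block_events(&v);
        printf("mask=%u pending=%d\n", v.evtchn_upcall_mask, test_pending(&v));
        unblock_events(&v);
        printf("mask=%u\n", v.evtchn_upcall_mask);
        return 0;
    }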
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/Makefile tmp-linux-2.6-xen.patch/arch/x86_64/Makefile
---- pristine-linux-2.6.18.2/arch/x86_64/Makefile	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/Makefile	2007-07-30 16:35:11.000000000 +0200
-@@ -32,6 +32,10 @@ cflags-$(CONFIG_MK8) += $(call cc-option
- cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
- cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
- 
-+cppflags-$(CONFIG_XEN) += \
-+	-D__XEN_INTERFACE_VERSION__=$(CONFIG_XEN_INTERFACE_VERSION)
-+CPPFLAGS += $(cppflags-y)
++/*Data struct associated with each of the tapdisk devices*/
++typedef struct tap_blkif {
++	struct vm_area_struct *vma;   /*Shared memory area                   */
++	unsigned long rings_vstart;   /*Kernel memory mapping                */
++	unsigned long user_vstart;    /*User memory mapping                  */
++	unsigned long dev_inuse;      /*One process opens device at a time.  */
++	unsigned long dev_pending;    /*In process of being opened           */
++	unsigned long ring_ok;        /*make this ring->state                */
++	blkif_front_ring_t ufe_ring;  /*Rings up to user space.              */
++	wait_queue_head_t wait;       /*for poll                             */
++	unsigned long mode;           /*current switching mode               */
++	int minor;                    /*Minor number for tapdisk device      */
++	pid_t pid;                    /*tapdisk process id                   */
++	enum { RUNNING, CLEANSHUTDOWN } status; /*Detect a clean userspace 
++						  shutdown                   */
++	unsigned long *idx_map;       /*Record the user ring id to kern 
++					[req id, idx] tuple                  */
++	blkif_t *blkif;               /*Associate blkif with tapdev          */
++	struct domid_translate_ext trans; /*Translation from domid to bus.   */
++} tap_blkif_t;
 +
- cflags-y += -m64
- cflags-y += -mno-red-zone
- cflags-y += -mcmodel=kernel
-@@ -74,6 +78,21 @@ boot := arch/x86_64/boot
- PHONY += bzImage bzlilo install archmrproper \
- 	 fdimage fdimage144 fdimage288 isoimage archclean
- 
-+ifdef CONFIG_XEN
-+CPPFLAGS := -Iinclude$(if $(KBUILD_SRC),2)/asm/mach-xen $(CPPFLAGS)
-+head-y := arch/x86_64/kernel/head-xen.o arch/x86_64/kernel/head64-xen.o arch/x86_64/kernel/init_task.o
-+LDFLAGS_vmlinux := -e _start
-+boot := arch/i386/boot-xen
-+.PHONY: vmlinuz
-+#Default target when executing "make"
-+all: vmlinuz
++static struct tap_blkif *tapfds[MAX_TAP_DEV];
++static int blktap_next_minor;
 +
-+vmlinuz: vmlinux
-+	$(Q)$(MAKE) $(build)=$(boot) $@
++module_param(blkif_reqs, int, 0);
++/* Run-time switchable: /sys/module/blktap/parameters/ */
++static unsigned int log_stats = 0;
++static unsigned int debug_lvl = 0;
++module_param(log_stats, int, 0644);
++module_param(debug_lvl, int, 0644);
 +
-+install:
-+	$(Q)$(MAKE) $(build)=$(boot) XENGUEST=$(XENGUEST) $@
-+else
- #Default target when executing "make"
- all: bzImage
- 
-@@ -94,6 +113,7 @@ fdimage fdimage144 fdimage288 isoimage: 
- 
- install:
- 	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@ 
-+endif
- 
- archclean:
- 	$(Q)$(MAKE) $(clean)=$(boot)
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/mm/fault-xen.c tmp-linux-2.6-xen.patch/arch/x86_64/mm/fault-xen.c
---- pristine-linux-2.6.18.2/arch/x86_64/mm/fault-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/mm/fault-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,724 @@
 +/*
-+ *  linux/arch/x86-64/mm/fault.c
-+ *
-+ *  Copyright (C) 1995  Linus Torvalds
-+ *  Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
++ * Each outstanding request that we've passed to the lower device layers has a 
++ * 'pending_req' allocated to it. Each buffer_head that completes decrements 
++ * the pendcnt towards zero. When it hits zero, the specified domain has a 
++ * response queued for it, with the saved 'id' passed back.
 + */
++typedef struct {
++	blkif_t       *blkif;
++	u64            id;
++	unsigned short mem_idx;
++	int            nr_pages;
++	atomic_t       pendcnt;
++	unsigned short operation;
++	int            status;
++	struct list_head free_list;
++	int            inuse;
++} pending_req_t;
 +
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/ptrace.h>
-+#include <linux/mman.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/interrupt.h>
-+#include <linux/init.h>
-+#include <linux/tty.h>
-+#include <linux/vt_kern.h>		/* For unblank_screen() */
-+#include <linux/compiler.h>
-+#include <linux/module.h>
-+#include <linux/kprobes.h>
-+
-+#include <asm/system.h>
-+#include <asm/uaccess.h>
-+#include <asm/pgalloc.h>
-+#include <asm/smp.h>
-+#include <asm/tlbflush.h>
-+#include <asm/proto.h>
-+#include <asm/kdebug.h>
-+#include <asm-generic/sections.h>
-+
-+/* Page fault error code bits */
-+#define PF_PROT	(1<<0)		/* or no page found */
-+#define PF_WRITE	(1<<1)
-+#define PF_USER	(1<<2)
-+#define PF_RSVD	(1<<3)
-+#define PF_INSTR	(1<<4)
++static pending_req_t *pending_reqs[MAX_PENDING_REQS];
++static struct list_head pending_free;
++static DEFINE_SPINLOCK(pending_free_lock);
++static DECLARE_WAIT_QUEUE_HEAD (pending_free_wq);
++static int alloc_pending_reqs;
 +
-+#ifdef CONFIG_KPROBES
-+ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
++typedef unsigned int PEND_RING_IDX;
 +
-+/* Hook to register for page fault notifications */
-+int register_page_fault_notifier(struct notifier_block *nb)
-+{
-+	vmalloc_sync_all();
-+	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
++static inline int MASK_PEND_IDX(int i) { 
++	return (i & (MAX_PENDING_REQS-1));
 +}
 +
-+int unregister_page_fault_notifier(struct notifier_block *nb)
-+{
-+	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
++static inline unsigned int RTN_PEND_IDX(pending_req_t *req, int idx) {
++	return (req - pending_reqs[idx]);
 +}
 +
-+static inline int notify_page_fault(enum die_val val, const char *str,
-+			struct pt_regs *regs, long err, int trap, int sig)
-+{
-+	struct die_args args = {
-+		.regs = regs,
-+		.str = str,
-+		.err = err,
-+		.trapnr = trap,
-+		.signr = sig
-+	};
-+	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
-+}
-+#else
-+static inline int notify_page_fault(enum die_val val, const char *str,
-+			struct pt_regs *regs, long err, int trap, int sig)
-+{
-+	return NOTIFY_DONE;
-+}
-+#endif
++#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
 +
-+void bust_spinlocks(int yes)
++#define BLKBACK_INVALID_HANDLE (~0)
++
++static struct page **foreign_pages[MAX_DYNAMIC_MEM];
++static inline unsigned long idx_to_kaddr(
++	unsigned int mmap_idx, unsigned int req_idx, unsigned int sg_idx)
 +{
-+	int loglevel_save = console_loglevel;
-+	if (yes) {
-+		oops_in_progress = 1;
-+	} else {
-+#ifdef CONFIG_VT
-+		unblank_screen();
-+#endif
-+		oops_in_progress = 0;
-+		/*
-+		 * OK, the message is on the console.  Now we call printk()
-+		 * without oops_in_progress set so that printk will give klogd
-+		 * a poke.  Hold onto your hats...
-+		 */
-+		console_loglevel = 15;		/* NMI oopser may have shut the console up */
-+		printk(" ");
-+		console_loglevel = loglevel_save;
-+	}
++	unsigned int arr_idx = req_idx*BLKIF_MAX_SEGMENTS_PER_REQUEST + sg_idx;
++	unsigned long pfn = page_to_pfn(foreign_pages[mmap_idx][arr_idx]);
++	return (unsigned long)pfn_to_kaddr(pfn);
 +}
 +
-+/* Sometimes the CPU reports invalid exceptions on prefetch.
-+   Check that here and ignore.
-+   Opcode checker based on code by Richard Brunner */
-+static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
-+				unsigned long error_code)
-+{ 
-+	unsigned char *instr;
-+	int scan_more = 1;
-+	int prefetch = 0; 
-+	unsigned char *max_instr;
++static unsigned short mmap_alloc = 0;
++static unsigned short mmap_lock = 0;
++static unsigned short mmap_inuse = 0;
 +
-+	/* If it was a exec fault ignore */
-+	if (error_code & PF_INSTR)
-+		return 0;
-+	
-+	instr = (unsigned char *)convert_rip_to_linear(current, regs);
-+	max_instr = instr + 15;
++/******************************************************************
++ * GRANT HANDLES
++ */
 +
-+	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
-+		return 0;
++/* When using grant tables to map a frame for device access then the
++ * handle returned must be used to unmap the frame. This is needed to
++ * drop the ref count on the frame.
++ */
++struct grant_handle_pair
++{
++        grant_handle_t kernel;
++        grant_handle_t user;
++};
++#define INVALID_GRANT_HANDLE	0xFFFF
 +
-+	while (scan_more && instr < max_instr) { 
-+		unsigned char opcode;
-+		unsigned char instr_hi;
-+		unsigned char instr_lo;
++static struct grant_handle_pair 
++    pending_grant_handles[MAX_DYNAMIC_MEM][MMAP_PAGES];
++#define pending_handle(_id, _idx, _i) \
++    (pending_grant_handles[_id][((_idx) * BLKIF_MAX_SEGMENTS_PER_REQUEST) \
++    + (_i)])
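The map/unmap pairing these handles support can be illustrated with a minimal
sketch; 'ref' and 'domid' are assumed to arrive with a frontend request, and
example_map_and_record is a hypothetical name, not a function in this driver:

	static int example_map_and_record(unsigned long kvaddr, grant_ref_t ref,
					  domid_t domid,
					  struct grant_handle_pair *gh)
	{
		struct gnttab_map_grant_ref map;

		/* Map the foreign frame at kvaddr for kernel access. */
		gnttab_set_map_op(&map, kvaddr, GNTMAP_host_map, ref, domid);
		if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &map, 1) ||
		    map.status != GNTST_okay)
			return -EFAULT;

		/* Record the handle; it is the only way to unmap the frame
		 * and drop its ref count later (via gnttab_set_unmap_op). */
		gh->kernel = map.handle;
		return 0;
	}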
 +
-+		if (__get_user(opcode, instr))
-+			break; 
 +
-+		instr_hi = opcode & 0xf0; 
-+		instr_lo = opcode & 0x0f; 
-+		instr++;
++static int blktap_read_ufe_ring(tap_blkif_t *info); /* local prototype */
 +
-+		switch (instr_hi) { 
-+		case 0x20:
-+		case 0x30:
-+			/* Values 0x26,0x2E,0x36,0x3E are valid x86
-+			   prefixes.  In long mode, the CPU will signal
-+			   invalid opcode if some of these prefixes are
-+			   present so we will never get here anyway */
-+			scan_more = ((instr_lo & 7) == 0x6);
-+			break;
-+			
-+		case 0x40:
-+			/* In AMD64 long mode, 0x40 to 0x4F are valid REX prefixes
-+			   Need to figure out under what instruction mode the
-+			   instruction was issued ... */
-+			/* Could check the LDT for lm, but for now it's good
-+			   enough to assume that long mode only uses well known
-+			   segments or kernel. */
-+			scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
-+			break;
-+			
-+		case 0x60:
-+			/* 0x64 thru 0x67 are valid prefixes in all modes. */
-+			scan_more = (instr_lo & 0xC) == 0x4;
-+			break;		
-+		case 0xF0:
-+			/* 0xF0, 0xF2, and 0xF3 are valid prefixes in all modes. */
-+			scan_more = !instr_lo || (instr_lo>>1) == 1;
-+			break;			
-+		case 0x00:
-+			/* Prefetch instruction is 0x0F0D or 0x0F18 */
-+			scan_more = 0;
-+			if (__get_user(opcode, instr)) 
-+				break;
-+			prefetch = (instr_lo == 0xF) &&
-+				(opcode == 0x0D || opcode == 0x18);
-+			break;			
-+		default:
-+			scan_more = 0;
-+			break;
-+		} 
-+	}
-+	return prefetch;
-+}
++#define BLKTAP_MINOR 0  /* /dev/xen/blktap has a dynamic major */
++#define BLKTAP_DEV_DIR  "/dev/xen"
 +
-+static int bad_address(void *p) 
-+{ 
-+	unsigned long dummy;
-+	return __get_user(dummy, (unsigned long *)p);
-+} 
++static int blktap_major;
 +
-+void dump_pagetable(unsigned long address)
-+{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
++/* blktap IOCTLs: */
++#define BLKTAP_IOCTL_KICK_FE         1
++#define BLKTAP_IOCTL_KICK_BE         2 /* currently unused */
++#define BLKTAP_IOCTL_SETMODE         3
++#define BLKTAP_IOCTL_SENDPID	     4
++#define BLKTAP_IOCTL_NEWINTF	     5
++#define BLKTAP_IOCTL_MINOR	     6
++#define BLKTAP_IOCTL_MAJOR	     7
++#define BLKTAP_QUERY_ALLOC_REQS      8
++#define BLKTAP_IOCTL_FREEINTF        9
++#define BLKTAP_IOCTL_NEWINTF_EXT     50
++#define BLKTAP_IOCTL_PRINT_IDXS      100  
 +
-+	pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
-+	pgd += pgd_index(address);
-+	if (bad_address(pgd)) goto bad;
-+	printk("PGD %lx ", pgd_val(*pgd));
-+	if (!pgd_present(*pgd)) goto ret; 
++/* blktap switching modes: (Set with BLKTAP_IOCTL_SETMODE)             */
++#define BLKTAP_MODE_PASSTHROUGH      0x00000000  /* default            */
++#define BLKTAP_MODE_INTERCEPT_FE     0x00000001
++#define BLKTAP_MODE_INTERCEPT_BE     0x00000002  /* unimp.             */
 +
-+	pud = pud_offset(pgd, address);
-+	if (bad_address(pud)) goto bad;
-+	printk("PUD %lx ", pud_val(*pud));
-+	if (!pud_present(*pud))	goto ret;
++#define BLKTAP_MODE_INTERPOSE \
++           (BLKTAP_MODE_INTERCEPT_FE | BLKTAP_MODE_INTERCEPT_BE)
 +
-+	pmd = pmd_offset(pud, address);
-+	if (bad_address(pmd)) goto bad;
-+	printk("PMD %lx ", pmd_val(*pmd));
-+	if (!pmd_present(*pmd))	goto ret;	 
 +
-+	pte = pte_offset_kernel(pmd, address);
-+	if (bad_address(pte)) goto bad;
-+	printk("PTE %lx", pte_val(*pte)); 
-+ret:
-+	printk("\n");
-+	return;
-+bad:
-+	printk("BAD\n");
++static inline int BLKTAP_MODE_VALID(unsigned long arg)
++{
++	return ((arg == BLKTAP_MODE_PASSTHROUGH) ||
++		(arg == BLKTAP_MODE_INTERCEPT_FE) ||
++		(arg == BLKTAP_MODE_INTERPOSE));
 +}
 +
-+static const char errata93_warning[] = 
-+KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
-+KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
-+KERN_ERR "******* Please consider a BIOS update.\n"
-+KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
++/* Requests passing through the tap to userspace are re-assigned an ID.
++ * We must record a mapping between the BE [IDX,ID] tuple and the userspace
++ * ring ID. 
++ */
 +
-+/* Workaround for K8 erratum #93 & buggy BIOS.
-+   BIOS SMM functions are required to use a specific workaround
-+   to avoid corruption of the 64bit RIP register on C stepping K8. 
-+   A lot of BIOS that didn't get tested properly miss this. 
-+   The OS sees this as a page fault with the upper 32bits of RIP cleared.
-+   Try to work around it here.
-+   Note we only handle faults in kernel here. */
++static inline unsigned long MAKE_ID(domid_t fe_dom, PEND_RING_IDX idx)
++{
++        return ((fe_dom << 16) | MASK_PEND_IDX(idx));
++}
 +
-+static int is_errata93(struct pt_regs *regs, unsigned long address) 
++static inline PEND_RING_IDX ID_TO_IDX(unsigned long id)
 +{
-+	static int warned;
-+	if (address != regs->rip)
-+		return 0;
-+	if ((address >> 32) != 0) 
-+		return 0;
-+	address |= 0xffffffffUL << 32;
-+	if ((address >= (u64)_stext && address <= (u64)_etext) || 
-+	    (address >= MODULES_VADDR && address <= MODULES_END)) { 
-+		if (!warned) {
-+			printk(errata93_warning); 		
-+			warned = 1;
-+		}
-+		regs->rip = address;
-+		return 1;
-+	}
-+	return 0;
-+} 
++        return (PEND_RING_IDX)(id & 0x0000ffff);
++}
++
++static inline int ID_TO_MIDX(unsigned long id)
++{
++        return (int)(id >> 16);
++}
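A quick round trip of this packing, assuming MAX_PENDING_REQS is 64 (so
MASK_PEND_IDX masks with 63):

	/* MAKE_ID masks the ring index up front, so
	 * id == (5 << 16) | (130 & 63) == 0x50002;
	 * ID_TO_MIDX(id) == 5 and ID_TO_IDX(id) == 2. */
	unsigned long id = MAKE_ID(5, 130);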
 +
-+int unhandled_signal(struct task_struct *tsk, int sig)
++#define INVALID_REQ 0xdead0000
++
++/* TODO: convert this linear scan to a free list (see the sketch below) */
++static inline int GET_NEXT_REQ(unsigned long *idx_map)
 +{
-+	if (tsk->pid == 1)
-+		return 1;
-+	if (tsk->ptrace & PT_PTRACED)
-+		return 0;
-+	return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
-+		(tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
++	int i;
++	for (i = 0; i < MAX_PENDING_REQS; i++)
++		if (idx_map[i] == INVALID_REQ)
++			return i;
++
++	return INVALID_REQ;
 +}
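One way the TODO above could be discharged is to track free slots in a bitmap
rather than scanning idx_map linearly; a rough sketch (names hypothetical, not
part of this driver):

	static DECLARE_BITMAP(usr_idx_in_use, MAX_PENDING_REQS);

	static inline int get_next_req_bitmap(void)
	{
		int i = find_first_zero_bit(usr_idx_in_use, MAX_PENDING_REQS);

		if (i >= MAX_PENDING_REQS)
			return INVALID_REQ;
		__set_bit(i, usr_idx_in_use);
		return i;
	}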
 +
-+static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
-+				 unsigned long error_code)
++static inline int OFFSET_TO_USR_IDX(int offset)
 +{
-+	unsigned long flags = oops_begin();
-+	struct task_struct *tsk;
++	return offset / BLKIF_MAX_SEGMENTS_PER_REQUEST;
++}
 +
-+	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
-+	       current->comm, address);
-+	dump_pagetable(address);
-+	tsk = current;
-+	tsk->thread.cr2 = address;
-+	tsk->thread.trap_no = 14;
-+	tsk->thread.error_code = error_code;
-+	__die("Bad pagetable", regs, error_code);
-+	oops_end(flags);
-+	do_exit(SIGKILL);
++static inline int OFFSET_TO_SEG(int offset)
++{
++	return offset % BLKIF_MAX_SEGMENTS_PER_REQUEST;
 +}
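These two helpers invert the flattening used by idx_to_kaddr above: a page
offset within the user-mapped data area decodes back to (request slot,
segment). For example, again assuming BLKIF_MAX_SEGMENTS_PER_REQUEST is 11:

	/* offset 25: OFFSET_TO_USR_IDX(25) == 25 / 11 == 2,
	 *            OFFSET_TO_SEG(25)    == 25 % 11 == 3. */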
 +
-+/*
-+ * Handle a fault on the vmalloc area
-+ *
-+ * This assumes no large pages in there.
++
++#define BLKTAP_INVALID_HANDLE(_g) \
++    (((_g->kernel) == INVALID_GRANT_HANDLE) &&  \
++     ((_g->user) == INVALID_GRANT_HANDLE))
++
++#define BLKTAP_INVALIDATE_HANDLE(_g) do {       \
++    (_g)->kernel = INVALID_GRANT_HANDLE; (_g)->user = INVALID_GRANT_HANDLE; \
++    } while(0)
++
++
++/******************************************************************
++ * BLKTAP VM OPS
 + */
-+static int vmalloc_fault(unsigned long address)
++
++static struct page *blktap_nopage(struct vm_area_struct *vma,
++				  unsigned long address,
++				  int *type)
 +{
-+	pgd_t *pgd, *pgd_ref;
-+	pud_t *pud, *pud_ref;
-+	pmd_t *pmd, *pmd_ref;
-+	pte_t *pte, *pte_ref;
++	/*
++	 * If the page has not been mapped in by the driver, return
++	 * NOPAGE_SIGBUS to the domain.
++	 */
 +
-+	/* Copy kernel mappings over when needed. This can also
-+	   happen within a race in page table update. In the later
-+	   case just flush. */
++	return NOPAGE_SIGBUS;
++}
 +
-+	/* On Xen the line below does not always work. Needs investigating! */
-+	/*pgd = pgd_offset(current->mm ?: &init_mm, address);*/
-+	pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
-+	pgd += pgd_index(address);
-+	pgd_ref = pgd_offset_k(address);
-+	if (pgd_none(*pgd_ref))
-+		return -1;
-+	if (pgd_none(*pgd))
-+		set_pgd(pgd, *pgd_ref);
-+	else
-+		BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
++static pte_t blktap_clear_pte(struct vm_area_struct *vma,
++			      unsigned long uvaddr,
++			      pte_t *ptep, int is_fullmm)
++{
++	pte_t copy;
++	tap_blkif_t *info;
++	int offset, seg, usr_idx, pending_idx, mmap_idx;
++	unsigned long uvstart = vma->vm_start + (RING_PAGES << PAGE_SHIFT);
++	unsigned long kvaddr;
++	struct page **map;
++	struct page *pg;
++	struct grant_handle_pair *khandle;
++	struct gnttab_unmap_grant_ref unmap[2];
++	int count = 0;
 +
-+	/* Below here mismatches are bugs because these lower tables
-+	   are shared */
++	/*
++	 * If the address is before the start of the grant-mapped region, or
++	 * if vm_file is NULL (meaning mmap failed and there is nothing to
++	 * do), just clear the PTE.
++	 */
++	if (uvaddr < uvstart || vma->vm_file == NULL)
++		return ptep_get_and_clear_full(vma->vm_mm, uvaddr, 
++					       ptep, is_fullmm);
 +
-+	pud = pud_offset(pgd, address);
-+	pud_ref = pud_offset(pgd_ref, address);
-+	if (pud_none(*pud_ref))
-+		return -1;
-+	if (pud_none(*pud) || pud_page(*pud) != pud_page(*pud_ref))
-+		BUG();
-+	pmd = pmd_offset(pud, address);
-+	pmd_ref = pmd_offset(pud_ref, address);
-+	if (pmd_none(*pmd_ref))
-+		return -1;
-+	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
-+		BUG();
-+	pte_ref = pte_offset_kernel(pmd_ref, address);
-+	if (!pte_present(*pte_ref))
-+		return -1;
-+	pte = pte_offset_kernel(pmd, address);
-+	/* Don't use pte_page here, because the mappings can point
-+	   outside mem_map, and the NUMA hash lookup cannot handle
-+	   that. */
-+	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
-+		BUG();
-+	return 0;
-+}
++	info = vma->vm_file->private_data;
++	map = vma->vm_private_data;
 +
-+int page_fault_trace = 0;
-+int exception_trace = 1;
++	/* TODO Should these be changed to if statements? */
++	BUG_ON(!info);
++	BUG_ON(!info->idx_map);
++	BUG_ON(!map);
 +
++	offset = (int) ((uvaddr - uvstart) >> PAGE_SHIFT);
++	usr_idx = OFFSET_TO_USR_IDX(offset);
++	seg = OFFSET_TO_SEG(offset);
 +
-+#define MEM_VERBOSE 1
++	pending_idx = MASK_PEND_IDX(ID_TO_IDX(info->idx_map[usr_idx]));
++	mmap_idx = ID_TO_MIDX(info->idx_map[usr_idx]);
 +
-+#ifdef MEM_VERBOSE
-+#define MEM_LOG(_f, _a...)			\
-+	printk("fault.c:[%d]-> " _f "\n",	\
-+	__LINE__ , ## _a )
-+#else
-+#define MEM_LOG(_f, _a...) ((void)0)
-+#endif
++	kvaddr = idx_to_kaddr(mmap_idx, pending_idx, seg);
++	pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
++	ClearPageReserved(pg);
++	map[offset + RING_PAGES] = NULL;
 +
-+static int spurious_fault(struct pt_regs *regs,
-+			  unsigned long address,
-+			  unsigned long error_code)
-+{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
++	khandle = &pending_handle(mmap_idx, pending_idx, seg);
 +
-+#ifdef CONFIG_XEN
-+	/* Faults in hypervisor area are never spurious. */
-+	if ((address >= HYPERVISOR_VIRT_START) &&
-+	    (address < HYPERVISOR_VIRT_END))
-+		return 0;
-+#endif
++	if (khandle->kernel != INVALID_GRANT_HANDLE) {
++		gnttab_set_unmap_op(&unmap[count], kvaddr, 
++				    GNTMAP_host_map, khandle->kernel);
++		count++;
 +
-+	/* Reserved-bit violation or user access to kernel space? */
-+	if (error_code & (PF_RSVD|PF_USER))
-+		return 0;
++		set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT, 
++				    INVALID_P2M_ENTRY);
++	}
 +
-+	pgd = init_mm.pgd + pgd_index(address);
-+	if (!pgd_present(*pgd))
-+		return 0;
++	if (khandle->user != INVALID_GRANT_HANDLE) {
++		BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
 +
-+	pud = pud_offset(pgd, address);
-+	if (!pud_present(*pud))
-+		return 0;
++		copy = *ptep;
++		gnttab_set_unmap_op(&unmap[count], virt_to_machine(ptep), 
++				    GNTMAP_host_map 
++				    | GNTMAP_application_map 
++				    | GNTMAP_contains_pte,
++				    khandle->user);
++		count++;
++	} else {
++		BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap));
 +
-+	pmd = pmd_offset(pud, address);
-+	if (!pmd_present(*pmd))
-+		return 0;
++		/* USING SHADOW PAGE TABLES. */
++		copy = ptep_get_and_clear_full(vma->vm_mm, uvaddr, ptep,
++					       is_fullmm);
++	}
 +
-+	pte = pte_offset_kernel(pmd, address);
-+	if (!pte_present(*pte))
-+		return 0;
-+	if ((error_code & PF_WRITE) && !pte_write(*pte))
-+		return 0;
-+	if ((error_code & PF_INSTR) && (pte_val(*pte) & _PAGE_NX))
-+		return 0;
++	if (count) {
++		BLKTAP_INVALIDATE_HANDLE(khandle);
++		if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
++					      unmap, count))
++			BUG();
++	}
 +
-+	return 1;
++	return copy;
 +}
 +
-+/*
-+ * This routine handles page faults.  It determines the address,
-+ * and the problem, and then passes it off to one of the appropriate
-+ * routines.
++struct vm_operations_struct blktap_vm_ops = {
++	.nopage  = blktap_nopage,
++	.zap_pte = blktap_clear_pte,
++};
++
++/******************************************************************
++ * BLKTAP FILE OPS
 + */
-+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
-+					unsigned long error_code)
-+{
-+	struct task_struct *tsk;
-+	struct mm_struct *mm;
-+	struct vm_area_struct * vma;
-+	unsigned long address;
-+	const struct exception_table_entry *fixup;
-+	int write;
-+	unsigned long flags;
-+	siginfo_t info;
++ 
++/* Function declarations */
++static tap_blkif_t *get_next_free_dev(void);
++static int blktap_open(struct inode *inode, struct file *filp);
++static int blktap_release(struct inode *inode, struct file *filp);
++static int blktap_mmap(struct file *filp, struct vm_area_struct *vma);
++static int blktap_ioctl(struct inode *inode, struct file *filp,
++                        unsigned int cmd, unsigned long arg);
++static unsigned int blktap_poll(struct file *file, poll_table *wait);
 +
-+	if (!user_mode(regs))
-+		error_code &= ~PF_USER; /* means kernel */
++static const struct file_operations blktap_fops = {
++	.owner   = THIS_MODULE,
++	.poll    = blktap_poll,
++	.ioctl   = blktap_ioctl,
++	.open    = blktap_open,
++	.release = blktap_release,
++	.mmap    = blktap_mmap,
++};
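BLKTAP_MINOR is 0 because the major number is dynamic (see the comment near
BLKTAP_DEV_DIR above). The module init code is outside this hunk; it is
assumed to obtain blktap_major along these lines:

	/* Sketch only; the real registration lives elsewhere in this file. */
	blktap_major = register_chrdev(0, "blktap", &blktap_fops);
	if (blktap_major < 0)
		return blktap_major;	/* registration failed */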
 +
-+	tsk = current;
-+	mm = tsk->mm;
-+	prefetchw(&mm->mmap_sem);
 +
-+	/* get the address */
-+	address = current_vcpu_info()->arch.cr2;
++static tap_blkif_t *get_next_free_dev(void)
++{
++	struct class *class;
++	tap_blkif_t *info;
++	int minor;
 +
-+	info.si_code = SEGV_MAPERR;
++	/*
++	 * This is called only from the ioctl, which
++	 * means we should always have interrupts enabled.
++	 */
++	BUG_ON(irqs_disabled());
++
++	spin_lock_irq(&pending_free_lock);
++
++	/* tapfds[0] is always NULL */
 +
++	for (minor = 1; minor < blktap_next_minor; minor++) {
++		info = tapfds[minor];
++		/* We could have failed a previous attempt; in that case
++		 * the slot is free but has no info struct to mark. */
++		if (!info)
++			goto found;
++		if ((info->dev_inuse == 0) &&
++		    (info->dev_pending == 0)) {
++			info->dev_pending = 1;
++			goto found;
++		}
++	}
++	info = NULL;
++	minor = -1;
 +
 +	/*
-+	 * We fault-in kernel-space virtual memory on-demand. The
-+	 * 'reference' page table is init_mm.pgd.
-+	 *
-+	 * NOTE! We MUST NOT take any locks for this case. We may
-+	 * be in an interrupt or a critical region, and should
-+	 * only copy the information from the master page table,
-+	 * nothing more.
-+	 *
-+	 * This verifies that the fault happens in kernel space
-+	 * (error_code & 4) == 0, and that the fault was not a
-+	 * protection error (error_code & 9) == 0.
++	 * We didn't find a free device. If we can still allocate
++	 * more, grab the next available device minor. This is done
++	 * while we are still under the protection of the
++	 * pending_free_lock.
 +	 */
-+	if (unlikely(address >= TASK_SIZE64)) {
-+		/*
-+		 * Don't check for the module range here: its PML4
-+		 * is always initialized because it's shared with the main
-+		 * kernel text. Only vmalloc may need PML4 syncups.
-+		 */
-+		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
-+		      ((address >= VMALLOC_START && address < VMALLOC_END))) {
-+			if (vmalloc_fault(address) >= 0)
-+				return;
++	if (blktap_next_minor < MAX_TAP_DEV)
++		minor = blktap_next_minor++;
++found:
++	spin_unlock_irq(&pending_free_lock);
++
++	if (!info && minor > 0) {
++		info = kzalloc(sizeof(*info), GFP_KERNEL);
++		if (unlikely(!info)) {
++			/*
++			 * If allocation failed, try to put back the
++			 * next minor number. If another minor was
++			 * taken in the meantime, this one is simply
++			 * lost; we can try to allocate it again
++			 * later.
++			 */
++			spin_lock_irq(&pending_free_lock);
++			if (blktap_next_minor == minor+1)
++				blktap_next_minor--;
++			spin_unlock_irq(&pending_free_lock);
++			goto out;
 +		}
-+		/* Can take a spurious fault if mapping changes R/O -> R/W. */
-+		if (spurious_fault(regs, address, error_code))
-+			return;
-+		if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
-+						SIGSEGV) == NOTIFY_STOP)
-+			return;
++
++		info->minor = minor;
 +		/*
-+		 * Don't take the mm semaphore here. If we fixup a prefetch
-+		 * fault we could otherwise deadlock.
++		 * Make sure that we have a minor before others can
++		 * see us.
 +		 */
-+		goto bad_area_nosemaphore;
++		wmb();
++		tapfds[minor] = info;
++
++		if ((class = get_xen_class()) != NULL)
++			class_device_create(class, NULL,
++					    MKDEV(blktap_major, minor), NULL,
++					    "blktap%d", minor);
 +	}
 +
-+	if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
-+					SIGSEGV) == NOTIFY_STOP)
-+		return;
++out:
++	return info;
++}
 +
-+	if (likely(regs->eflags & X86_EFLAGS_IF))
-+		local_irq_enable();
++int dom_to_devid(domid_t domid, int xenbus_id, blkif_t *blkif) 
++{
++	tap_blkif_t *info;
++	int i;
 +
-+	if (unlikely(page_fault_trace))
-+		printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
-+		       regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code); 
++	for (i = 1; i < blktap_next_minor; i++) {
++		info = tapfds[i];
++		if ( info &&
++		     (info->trans.domid == domid) &&
++		     (info->trans.busid == xenbus_id) ) {
++			info->blkif = blkif;
++			info->status = RUNNING;
++			return i;
++		}
++	}
++	return -1;
++}
 +
-+	if (unlikely(error_code & PF_RSVD))
-+		pgtable_bad(address, regs, error_code);
++void signal_tapdisk(int idx) 
++{
++	tap_blkif_t *info;
++	struct task_struct *ptask;
 +
 +	/*
-+	 * If we're in an interrupt or have no user
-+	 * context, we must not take the fault..
++	 * If the userland tools set things up wrong, this could be negative;
++	 * in that case, just don't try to signal.
 +	 */
-+	if (unlikely(in_atomic() || !mm))
-+		goto bad_area_nosemaphore;
++	if (idx < 0)
++		return;
 +
-+ again:
-+	/* When running in the kernel we expect faults to occur only to
-+	 * addresses in user space.  All other faults represent errors in the
-+	 * kernel and should generate an OOPS.  Unfortunatly, in the case of an
-+	 * erroneous fault occurring in a code path which already holds mmap_sem
-+	 * we will deadlock attempting to validate the fault against the
-+	 * address space.  Luckily the kernel only validly references user
-+	 * space from well defined areas of code, which are listed in the
-+	 * exceptions table.
-+	 *
-+	 * As the vast majority of faults will be valid we will only perform
-+	 * the source reference check when there is a possibilty of a deadlock.
-+	 * Attempt to lock the address space, if we cannot we then validate the
-+	 * source.  If this is invalid we can skip the address space check,
-+	 * thus avoiding the deadlock.
-+	 */
-+	if (!down_read_trylock(&mm->mmap_sem)) {
-+		if ((error_code & PF_USER) == 0 &&
-+		    !search_exception_tables(regs->rip))
-+			goto bad_area_nosemaphore;
-+		down_read(&mm->mmap_sem);
-+	}
++	/* Validate idx before indexing tapfds[]. */
++	info = (idx < 0 || idx > MAX_TAP_DEV) ? NULL : tapfds[idx];
++	if (!info)
++		return;
 +
-+	vma = find_vma(mm, address);
-+	if (!vma)
-+		goto bad_area;
-+	if (likely(vma->vm_start <= address))
-+		goto good_area;
-+	if (!(vma->vm_flags & VM_GROWSDOWN))
-+		goto bad_area;
-+	if (error_code & 4) {
-+		/* Allow userspace just enough access below the stack pointer
-+		 * to let the 'enter' instruction work.
-+		 */
-+		if (address + 65536 + 32 * sizeof(unsigned long) < regs->rsp)
-+			goto bad_area;
++	if (info->pid > 0) {
++		ptask = find_task_by_pid(info->pid);
++		if (ptask)
++			info->status = CLEANSHUTDOWN;
 +	}
-+	if (expand_stack(vma, address))
-+		goto bad_area;
-+/*
-+ * Ok, we have a good vm_area for this memory access, so
-+ * we can handle it..
-+ */
-+good_area:
-+	info.si_code = SEGV_ACCERR;
-+	write = 0;
-+	switch (error_code & (PF_PROT|PF_WRITE)) {
-+		default:	/* 3: write, present */
-+			/* fall through */
-+		case PF_WRITE:		/* write, not present */
-+			if (!(vma->vm_flags & VM_WRITE))
-+				goto bad_area;
-+			write++;
-+			break;
-+		case PF_PROT:		/* read, present */
-+			goto bad_area;
-+		case 0:			/* read, not present */
-+			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
-+				goto bad_area;
++	info->blkif = NULL;
++
++	return;
++}
++
++static int blktap_open(struct inode *inode, struct file *filp)
++{
++	blkif_sring_t *sring;
++	int idx = iminor(inode) - BLKTAP_MINOR;
++	tap_blkif_t *info;
++	int i;
++	
++	/* ctrl device, treat differently */
++	if (!idx)
++		return 0;
++
++	/* Validate idx before indexing tapfds[]. */
++	info = (idx < 0 || idx > MAX_TAP_DEV) ? NULL : tapfds[idx];
++
++	if (!info) {
++		WPRINTK("Unable to open device /dev/xen/blktap%d\n",
++			idx);
++		return -ENODEV;
++	}
 +
-+	/*
-+	 * If for any reason at all we couldn't handle the fault,
-+	 * make sure we exit gracefully rather than endlessly redo
-+	 * the fault.
-+	 */
-+	switch (handle_mm_fault(mm, vma, address, write)) {
-+	case VM_FAULT_MINOR:
-+		tsk->min_flt++;
-+		break;
-+	case VM_FAULT_MAJOR:
-+		tsk->maj_flt++;
-+		break;
-+	case VM_FAULT_SIGBUS:
-+		goto do_sigbus;
-+	default:
-+		goto out_of_memory;
++	DPRINTK("Opening device /dev/xen/blktap%d\n",idx);
++	
++	/* Only one process can access the device at a time. */
++	if (test_and_set_bit(0, &info->dev_inuse))
++		return -EBUSY;
++
++	info->dev_pending = 0;
++	    
++	/* Allocate the fe ring. */
++	sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
++	if (sring == NULL)
++		goto fail_nomem;
++
++	SetPageReserved(virt_to_page(sring));
++    
++	SHARED_RING_INIT(sring);
++	FRONT_RING_INIT(&info->ufe_ring, sring, PAGE_SIZE);
++	
++	filp->private_data = info;
++	info->vma = NULL;
++
++	info->idx_map = kmalloc(sizeof(unsigned long) * MAX_PENDING_REQS, 
++				GFP_KERNEL);
++	
++	if (info->idx_map == NULL)
++		goto fail_nomem;
++
++	if (idx > 0) {
++		init_waitqueue_head(&info->wait);
++		for (i = 0; i < MAX_PENDING_REQS; i++) 
++			info->idx_map[i] = INVALID_REQ;
 +	}
 +
-+	up_read(&mm->mmap_sem);
-+	return;
++	DPRINTK("Tap open: device /dev/xen/blktap%d\n",idx);
++	return 0;
 +
-+/*
-+ * Something tried to access memory that isn't in our memory map..
-+ * Fix it, but check if it's kernel or user first..
-+ */
-+bad_area:
-+	up_read(&mm->mmap_sem);
++ fail_nomem:
++	/* Undo what was set up above before failing. */
++	if (sring) {
++		ClearPageReserved(virt_to_page(sring));
++		free_page((unsigned long)sring);
++	}
++	info->dev_inuse = 0;
++	return -ENOMEM;
++}
 +
-+bad_area_nosemaphore:
-+	/* User mode accesses just cause a SIGSEGV */
-+	if (error_code & PF_USER) {
-+		if (is_prefetch(regs, address, error_code))
-+			return;
++static int blktap_release(struct inode *inode, struct file *filp)
++{
++	tap_blkif_t *info = filp->private_data;
++	
++	/* check for control device */
++	if (!info)
++		return 0;
 +
-+		/* Work around K8 erratum #100 K8 in compat mode
-+		   occasionally jumps to illegal addresses >4GB.  We
-+		   catch this here in the page fault handler because
-+		   these addresses are not reachable. Just detect this
-+		   case and return.  Any code segment in LDT is
-+		   compatibility mode. */
-+		if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
-+		    (address >> 32))
-+			return;
++	info->dev_inuse = 0;
++	DPRINTK("Freeing device [/dev/xen/blktap%d]\n",info->minor);
 +
-+		if (exception_trace && unhandled_signal(tsk, SIGSEGV)) {
-+			printk(
-+		       "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
-+					tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
-+					tsk->comm, tsk->pid, address, regs->rip,
-+					regs->rsp, error_code);
-+		}
-+       
-+		tsk->thread.cr2 = address;
-+		/* Kernel addresses are always protection faults */
-+		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
-+		tsk->thread.trap_no = 14;
-+		info.si_signo = SIGSEGV;
-+		info.si_errno = 0;
-+		/* info.si_code has been set above */
-+		info.si_addr = (void __user *)address;
-+		force_sig_info(SIGSEGV, &info, tsk);
-+		return;
++	/* Free the ring page. */
++	ClearPageReserved(virt_to_page(info->ufe_ring.sring));
++	free_page((unsigned long) info->ufe_ring.sring);
++
++	/* Clear any active mappings and free foreign map table */
++	if (info->vma) {
++		zap_page_range(
++			info->vma, info->vma->vm_start, 
++			info->vma->vm_end - info->vma->vm_start, NULL);
++
++		kfree(info->vma->vm_private_data);
++
++		info->vma = NULL;
 +	}
 +
-+no_context:
-+	
-+	/* Are we prepared to handle this kernel fault?  */
-+	fixup = search_exception_tables(regs->rip);
-+	if (fixup) {
-+		regs->rip = fixup->fixup;
-+		return;
++	if (info->idx_map) {
++		kfree(info->idx_map);
++		info->idx_map = NULL;
 +	}
 +
-+	/* 
-+	 * Hall of shame of CPU/BIOS bugs.
-+	 */
++	if ( (info->status != CLEANSHUTDOWN) && (info->blkif != NULL) ) {
++		if (info->blkif->xenblkd != NULL) {
++			kthread_stop(info->blkif->xenblkd);
++			info->blkif->xenblkd = NULL;
++		}
++		info->status = CLEANSHUTDOWN;
++	}
 +
-+ 	if (is_prefetch(regs, address, error_code))
-+ 		return;
++	return 0;
++}
 +
-+	if (is_errata93(regs, address))
-+		return; 
 +
-+/*
-+ * Oops. The kernel tried to access some bad page. We'll have to
-+ * terminate things with extreme prejudice.
++/* Note on mmap:
++ * We need to map pages to user space in a way that will allow the block
++ * subsystem to set up direct IO to them.  This couldn't be done before,
++ * because there isn't really a sane way to translate a user virtual address
++ * down to a physical address when the page belongs to another domain.
++ *
++ * My first approach was to map the page into kernel memory, add an entry
++ * for it in the physical frame list (using alloc_lomem_region as in blkback)
++ * and then attempt to map that page up to user space.  This is disallowed
++ * by Xen though, which realizes that we don't really own the machine frame
++ * underlying the physical page.
++ *
++ * The new approach is to provide explicit support for this in the Xen Linux
++ * tree.  The VMA now has a flag, VM_FOREIGN, to indicate that it contains
++ * pages mapped from other VMs.  vma->vm_private_data is set up as a mapping
++ * from pages to actual page structs.  There is a new clause in get_user_pages
++ * that does the right thing for this sort of mapping (a conceptual sketch
++ * follows blktap_mmap below).
 + */
++static int blktap_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++	int size;
++	struct page **map;
++	int i;
++	tap_blkif_t *info = filp->private_data;
++	int ret;
 +
-+	flags = oops_begin();
++	if (info == NULL) {
++		WPRINTK("blktap: mmap, retrieving idx failed\n");
++		return -ENOMEM;
++	}
++	
++	vma->vm_flags |= VM_RESERVED;
++	vma->vm_ops = &blktap_vm_ops;
 +
-+	if (address < PAGE_SIZE)
-+		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
++	size = vma->vm_end - vma->vm_start;
++	if (size != ((mmap_pages + RING_PAGES) << PAGE_SHIFT)) {
++		WPRINTK("you _must_ map exactly %d pages!\n",
++		       mmap_pages + RING_PAGES);
++		return -EAGAIN;
++	}
++
++	size >>= PAGE_SHIFT;
++	info->rings_vstart = vma->vm_start;
++	info->user_vstart  = info->rings_vstart + (RING_PAGES << PAGE_SHIFT);
++    
++	/* Map the ring pages to the start of the region and reserve it. */
++	if (xen_feature(XENFEAT_auto_translated_physmap))
++		ret = vm_insert_page(vma, vma->vm_start,
++				     virt_to_page(info->ufe_ring.sring));
 +	else
-+		printk(KERN_ALERT "Unable to handle kernel paging request");
-+	printk(" at %016lx RIP: \n" KERN_ALERT,address);
-+	printk_address(regs->rip);
-+	dump_pagetable(address);
-+	tsk->thread.cr2 = address;
-+	tsk->thread.trap_no = 14;
-+	tsk->thread.error_code = error_code;
-+	__die("Oops", regs, error_code);
-+	/* Executive summary in case the body of the oops scrolled away */
-+	printk(KERN_EMERG "CR2: %016lx\n", address);
-+	oops_end(flags);
-+	do_exit(SIGKILL);
++		ret = remap_pfn_range(vma, vma->vm_start,
++				      __pa(info->ufe_ring.sring) >> PAGE_SHIFT,
++				      PAGE_SIZE, vma->vm_page_prot);
++	if (ret) {
++		WPRINTK("Mapping user ring failed!\n");
++		goto fail;
++	}
 +
-+/*
-+ * We ran out of memory, or some other thing happened to us that made
-+ * us unable to handle the page fault gracefully.
-+ */
-+out_of_memory:
-+	up_read(&mm->mmap_sem);
-+	if (current->pid == 1) { 
-+		yield();
-+		goto again;
++	/* Mark this VM as containing foreign pages, and set up mappings. */
++	map = kzalloc(((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)
++		      * sizeof(struct page *),
++		      GFP_KERNEL);
++	if (map == NULL) {
++		WPRINTK("Couldn't alloc VM_FOREIGN map.\n");
++		goto fail;
 +	}
-+	printk("VM: killing process %s\n", tsk->comm);
-+	if (error_code & 4)
-+		do_exit(SIGKILL);
-+	goto no_context;
 +
-+do_sigbus:
-+	up_read(&mm->mmap_sem);
++	for (i = 0; i < ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); i++)
++		map[i] = NULL;
++    
++	vma->vm_private_data = map;
++	vma->vm_flags |= VM_FOREIGN;
++	vma->vm_flags |= VM_DONTCOPY;
 +
-+	/* Kernel mode? Handle exceptions or die */
-+	if (!(error_code & PF_USER))
-+		goto no_context;
++#ifdef CONFIG_X86
++	vma->vm_mm->context.has_foreign_mappings = 1;
++#endif
 +
-+	tsk->thread.cr2 = address;
-+	tsk->thread.error_code = error_code;
-+	tsk->thread.trap_no = 14;
-+	info.si_signo = SIGBUS;
-+	info.si_errno = 0;
-+	info.si_code = BUS_ADRERR;
-+	info.si_addr = (void __user *)address;
-+	force_sig_info(SIGBUS, &info, tsk);
-+	return;
++	info->vma = vma;
++	info->ring_ok = 1;
++	return 0;
++ fail:
++	/* Clear any active mappings. */
++	zap_page_range(vma, vma->vm_start, 
++		       vma->vm_end - vma->vm_start, NULL);
++
++	return -ENOMEM;
 +}
 +
-+DEFINE_SPINLOCK(pgd_lock);
-+struct page *pgd_list;
 +
-+void vmalloc_sync_all(void)
++static int blktap_ioctl(struct inode *inode, struct file *filp,
++                        unsigned int cmd, unsigned long arg)
 +{
-+	/* Note that races in the updates of insync and start aren't 
-+	   problematic:
-+	   insync can only get set bits added, and updates to start are only
-+	   improving performance (without affecting correctness if undone). */
-+	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-+	static unsigned long start = VMALLOC_START & PGDIR_MASK;
-+	unsigned long address;
-+
-+	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
-+		if (!test_bit(pgd_index(address), insync)) {
-+			const pgd_t *pgd_ref = pgd_offset_k(address);
-+			struct page *page;
++	tap_blkif_t *info = filp->private_data;
 +
-+			if (pgd_none(*pgd_ref))
-+				continue;
-+			spin_lock(&pgd_lock);
-+			for (page = pgd_list; page;
-+			     page = (struct page *)page->index) {
-+				pgd_t *pgd;
-+				pgd = (pgd_t *)page_address(page) + pgd_index(address);
-+				if (pgd_none(*pgd))
-+					set_pgd(pgd, *pgd_ref);
-+				else
-+					BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
++	switch(cmd) {
++	case BLKTAP_IOCTL_KICK_FE: 
++	{
++		/* There are fe messages to process. */
++		return blktap_read_ufe_ring(info);
++	}
++	case BLKTAP_IOCTL_SETMODE:
++	{
++		if (info) {
++			if (BLKTAP_MODE_VALID(arg)) {
++				info->mode = arg;
++				/* XXX: may need to flush rings here. */
++				DPRINTK("blktap: set mode to %lx\n", 
++				       arg);
++				return 0;
 +			}
-+			spin_unlock(&pgd_lock);
-+			set_bit(pgd_index(address), insync);
 +		}
-+		if (address == start)
-+			start = address + PGDIR_SIZE;
++		return 0;
 +	}
-+	/* Check that there is no need to do the same for the modules area. */
-+	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
-+	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == 
-+				(__START_KERNEL & PGDIR_MASK)));
-+}
++	case BLKTAP_IOCTL_PRINT_IDXS:
++	{
++		if (info) {
++			printk("User Rings: \n-----------\n");
++			printk("UF: rsp_cons: %2d, req_prod_pvt: %2d "
++				"| req_prod: %2d, rsp_prod: %2d\n",
++				info->ufe_ring.rsp_cons,
++				info->ufe_ring.req_prod_pvt,
++				info->ufe_ring.sring->req_prod,
++				info->ufe_ring.sring->rsp_prod);
++		}
++		return 0;
++	}
++	case BLKTAP_IOCTL_SENDPID:
++	{
++		if (info) {
++			info->pid = (pid_t)arg;
++			DPRINTK("blktap: pid received %d\n", 
++			       info->pid);
++		}
++		return 0;
++	}
++	case BLKTAP_IOCTL_NEWINTF:
++	{		
++		uint64_t val = (uint64_t)arg;
++		domid_translate_t *tr = (domid_translate_t *)&val;
 +
-+static int __init enable_pagefaulttrace(char *str)
-+{
-+	page_fault_trace = 1;
-+	return 1;
-+}
-+__setup("pagefaulttrace", enable_pagefaulttrace);
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/mm/init-xen.c tmp-linux-2.6-xen.patch/arch/x86_64/mm/init-xen.c
---- pristine-linux-2.6.18.2/arch/x86_64/mm/init-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/mm/init-xen.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,1238 @@
-+/*
-+ *  linux/arch/x86_64/mm/init.c
-+ *
-+ *  Copyright (C) 1995  Linus Torvalds
-+ *  Copyright (C) 2000  Pavel Machek <pavel at suse.cz>
-+ *  Copyright (C) 2002,2003 Andi Kleen <ak at suse.de>
-+ *
-+ *  Jun Nakajima <jun.nakajima at intel.com>
-+ *	Modified for Xen.
-+ */
++		DPRINTK("NEWINTF Req for domid %d and bus id %d\n", 
++		       tr->domid, tr->busid);
++		info = get_next_free_dev();
++		if (!info) {
++			WPRINTK("Error initialising /dev/xen/blktap - "
++				"No more devices\n");
++			return -1;
++		}
++		info->trans.domid = tr->domid;
++		info->trans.busid = tr->busid;
++		return info->minor;
++	}
++	case BLKTAP_IOCTL_NEWINTF_EXT:
++	{
++		void __user *udata = (void __user *) arg;
++		domid_translate_ext_t tr;
 +
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/ptrace.h>
-+#include <linux/mman.h>
-+#include <linux/mm.h>
-+#include <linux/swap.h>
-+#include <linux/smp.h>
-+#include <linux/init.h>
-+#include <linux/pagemap.h>
-+#include <linux/bootmem.h>
-+#include <linux/proc_fs.h>
-+#include <linux/pci.h>
-+#include <linux/poison.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/module.h>
-+#include <linux/memory_hotplug.h>
++		if (copy_from_user(&tr, udata, sizeof(domid_translate_ext_t)))
++			return -EFAULT;
 +
-+#include <asm/processor.h>
-+#include <asm/system.h>
-+#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-+#include <asm/pgalloc.h>
-+#include <asm/dma.h>
-+#include <asm/fixmap.h>
-+#include <asm/e820.h>
-+#include <asm/apic.h>
-+#include <asm/tlb.h>
-+#include <asm/mmu_context.h>
-+#include <asm/proto.h>
-+#include <asm/smp.h>
-+#include <asm/sections.h>
++		DPRINTK("NEWINTF_EXT Req for domid %d and bus id %d\n", 
++		       tr.domid, tr.busid);
++		info = get_next_free_dev();
++		if (!info) {
++			WPRINTK("Error initialising /dev/xen/blktap - "
++				"No more devices\n");
++			return -1;
++		}
++		info->trans.domid = tr.domid;
++		info->trans.busid = tr.busid;
++		return info->minor;
++	}
++	case BLKTAP_IOCTL_FREEINTF:
++	{
++		unsigned long dev = arg;
++		unsigned long flags;
 +
-+#include <xen/features.h>
++		info = dev > MAX_TAP_DEV ? NULL : tapfds[dev];
 +
-+#ifndef Dprintk
-+#define Dprintk(x...)
-+#endif
++		if (!info)
++			return 0; /* should this be an error? */
 +
-+struct dma_mapping_ops* dma_ops;
-+EXPORT_SYMBOL(dma_ops);
++		spin_lock_irqsave(&pending_free_lock, flags);
++		if (info->dev_pending)
++			info->dev_pending = 0;
++		spin_unlock_irqrestore(&pending_free_lock, flags);
 +
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+unsigned int __kernel_page_user;
-+EXPORT_SYMBOL(__kernel_page_user);
-+#endif
++		return 0;
++	}
++	case BLKTAP_IOCTL_MINOR:
++	{
++		unsigned long dev = arg;
 +
-+extern unsigned long *contiguous_bitmap;
++		info = dev > MAX_TAP_DEV ? NULL : tapfds[dev];
 +
-+static unsigned long dma_reserve __initdata;
++		if (!info)
++			return -EINVAL;
 +
-+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-+extern unsigned long start_pfn;
++		return info->minor;
++	}
++	case BLKTAP_IOCTL_MAJOR:
++		return blktap_major;
 +
-+/*
-+ * Use this until direct mapping is established, i.e. before __va() is 
-+ * available in init_memory_mapping().
-+ */
++	case BLKTAP_QUERY_ALLOC_REQS:
++	{
++		WPRINTK("BLKTAP_QUERY_ALLOC_REQS ioctl: %d/%d\n",
++		       alloc_pending_reqs, blkif_reqs);
++		return (alloc_pending_reqs/blkif_reqs) * 100;
++	}
++	}
++	return -ENOIOCTLCMD;
++}
 +
-+#define addr_to_page(addr, page)				\
-+	(addr) &= PHYSICAL_PAGE_MASK;				\
-+	(page) = ((unsigned long *) ((unsigned long)		\
-+	(((mfn_to_pfn((addr) >> PAGE_SHIFT)) << PAGE_SHIFT) +	\
-+	__START_KERNEL_map)))
++static unsigned int blktap_poll(struct file *filp, poll_table *wait)
++{
++	tap_blkif_t *info = filp->private_data;
++	
++	/* do not work on the control device */
++	if (!info)
++		return 0;
 +
-+static void __meminit early_make_page_readonly(void *va, unsigned int feature)
++	poll_wait(filp, &info->wait, wait);
++	if (info->ufe_ring.req_prod_pvt != info->ufe_ring.sring->req_prod) {
++		RING_PUSH_REQUESTS(&info->ufe_ring);
++		return POLLIN | POLLRDNORM;
++	}
++	return 0;
++}
++
++void blktap_kick_user(int idx)
 +{
-+	unsigned long addr, _va = (unsigned long)va;
-+	pte_t pte, *ptep;
-+	unsigned long *page = (unsigned long *) init_level4_pgt;
++	tap_blkif_t *info;
 +
-+	if (xen_feature(feature))
++	info = (idx < 0 || idx > MAX_TAP_DEV) ? NULL : tapfds[idx];
++
++	if (!info)
 +		return;
 +
-+	addr = (unsigned long) page[pgd_index(_va)];
-+	addr_to_page(addr, page);
++	wake_up_interruptible(&info->wait);
 +
-+	addr = page[pud_index(_va)];
-+	addr_to_page(addr, page);
++	return;
++}
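Taken together, the ioctl, poll, and kick entry points above are assumed to be
driven from the userspace tapdisk process roughly like this (a rough sketch,
not verbatim tool code; the device path follows BLKTAP_DEV_DIR above):

	int fd = open("/dev/xen/blktap1", O_RDWR);

	ioctl(fd, BLKTAP_IOCTL_SENDPID, (unsigned long)getpid());
	ioctl(fd, BLKTAP_IOCTL_SETMODE, BLKTAP_MODE_INTERCEPT_FE);

	/* poll() blocks until blktap_kick_user() wakes us; the requests are
	 * then on the UFE ring, and a KICK_FE ioctl hands the completed
	 * responses back to the kernel. */
	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDNORM };
	while (poll(&pfd, 1, -1) > 0)
		ioctl(fd, BLKTAP_IOCTL_KICK_FE, 0);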
 +
-+	addr = page[pmd_index(_va)];
-+	addr_to_page(addr, page);
++static int do_block_io_op(blkif_t *blkif);
++static void dispatch_rw_block_io(blkif_t *blkif,
++				 blkif_request_t *req,
++				 pending_req_t *pending_req);
++static void make_response(blkif_t *blkif, u64 id,
++                          unsigned short op, int st);
 +
-+	ptep = (pte_t *) &page[pte_index(_va)];
++/******************************************************************
++ * misc small helpers
++ */
++static int req_increase(void)
++{
++	int i, j;
 +
-+	pte.pte = ptep->pte & ~_PAGE_RW;
-+	if (HYPERVISOR_update_va_mapping(_va, pte, 0))
-+		BUG();
++	if (mmap_alloc >= MAX_PENDING_REQS || mmap_lock) 
++		return -EINVAL;
++
++	pending_reqs[mmap_alloc]  = kzalloc(sizeof(pending_req_t)
++					    * blkif_reqs, GFP_KERNEL);
++	foreign_pages[mmap_alloc] = alloc_empty_pages_and_pagevec(mmap_pages);
++
++	if (!pending_reqs[mmap_alloc] || !foreign_pages[mmap_alloc])
++		goto out_of_memory;
++
++	DPRINTK("%s: reqs=%d, pages=%d\n",
++		__FUNCTION__, blkif_reqs, mmap_pages);
++
++	for (i = 0; i < MAX_PENDING_REQS; i++) {
++		list_add_tail(&pending_reqs[mmap_alloc][i].free_list, 
++			      &pending_free);
++		pending_reqs[mmap_alloc][i].mem_idx = mmap_alloc;
++		for (j = 0; j < BLKIF_MAX_SEGMENTS_PER_REQUEST; j++)
++			BLKTAP_INVALIDATE_HANDLE(&pending_handle(mmap_alloc, 
++								 i, j));
++	}
++
++	mmap_alloc++;
++	DPRINTK("# MMAPs increased to %d\n",mmap_alloc);
++	return 0;
++
++ out_of_memory:
++	free_empty_pages_and_pagevec(foreign_pages[mmap_alloc], mmap_pages);
++	kfree(pending_reqs[mmap_alloc]);
++	WPRINTK("%s: out of memory\n", __FUNCTION__);
++	return -ENOMEM;
 +}
 +
-+static void __make_page_readonly(void *va)
++static void mmap_req_del(int mmap)
 +{
-+	pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t pte, *ptep;
-+	unsigned long addr = (unsigned long) va;
++	BUG_ON(!spin_is_locked(&pending_free_lock));
 +
-+	pgd = pgd_offset_k(addr);
-+	pud = pud_offset(pgd, addr);
-+	pmd = pmd_offset(pud, addr);
-+	ptep = pte_offset_kernel(pmd, addr);
++	kfree(pending_reqs[mmap]);
++	pending_reqs[mmap] = NULL;
 +
-+	pte.pte = ptep->pte & ~_PAGE_RW;
-+	if (HYPERVISOR_update_va_mapping(addr, pte, 0))
-+		xen_l1_entry_update(ptep, pte); /* fallback */
++	free_empty_pages_and_pagevec(foreign_pages[mmap], mmap_pages);
++	foreign_pages[mmap] = NULL;
 +
-+	if ((addr >= VMALLOC_START) && (addr < VMALLOC_END))
-+		__make_page_readonly(__va(pte_pfn(pte) << PAGE_SHIFT));
++	mmap_lock = 0;
++	DPRINTK("# MMAPs decreased to %d\n",mmap_alloc);
++	mmap_alloc--;
 +}
 +
-+static void __make_page_writable(void *va)
++static pending_req_t* alloc_req(void)
 +{
-+	pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t pte, *ptep;
-+	unsigned long addr = (unsigned long) va;
++	pending_req_t *req = NULL;
++	unsigned long flags;
 +
-+	pgd = pgd_offset_k(addr);
-+	pud = pud_offset(pgd, addr);
-+	pmd = pmd_offset(pud, addr);
-+	ptep = pte_offset_kernel(pmd, addr);
++	spin_lock_irqsave(&pending_free_lock, flags);
 +
-+	pte.pte = ptep->pte | _PAGE_RW;
-+	if (HYPERVISOR_update_va_mapping(addr, pte, 0))
-+		xen_l1_entry_update(ptep, pte); /* fallback */
++	if (!list_empty(&pending_free)) {
++		req = list_entry(pending_free.next, pending_req_t, free_list);
++		list_del(&req->free_list);
++	}
 +
-+	if ((addr >= VMALLOC_START) && (addr < VMALLOC_END))
-+		__make_page_writable(__va(pte_pfn(pte) << PAGE_SHIFT));
-+}
++	if (req) {
++		req->inuse = 1;
++		alloc_pending_reqs++;
++	}
++	spin_unlock_irqrestore(&pending_free_lock, flags);
 +
-+void make_page_readonly(void *va, unsigned int feature)
-+{
-+	if (!xen_feature(feature))
-+		__make_page_readonly(va);
++	return req;
 +}
 +
-+void make_page_writable(void *va, unsigned int feature)
++static void free_req(pending_req_t *req)
 +{
-+	if (!xen_feature(feature))
-+		__make_page_writable(va);
-+}
++	unsigned long flags;
++	int was_empty;
 +
-+void make_pages_readonly(void *va, unsigned nr, unsigned int feature)
-+{
-+	if (xen_feature(feature))
-+		return;
++	spin_lock_irqsave(&pending_free_lock, flags);
 +
-+	while (nr-- != 0) {
-+		__make_page_readonly(va);
-+		va = (void*)((unsigned long)va + PAGE_SIZE);
++	alloc_pending_reqs--;
++	req->inuse = 0;
++	if (mmap_lock && (req->mem_idx == mmap_alloc-1)) {
++		mmap_inuse--;
++		if (mmap_inuse == 0) mmap_req_del(mmap_alloc-1);
++		spin_unlock_irqrestore(&pending_free_lock, flags);
++		return;
 +	}
++	was_empty = list_empty(&pending_free);
++	list_add(&req->free_list, &pending_free);
++
++	spin_unlock_irqrestore(&pending_free_lock, flags);
++
++	if (was_empty)
++		wake_up(&pending_free_wq);
 +}
 +
-+void make_pages_writable(void *va, unsigned nr, unsigned int feature)
++static void fast_flush_area(pending_req_t *req, int k_idx, int u_idx,
++			    int tapidx)
 +{
-+	if (xen_feature(feature))
++	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
++	unsigned int i, invcount = 0;
++	struct grant_handle_pair *khandle;
++	uint64_t ptep;
++	int ret, mmap_idx;
++	unsigned long kvaddr, uvaddr;
++	tap_blkif_t *info;
++	
++
++	info = (tapidx < 0 || tapidx > MAX_TAP_DEV) ? NULL : tapfds[tapidx];
++
++	if (!info) {
++		WPRINTK("fast_flush: Couldn't get info!\n");
 +		return;
++	}
 +
-+	while (nr-- != 0) {
-+		__make_page_writable(va);
-+		va = (void*)((unsigned long)va + PAGE_SIZE);
++	if (info->vma != NULL &&
++	    xen_feature(XENFEAT_auto_translated_physmap)) {
++		down_write(&info->vma->vm_mm->mmap_sem);
++		zap_page_range(info->vma, 
++			       MMAP_VADDR(info->user_vstart, u_idx, 0), 
++			       req->nr_pages << PAGE_SHIFT, NULL);
++		up_write(&info->vma->vm_mm->mmap_sem);
++		return;
 +	}
-+}
 +
-+/*
-+ * NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the
-+ * physical space so we can cache the place of the first one and move
-+ * around without checking the pgd every time.
-+ */
++	mmap_idx = req->mem_idx;
 +
-+void show_mem(void)
-+{
-+	long i, total = 0, reserved = 0;
-+	long shared = 0, cached = 0;
-+	pg_data_t *pgdat;
-+	struct page *page;
++	for (i = 0; i < req->nr_pages; i++) {
++		kvaddr = idx_to_kaddr(mmap_idx, k_idx, i);
++		uvaddr = MMAP_VADDR(info->user_vstart, u_idx, i);
 +
-+	printk(KERN_INFO "Mem-info:\n");
-+	show_free_areas();
-+	printk(KERN_INFO "Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
++		khandle = &pending_handle(mmap_idx, k_idx, i);
 +
-+	for_each_online_pgdat(pgdat) {
-+               for (i = 0; i < pgdat->node_spanned_pages; ++i) {
-+			page = pfn_to_page(pgdat->node_start_pfn + i);
-+			total++;
-+			if (PageReserved(page))
-+				reserved++;
-+			else if (PageSwapCache(page))
-+				cached++;
-+			else if (page_count(page))
-+				shared += page_count(page) - 1;
-+               }
-+	}
-+	printk(KERN_INFO "%lu pages of RAM\n", total);
-+	printk(KERN_INFO "%lu reserved pages\n",reserved);
-+	printk(KERN_INFO "%lu pages shared\n",shared);
-+	printk(KERN_INFO "%lu pages swap cached\n",cached);
-+}
++		if (khandle->kernel != INVALID_GRANT_HANDLE) {
++			gnttab_set_unmap_op(&unmap[invcount],
++					    idx_to_kaddr(mmap_idx, k_idx, i),
++					    GNTMAP_host_map, khandle->kernel);
++			invcount++;
 +
-+int after_bootmem;
++			set_phys_to_machine(
++				__pa(idx_to_kaddr(mmap_idx, k_idx, i))
++				>> PAGE_SHIFT, INVALID_P2M_ENTRY);
++		}
 +
-+static __init void *spp_getpage(void)
-+{ 
-+	void *ptr;
-+	if (after_bootmem)
-+		ptr = (void *) get_zeroed_page(GFP_ATOMIC); 
-+	else if (start_pfn < table_end) {
-+		ptr = __va(start_pfn << PAGE_SHIFT);
-+		start_pfn++;
-+		memset(ptr, 0, PAGE_SIZE);
-+	} else
-+		ptr = alloc_bootmem_pages(PAGE_SIZE);
-+	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
-+		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");
++		if (khandle->user != INVALID_GRANT_HANDLE) {
++			BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
++			if (create_lookup_pte_addr(
++				info->vma->vm_mm,
++				MMAP_VADDR(info->user_vstart, u_idx, i),
++				&ptep) !=0) {
++				WPRINTK("Couldn't get a pte addr!\n");
++				return;
++			}
 +
-+	Dprintk("spp_getpage %p\n", ptr);
-+	return ptr;
-+} 
++			gnttab_set_unmap_op(&unmap[invcount], ptep,
++					    GNTMAP_host_map
++					    | GNTMAP_application_map
++					    | GNTMAP_contains_pte,
++					    khandle->user);
++			invcount++;
++		}
++
++		BLKTAP_INVALIDATE_HANDLE(khandle);
++	}
++	ret = HYPERVISOR_grant_table_op(
++		GNTTABOP_unmap_grant_ref, unmap, invcount);
++	BUG_ON(ret);
++	
++	if (info->vma != NULL && !xen_feature(XENFEAT_auto_translated_physmap))
++		zap_page_range(info->vma, 
++			       MMAP_VADDR(info->user_vstart, u_idx, 0), 
++			       req->nr_pages << PAGE_SHIFT, NULL);
++}
 +
-+#define pgd_offset_u(address) (pgd_t *)(init_level4_user_pgt + pgd_index(address))
++/******************************************************************
++ * SCHEDULER FUNCTIONS
++ */
 +
-+static inline pud_t *pud_offset_u(unsigned long address)
++static void print_stats(blkif_t *blkif)
 +{
-+	pud_t *pud = level3_user_pgt;
-+
-+	return pud + pud_index(address);
++	printk(KERN_DEBUG "%s: oo %3d  |  rd %4d  |  wr %4d\n",
++	       current->comm, blkif->st_oo_req,
++	       blkif->st_rd_req, blkif->st_wr_req);
++	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
++	blkif->st_rd_req = 0;
++	blkif->st_wr_req = 0;
++	blkif->st_oo_req = 0;
 +}
 +
-+static __init void set_pte_phys(unsigned long vaddr,
-+			 unsigned long phys, pgprot_t prot, int user_mode)
++int tap_blkif_schedule(void *arg)
 +{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte, new_pte;
++	blkif_t *blkif = arg;
 +
-+	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
++	blkif_get(blkif);
 +
-+	pgd = (user_mode ? pgd_offset_u(vaddr) : pgd_offset_k(vaddr));
-+	if (pgd_none(*pgd)) {
-+		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
-+		return;
-+	}
-+	pud = (user_mode ? pud_offset_u(vaddr) : pud_offset(pgd, vaddr));
-+	if (pud_none(*pud)) {
-+		pmd = (pmd_t *) spp_getpage(); 
-+		make_page_readonly(pmd, XENFEAT_writable_page_tables);
-+		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
-+		if (pmd != pmd_offset(pud, 0)) {
-+			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
-+			return;
-+		}
-+	}
-+	pmd = pmd_offset(pud, vaddr);
-+	if (pmd_none(*pmd)) {
-+		pte = (pte_t *) spp_getpage();
-+		make_page_readonly(pte, XENFEAT_writable_page_tables);
-+		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
-+		if (pte != pte_offset_kernel(pmd, 0)) {
-+			printk("PAGETABLE BUG #02!\n");
-+			return;
-+		}
-+	}
-+	if (pgprot_val(prot))
-+		new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);
-+	else
-+		new_pte = __pte(0);
++	if (debug_lvl)
++		printk(KERN_DEBUG "%s: started\n", current->comm);
 +
-+	pte = pte_offset_kernel(pmd, vaddr);
-+	if (!pte_none(*pte) &&
-+	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
-+		pte_ERROR(*pte);
-+	set_pte(pte, new_pte);
++	while (!kthread_should_stop()) {
++		if (try_to_freeze())
++			continue;
 +
-+	/*
-+	 * It's enough to flush this one mapping.
-+	 * (PGE mappings get flushed as well)
-+	 */
-+	__flush_tlb_one(vaddr);
-+}
++		wait_event_interruptible(
++			blkif->wq,
++			blkif->waiting_reqs || kthread_should_stop());
++		wait_event_interruptible(
++			pending_free_wq,
++			!list_empty(&pending_free) || kthread_should_stop());
 +
-+static __init void set_pte_phys_ma(unsigned long vaddr,
-+				   unsigned long phys, pgprot_t prot)
-+{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte, new_pte;
++		blkif->waiting_reqs = 0;
++		smp_mb(); /* clear flag *before* checking for work */
 +
-+	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
++		if (do_block_io_op(blkif))
++			blkif->waiting_reqs = 1;
 +
-+	pgd = pgd_offset_k(vaddr);
-+	if (pgd_none(*pgd)) {
-+		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
-+		return;
++		if (log_stats && time_after(jiffies, blkif->st_print))
++			print_stats(blkif);
 +	}
-+	pud = pud_offset(pgd, vaddr);
-+	if (pud_none(*pud)) {
 +
-+		pmd = (pmd_t *) spp_getpage(); 
-+		make_page_readonly(pmd, XENFEAT_writable_page_tables);
-+		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
-+		if (pmd != pmd_offset(pud, 0)) {
-+			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
-+			return;
-+		}
-+	}
-+	pmd = pmd_offset(pud, vaddr);
-+	if (pmd_none(*pmd)) {
-+		pte = (pte_t *) spp_getpage();
-+		make_page_readonly(pte, XENFEAT_writable_page_tables);
-+		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
-+		if (pte != pte_offset_kernel(pmd, 0)) {
-+			printk("PAGETABLE BUG #02!\n");
-+			return;
-+		}
-+	}
-+	new_pte = pfn_pte_ma(phys >> PAGE_SHIFT, prot);
++	if (log_stats)
++		print_stats(blkif);
++	if (debug_lvl)
++		printk(KERN_DEBUG "%s: exiting\n", current->comm);
 +
-+	pte = pte_offset_kernel(pmd, vaddr);
-+	set_pte(pte, new_pte);
++	blkif->xenblkd = NULL;
++	blkif_put(blkif);
 +
-+	/*
-+	 * It's enough to flush this one mapping.
-+	 * (PGE mappings get flushed as well)
-+	 */
-+	__flush_tlb_one(vaddr);
++	return 0;
 +}
 +
-+#define SET_FIXMAP_KERNEL 0
-+#define SET_FIXMAP_USER   1
++/******************************************************************
++ * COMPLETION CALLBACK -- Called by user level ioctl()
++ */
 +
-+/* NOTE: this is meant to be run only at boot */
-+void __init 
-+__set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
++static int blktap_read_ufe_ring(tap_blkif_t *info)
 +{
-+	unsigned long address = __fix_to_virt(idx);
++	/* This is called to read responses from the UFE ring. */
++	RING_IDX i, j, rp;
++	blkif_response_t *resp;
++	blkif_t *blkif=NULL;
++	int pending_idx, usr_idx, mmap_idx;
++	pending_req_t *pending_req;
++	
++	if (!info)
++		return 0;
 +
-+	if (idx >= __end_of_fixed_addresses) {
-+		printk("Invalid __set_fixmap\n");
-+		return;
-+	}
-+	switch (idx) {
-+	case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
-+		set_pte_phys(address, phys, prot, SET_FIXMAP_KERNEL);
-+		break;
-+	default:
-+		set_pte_phys_ma(address, phys, prot);
-+		break;
-+	}
-+}
++	/* We currently only forward packets in INTERCEPT_FE mode. */
++	if (!(info->mode & BLKTAP_MODE_INTERCEPT_FE))
++		return 0;
 +
-+/*
-+ * This only supports vsyscall area.
-+ */
-+void __init
-+__set_fixmap_user (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
-+{
-+	unsigned long address = __fix_to_virt(idx);
++	/* For each outstanding message on the UFE ring: */
++	rp = info->ufe_ring.sring->rsp_prod;
++	rmb();
++        
++	for (i = info->ufe_ring.rsp_cons; i != rp; i++) {
++		blkif_response_t res;
++		resp = RING_GET_RESPONSE(&info->ufe_ring, i);
++		memcpy(&res, resp, sizeof(res));
++		mb(); /* rsp_cons read by RING_FULL() in do_block_io_op(). */
++		++info->ufe_ring.rsp_cons;
 +
-+	if (idx >= __end_of_fixed_addresses) {
-+		printk("Invalid __set_fixmap\n");
-+		return;
-+	}
++		/*retrieve [usr_idx] to [mmap_idx,pending_idx] mapping*/
++		usr_idx = (int)res.id;
++		pending_idx = MASK_PEND_IDX(ID_TO_IDX(info->idx_map[usr_idx]));
++		mmap_idx = ID_TO_MIDX(info->idx_map[usr_idx]);
 +
-+	set_pte_phys(address, phys, prot, SET_FIXMAP_USER); 
-+}
++		if ((mmap_idx >= mmap_alloc) ||
++		    (ID_TO_IDX(info->idx_map[usr_idx]) >= MAX_PENDING_REQS)) {
++			WPRINTK("Incorrect req map "
++			       "[%d], internal map [%d,%d (%d)]\n", 
++			       usr_idx, mmap_idx, 
++			       ID_TO_IDX(info->idx_map[usr_idx]),
++			       MASK_PEND_IDX(
++				       ID_TO_IDX(info->idx_map[usr_idx])));
++			/* Don't touch pending_reqs with bogus indices. */
++			continue;
++		}
 +
-+unsigned long __initdata table_start, table_end; 
++		pending_req = &pending_reqs[mmap_idx][pending_idx];
++		blkif = pending_req->blkif;
 +
-+static __meminit void *alloc_static_page(unsigned long *phys)
-+{
-+	unsigned long va = (start_pfn << PAGE_SHIFT) + __START_KERNEL_map;
++		for (j = 0; j < pending_req->nr_pages; j++) {
 +
-+	if (after_bootmem) {
-+		void *adr = (void *)get_zeroed_page(GFP_ATOMIC);
++			unsigned long kvaddr, uvaddr;
++			struct page **map = info->vma->vm_private_data;
++			struct page *pg;
++			int offset;
 +
-+		*phys = __pa(adr);
-+		return adr;
++			uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, j);
++			kvaddr = idx_to_kaddr(mmap_idx, pending_idx, j);
++
++			pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
++			ClearPageReserved(pg);
++			offset = (uvaddr - info->vma->vm_start) 
++				>> PAGE_SHIFT;
++			map[offset] = NULL;
++		}
++		fast_flush_area(pending_req, pending_idx, usr_idx, info->minor);
++		info->idx_map[usr_idx] = INVALID_REQ;
++		make_response(blkif, pending_req->id, res.operation,
++			      res.status);
++		blkif_put(pending_req->blkif);
++		free_req(pending_req);
 +	}
++		
++	return 0;
++}
 +
-+	*phys = start_pfn << PAGE_SHIFT;
-+	start_pfn++;
-+	memset((void *)va, 0, PAGE_SIZE);
-+	return (void *)va;
-+} 
 +
-+#define PTE_SIZE PAGE_SIZE
++/******************************************************************************
++ * NOTIFICATION FROM GUEST OS.
++ */
 +
-+static inline void __set_pte(pte_t *dst, pte_t val)
++static void blkif_notify_work(blkif_t *blkif)
 +{
-+	*dst = val;
++	blkif->waiting_reqs = 1;
++	wake_up(&blkif->wq);
 +}
 +
-+static inline int make_readonly(unsigned long paddr)
++irqreturn_t tap_blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
 +{
-+	extern char __vsyscall_0;
-+	int readonly = 0;
++	blkif_notify_work(dev_id);
++	return IRQ_HANDLED;
++}
 +
-+	/* Make new page tables read-only. */
-+	if (!xen_feature(XENFEAT_writable_page_tables)
-+	    && (paddr >= (table_start << PAGE_SHIFT))
-+	    && (paddr < (table_end << PAGE_SHIFT)))
-+		readonly = 1;
-+	/* Make old page tables read-only. */
-+	if (!xen_feature(XENFEAT_writable_page_tables)
-+	    && (paddr >= (xen_start_info->pt_base - __START_KERNEL_map))
-+	    && (paddr < (start_pfn << PAGE_SHIFT)))
-+		readonly = 1;
 +
-+	/*
-+	 * No need for writable mapping of kernel image. This also ensures that
-+	 * page and descriptor tables embedded inside don't have writable
-+	 * mappings. Exclude the vsyscall area here, allowing alternative
-+	 * instruction patching to work.
-+	 */
-+	if ((paddr >= __pa_symbol(&_text)) && (paddr < __pa_symbol(&_end))
-+	    && !(paddr >= __pa_symbol(&__vsyscall_0)
-+	         && paddr < __pa_symbol(&__vsyscall_0) + PAGE_SIZE))
-+		readonly = 1;
 +
-+	return readonly;
-+}
++/******************************************************************
++ * DOWNWARD CALLS -- These interface with the block-device layer proper.
++ */
++static int print_dbug = 1;
++static int do_block_io_op(blkif_t *blkif)
++{
++	blkif_back_rings_t *blk_rings = &blkif->blk_rings;
++	blkif_request_t req;
++	pending_req_t *pending_req;
++	RING_IDX rc, rp;
++	int more_to_do = 0;
++	tap_blkif_t *info;
++
++	rc = blk_rings->common.req_cons;
++	rp = blk_rings->common.sring->req_prod;
++	rmb(); /* Ensure we see queued requests up to 'rp'. */
++
++	/* Check that this blkif has a corresponding UE ring. */
++	if (blkif->dev_num < 0) {
++		if (print_dbug) {
++			WPRINTK("Corresponding UE "
++			       "ring does not exist!\n");
++			print_dbug = 0; /* Only print this message once. */
++		}
++		return 0;
++	}
++
++	/* Bounds-check dev_num before indexing tapfds[]. */
++	info = (blkif->dev_num > MAX_TAP_DEV) ? NULL : tapfds[blkif->dev_num];
++
++	if (!info || !info->dev_inuse) {
++		if (print_dbug) {
++			WPRINTK("Can't get UE info!\n");
++			print_dbug = 0;
++		}
++		return 0;
++	}
++
++	while (rc != rp) {
++		
++		if (RING_FULL(&info->ufe_ring)) {
++			WPRINTK("RING_FULL! More to do\n");
++			more_to_do = 1;
++			break;
++		}
++
++		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc)) {
++			WPRINTK("RING_REQUEST_CONS_OVERFLOW!"
++			       " More to do\n");
++			more_to_do = 1;
++			break;		
++		}
++
++		pending_req = alloc_req();
++		if (NULL == pending_req) {
++			blkif->st_oo_req++;
++			more_to_do = 1;
++			break;
++		}
 +
-+#ifndef CONFIG_XEN
-+/* Must run before zap_low_mappings */
-+__init void *early_ioremap(unsigned long addr, unsigned long size)
-+{
-+	unsigned long map = round_down(addr, LARGE_PAGE_SIZE);
++		if (kthread_should_stop()) {
++			more_to_do = 1;
++			break;
++		}
 +
-+	/* actually usually some more */
-+	if (size >= LARGE_PAGE_SIZE) {
-+		printk("SMBIOS area too long %lu\n", size);
-+		return NULL;
-+	}
-+	set_pmd(temp_mappings[0].pmd,  __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
-+	map += LARGE_PAGE_SIZE;
-+	set_pmd(temp_mappings[1].pmd,  __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
-+	__flush_tlb();
-+	return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
-+}
++		switch (blkif->blk_protocol) {
++		case BLKIF_PROTOCOL_NATIVE:
++			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc),
++			       sizeof(req));
++			break;
++		case BLKIF_PROTOCOL_X86_32:
++			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
++			break;
++		case BLKIF_PROTOCOL_X86_64:
++			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
++			break;
++		default:
++			BUG();
++		}
++		blk_rings->common.req_cons = ++rc; /* before make_response() */
 +
-+/* To avoid virtual aliases later */
-+__init void early_iounmap(void *addr, unsigned long size)
-+{
-+	if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
-+		printk("early_iounmap: bad address %p\n", addr);
-+	set_pmd(temp_mappings[0].pmd, __pmd(0));
-+	set_pmd(temp_mappings[1].pmd, __pmd(0));
-+	__flush_tlb();
-+}
-+#endif
++		/* Apply all sanity checks to /private copy/ of request. */
++		barrier();
 +
-+static void __meminit
-+phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
-+{
-+	int i, k;
++		switch (req.operation) {
++		case BLKIF_OP_READ:
++			blkif->st_rd_req++;
++			dispatch_rw_block_io(blkif, &req, pending_req);
++			break;
 +
-+	for (i = 0; i < PTRS_PER_PMD; pmd++, i++) {
-+		unsigned long pte_phys;
-+		pte_t *pte, *pte_save;
++		case BLKIF_OP_WRITE:
++			blkif->st_wr_req++;
++			dispatch_rw_block_io(blkif, &req, pending_req);
++			break;
 +
-+		if (address >= end) {
-+			if (!after_bootmem)
-+				for (; i < PTRS_PER_PMD; i++, pmd++)
-+					set_pmd(pmd, __pmd(0));
++		default:
++			/* A good sign something is wrong: sleep for a while to
++			 * avoid excessive CPU consumption by a bad guest. */
++			msleep(1);
++			WPRINTK("unknown operation [%d]\n",
++				req.operation);
++			make_response(blkif, req.id, req.operation,
++				      BLKIF_RSP_ERROR);
++			free_req(pending_req);
 +			break;
 +		}
-+		pte = alloc_static_page(&pte_phys);
-+		pte_save = pte;
-+		for (k = 0; k < PTRS_PER_PTE; pte++, k++, address += PTE_SIZE) {
-+			unsigned long pteval = address | _PAGE_NX | _KERNPG_TABLE;
 +
-+			if ((address >= end) ||
-+			    ((address >> PAGE_SHIFT) >=
-+			     xen_start_info->nr_pages))
-+				pteval = 0;
-+			else if (make_readonly(address))
-+				pteval &= ~_PAGE_RW;
-+			__set_pte(pte, __pte(pteval & __supported_pte_mask));
-+		}
-+		pte = pte_save;
-+		early_make_page_readonly(pte, XENFEAT_writable_page_tables);
-+		set_pmd(pmd, __pmd(pte_phys | _KERNPG_TABLE));
++		/* Yield point for this unbounded loop. */
++		cond_resched();
 +	}
++
++	blktap_kick_user(blkif->dev_num);
++
++	return more_to_do;
 +}
 +
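
/*
 * Editorial sketch: the blkif_get_x86_32_req()/blkif_get_x86_64_req()
 * thunks used above are declared in xen/blkif.h and copy a foreign-ABI
 * request into the backend's native layout, roughly as follows (an
 * outline of the shared header, not a verbatim copy of it):
 */
static inline void blkif_get_x86_32_req(blkif_request_t *dst,
					blkif_x86_32_request_t *src)
{
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;

	dst->operation     = src->operation;
	dst->nr_segments   = src->nr_segments;
	dst->handle        = src->handle;
	dst->id            = src->id;
	dst->sector_number = src->sector_number;
	if (n > dst->nr_segments)
		n = dst->nr_segments;
	for (i = 0; i < n; i++)
		dst->seg[i] = src->seg[i];
}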
-+static void __meminit
-+phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
++static void dispatch_rw_block_io(blkif_t *blkif,
++				 blkif_request_t *req,
++				 pending_req_t *pending_req)
 +{
-+	pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));
-+
-+	if (pmd_none(*pmd)) {
-+		spin_lock(&init_mm.page_table_lock);
-+		phys_pmd_init(pmd, address, end);
-+		spin_unlock(&init_mm.page_table_lock);
-+		__flush_tlb_all();
-+	}
-+}
++	extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
++	int op, operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
++	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
++	unsigned int nseg;
++	int ret, i, nr_sects = 0;
++	tap_blkif_t *info;
++	blkif_request_t *target;
++	int pending_idx = RTN_PEND_IDX(pending_req,pending_req->mem_idx);
++	int usr_idx;
++	uint16_t mmap_idx = pending_req->mem_idx;
 +
-+static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
-+{ 
-+	long i = pud_index(address);
++	if (blkif->dev_num < 0 || blkif->dev_num > MAX_TAP_DEV)
++		goto fail_response;
 +
-+	pud = pud + i;
++	info = tapfds[blkif->dev_num];
++	if (info == NULL)
++		goto fail_response;
 +
-+	if (after_bootmem && pud_val(*pud)) {
-+		phys_pmd_update(pud, address, end);
-+		return;
++	/* Check we have space on user ring - should never fail. */
++	usr_idx = GET_NEXT_REQ(info->idx_map);
++	if (usr_idx == INVALID_REQ) {
++		BUG();
++		goto fail_response;
 +	}
 +
-+	for (; i < PTRS_PER_PUD; pud++, i++) {
-+		unsigned long paddr, pmd_phys;
-+		pmd_t *pmd;
-+
-+		paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
-+		if (paddr >= end)
-+			break;
++	/* Check that number of segments is sane. */
++	nseg = req->nr_segments;
++	if (unlikely(nseg == 0) ||
++	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
++		WPRINTK("Bad number of segments in request (%d)\n", nseg);
++		goto fail_response;
++	}
++
++	/* Make sure userspace is ready. */
++	if (!info->ring_ok) {
++		WPRINTK("blktap: ring not ready for requests!\n");
++		goto fail_response;
++	}
 +
-+		pmd = alloc_static_page(&pmd_phys);
-+		early_make_page_readonly(pmd, XENFEAT_writable_page_tables);
-+		spin_lock(&init_mm.page_table_lock);
-+		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
-+		phys_pmd_init(pmd, paddr, end);
-+		spin_unlock(&init_mm.page_table_lock);
++	if (RING_FULL(&info->ufe_ring)) {
++		WPRINTK("blktap: fe_ring is full, can't add "
++			"IO Request will be dropped. %d %d\n",
++			RING_SIZE(&info->ufe_ring),
++			RING_SIZE(&blkif->blk_rings.common));
++		goto fail_response;
 +	}
-+	__flush_tlb();
-+} 
 +
-+void __init xen_init_pt(void)
-+{
-+	unsigned long addr, *page;
++	pending_req->blkif     = blkif;
++	pending_req->id        = req->id;
++	pending_req->operation = operation;
++	pending_req->status    = BLKIF_RSP_OKAY;
++	pending_req->nr_pages  = nseg;
++	op = 0;
++	for (i = 0; i < nseg; i++) {
++		unsigned long uvaddr;
++		unsigned long kvaddr;
++		uint64_t ptep;
++		uint32_t flags;
 +
-+	/* Find the initial pte page that was built for us. */
-+	page = (unsigned long *)xen_start_info->pt_base;
-+	addr = page[pgd_index(__START_KERNEL_map)];
-+	addr_to_page(addr, page);
-+	addr = page[pud_index(__START_KERNEL_map)];
-+	addr_to_page(addr, page);
++		uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i);
++		kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
 +
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+	/* On Xen 3.0.2 and older we may need to explicitly specify _PAGE_USER
-+	   in kernel PTEs. We check that here. */
-+	if (HYPERVISOR_xen_version(XENVER_version, NULL) <= 0x30000) {
-+		unsigned long *pg;
-+		pte_t pte;
++		flags = GNTMAP_host_map;
++		if (operation == WRITE)
++			flags |= GNTMAP_readonly;
++		gnttab_set_map_op(&map[op], kvaddr, flags,
++				  req->seg[i].gref, blkif->domid);
++		op++;
 +
-+		/* Mess with the initial mapping of page 0. It's not needed. */
-+		BUILD_BUG_ON(__START_KERNEL <= __START_KERNEL_map);
-+		addr = page[pmd_index(__START_KERNEL_map)];
-+		addr_to_page(addr, pg);
-+		pte.pte = pg[pte_index(__START_KERNEL_map)];
-+		BUG_ON(!(pte.pte & _PAGE_PRESENT));
++		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++			/* Now map it to user. */
++			ret = create_lookup_pte_addr(info->vma->vm_mm, 
++						     uvaddr, &ptep);
++			if (ret) {
++				WPRINTK("Couldn't get a pte addr!\n");
++				goto fail_flush;
++			}
 +
-+		/* If _PAGE_USER isn't set, we obviously do not need it. */
-+		if (pte.pte & _PAGE_USER) {
-+			/* _PAGE_USER is needed, but is it set implicitly? */
-+			pte.pte &= ~_PAGE_USER;
-+			if ((HYPERVISOR_update_va_mapping(__START_KERNEL_map,
-+							  pte, 0) != 0) ||
-+			    !(pg[pte_index(__START_KERNEL_map)] & _PAGE_USER))
-+				/* We need to explicitly specify _PAGE_USER. */
-+				__kernel_page_user = _PAGE_USER;
++			flags = GNTMAP_host_map | GNTMAP_application_map
++				| GNTMAP_contains_pte;
++			if (operation == WRITE)
++				flags |= GNTMAP_readonly;
++			gnttab_set_map_op(&map[op], ptep, flags,
++					  req->seg[i].gref, blkif->domid);
++			op++;
 +		}
++
++		nr_sects += (req->seg[i].last_sect - 
++			     req->seg[i].first_sect + 1);
 +	}
-+#endif
 +
-+	/* Construct mapping of initial pte page in our own directories. */
-+	init_level4_pgt[pgd_index(__START_KERNEL_map)] = 
-+		__pgd(__pa_symbol(level3_kernel_pgt) | _PAGE_TABLE);
-+	level3_kernel_pgt[pud_index(__START_KERNEL_map)] = 
-+		__pud(__pa_symbol(level2_kernel_pgt) |
-+		      _KERNPG_TABLE);
-+	memcpy((void *)level2_kernel_pgt, page, PAGE_SIZE);
++	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, op);
++	BUG_ON(ret);
 +
-+	early_make_page_readonly(init_level4_pgt,
-+				 XENFEAT_writable_page_tables);
-+	early_make_page_readonly(init_level4_user_pgt,
-+				 XENFEAT_writable_page_tables);
-+	early_make_page_readonly(level3_kernel_pgt,
-+				 XENFEAT_writable_page_tables);
-+	early_make_page_readonly(level3_user_pgt,
-+				 XENFEAT_writable_page_tables);
-+	early_make_page_readonly(level2_kernel_pgt,
-+				 XENFEAT_writable_page_tables);
++	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++		for (i = 0; i < (nseg*2); i+=2) {
++			unsigned long uvaddr;
++			unsigned long kvaddr;
++			unsigned long offset;
++			struct page *pg;
 +
-+	if (!xen_feature(XENFEAT_writable_page_tables)) {
-+		xen_pgd_pin(__pa_symbol(init_level4_pgt));
-+		xen_pgd_pin(__pa_symbol(init_level4_user_pgt));
-+	}
++			uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i/2);
++			kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i/2);
 +
-+	set_pgd((pgd_t *)(init_level4_user_pgt + 511), 
-+		mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
-+}
++			if (unlikely(map[i].status != 0)) {
++				WPRINTK("invalid kernel buffer -- "
++					"could not remap it\n");
++				ret |= 1;
++				map[i].handle = INVALID_GRANT_HANDLE;
++			}
 +
-+static void __init extend_init_mapping(unsigned long tables_space)
-+{
-+	unsigned long va = __START_KERNEL_map;
-+	unsigned long phys, addr, *pte_page;
-+	pmd_t *pmd;
-+	pte_t *pte, new_pte;
-+	unsigned long *page = (unsigned long *)init_level4_pgt;
++			if (unlikely(map[i+1].status != 0)) {
++				WPRINTK("invalid user buffer -- "
++					"could not remap it\n");
++				ret |= 1;
++				map[i+1].handle = INVALID_GRANT_HANDLE;
++			}
 +
-+	addr = page[pgd_index(va)];
-+	addr_to_page(addr, page);
-+	addr = page[pud_index(va)];
-+	addr_to_page(addr, page);
++			pending_handle(mmap_idx, pending_idx, i/2).kernel 
++				= map[i].handle;
++			pending_handle(mmap_idx, pending_idx, i/2).user   
++				= map[i+1].handle;
 +
-+	/* Kill mapping of low 1MB. */
-+	while (va < (unsigned long)&_text) {
-+		HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
-+		va += PAGE_SIZE;
-+	}
++			if (ret)
++				continue;
 +
-+	/* Ensure init mappings cover kernel text/data and initial tables. */
-+	while (va < (__START_KERNEL_map
-+		     + (start_pfn << PAGE_SHIFT)
-+		     + tables_space)) {
-+		pmd = (pmd_t *)&page[pmd_index(va)];
-+		if (pmd_none(*pmd)) {
-+			pte_page = alloc_static_page(&phys);
-+			early_make_page_readonly(
-+				pte_page, XENFEAT_writable_page_tables);
-+			set_pmd(pmd, __pmd(phys | _KERNPG_TABLE));
-+		} else {
-+			addr = page[pmd_index(va)];
-+			addr_to_page(addr, pte_page);
-+		}
-+		pte = (pte_t *)&pte_page[pte_index(va)];
-+		if (pte_none(*pte)) {
-+			new_pte = pfn_pte(
-+				(va - __START_KERNEL_map) >> PAGE_SHIFT, 
-+				__pgprot(_KERNPG_TABLE));
-+			xen_l1_entry_update(pte, new_pte);
++			set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
++					    FOREIGN_FRAME(map[i].dev_bus_addr
++							  >> PAGE_SHIFT));
++			offset = (uvaddr - info->vma->vm_start) >> PAGE_SHIFT;
++			pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
++			((struct page **)info->vma->vm_private_data)[offset] =
++				pg;
 +		}
-+		va += PAGE_SIZE;
-+	}
-+
-+	/* Finally, blow away any spurious initial mappings. */
-+	while (1) {
-+		pmd = (pmd_t *)&page[pmd_index(va)];
-+		if (pmd_none(*pmd))
-+			break;
-+		HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
-+		va += PAGE_SIZE;
-+	}
-+}
++	} else {
++		for (i = 0; i < nseg; i++) {
++			unsigned long uvaddr;
++			unsigned long kvaddr;
++			unsigned long offset;
++			struct page *pg;
 +
-+static void __init find_early_table_space(unsigned long end)
-+{
-+	unsigned long puds, pmds, ptes, tables;
++			uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i);
++			kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
 +
-+	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-+	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-+	ptes = (end + PTE_SIZE - 1) >> PAGE_SHIFT;
++			if (unlikely(map[i].status != 0)) {
++				WPRINTK("invalid kernel buffer -- "
++					"could not remap it\n");
++				ret |= 1;
++				map[i].handle = INVALID_GRANT_HANDLE;
++			}
 +
-+	tables = round_up(puds * 8, PAGE_SIZE) + 
-+		round_up(pmds * 8, PAGE_SIZE) + 
-+		round_up(ptes * 8, PAGE_SIZE); 
++			pending_handle(mmap_idx, pending_idx, i).kernel 
++				= map[i].handle;
 +
-+	extend_init_mapping(tables);
++			if (ret)
++				continue;
 +
-+	table_start = start_pfn;
-+	table_end = table_start + (tables>>PAGE_SHIFT);
++			offset = (uvaddr - info->vma->vm_start) >> PAGE_SHIFT;
++			pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
++			((struct page **)info->vma->vm_private_data)[offset] =
++				pg;
++		}
++	}
 +
-+	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
-+		end, table_start << PAGE_SHIFT,
-+		(table_start << PAGE_SHIFT) + tables);
-+}
++	if (ret)
++		goto fail_flush;
 +
-+static void xen_finish_init_mapping(void)
-+{
-+	unsigned long i, start, end;
++	if (xen_feature(XENFEAT_auto_translated_physmap))
++		down_write(&info->vma->vm_mm->mmap_sem);
++	/* Mark mapped pages as reserved: */
++	for (i = 0; i < req->nr_segments; i++) {
++		unsigned long kvaddr;
++		struct page *pg;
 +
-+	/* Re-vector virtual addresses pointing into the initial
-+	   mapping to the just-established permanent ones. */
-+	xen_start_info = __va(__pa(xen_start_info));
-+	xen_start_info->pt_base = (unsigned long)
-+		__va(__pa(xen_start_info->pt_base));
-+	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+		phys_to_machine_mapping =
-+			__va(__pa(xen_start_info->mfn_list));
-+		xen_start_info->mfn_list = (unsigned long)
-+			phys_to_machine_mapping;
++		kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
++		pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
++		SetPageReserved(pg);
++		if (xen_feature(XENFEAT_auto_translated_physmap)) {
++			ret = vm_insert_page(info->vma,
++					     MMAP_VADDR(info->user_vstart,
++							usr_idx, i), pg);
++			if (ret) {
++				up_write(&info->vma->vm_mm->mmap_sem);
++				goto fail_flush;
++			}
++		}
 +	}
-+	if (xen_start_info->mod_start)
-+		xen_start_info->mod_start = (unsigned long)
-+			__va(__pa(xen_start_info->mod_start));
-+
-+	/* Destroy the Xen-created mappings beyond the kernel image as
-+	 * well as the temporary mappings created above. Prevents
-+	 * overlap with modules area (if init mapping is very big).
-+	 */
-+	start = PAGE_ALIGN((unsigned long)_end);
-+	end   = __START_KERNEL_map + (table_end << PAGE_SHIFT);
-+	for (; start < end; start += PAGE_SIZE)
-+		WARN_ON(HYPERVISOR_update_va_mapping(
-+			start, __pte_ma(0), 0));
-+
-+	/* Allocate pte's for initial fixmaps from 'start_pfn' allocator. */
-+	table_end = ~0UL;
++	if (xen_feature(XENFEAT_auto_translated_physmap))
++		up_write(&info->vma->vm_mm->mmap_sem);
++
++	/* Record [mmap_idx, pending_idx] -> [usr_idx] mapping. */
++	info->idx_map[usr_idx] = MAKE_ID(mmap_idx, pending_idx);
 +
-+	/*
-+	 * Prefetch pte's for the bt_ioremap() area. It gets used before the
-+	 * boot-time allocator is online, so allocate-on-demand would fail.
-+	 */
-+	for (i = FIX_BTMAP_END; i <= FIX_BTMAP_BEGIN; i++)
-+		__set_fixmap(i, 0, __pgprot(0));
++	blkif_get(blkif);
++	/* Finally, write the request message to the user ring. */
++	target = RING_GET_REQUEST(&info->ufe_ring,
++				  info->ufe_ring.req_prod_pvt);
++	memcpy(target, req, sizeof(*req));
++	target->id = usr_idx;
++	wmb(); /* blktap_poll() reads req_prod_pvt asynchronously */
++	info->ufe_ring.req_prod_pvt++;
 +
-+	/* Switch to the real shared_info page, and clear the dummy page. */
-+	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
-+	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
-+	memset(empty_zero_page, 0, sizeof(empty_zero_page));
++	if (operation == READ)
++		blkif->st_rd_sect += nr_sects;
++	else if (operation == WRITE)
++		blkif->st_wr_sect += nr_sects;
 +
-+	/* Set up mapping of lowest 1MB of physical memory. */
-+	for (i = 0; i < NR_FIX_ISAMAPS; i++)
-+		if (is_initial_xendomain())
-+			set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
-+		else
-+			__set_fixmap(FIX_ISAMAP_BEGIN - i,
-+				     virt_to_mfn(empty_zero_page)
-+				     << PAGE_SHIFT,
-+				     PAGE_KERNEL_RO);
++	return;
 +
-+	/* Disable the 'start_pfn' allocator. */
-+	table_end = start_pfn;
++ fail_flush:
++	WPRINTK("Reached Fail_flush\n");
++	fast_flush_area(pending_req, pending_idx, usr_idx, blkif->dev_num);
++ fail_response:
++	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
++	free_req(pending_req);
++	msleep(1); /* back off a bit */
 +}
 +
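
/*
 * Editorial sketch: in the grant-mapped path of dispatch_rw_block_io()
 * above, each guest segment contributes two map[] entries, so nseg
 * segments yield 2*nseg operations: even slots map the frame into the
 * kernel at kvaddr, odd slots install the matching user-space pte at
 * uvaddr.  That is why the completion code walks map[] in strides of two,
 * in outline (names as in the function above):
 *
 *	for (i = 0; i < nseg * 2; i += 2) {
 *		map[i]     is the kernel mapping of segment i/2
 *		map[i + 1] is the user-space pte of segment i/2
 *		if (map[i].status || map[i + 1].status)
 *			ret |= 1;	(unwound via fail_flush)
 *	}
 */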
-+/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
-+   This runs before bootmem is initialized and gets pages directly from the 
-+   physical memory. To access them they are temporarily mapped. */
-+void __meminit init_memory_mapping(unsigned long start, unsigned long end)
-+{ 
-+	unsigned long next;
 +
-+	Dprintk("init_memory_mapping\n");
 +
-+	/* 
-+	 * Find space for the kernel direct mapping tables.
-+	 * Later we should allocate these tables in the local node of the memory
-+	 * mapped.  Unfortunately this is done currently before the nodes are 
-+	 * discovered.
-+	 */
-+	if (!after_bootmem)
-+		find_early_table_space(end);
++/******************************************************************
++ * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
++ */
 +
-+	start = (unsigned long)__va(start);
-+	end = (unsigned long)__va(end);
 +
-+	for (; start < end; start = next) {
-+		unsigned long pud_phys; 
-+		pgd_t *pgd = pgd_offset_k(start);
-+		pud_t *pud;
++static void make_response(blkif_t *blkif, u64 id,
++                          unsigned short op, int st)
++{
++	blkif_response_t  resp;
++	unsigned long     flags;
++	blkif_back_rings_t *blk_rings = &blkif->blk_rings;
++	int more_to_do = 0;
++	int notify;
 +
-+		if (after_bootmem) {
-+			pud = pud_offset(pgd, start & PGDIR_MASK);
-+			make_page_readonly(pud, XENFEAT_writable_page_tables);
-+			pud_phys = __pa(pud);
-+		} else {
-+			pud = alloc_static_page(&pud_phys);
-+			early_make_page_readonly(pud, XENFEAT_writable_page_tables);
-+		}
-+		next = start + PGDIR_SIZE;
-+		if (next > end) 
-+			next = end; 
-+		phys_pud_init(pud, __pa(start), __pa(next));
-+		if (!after_bootmem)
-+			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
++	resp.id        = id;
++	resp.operation = op;
++	resp.status    = st;
++
++	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
++	/* Place on the response ring for the relevant domain. */
++	switch (blkif->blk_protocol) {
++	case BLKIF_PROTOCOL_NATIVE:
++		memcpy(RING_GET_RESPONSE(&blk_rings->native,
++					 blk_rings->native.rsp_prod_pvt),
++		       &resp, sizeof(resp));
++		break;
++	case BLKIF_PROTOCOL_X86_32:
++		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32,
++					 blk_rings->x86_32.rsp_prod_pvt),
++		       &resp, sizeof(resp));
++		break;
++	case BLKIF_PROTOCOL_X86_64:
++		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64,
++					 blk_rings->x86_64.rsp_prod_pvt),
++		       &resp, sizeof(resp));
++		break;
++	default:
++		BUG();
 +	}
++	blk_rings->common.rsp_prod_pvt++;
++	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
 +
-+	if (!after_bootmem) {
-+		BUG_ON(start_pfn != table_end);
-+		xen_finish_init_mapping();
++	if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
++		/*
++		 * Tail check for pending requests. Allows frontend to avoid
++		 * notifications if requests are already in flight (lower
++		 * overheads and promotes batching).
++		 */
++		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
++	} else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
++		more_to_do = 1;
 +	}
 +
-+	__flush_tlb_all();
-+}
-+
-+void __cpuinit zap_low_mappings(int cpu)
-+{
-+	/* this is not required for Xen */
-+#if 0
-+	swap_low_mappings();
-+#endif
++	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
++	if (more_to_do)
++		blkif_notify_work(blkif);
++	if (notify)
++		notify_remote_via_irq(blkif->irq);
 +}
 +
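
/*
 * Editorial note: the tail check in make_response() closes a missed-wakeup
 * race.  Per the semantics of Xen's public io/ring.h,
 * RING_FINAL_CHECK_FOR_REQUESTS() re-arms the ring's event pointer and
 * then looks again, roughly:
 *
 *	(_work) = RING_HAS_UNCONSUMED_REQUESTS(_r);
 *	if (!(_work)) {
 *		(_r)->sring->req_event = (_r)->req_cons + 1;
 *		mb();
 *		(_work) = RING_HAS_UNCONSUMED_REQUESTS(_r);
 *	}
 *
 * so a request that arrives between the first check and the re-arm is
 * still seen, and the frontend only notifies when the event pointer says
 * a notification is needed.
 */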
-+/* Compute zone sizes for the DMA and DMA32 zones in a node. */
-+__init void
-+size_zones(unsigned long *z, unsigned long *h,
-+	   unsigned long start_pfn, unsigned long end_pfn)
++static int __init blkif_init(void)
 +{
-+ 	int i;
-+#ifndef CONFIG_XEN
-+ 	unsigned long w;
-+#endif
-+
-+ 	for (i = 0; i < MAX_NR_ZONES; i++)
-+ 		z[i] = 0;
++	int i, ret;
++	struct class *class;
 +
-+#ifndef CONFIG_XEN
-+ 	if (start_pfn < MAX_DMA_PFN)
-+ 		z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
-+ 	if (start_pfn < MAX_DMA32_PFN) {
-+ 		unsigned long dma32_pfn = MAX_DMA32_PFN;
-+ 		if (dma32_pfn > end_pfn)
-+ 			dma32_pfn = end_pfn;
-+ 		z[ZONE_DMA32] = dma32_pfn - start_pfn;
-+ 	}
-+ 	z[ZONE_NORMAL] = end_pfn - start_pfn;
++	if (!is_running_on_xen())
++		return -ENODEV;
 +
-+ 	/* Remove lower zones from higher ones. */
-+ 	w = 0;
-+ 	for (i = 0; i < MAX_NR_ZONES; i++) {
-+ 		if (z[i])
-+ 			z[i] -= w;
-+ 	        w += z[i];
++	INIT_LIST_HEAD(&pending_free);
++	for (i = 0; i < 2; i++) {
++		ret = req_increase();
++		if (ret)
++			break;
 +	}
++	if (i == 0)
++		return ret;
 +
-+	/* Compute holes */
-+	w = start_pfn;
-+	for (i = 0; i < MAX_NR_ZONES; i++) {
-+		unsigned long s = w;
-+		w += z[i];
-+		h[i] = e820_hole_size(s, w);
-+	}
++	tap_blkif_interface_init();
 +
-+	/* Add the space pace needed for mem_map to the holes too. */
-+	for (i = 0; i < MAX_NR_ZONES; i++)
-+		h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;
++	alloc_pending_reqs = 0;
 +
-+	/* The 16MB DMA zone has the kernel and other misc mappings.
-+ 	   Account them too */
-+	if (h[ZONE_DMA]) {
-+		h[ZONE_DMA] += dma_reserve;
-+		if (h[ZONE_DMA] >= z[ZONE_DMA]) {
-+			printk(KERN_WARNING
-+				"Kernel too large and filling up ZONE_DMA?\n");
-+			h[ZONE_DMA] = z[ZONE_DMA];
-+		}
-+	}
-+#else
-+	z[ZONE_DMA] = end_pfn;
-+ 	for (i = 0; i < MAX_NR_ZONES; i++)
-+ 		h[i] = 0;
-+#endif
-+}
++	tap_blkif_xenbus_init();
 +
-+#ifndef CONFIG_NUMA
-+void __init paging_init(void)
-+{
-+	unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];
++	/* Dynamically allocate a major for this device */
++	ret = register_chrdev(0, "blktap", &blktap_fops);
 +
-+	memory_present(0, 0, end_pfn);
-+	sparse_init();
-+	size_zones(zones, holes, 0, end_pfn);
-+	free_area_init_node(0, NODE_DATA(0), zones,
-+			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
++	if (ret < 0) {
++		WPRINTK("Couldn't register /dev/xen/blktap\n");
++		return ret;
++	}
++
++	blktap_major = ret;
 +
-+	init_mm.context.pinned = 1;
-+}
-+#endif
++	/* tapfds[0] is always NULL */
++	blktap_next_minor++;
 +
-+/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
-+   from the CPU leading to inconsistent cache lines. address and size
-+   must be aligned to 2MB boundaries. 
-+   Does nothing when the mapping doesn't exist. */
-+void __init clear_kernel_mapping(unsigned long address, unsigned long size) 
-+{
-+	unsigned long end = address + size;
++	DPRINTK("Created misc_dev [/dev/xen/blktap%d]\n",i);
 +
-+	BUG_ON(address & ~LARGE_PAGE_MASK);
-+	BUG_ON(size & ~LARGE_PAGE_MASK); 
-+	
-+	for (; address < end; address += LARGE_PAGE_SIZE) { 
-+		pgd_t *pgd = pgd_offset_k(address);
-+		pud_t *pud;
-+		pmd_t *pmd;
-+		if (pgd_none(*pgd))
-+			continue;
-+		pud = pud_offset(pgd, address);
-+		if (pud_none(*pud))
-+			continue; 
-+		pmd = pmd_offset(pud, address);
-+		if (!pmd || pmd_none(*pmd))
-+			continue; 
-+		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) { 
-+			/* Could handle this, but it should not happen currently. */
-+			printk(KERN_ERR 
-+	       "clear_kernel_mapping: mapping has been split. will leak memory\n"); 
-+			pmd_ERROR(*pmd); 
-+		}
-+		set_pmd(pmd, __pmd(0)); 		
++	/* Make sure the xen class exists */
++	if ((class = get_xen_class()) != NULL) {
++		/*
++		 * This will allow udev to create the blktap ctrl device.
++		 * We only create blktap0 up front; we don't want to flood
++		 * sysfs with needless blktap devices.  Further nodes are
++		 * created only when a request for a new device is made.
++		 */
++		class_device_create(class, NULL,
++				    MKDEV(blktap_major, 0), NULL,
++				    "blktap0");
++	} else {
++		/* this is bad, but not fatal */
++		WPRINTK("blktap: sysfs xen_class not created\n");
 +	}
-+	__flush_tlb_all();
-+} 
 +
-+/*
-+ * Memory hotplug specific functions
-+ */
-+void online_page(struct page *page)
-+{
-+	ClearPageReserved(page);
-+	init_page_count(page);
-+	__free_page(page);
-+	totalram_pages++;
-+	num_physpages++;
-+}
++	DPRINTK("Blktap device successfully created\n");
 +
-+#ifdef CONFIG_MEMORY_HOTPLUG
-+/*
-+ * XXX: memory_add_physaddr_to_nid() is to find node id from physical address
-+ *	via probe interface of sysfs. If acpi notifies hot-add event, then it
-+ *	can tell node id by searching dsdt. But, probe interface doesn't have
-+ *	node id. So, return 0 as node id at this time.
-+ */
-+#ifdef CONFIG_NUMA
-+int memory_add_physaddr_to_nid(u64 start)
-+{
 +	return 0;
 +}
-+#endif
 +
-+/*
-+ * Memory is added always to NORMAL zone. This means you will never get
-+ * additional DMA/DMA32 memory.
++module_init(blkif_init);
++
++MODULE_LICENSE("Dual BSD/GPL");
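
For context: blkif_init() above registers a plain character device that a
userspace tapdisk process drives.  A minimal sketch of that userspace side,
assuming the /dev/xen/blktap0 node created via class_device_create() and a
single-page ring mapping (the real mmap layout and control handshake are
defined elsewhere in blktap.c and are not shown in this hunk):

	#include <fcntl.h>
	#include <poll.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/xen/blktap0", O_RDWR);
		if (fd < 0)
			return 1;
		/* Page count and offset are assumptions for illustration. */
		void *ring = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				  MAP_SHARED, fd, 0);
		if (ring == MAP_FAILED)
			return 1;
		struct pollfd pfd = { .fd = fd, .events = POLLIN };
		while (poll(&pfd, 1, -1) > 0) {
			/* Consume ufe_ring requests (the kernel kicks us
			 * via blktap_kick_user()), do the I/O, then push
			 * responses back. */
		}
		return 0;
	}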
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/blktap/common.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/blktap/common.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,122 @@
++/* 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
 + */
-+int arch_add_memory(int nid, u64 start, u64 size)
-+{
-+	struct pglist_data *pgdat = NODE_DATA(nid);
-+	struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
-+	unsigned long start_pfn = start >> PAGE_SHIFT;
-+	unsigned long nr_pages = size >> PAGE_SHIFT;
-+	int ret;
 +
-+	ret = __add_pages(zone, start_pfn, nr_pages);
-+	if (ret)
-+		goto error;
++#ifndef __BLKIF__BACKEND__COMMON_H__
++#define __BLKIF__BACKEND__COMMON_H__
 +
-+	init_memory_mapping(start, (start + size -1));
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/blkdev.h>
++#include <linux/vmalloc.h>
++#include <asm/io.h>
++#include <asm/setup.h>
++#include <asm/pgalloc.h>
++#include <xen/evtchn.h>
++#include <asm/hypervisor.h>
++#include <xen/blkif.h>
++#include <xen/gnttab.h>
++#include <xen/driver_util.h>
 +
-+	return ret;
-+error:
-+	printk("%s: Problem encountered in __add_pages!\n", __func__);
-+	return ret;
-+}
-+EXPORT_SYMBOL_GPL(arch_add_memory);
++#define DPRINTK(_f, _a...) pr_debug("(file=%s, line=%d) " _f, \
++                                    __FILE__ , __LINE__ , ## _a )
 +
-+int remove_memory(u64 start, u64 size)
-+{
-+	return -EINVAL;
-+}
-+EXPORT_SYMBOL_GPL(remove_memory);
++#define WPRINTK(fmt, args...) printk(KERN_WARNING "blk_tap: " fmt, ##args)
 +
-+#else /* CONFIG_MEMORY_HOTPLUG */
-+/*
-+ * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
-+ * just online the pages.
-+ */
-+int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
-+{
-+	int err = -EIO;
-+	unsigned long pfn;
-+	unsigned long total = 0, mem = 0;
-+	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
-+		if (pfn_valid(pfn)) {
-+			online_page(pfn_to_page(pfn));
-+			err = 0;
-+			mem++;
-+		}
-+		total++;
-+	}
-+	if (!err) {
-+		z->spanned_pages += total;
-+		z->present_pages += mem;
-+		z->zone_pgdat->node_spanned_pages += total;
-+		z->zone_pgdat->node_present_pages += mem;
-+	}
-+	return err;
-+}
-+#endif /* CONFIG_MEMORY_HOTPLUG */
++struct backend_info;
 +
-+static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
-+			 kcore_vsyscall;
++typedef struct blkif_st {
++	/* Unique identifier for this interface. */
++	domid_t           domid;
++	unsigned int      handle;
++	/* Physical parameters of the comms window. */
++	unsigned int      irq;
++	/* Comms information. */
++	enum blkif_protocol blk_protocol;
++	blkif_back_rings_t blk_rings;
++	struct vm_struct *blk_ring_area;
++	/* Back pointer to the backend_info. */
++	struct backend_info *be;
++	/* Private fields. */
++	spinlock_t       blk_ring_lock;
++	atomic_t         refcnt;
 +
-+void __init mem_init(void)
-+{
-+	long codesize, reservedpages, datasize, initsize;
-+	unsigned long pfn;
++	wait_queue_head_t   wq;
++	struct task_struct  *xenblkd;
++	unsigned int        waiting_reqs;
++	request_queue_t     *plug;
 +
-+	contiguous_bitmap = alloc_bootmem_low_pages(
-+		(end_pfn + 2*BITS_PER_LONG) >> 3);
-+	BUG_ON(!contiguous_bitmap);
-+	memset(contiguous_bitmap, 0, (end_pfn + 2*BITS_PER_LONG) >> 3);
++	/* statistics */
++	unsigned long       st_print;
++	int                 st_rd_req;
++	int                 st_wr_req;
++	int                 st_oo_req;
++	int                 st_rd_sect;
++	int                 st_wr_sect;
 +
-+	pci_iommu_alloc();
++	wait_queue_head_t waiting_to_free;
 +
-+	/* How many end-of-memory variables you have, grandma! */
-+	max_low_pfn = end_pfn;
-+	max_pfn = end_pfn;
-+	num_physpages = end_pfn;
-+	high_memory = (void *) __va(end_pfn * PAGE_SIZE);
++	grant_handle_t shmem_handle;
++	grant_ref_t    shmem_ref;
++	
++	int		dev_num;
++	uint64_t        sectors;
++} blkif_t;
 +
-+	/* clear the zero-page */
-+	memset(empty_zero_page, 0, PAGE_SIZE);
++blkif_t *tap_alloc_blkif(domid_t domid);
++void tap_blkif_free(blkif_t *blkif);
++void tap_blkif_kmem_cache_free(blkif_t *blkif);
++int tap_blkif_map(blkif_t *blkif, unsigned long shared_page, 
++		  unsigned int evtchn);
++void tap_blkif_unmap(blkif_t *blkif);
 +
-+	reservedpages = 0;
++#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
++#define blkif_put(_b)					\
++	do {						\
++		if (atomic_dec_and_test(&(_b)->refcnt))	\
++			wake_up(&(_b)->waiting_to_free);\
++	} while (0)
 +
-+	/* this will put all low memory onto the freelists */
-+#ifdef CONFIG_NUMA
-+	totalram_pages = numa_free_all_bootmem();
-+#else
-+	totalram_pages = free_all_bootmem();
-+#endif
-+	/* XEN: init and count pages outside initial allocation. */
-+	for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
-+		ClearPageReserved(pfn_to_page(pfn));
-+		init_page_count(pfn_to_page(pfn));
-+		totalram_pages++;
-+	}
-+	reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);
 +
-+	after_bootmem = 1;
++struct phys_req {
++	unsigned short       dev;
++	unsigned short       nr_sects;
++	struct block_device *bdev;
++	blkif_sector_t       sector_number;
++};
 +
-+	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
-+	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
-+	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
++void tap_blkif_interface_init(void);
 +
-+	/* Register memory areas for /proc/kcore */
-+	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); 
-+	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, 
-+		   VMALLOC_END-VMALLOC_START);
-+	kclist_add(&kcore_kernel, &_stext, _end - _stext);
-+	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
-+	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START, 
-+				 VSYSCALL_END - VSYSCALL_START);
++void tap_blkif_xenbus_init(void);
 +
-+	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
-+		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-+		end_pfn << (PAGE_SHIFT-10),
-+		codesize >> 10,
-+		reservedpages << (PAGE_SHIFT-10),
-+		datasize >> 10,
-+		initsize >> 10);
++irqreturn_t tap_blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++int tap_blkif_schedule(void *arg);
 +
-+#ifndef CONFIG_XEN
-+#ifdef CONFIG_SMP
-+	/*
-+	 * Sync boot_level4_pgt mappings with the init_level4_pgt
-+	 * except for the low identity mappings which are already zapped
-+	 * in init_level4_pgt. This sync-up is essential for AP's bringup
-+	 */
-+	memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
-+#endif
-+#endif
-+}
++int dom_to_devid(domid_t domid, int xenbus_id, blkif_t *blkif);
++void signal_tapdisk(int idx);
 +
-+void free_init_pages(char *what, unsigned long begin, unsigned long end)
-+{
-+	unsigned long addr;
++#endif /* __BLKIF__BACKEND__COMMON_H__ */
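
The blkif_get()/blkif_put() macros above implement a drain-on-free scheme:
the structure starts with one reference, each in-flight request takes
another, and teardown blocks on waiting_to_free until everything has
drained.  A sketch of the intended lifetime (editorial, following the code
in this header and in interface.c):

	blkif_t *blkif = tap_alloc_blkif(domid);  /* refcnt == 1         */

	blkif_get(blkif);             /* taken per in-flight request     */
	/* ... request completes ...                                     */
	blkif_put(blkif);             /* wakes waiting_to_free at zero   */

	tap_blkif_free(blkif);        /* drops the initial reference,
	                               * waits for requests to drain,
	                               * then unmaps the ring            */
	tap_blkif_kmem_cache_free(blkif); /* final free of the struct    */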
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/blktap/interface.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/blktap/interface.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,181 @@
++/******************************************************************************
++ * drivers/xen/blktap/interface.c
++ * 
++ * Block-device interface management.
++ * 
++ * Copyright (c) 2004, Keir Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
 +
-+	if (begin >= end)
-+		return;
++ */
 +
-+	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
-+	for (addr = begin; addr < end; addr += PAGE_SIZE) {
-+		ClearPageReserved(virt_to_page(addr));
-+		init_page_count(virt_to_page(addr));
-+		memset((void *)(addr & ~(PAGE_SIZE-1)),
-+		       POISON_FREE_INITMEM, PAGE_SIZE); 
-+		if (addr >= __START_KERNEL_map) {
-+			/* make_readonly() reports all kernel addresses. */
-+			__make_page_writable(__va(__pa(addr)));
-+			if (HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) {
-+				pgd_t *pgd = pgd_offset_k(addr);
-+				pud_t *pud = pud_offset(pgd, addr);
-+				pmd_t *pmd = pmd_offset(pud, addr);
-+				pte_t *pte = pte_offset_kernel(pmd, addr);
++#include "common.h"
++#include <xen/evtchn.h>
 +
-+				xen_l1_entry_update(pte, __pte(0)); /* fallback */
-+			}
-+		}
-+		free_page(addr);
-+		totalram_pages++;
-+	}
-+}
++static kmem_cache_t *blkif_cachep;
 +
-+void free_initmem(void)
++blkif_t *tap_alloc_blkif(domid_t domid)
 +{
-+	memset(__initdata_begin, POISON_FREE_INITDATA,
-+		__initdata_end - __initdata_begin);
-+	free_init_pages("unused kernel memory",
-+			(unsigned long)(&__init_begin),
-+			(unsigned long)(&__init_end));
-+}
++	blkif_t *blkif;
 +
-+#ifdef CONFIG_DEBUG_RODATA
++	blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
++	if (!blkif)
++		return ERR_PTR(-ENOMEM);
 +
-+void mark_rodata_ro(void)
++	memset(blkif, 0, sizeof(*blkif));
++	blkif->domid = domid;
++	spin_lock_init(&blkif->blk_ring_lock);
++	atomic_set(&blkif->refcnt, 1);
++	init_waitqueue_head(&blkif->wq);
++	blkif->st_print = jiffies;
++	init_waitqueue_head(&blkif->waiting_to_free);
++
++	return blkif;
++}
++
++static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
 +{
-+	unsigned long addr = (unsigned long)__start_rodata;
++	struct gnttab_map_grant_ref op;
 +
-+	for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
-+		change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);
++	gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
++			  GNTMAP_host_map, shared_page, blkif->domid);
 +
-+	printk ("Write protecting the kernel read-only data: %luk\n",
-+			(__end_rodata - __start_rodata) >> 10);
++	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++		BUG();
 +
-+	/*
-+	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
-+	 * We do this after the printk so that if something went wrong in the
-+	 * change, the printk gets out at least to give a better debug hint
-+	 * of who is the culprit.
-+	 */
-+	global_flush_tlb();
-+}
-+#endif
++	if (op.status) {
++		DPRINTK(" Grant table operation failure !\n");
++		return op.status;
++	}
 +
-+#ifdef CONFIG_BLK_DEV_INITRD
-+void free_initrd_mem(unsigned long start, unsigned long end)
-+{
-+	free_init_pages("initrd memory", start, end);
-+}
-+#endif
++	blkif->shmem_ref = shared_page;
++	blkif->shmem_handle = op.handle;
 +
-+void __init reserve_bootmem_generic(unsigned long phys, unsigned len) 
-+{ 
-+	/* Should check here against the e820 map to avoid double free */ 
-+#ifdef CONFIG_NUMA
-+	int nid = phys_to_nid(phys);
-+  	reserve_bootmem_node(NODE_DATA(nid), phys, len);
-+#else       		
-+	reserve_bootmem(phys, len);    
-+#endif
-+	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
-+		dma_reserve += len / PAGE_SIZE;
++	return 0;
 +}
 +
-+int kern_addr_valid(unsigned long addr) 
-+{ 
-+	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
-+       pgd_t *pgd;
-+       pud_t *pud;
-+       pmd_t *pmd;
-+       pte_t *pte;
++static void unmap_frontend_page(blkif_t *blkif)
++{
++	struct gnttab_unmap_grant_ref op;
 +
-+	if (above != 0 && above != -1UL)
-+		return 0; 
-+	
-+	pgd = pgd_offset_k(addr);
-+	if (pgd_none(*pgd))
-+		return 0;
++	gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
++			    GNTMAP_host_map, blkif->shmem_handle);
 +
-+	pud = pud_offset(pgd, addr);
-+	if (pud_none(*pud))
-+		return 0; 
++	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++		BUG();
++}
 +
-+	pmd = pmd_offset(pud, addr);
-+	if (pmd_none(*pmd))
-+		return 0;
-+	if (pmd_large(*pmd))
-+		return pfn_valid(pmd_pfn(*pmd));
++int tap_blkif_map(blkif_t *blkif, unsigned long shared_page, 
++		  unsigned int evtchn)
++{
++	int err;
 +
-+	pte = pte_offset_kernel(pmd, addr);
-+	if (pte_none(*pte))
++	/* Already connected through? */
++	if (blkif->irq)
 +		return 0;
-+	return pfn_valid(pte_pfn(*pte));
-+}
 +
-+#ifdef CONFIG_SYSCTL
-+#include <linux/sysctl.h>
++	if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
++		return -ENOMEM;
 +
-+extern int exception_trace, page_fault_trace;
++	err = map_frontend_page(blkif, shared_page);
++	if (err) {
++		free_vm_area(blkif->blk_ring_area);
++		return err;
++	}
 +
-+static ctl_table debug_table2[] = {
-+	{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
-+	  proc_dointvec },
-+	{ 0, }
-+}; 
++	switch (blkif->blk_protocol) {
++	case BLKIF_PROTOCOL_NATIVE:
++	{
++		blkif_sring_t *sring;
++		sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
++		BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
++		break;
++	}
++	case BLKIF_PROTOCOL_X86_32:
++	{
++		blkif_x86_32_sring_t *sring_x86_32;
++		sring_x86_32 = (blkif_x86_32_sring_t *)blkif->blk_ring_area->addr;
++		BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
++		break;
++	}
++	case BLKIF_PROTOCOL_X86_64:
++	{
++		blkif_x86_64_sring_t *sring_x86_64;
++		sring_x86_64 = (blkif_x86_64_sring_t *)blkif->blk_ring_area->addr;
++		BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
++		break;
++	}
++	default:
++		BUG();
++	}
 +
-+static ctl_table debug_root_table2[] = { 
-+	{ .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555, 
-+	   .child = debug_table2 }, 
-+	{ 0 }, 
-+}; 
++	err = bind_interdomain_evtchn_to_irqhandler(
++		blkif->domid, evtchn, tap_blkif_be_int,
++		0, "blkif-backend", blkif);
++	if (err < 0) {
++		unmap_frontend_page(blkif);
++		free_vm_area(blkif->blk_ring_area);
++		blkif->blk_rings.common.sring = NULL;
++		return err;
++	}
++	blkif->irq = err;
 +
-+static __init int x8664_sysctl_init(void)
-+{ 
-+	register_sysctl_table(debug_root_table2, 1);
 +	return 0;
 +}
-+__initcall(x8664_sysctl_init);
-+#endif
-+
-+/* A pseudo VMAs to allow ptrace access for the vsyscall page.   This only
-+   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
-+   not need special handling anymore. */
 +
-+static struct vm_area_struct gate_vma = {
-+	.vm_start = VSYSCALL_START,
-+	.vm_end = VSYSCALL_END,
-+	.vm_page_prot = PAGE_READONLY
-+};
-+
-+struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
++void tap_blkif_unmap(blkif_t *blkif)
 +{
-+#ifdef CONFIG_IA32_EMULATION
-+	if (test_tsk_thread_flag(tsk, TIF_IA32))
-+		return NULL;
-+#endif
-+	return &gate_vma;
++	if (blkif->irq) {
++		unbind_from_irqhandler(blkif->irq, blkif);
++		blkif->irq = 0;
++	}
++	if (blkif->blk_rings.common.sring) {
++		unmap_frontend_page(blkif);
++		free_vm_area(blkif->blk_ring_area);
++		blkif->blk_rings.common.sring = NULL;
++	}
 +}
 +
-+int in_gate_area(struct task_struct *task, unsigned long addr)
++void tap_blkif_free(blkif_t *blkif)
 +{
-+	struct vm_area_struct *vma = get_gate_vma(task);
-+	if (!vma)
-+		return 0;
-+	return (addr >= vma->vm_start) && (addr < vma->vm_end);
++	atomic_dec(&blkif->refcnt);
++	wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
++	atomic_inc(&blkif->refcnt);
++
++	tap_blkif_unmap(blkif);
 +}
 +
-+/* Use this when you have no reliable task/vma, typically from interrupt
-+ * context.  It is less reliable than using the task's vma and may give
-+ * false positives.
-+ */
-+int in_gate_area_no_task(unsigned long addr)
++void tap_blkif_kmem_cache_free(blkif_t *blkif)
 +{
-+	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
++	if (!atomic_dec_and_test(&blkif->refcnt))
++		BUG();
++	kmem_cache_free(blkif_cachep, blkif);
 +}
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/mm/Makefile tmp-linux-2.6-xen.patch/arch/x86_64/mm/Makefile
---- pristine-linux-2.6.18.2/arch/x86_64/mm/Makefile	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/mm/Makefile	2007-07-30 16:35:11.000000000 +0200
-@@ -9,3 +9,13 @@ obj-$(CONFIG_K8_NUMA) += k8topology.o
- obj-$(CONFIG_ACPI_NUMA) += srat.o
- 
- hugetlbpage-y = ../../i386/mm/hugetlbpage.o
-+
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
-+
-+ioremap-y	+= ../../i386/mm/ioremap-xen.o
-+hypervisor-y	+= ../../i386/mm/hypervisor.o
-+obj-y		+= hypervisor.o
 +
-+obj-y := $(call cherrypickxen, $(obj-y))
-+endif
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/mm/pageattr-xen.c tmp-linux-2.6-xen.patch/arch/x86_64/mm/pageattr-xen.c
---- pristine-linux-2.6.18.2/arch/x86_64/mm/pageattr-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/mm/pageattr-xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,433 @@
-+/* 
-+ * Copyright 2002 Andi Kleen, SuSE Labs. 
-+ * Thanks to Ben LaHaise for precious feedback.
-+ */ 
++void __init tap_blkif_interface_init(void)
++{
++	blkif_cachep = kmem_cache_create("blktapif_cache", sizeof(blkif_t), 
++					 0, 0, NULL, NULL);
++}
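
tap_blkif_map() above is the backend half of ring setup, and the ordering
matters: the shared page is grant-mapped and BACK_RING_INIT()ed before the
event channel is bound, so a notification can never find an uninitialised
ring.  In outline, for a caller such as the xenbus connect path (ring_ref
and evtchn are read from xenstore by code not shown in this hunk):

	int err = tap_blkif_map(blkif, ring_ref, evtchn);
	/* on success:
	 *   blkif->blk_ring_area  - grant-mapped kernel VA of the sring
	 *   blkif->blk_rings.*    - initialised per blk_protocol
	 *   blkif->irq            - routed to tap_blkif_be_int()
	 * tap_blkif_unmap() is the mirror image: unbind the irq first,
	 * then drop the grant mapping and free the vm area.
	 */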
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/blktap/xenbus.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/blktap/xenbus.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,479 @@
++/* drivers/xen/blktap/xenbus.c
++ *
++ * Xenbus code for blktap
++ *
++ * Copyright (c) 2004-2005, Andrew Warfield and Julian Chesterfield
++ *
++ * Based on the blkback xenbus code:
++ *
++ * Copyright (C) 2005 Rusty Russell <rusty at rustcorp.com.au>
++ * Copyright (C) 2005 XenSource Ltd
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
 +
-+#include <linux/mm.h>
-+#include <linux/sched.h>
-+#include <linux/highmem.h>
++#include <stdarg.h>
 +#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <asm/uaccess.h>
-+#include <asm/processor.h>
-+#include <asm/tlbflush.h>
-+#include <asm/io.h>
-+
-+#ifdef CONFIG_XEN
-+#include <asm/pgalloc.h>
-+#include <asm/mmu_context.h>
++#include <linux/kthread.h>
++#include <xen/xenbus.h>
++#include "common.h"
 +
-+LIST_HEAD(mm_unpinned);
-+DEFINE_SPINLOCK(mm_unpinned_lock);
 +
-+static inline void mm_walk_set_prot(void *pt, pgprot_t flags)
++struct backend_info
 +{
-+	struct page *page = virt_to_page(pt);
-+	unsigned long pfn = page_to_pfn(page);
-+	int rc;
++	struct xenbus_device *dev;
++	blkif_t *blkif;
++	struct xenbus_watch backend_watch;
++	int xenbus_id;
++	int group_added;
++};
 +
-+	rc = HYPERVISOR_update_va_mapping(
-+		(unsigned long)__va(pfn << PAGE_SHIFT),
-+		pfn_pte(pfn, flags), 0);
-+	if (rc)
-+		BUG();
-+}
 +
-+static void mm_walk(struct mm_struct *mm, pgprot_t flags)
++static void connect(struct backend_info *);
++static int connect_ring(struct backend_info *);
++static int blktap_remove(struct xenbus_device *dev);
++static int blktap_probe(struct xenbus_device *dev,
++			 const struct xenbus_device_id *id);
++static void tap_backend_changed(struct xenbus_watch *, const char **,
++			    unsigned int);
++static void tap_frontend_changed(struct xenbus_device *dev,
++			     enum xenbus_state frontend_state);
++
++static int strsep_len(const char *str, char c, unsigned int len)
 +{
-+	pgd_t       *pgd;
-+	pud_t       *pud;
-+	pmd_t       *pmd;
-+	pte_t       *pte;
-+	int          g,u,m;
++	unsigned int i;
 +
-+	pgd = mm->pgd;
-+	/*
-+	 * Cannot iterate up to USER_PTRS_PER_PGD as these pagetables may not
-+	 * be the 'current' task's pagetables (e.g., current may be 32-bit,
-+	 * but the pagetables may be for a 64-bit task).
-+	 * Subtracting 1 from TASK_SIZE64 means the loop limit is correct
-+	 * regardless of whether TASK_SIZE64 is a multiple of PGDIR_SIZE.
-+	 */
-+	for (g = 0; g <= ((TASK_SIZE64-1) / PGDIR_SIZE); g++, pgd++) {
-+		if (pgd_none(*pgd))
-+			continue;
-+		pud = pud_offset(pgd, 0);
-+		if (PTRS_PER_PUD > 1) /* not folded */ 
-+			mm_walk_set_prot(pud,flags);
-+		for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
-+			if (pud_none(*pud))
-+				continue;
-+			pmd = pmd_offset(pud, 0);
-+			if (PTRS_PER_PMD > 1) /* not folded */ 
-+				mm_walk_set_prot(pmd,flags);
-+			for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
-+				if (pmd_none(*pmd))
-+					continue;
-+				pte = pte_offset_kernel(pmd,0);
-+				mm_walk_set_prot(pte,flags);
-+			}
-+		}
-+	}
++	for (i = 0; str[i]; i++)
++		if (str[i] == c) {
++			if (len == 0)
++				return i;
++			len--;
++		}
++	return (len == 0) ? i : -ERANGE;
 +}
 +
-+void mm_pin(struct mm_struct *mm)
++static long get_id(const char *str)
 +{
-+	if (xen_feature(XENFEAT_writable_page_tables))
-+		return;
++	int len, end;
++	const char *ptr;
++	char *tptr, num[10];
++
++	len = strsep_len(str, '/', 2);
++	end = strlen(str);
++	if ((len < 0) || (end < 0))
++		return -1;
++
++	ptr = str + len + 1;
++	strncpy(num, ptr, end - len);
++	tptr = num + (end - (len + 1));
++	*tptr = '\0';
++	DPRINTK("Get_id called for %s (%s)\n", str, num);
++
++	return simple_strtol(num, NULL, 10);
++}
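
/*
 * Worked example (editorial): for a nodename of the assumed form
 * "backend/tap/<frontend-domid>/<xenbus-id>", get_id() returns everything
 * after the third '/', parsed as a number:
 *
 *	get_id("backend/tap/1/2049") == 2049
 */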
 +
-+	spin_lock(&mm->page_table_lock);
++static int blktap_name(blkif_t *blkif, char *buf)
++{
++	char *devpath, *devname;
++	struct xenbus_device *dev = blkif->be->dev;
 +
-+	mm_walk(mm, PAGE_KERNEL_RO);
-+	if (HYPERVISOR_update_va_mapping(
-+		(unsigned long)mm->pgd,
-+		pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL_RO),
-+		UVMF_TLB_FLUSH))
-+		BUG();
-+	if (HYPERVISOR_update_va_mapping(
-+		(unsigned long)__user_pgd(mm->pgd),
-+		pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT,
-+			PAGE_KERNEL_RO),
-+		UVMF_TLB_FLUSH))
-+		BUG();
-+	xen_pgd_pin(__pa(mm->pgd)); /* kernel */
-+	xen_pgd_pin(__pa(__user_pgd(mm->pgd))); /* user */
-+	mm->context.pinned = 1;
-+	spin_lock(&mm_unpinned_lock);
-+	list_del(&mm->context.unpinned);
-+	spin_unlock(&mm_unpinned_lock);
++	devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
++	if (IS_ERR(devpath)) 
++		return PTR_ERR(devpath);
++	
++	if ((devname = strstr(devpath, "/dev/")) != NULL)
++		devname += strlen("/dev/");
++	else
++		devname  = devpath;
 +
-+	spin_unlock(&mm->page_table_lock);
++	snprintf(buf, TASK_COMM_LEN, "blktap.%d.%s", blkif->domid, devname);
++	kfree(devpath);
++	
++	return 0;
 +}
 +
-+void mm_unpin(struct mm_struct *mm)
-+{
-+	if (xen_feature(XENFEAT_writable_page_tables))
-+		return;
++/****************************************************************
++ *  sysfs interface for I/O requests of blktap device
++ */
 +
-+	spin_lock(&mm->page_table_lock);
++#define VBD_SHOW(name, format, args...)					\
++	static ssize_t show_##name(struct device *_dev,			\
++				   struct device_attribute *attr,	\
++				   char *buf)				\
++	{								\
++		struct xenbus_device *dev = to_xenbus_device(_dev);	\
++		struct backend_info *be = dev->dev.driver_data;		\
++									\
++		return sprintf(buf, format, ##args);			\
++	}								\
++	static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
 +
-+	xen_pgd_unpin(__pa(mm->pgd));
-+	xen_pgd_unpin(__pa(__user_pgd(mm->pgd)));
-+	if (HYPERVISOR_update_va_mapping(
-+		(unsigned long)mm->pgd,
-+		pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL), 0))
-+		BUG();
-+	if (HYPERVISOR_update_va_mapping(
-+		(unsigned long)__user_pgd(mm->pgd),
-+		pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT,
-+			PAGE_KERNEL), 0))
-+		BUG();
-+	mm_walk(mm, PAGE_KERNEL);
-+	xen_tlb_flush();
-+	mm->context.pinned = 0;
-+	spin_lock(&mm_unpinned_lock);
-+	list_add(&mm->context.unpinned, &mm_unpinned);
-+	spin_unlock(&mm_unpinned_lock);
++VBD_SHOW(oo_req,  "%d\n", be->blkif->st_oo_req);
++VBD_SHOW(rd_req,  "%d\n", be->blkif->st_rd_req);
++VBD_SHOW(wr_req,  "%d\n", be->blkif->st_wr_req);
++VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
++VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);
 +
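
/*
 * Editorial note: expanding the first instantiation shows what VBD_SHOW()
 * generates; VBD_SHOW(oo_req, "%d\n", be->blkif->st_oo_req) becomes:
 */
static ssize_t show_oo_req(struct device *_dev,
			   struct device_attribute *attr, char *buf)
{
	struct xenbus_device *dev = to_xenbus_device(_dev);
	struct backend_info *be = dev->dev.driver_data;

	return sprintf(buf, "%d\n", be->blkif->st_oo_req);
}
static DEVICE_ATTR(oo_req, S_IRUGO, show_oo_req, NULL);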
-+	spin_unlock(&mm->page_table_lock);
-+}
++static struct attribute *tapstat_attrs[] = {
++	&dev_attr_oo_req.attr,
++	&dev_attr_rd_req.attr,
++	&dev_attr_wr_req.attr,
++	&dev_attr_rd_sect.attr,
++	&dev_attr_wr_sect.attr,
++	NULL
++};
 +
-+void mm_pin_all(void)
-+{
-+	if (xen_feature(XENFEAT_writable_page_tables))
-+		return;
++static struct attribute_group tapstat_group = {
++	.name = "statistics",
++	.attrs = tapstat_attrs,
++};
 +
-+	/*
-+	 * Allow uninterrupted access to the mm_unpinned list. We don't
-+	 * actually take the mm_unpinned_lock as it is taken inside mm_pin().
-+	 * All other CPUs must be at a safe point (e.g., in stop_machine
-+	 * or offlined entirely).
-+	 */
-+	preempt_disable();
-+	while (!list_empty(&mm_unpinned))	
-+		mm_pin(list_entry(mm_unpinned.next, struct mm_struct,
-+				  context.unpinned));
-+	preempt_enable();
++int xentap_sysfs_addif(struct xenbus_device *dev)
++{
++	int err;
++	struct backend_info *be = dev->dev.driver_data;
++	err = sysfs_create_group(&dev->dev.kobj, &tapstat_group);
++	if (!err)
++		be->group_added = 1;
++	return err;
 +}
 +
-+void _arch_dup_mmap(struct mm_struct *mm)
++void xentap_sysfs_delif(struct xenbus_device *dev)
 +{
-+	if (!mm->context.pinned)
-+		mm_pin(mm);
++	struct backend_info *be = dev->dev.driver_data;
++	sysfs_remove_group(&dev->dev.kobj, &tapstat_group);
++	be->group_added = 0;
 +}
 +
-+void _arch_exit_mmap(struct mm_struct *mm)
++static int blktap_remove(struct xenbus_device *dev)
 +{
-+	struct task_struct *tsk = current;
++	struct backend_info *be = dev->dev.driver_data;
 +
-+	task_lock(tsk);
++	if (be->group_added)
++		xentap_sysfs_delif(be->dev);
++	if (be->backend_watch.node) {
++		unregister_xenbus_watch(&be->backend_watch);
++		kfree(be->backend_watch.node);
++		be->backend_watch.node = NULL;
++	}
++	if (be->blkif) {
++		if (be->blkif->xenblkd)
++			kthread_stop(be->blkif->xenblkd);
++		signal_tapdisk(be->blkif->dev_num);
++		tap_blkif_free(be->blkif);
++		tap_blkif_kmem_cache_free(be->blkif);
++		be->blkif = NULL;
++	}
++	kfree(be);
++	dev->dev.driver_data = NULL;
++	return 0;
++}
 +
-+	/*
-+	 * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
-+	 * *much* faster this way, as no tlb flushes means bigger wrpt batches.
-+	 */
-+	if (tsk->active_mm == mm) {
-+		tsk->active_mm = &init_mm;
-+		atomic_inc(&init_mm.mm_count);
++static void tap_update_blkif_status(blkif_t *blkif)
++{ 
++	int err;
++	char name[TASK_COMM_LEN];
 +
-+		switch_mm(mm, &init_mm, tsk);
++	/* Not ready to connect? */
++	if (!blkif->irq || !blkif->sectors) {
++		return;
++	}
 +
-+		atomic_dec(&mm->mm_count);
-+		BUG_ON(atomic_read(&mm->mm_count) == 0);
++	/* Already connected? */
++	if (blkif->be->dev->state == XenbusStateConnected)
++		return;
++
++	/* Attempt to connect: exit if we fail to. */
++	connect(blkif->be);
++	if (blkif->be->dev->state != XenbusStateConnected)
++		return;
++
++	err = blktap_name(blkif, name);
++	if (err) {
++		xenbus_dev_error(blkif->be->dev, err, "get blktap dev name");
++		return;
 +	}
 +
-+	task_unlock(tsk);
++	if (!blkif->be->group_added) {
++		err = xentap_sysfs_addif(blkif->be->dev);
++		if (err) {
++			xenbus_dev_fatal(blkif->be->dev, err, 
++					 "creating sysfs entries");
++			return;
++		}
++	}
 +
-+	if ( mm->context.pinned && (atomic_read(&mm->mm_count) == 1) &&
-+	     !mm->context.has_foreign_mappings )
-+		mm_unpin(mm);
++	blkif->xenblkd = kthread_run(tap_blkif_schedule, blkif, name);
++	if (IS_ERR(blkif->xenblkd)) {
++		err = PTR_ERR(blkif->xenblkd);
++		blkif->xenblkd = NULL;
++		xenbus_dev_fatal(blkif->be->dev, err, "start xenblkd");
++		WPRINTK("Error starting thread\n");
++	}
 +}
 +
-+struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
++/**
++ * Entry point to this code when a new device is created.  Allocate
++ * the basic structures, and watch the store waiting for the
++ * user-space program to tell us the physical device info.  Switch to
++ * InitWait.
++ */
++static int blktap_probe(struct xenbus_device *dev,
++			 const struct xenbus_device_id *id)
 +{
-+	struct page *pte;
-+
-+	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
-+	if (pte) {
-+		SetPageForeign(pte, pte_free);
-+		init_page_count(pte);
++	int err;
++	struct backend_info *be = kzalloc(sizeof(struct backend_info),
++					  GFP_KERNEL);
++	if (!be) {
++		xenbus_dev_fatal(dev, -ENOMEM,
++				 "allocating backend structure");
++		return -ENOMEM;
 +	}
-+	return pte;
-+}
 +
-+void pte_free(struct page *pte)
-+{
-+	unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);
++	be->dev = dev;
++	dev->dev.driver_data = be;
++	be->xenbus_id = get_id(dev->nodename);
 +
-+	if (!pte_write(*virt_to_ptep(va)))
-+		if (HYPERVISOR_update_va_mapping(
-+			va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0))
-+			BUG();
++	be->blkif = tap_alloc_blkif(dev->otherend_id);
++	if (IS_ERR(be->blkif)) {
++		err = PTR_ERR(be->blkif);
++		be->blkif = NULL;
++		xenbus_dev_fatal(dev, err, "creating block interface");
++		goto fail;
++	}
 +
-+	ClearPageForeign(pte);
-+	init_page_count(pte);
++	/* setup back pointer */
++	be->blkif->be = be;
++	be->blkif->sectors = 0;
 +
-+	__free_page(pte);
++	/* set a watch on disk info, waiting for userspace to update details */
++	err = xenbus_watch_path2(dev, dev->nodename, "info",
++				 &be->backend_watch, tap_backend_changed);
++	if (err)
++		goto fail;
++	
++	err = xenbus_switch_state(dev, XenbusStateInitWait);
++	if (err)
++		goto fail;
++	return 0;
++
++fail:
++	DPRINTK("blktap probe failed\n");
++	blktap_remove(dev);
++	return err;
 +}
-+#endif	/* CONFIG_XEN */
 +
-+static inline pte_t *lookup_address(unsigned long address) 
-+{ 
-+	pgd_t *pgd = pgd_offset_k(address);
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
-+	if (pgd_none(*pgd))
-+		return NULL;
-+	pud = pud_offset(pgd, address);
-+	if (!pud_present(*pud))
-+		return NULL; 
-+	pmd = pmd_offset(pud, address);
-+	if (!pmd_present(*pmd))
-+		return NULL; 
-+	if (pmd_large(*pmd))
-+		return (pte_t *)pmd;
-+	pte = pte_offset_kernel(pmd, address);
-+	if (pte && !pte_present(*pte))
-+		pte = NULL; 
-+	return pte;
-+} 
 +
-+static struct page *split_large_page(unsigned long address, pgprot_t prot,
-+				     pgprot_t ref_prot)
-+{ 
-+	int i; 
-+	unsigned long addr;
-+	struct page *base = alloc_pages(GFP_KERNEL, 0);
-+	pte_t *pbase;
-+	if (!base) 
-+		return NULL;
-+	/*
-+	 * page_private is used to track the number of entries in
-+	 * the page table page have non standard attributes.
++/**
++ * Callback received when the user space code has placed the device
++ * information in xenstore. 
++ */
++static void tap_backend_changed(struct xenbus_watch *watch,
++			    const char **vec, unsigned int len)
++{
++	int err;
++	unsigned long info;
++	struct backend_info *be
++		= container_of(watch, struct backend_info, backend_watch);
++	struct xenbus_device *dev = be->dev;
++	
++	/*
++	 * Check to see whether userspace code has opened the image
++	 * and written sector and disk info to xenstore
 +	 */
-+	SetPagePrivate(base);
-+	page_private(base) = 0;
-+
-+	address = __pa(address);
-+	addr = address & LARGE_PAGE_MASK; 
-+	pbase = (pte_t *)page_address(base);
-+	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
-+		pbase[i] = pfn_pte(addr >> PAGE_SHIFT, 
-+				   addr == address ? prot : ref_prot);
++	err = xenbus_gather(XBT_NIL, dev->nodename, "info", "%lu", &info, 
++			    NULL);
++	if (XENBUS_EXIST_ERR(err))
++		return;
++	if (err) {
++		xenbus_dev_error(dev, err, "getting info");
++		return;
 +	}
-+	return base;
-+} 
-+
-+
-+static void flush_kernel_map(void *address) 
-+{
-+	if (0 && address && cpu_has_clflush) {
-+		/* is this worth it? */ 
-+		int i;
-+		for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) 
-+			asm volatile("clflush (%0)" :: "r" (address + i)); 
-+	} else
-+		asm volatile("wbinvd":::"memory"); 
-+	if (address)
-+		__flush_tlb_one(address);
-+	else
-+		__flush_tlb_all();
-+}
 +
++	DPRINTK("Userspace update on disk info, %lu\n", info);
 +
-+static inline void flush_map(unsigned long address)
-+{	
-+	on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
-+}
++	err = xenbus_gather(XBT_NIL, dev->nodename, "sectors", "%llu", 
++			    &be->blkif->sectors, NULL);
 +
-+static struct page *deferred_pages; /* protected by init_mm.mmap_sem */
++	/* Associate tap dev with domid */
++	be->blkif->dev_num = dom_to_devid(be->blkif->domid, be->xenbus_id, 
++					  be->blkif);
++	DPRINTK("Thread started for domid [%d], connecting disk\n", 
++		be->blkif->dev_num);
 +
-+static inline void save_page(struct page *fpage)
-+{
-+	fpage->lru.next = (struct list_head *)deferred_pages;
-+	deferred_pages = fpage;
++	tap_update_blkif_status(be->blkif);
 +}
 +
-+/* 
-+ * No more special protections in this 2/4MB area - revert to a
-+ * large page again. 
++/**
++ * Callback received when the frontend's state changes.
 + */
-+static void revert_page(unsigned long address, pgprot_t ref_prot)
++static void tap_frontend_changed(struct xenbus_device *dev,
++			     enum xenbus_state frontend_state)
 +{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t large_pte;
++	struct backend_info *be = dev->dev.driver_data;
++	int err;
 +
-+	pgd = pgd_offset_k(address);
-+	BUG_ON(pgd_none(*pgd));
-+	pud = pud_offset(pgd,address);
-+	BUG_ON(pud_none(*pud));
-+	pmd = pmd_offset(pud, address);
-+	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
-+	pgprot_val(ref_prot) |= _PAGE_PSE;
-+	large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
-+	set_pte((pte_t *)pmd, large_pte);
-+}      
++	DPRINTK("\n");
 +
-+static int
-+__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
-+				   pgprot_t ref_prot)
-+{ 
-+	pte_t *kpte; 
-+	struct page *kpte_page;
-+	unsigned kpte_flags;
-+	pgprot_t ref_prot2;
-+	kpte = lookup_address(address);
-+	if (!kpte) return 0;
-+	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
-+	kpte_flags = pte_val(*kpte); 
-+	if (pgprot_val(prot) != pgprot_val(ref_prot)) { 
-+		if ((kpte_flags & _PAGE_PSE) == 0) { 
-+			set_pte(kpte, pfn_pte(pfn, prot));
-+		} else {
-+ 			/*
-+			 * split_large_page will take the reference for this
-+			 * change_page_attr on the split page.
-+ 			 */
++	switch (frontend_state) {
++	case XenbusStateInitialising:
++		if (dev->state == XenbusStateClosed) {
++			printk(KERN_INFO "%s: %s: prepare for reconnect\n",
++			       __FUNCTION__, dev->nodename);
++			xenbus_switch_state(dev, XenbusStateInitWait);
++		}
++		break;
 +
-+			struct page *split;
-+			ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address))) & ~(1<<_PAGE_BIT_PSE));
++	case XenbusStateInitialised:
++	case XenbusStateConnected:
++		/* Ensure we connect even when two watches fire in
++		   close succession and we miss the intermediate value
++		   of frontend_state. */
++		if (dev->state == XenbusStateConnected)
++			break;
 +
-+			split = split_large_page(address, prot, ref_prot2);
-+			if (!split)
-+				return -ENOMEM;
-+			set_pte(kpte,mk_pte(split, ref_prot2));
-+			kpte_page = split;
-+		}	
-+		page_private(kpte_page)++;
-+	} else if ((kpte_flags & _PAGE_PSE) == 0) { 
-+		set_pte(kpte, pfn_pte(pfn, ref_prot));
-+		BUG_ON(page_private(kpte_page) == 0);
-+		page_private(kpte_page)--;
-+	} else
-+		BUG();
++		err = connect_ring(be);
++		if (err)
++			break;
++		tap_update_blkif_status(be->blkif);
++		break;
 +
-+	/* on x86-64 the direct mapping set at boot is not using 4k pages */
-+	/*
-+	 * ..., but the XEN guest kernels (currently) do:
-+	 * If the pte was reserved, it means it was created at boot
-+	 * time (not via split_large_page) and in turn we must not
-+	 * replace it with a large page.
-+	 */
-+#ifndef CONFIG_XEN
-+ 	BUG_ON(PageReserved(kpte_page));
-+#else
-+	if (PageReserved(kpte_page))
-+		return 0;
-+#endif
++	case XenbusStateClosing:
++		if (be->blkif->xenblkd) {
++			kthread_stop(be->blkif->xenblkd);
++			be->blkif->xenblkd = NULL;
++		}
++		tap_blkif_free(be->blkif);
++		xenbus_switch_state(dev, XenbusStateClosing);
++		break;
 +
-+	if (page_private(kpte_page) == 0) {
-+		save_page(kpte_page);
-+		revert_page(address, ref_prot);
++	case XenbusStateClosed:
++		xenbus_switch_state(dev, XenbusStateClosed);
++		if (xenbus_dev_is_online(dev))
++			break;
++		/* fall through if not online */
++	case XenbusStateUnknown:
++		device_unregister(&dev->dev);
++		break;
++
++	default:
++		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
++				 frontend_state);
++		break;
 +	}
-+	return 0;
-+} 
++}
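++
++/*
++ * Rough sketch of the resulting handshake in the common case
++ * (illustrative summary of the switch above):
++ *
++ *	frontend Initialising           backend (re)enters InitWait
++ *	frontend Initialised/Connected  connect_ring(), then Connected
++ *	frontend Closing                stop xenblkd, then Closing
++ *	frontend Closed                 Closed; the device is unregistered
++ *	                                unless it is still marked online
++ */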
 +
-+/*
-+ * Change the page attributes of an page in the linear mapping.
-+ *
-+ * This should be used when a page is mapped with a different caching policy
-+ * than write-back somewhere - some CPUs do not like it when mappings with
-+ * different caching policies exist. This changes the page attributes of the
-+ * in kernel linear mapping too.
-+ * 
-+ * The caller needs to ensure that there are no conflicting mappings elsewhere.
-+ * This function only deals with the kernel linear map.
-+ * 
-+ * Caller must call global_flush_tlb() after this.
++
++/**
++ * Switch to Connected state.
 + */
-+int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
++static void connect(struct backend_info *be)
 +{
-+	int err = 0; 
-+	int i; 
++	int err;
 +
-+	down_write(&init_mm.mmap_sem);
-+	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
-+		unsigned long pfn = __pa(address) >> PAGE_SHIFT;
++	struct xenbus_device *dev = be->dev;
 +
-+		err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
-+		if (err) 
-+			break; 
-+		/* Handle kernel mapping too which aliases part of the
-+		 * lowmem */
-+		if (__pa(address) < KERNEL_TEXT_SIZE) {
-+			unsigned long addr2;
-+			pgprot_t prot2 = prot;
-+			addr2 = __START_KERNEL_map + __pa(address);
-+ 			pgprot_val(prot2) &= ~_PAGE_NX;
-+			err = __change_page_attr(addr2, pfn, prot2, PAGE_KERNEL_EXEC);
-+		} 
-+	} 	
-+	up_write(&init_mm.mmap_sem); 
-+	return err;
-+}
++	err = xenbus_switch_state(dev, XenbusStateConnected);
++	if (err)
++		xenbus_dev_fatal(dev, err, "%s: switching to Connected state",
++				 dev->nodename);
 +
-+/* Don't call this for MMIO areas that may not have a mem_map entry */
-+int change_page_attr(struct page *page, int numpages, pgprot_t prot)
-+{
-+	unsigned long addr = (unsigned long)page_address(page);
-+	return change_page_attr_addr(addr, numpages, prot);
++	return;
 +}
 +
-+void global_flush_tlb(void)
-+{ 
-+	struct page *dpage;
-+
-+	down_read(&init_mm.mmap_sem);
-+	dpage = xchg(&deferred_pages, NULL);
-+	up_read(&init_mm.mmap_sem);
-+
-+	flush_map((dpage && !dpage->lru.next) ? (unsigned long)page_address(dpage) : 0);
-+	while (dpage) {
-+		struct page *tmp = dpage;
-+		dpage = (struct page *)dpage->lru.next;
-+		ClearPagePrivate(tmp);
-+		__free_page(tmp);
-+	} 
-+} 
-+
-+EXPORT_SYMBOL(change_page_attr);
-+EXPORT_SYMBOL(global_flush_tlb);
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/oprofile/Makefile tmp-linux-2.6-xen.patch/arch/x86_64/oprofile/Makefile
---- pristine-linux-2.6.18.2/arch/x86_64/oprofile/Makefile	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/oprofile/Makefile	2007-07-30 16:35:11.000000000 +0200
-@@ -11,9 +11,15 @@ DRIVER_OBJS = $(addprefix ../../../drive
- 	oprofilefs.o oprofile_stats.o \
- 	timer_int.o )
- 
-+ifdef CONFIG_XEN
-+XENOPROF_COMMON_OBJS = $(addprefix ../../../drivers/xen/xenoprof/, \
-+			 xenoprofile.o)
-+OPROFILE-y := xenoprof.o
-+else
- OPROFILE-y := init.o backtrace.o
- OPROFILE-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_athlon.o op_model_p4.o \
- 				     op_model_ppro.o
- OPROFILE-$(CONFIG_X86_IO_APIC)    += nmi_timer_int.o 
--
--oprofile-y = $(DRIVER_OBJS) $(addprefix ../../i386/oprofile/, $(OPROFILE-y))
-+endif
-+oprofile-y = $(DRIVER_OBJS) $(XENOPROF_COMMON_OBJS) \
-+	     $(addprefix ../../i386/oprofile/, $(OPROFILE-y))
-diff -Nurp pristine-linux-2.6.18.2/arch/x86_64/pci/Makefile tmp-linux-2.6-xen.patch/arch/x86_64/pci/Makefile
---- pristine-linux-2.6.18.2/arch/x86_64/pci/Makefile	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/arch/x86_64/pci/Makefile	2007-07-30 16:35:11.000000000 +0200
-@@ -15,11 +15,23 @@ obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o
- 
- obj-$(CONFIG_NUMA)	+= k8-bus.o
- 
-+# pcifront should be after mmconfig.o and direct.o as it should only
-+# take over if direct access to the PCI bus is unavailable
-+obj-$(CONFIG_XEN_PCIDEV_FRONTEND)	+= pcifront.o
-+
- direct-y += ../../i386/pci/direct.o
- acpi-y   += ../../i386/pci/acpi.o
-+pcifront-y += ../../i386/pci/pcifront.o
- legacy-y += ../../i386/pci/legacy.o
- irq-y    += ../../i386/pci/irq.o
- common-y += ../../i386/pci/common.o
- fixup-y  += ../../i386/pci/fixup.o
- i386-y  += ../../i386/pci/i386.o
- init-y += ../../i386/pci/init.o
-+
-+ifdef CONFIG_XEN
-+irq-y		:= ../../i386/pci/irq-xen.o
-+include $(srctree)/scripts/Makefile.xen
 +
-+obj-y := $(call cherrypickxen, $(obj-y))
-+endif
-diff -Nurp pristine-linux-2.6.18.2/drivers/acpi/Kconfig tmp-linux-2.6-xen.patch/drivers/acpi/Kconfig
---- pristine-linux-2.6.18.2/drivers/acpi/Kconfig	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/acpi/Kconfig	2007-07-30 16:35:11.000000000 +0200
-@@ -45,7 +45,7 @@ if ACPI
- 
- config ACPI_SLEEP
- 	bool "Sleep States"
--	depends on X86 && (!SMP || SUSPEND_SMP)
-+	depends on X86 && (!SMP || SUSPEND_SMP) && !XEN
- 	depends on PM
- 	default y
- 	---help---
-@@ -305,6 +305,7 @@ config ACPI_SYSTEM
- config X86_PM_TIMER
- 	bool "Power Management Timer Support" if EMBEDDED
- 	depends on X86
-+	depends on !XEN
- 	default y
- 	help
- 	  The Power Management Timer is available on all ACPI-capable,
-diff -Nurp pristine-linux-2.6.18.2/drivers/char/mem.c tmp-linux-2.6-xen.patch/drivers/char/mem.c
---- pristine-linux-2.6.18.2/drivers/char/mem.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/char/mem.c	2007-07-30 16:35:11.000000000 +0200
-@@ -101,6 +101,7 @@ static inline int valid_mmap_phys_addr_r
- }
- #endif
- 
-+#ifndef ARCH_HAS_DEV_MEM
- /*
-  * This funcion reads the *physical* memory. The f_pos points directly to the 
-  * memory location. 
-@@ -223,6 +224,7 @@ static ssize_t write_mem(struct file * f
- 	*ppos += written;
- 	return written;
- }
-+#endif
- 
- #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
- static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
-@@ -776,6 +778,7 @@ static int open_port(struct inode * inod
- #define open_kmem	open_mem
- #define open_oldmem	open_mem
- 
-+#ifndef ARCH_HAS_DEV_MEM
- static const struct file_operations mem_fops = {
- 	.llseek		= memory_lseek,
- 	.read		= read_mem,
-@@ -783,6 +786,9 @@ static const struct file_operations mem_
- 	.mmap		= mmap_mem,
- 	.open		= open_mem,
- };
-+#else
-+extern const struct file_operations mem_fops;
-+#endif
- 
- static const struct file_operations kmem_fops = {
- 	.llseek		= memory_lseek,
-diff -Nurp pristine-linux-2.6.18.2/drivers/char/tpm/Kconfig tmp-linux-2.6-xen.patch/drivers/char/tpm/Kconfig
---- pristine-linux-2.6.18.2/drivers/char/tpm/Kconfig	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/char/tpm/Kconfig	2007-07-30 16:35:11.000000000 +0200
-@@ -58,5 +58,13 @@ config TCG_INFINEON
- 	  Further information on this driver and the supported hardware
- 	  can be found at http://www.prosec.rub.de/tpm
- 
--endmenu
-+config TCG_XEN
-+	tristate "XEN TPM Interface"
-+	depends on TCG_TPM && XEN
-+	---help---
-+	  If you want to make TPM support available to a Xen user domain,
-+	  say Yes and it will be accessible from within Linux.
-+	  To compile this driver as a module, choose M here; the module
-+	  will be called tpm_xenu.
- 
-+endmenu
-diff -Nurp pristine-linux-2.6.18.2/drivers/char/tpm/Makefile tmp-linux-2.6-xen.patch/drivers/char/tpm/Makefile
---- pristine-linux-2.6.18.2/drivers/char/tpm/Makefile	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/char/tpm/Makefile	2007-07-30 16:35:11.000000000 +0200
-@@ -9,3 +9,5 @@ obj-$(CONFIG_TCG_TIS) += tpm_tis.o
- obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
- obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
- obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
-+obj-$(CONFIG_TCG_XEN) += tpm_xenu.o
-+tpm_xenu-y = tpm_xen.o tpm_vtpm.o
-diff -Nurp pristine-linux-2.6.18.2/drivers/char/tpm/tpm.h tmp-linux-2.6-xen.patch/drivers/char/tpm/tpm.h
---- pristine-linux-2.6.18.2/drivers/char/tpm/tpm.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/char/tpm/tpm.h	2007-07-30 16:35:11.000000000 +0200
-@@ -105,6 +105,9 @@ struct tpm_chip {
- 	struct dentry **bios_dir;
- 
- 	struct list_head list;
-+#ifdef CONFIG_XEN
-+	void *priv;
-+#endif
- };
- 
- #define to_tpm_chip(n) container_of(n, struct tpm_chip, vendor)
-@@ -121,6 +124,18 @@ static inline void tpm_write_index(int b
- 	outb(value & 0xFF, base+1);
- }
- 
-+#ifdef CONFIG_XEN
-+static inline void *chip_get_private(const struct tpm_chip *chip)
++static int connect_ring(struct backend_info *be)
 +{
-+	return chip->priv;
-+}
++	struct xenbus_device *dev = be->dev;
++	unsigned long ring_ref;
++	unsigned int evtchn;
++	char protocol[64];
++	int err;
 +
-+static inline void chip_set_private(struct tpm_chip *chip, void *priv)
-+{
-+	chip->priv = priv;
-+}
-+#endif
++	DPRINTK("%s\n", dev->otherend);
 +
- extern void tpm_get_timeouts(struct tpm_chip *);
- extern void tpm_gen_interrupt(struct tpm_chip *);
- extern void tpm_continue_selftest(struct tpm_chip *);
-diff -Nurp pristine-linux-2.6.18.2/drivers/char/tpm/tpm_vtpm.c tmp-linux-2.6-xen.patch/drivers/char/tpm/tpm_vtpm.c
---- pristine-linux-2.6.18.2/drivers/char/tpm/tpm_vtpm.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/char/tpm/tpm_vtpm.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,542 @@
-+/*
-+ * Copyright (C) 2006 IBM Corporation
-+ *
-+ * Authors:
-+ * Stefan Berger <stefanb at us.ibm.com>
-+ *
-+ * Generic device driver part for device drivers in a virtualized
-+ * environment.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation, version 2 of the
-+ * License.
-+ *
-+ */
++	err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu", 
++			    &ring_ref, "event-channel", "%u", &evtchn, NULL);
++	if (err) {
++		xenbus_dev_fatal(dev, err,
++				 "reading %s/ring-ref and event-channel",
++				 dev->otherend);
++		return err;
++	}
 +
-+#include <asm/uaccess.h>
-+#include <linux/list.h>
-+#include <linux/device.h>
-+#include <linux/interrupt.h>
-+#include <linux/platform_device.h>
-+#include "tpm.h"
-+#include "tpm_vtpm.h"
++	be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
++	err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
++			    "%63s", protocol, NULL);
++	if (err)
++		strcpy(protocol, "unspecified, assuming native");
++	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
++		be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
++	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
++		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
++	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
++		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
++	else {
++		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
++		return -1;
++	}
++	printk(KERN_INFO
++	       "blktap: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
++	       ring_ref, evtchn, be->blkif->blk_protocol, protocol);
 +
-+/* read status bits */
-+enum {
-+	STATUS_BUSY = 0x01,
-+	STATUS_DATA_AVAIL = 0x02,
-+	STATUS_READY = 0x04
-+};
++	/* Map the shared frame, irq etc. */
++	err = tap_blkif_map(be->blkif, ring_ref, evtchn);
++	if (err) {
++		xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
++				 ring_ref, evtchn);
++		return err;
++	} 
 +
-+struct transmission {
-+	struct list_head next;
++	return 0;
++}
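++
++/*
++ * The frontend entries consumed above look roughly like this in
++ * xenstore (illustrative domain and device numbers):
++ *
++ *	/local/domain/1/device/vbd/51712/ring-ref      = "8"
++ *	/local/domain/1/device/vbd/51712/event-channel = "10"
++ *	/local/domain/1/device/vbd/51712/protocol      = "x86_32-abi"
++ */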
 +
-+	unsigned char *request;
-+	size_t  request_len;
-+	size_t  request_buflen;
 +
-+	unsigned char *response;
-+	size_t  response_len;
-+	size_t  response_buflen;
++/* ** Driver Registration ** */
 +
-+	unsigned int flags;
-+};
 +
-+enum {
-+	TRANSMISSION_FLAG_WAS_QUEUED = 0x1
++static const struct xenbus_device_id blktap_ids[] = {
++	{ "tap" },
++	{ "" }
 +};
 +
 +
-+enum {
-+	DATAEX_FLAG_QUEUED_ONLY = 0x1
++static struct xenbus_driver blktap = {
++	.name = "tap",
++	.owner = THIS_MODULE,
++	.ids = blktap_ids,
++	.probe = blktap_probe,
++	.remove = blktap_remove,
++	.otherend_changed = tap_frontend_changed
 +};
 +
 +
-+/* local variables */
-+
-+/* local function prototypes */
-+static int _vtpm_send_queued(struct tpm_chip *chip);
-+
-+
-+/* =============================================================
-+ * Some utility functions
-+ * =============================================================
-+ */
-+static void vtpm_state_init(struct vtpm_state *vtpms)
++void tap_blkif_xenbus_init(void)
 +{
-+	vtpms->current_request = NULL;
-+	spin_lock_init(&vtpms->req_list_lock);
-+	init_waitqueue_head(&vtpms->req_wait_queue);
-+	INIT_LIST_HEAD(&vtpms->queued_requests);
-+
-+	vtpms->current_response = NULL;
-+	spin_lock_init(&vtpms->resp_list_lock);
-+	init_waitqueue_head(&vtpms->resp_wait_queue);
-+
-+	vtpms->disconnect_time = jiffies;
++	xenbus_register_backend(&blktap);
 +}
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/char/Makefile
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/char/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1 @@
++obj-$(CONFIG_XEN_DEVMEM)	:= mem.o
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/char/mem.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/char/mem.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,190 @@
++/*
++ *  Originally from linux/drivers/char/mem.c
++ *
++ *  Copyright (C) 1991, 1992  Linus Torvalds
++ *
++ *  Added devfs support. 
++ *    Jan-11-1998, C. Scott Ananian <cananian at alumni.princeton.edu>
++ *  Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj at sgi.com>
++ */
 +
++#include <linux/mm.h>
++#include <linux/miscdevice.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/mman.h>
++#include <linux/random.h>
++#include <linux/init.h>
++#include <linux/raw.h>
++#include <linux/tty.h>
++#include <linux/capability.h>
++#include <linux/smp_lock.h>
++#include <linux/ptrace.h>
++#include <linux/device.h>
++#include <asm/pgalloc.h>
++#include <asm/uaccess.h>
++#include <asm/io.h>
++#include <asm/hypervisor.h>
 +
-+static inline struct transmission *transmission_alloc(void)
++static inline int uncached_access(struct file *file)
 +{
-+	return kzalloc(sizeof(struct transmission), GFP_ATOMIC);
++	if (file->f_flags & O_SYNC)
++		return 1;
++	/* Xen sets correct MTRR type on non-RAM for us. */
++	return 0;
 +}
 +
-+static unsigned char *
-+transmission_set_req_buffer(struct transmission *t,
-+                            unsigned char *buffer, size_t len)
++/*
++ * This function reads the *physical* memory. The f_pos points directly to the
++ * memory location. 
++ */
++static ssize_t read_mem(struct file * file, char __user * buf,
++			size_t count, loff_t *ppos)
 +{
-+	if (t->request_buflen < len) {
-+		kfree(t->request);
-+		t->request = kmalloc(len, GFP_KERNEL);
-+		if (!t->request) {
-+			t->request_buflen = 0;
-+			return NULL;
-+		}
-+		t->request_buflen = len;
-+	}
++	unsigned long p = *ppos, ignored;
++	ssize_t read = 0, sz;
++	void __iomem *v;
 +
-+	memcpy(t->request, buffer, len);
-+	t->request_len = len;
++	while (count > 0) {
++		/*
++		 * Handle first page in case it's not aligned
++		 */
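++		/*
++		 * e.g. with PAGE_SIZE 4096 and p == 0x1ff0, -p masked with
++		 * (PAGE_SIZE - 1) yields 0x10, the bytes left before the
++		 * next page boundary (illustrative values).
++		 */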
++		if (-p & (PAGE_SIZE - 1))
++			sz = -p & (PAGE_SIZE - 1);
++		else
++			sz = PAGE_SIZE;
 +
-+	return t->request;
-+}
++		sz = min_t(unsigned long, sz, count);
 +
-+static unsigned char *
-+transmission_set_res_buffer(struct transmission *t,
-+                            const unsigned char *buffer, size_t len)
-+{
-+	if (t->response_buflen < len) {
-+		kfree(t->response);
-+		t->response = kmalloc(len, GFP_ATOMIC);
-+		if (!t->response) {
-+			t->response_buflen = 0;
-+			return NULL;
++		v = ioremap(p, sz);
++		if (IS_ERR(v) || v == NULL) {
++			/*
++			 * Some programs (e.g., dmidecode) groove off into
++			 * weird RAM areas where no tables can possibly exist
++			 * (because Xen will have stomped on them!). These
++			 * programs get rather upset if we let them know that
++			 * Xen failed their access, so we fake out a read of
++			 * all zeroes.
++			 */
++			if (clear_user(buf, count))
++				return -EFAULT;
++			read += count;
++			break;
 +		}
-+		t->response_buflen = len;
-+	}
 +
-+	memcpy(t->response, buffer, len);
-+	t->response_len = len;
-+
-+	return t->response;
-+}
++		ignored = copy_to_user(buf, v, sz);
++		iounmap(v);
++		if (ignored)
++			return -EFAULT;
++		buf += sz;
++		p += sz;
++		count -= sz;
++		read += sz;
++	}
 +
-+static inline void transmission_free(struct transmission *t)
-+{
-+	kfree(t->request);
-+	kfree(t->response);
-+	kfree(t);
++	*ppos += read;
++	return read;
 +}
 +
-+/* =============================================================
-+ * Interface with the lower layer driver
-+ * =============================================================
-+ */
-+/*
-+ * Lower layer uses this function to make a response available.
-+ */
-+int vtpm_vd_recv(const struct tpm_chip *chip,
-+                 const unsigned char *buffer, size_t count,
-+                 void *ptr)
++static ssize_t write_mem(struct file * file, const char __user * buf, 
++			 size_t count, loff_t *ppos)
 +{
-+	unsigned long flags;
-+	int ret_size = 0;
-+	struct transmission *t;
-+	struct vtpm_state *vtpms;
++	unsigned long p = *ppos, ignored;
++	ssize_t written = 0, sz;
++	void __iomem *v;
 +
-+	vtpms = (struct vtpm_state *)chip_get_private(chip);
++	while (count > 0) {
++		/*
++		 * Handle first page in case it's not aligned
++		 */
++		if (-p & (PAGE_SIZE - 1))
++			sz = -p & (PAGE_SIZE - 1);
++		else
++			sz = PAGE_SIZE;
 +
-+	/*
-+	 * The list with requests must contain one request
-+	 * only and the element there must be the one that
-+	 * was passed to me from the front-end.
-+	 */
-+	spin_lock_irqsave(&vtpms->resp_list_lock, flags);
-+	if (vtpms->current_request != ptr) {
-+		spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
-+		return 0;
-+	}
++		sz = min_t(unsigned long, sz, count);
 +
-+	if ((t = vtpms->current_request)) {
-+		transmission_free(t);
-+		vtpms->current_request = NULL;
-+	}
++		v = ioremap(p, sz);
++		if (v == NULL)
++			break;
++		if (IS_ERR(v)) {
++			if (written == 0)
++				return PTR_ERR(v);
++			break;
++		}
 +
-+	t = transmission_alloc();
-+	if (t) {
-+		if (!transmission_set_res_buffer(t, buffer, count)) {
-+			transmission_free(t);
-+			spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
-+			return -ENOMEM;
++		ignored = copy_from_user(v, buf, sz);
++		iounmap(v);
++		if (ignored) {
++			written += sz - ignored;
++			if (written)
++				break;
++			return -EFAULT;
 +		}
-+		ret_size = count;
-+		vtpms->current_response = t;
-+		wake_up_interruptible(&vtpms->resp_wait_queue);
++		buf += sz;
++		p += sz;
++		count -= sz;
++		written += sz;
 +	}
-+	spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
 +
-+	return ret_size;
++	*ppos += written;
++	return written;
 +}
 +
-+
-+/*
-+ * Lower layer indicates its status (connected/disconnected)
-+ */
-+void vtpm_vd_status(const struct tpm_chip *chip, u8 vd_status)
++#ifndef ARCH_HAS_DEV_MEM_MMAP_MEM
++static int xen_mmap_mem(struct file * file, struct vm_area_struct * vma)
 +{
-+	struct vtpm_state *vtpms;
++	size_t size = vma->vm_end - vma->vm_start;
 +
-+	vtpms = (struct vtpm_state *)chip_get_private(chip);
++	if (uncached_access(file))
++		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 +
-+	vtpms->vd_status = vd_status;
-+	if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
-+		vtpms->disconnect_time = jiffies;
-+	}
++	/* We want to return the real error code, not EAGAIN. */
++	return direct_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
++				      size, vma->vm_page_prot, DOMID_IO);
 +}
++#endif
 +
-+/* =============================================================
-+ * Interface with the generic TPM driver
-+ * =============================================================
++/*
++ * The memory devices use the full 32/64 bits of the offset, and so we cannot
++ * check against negative addresses: they are ok. The return value is weird,
++ * though, in that case (0).
++ *
++ * Also note that seeking relative to the "end of file" isn't supported:
++ * it has no meaning, so it returns -EINVAL.
 + */
-+static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
++static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
 +{
-+	int rc = 0;
-+	unsigned long flags;
-+	struct vtpm_state *vtpms;
-+
-+	vtpms = (struct vtpm_state *)chip_get_private(chip);
-+
-+	/*
-+	 * Check if the previous operation only queued the command
-+	 * In this case there won't be a response, so I just
-+	 * return from here and reset that flag. In any other
-+	 * case I should receive a response from the back-end.
-+	 */
-+	spin_lock_irqsave(&vtpms->resp_list_lock, flags);
-+	if ((vtpms->flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
-+		vtpms->flags &= ~DATAEX_FLAG_QUEUED_ONLY;
-+		spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
-+		/*
-+		 * The first few commands (measurements) must be
-+		 * queued since it might not be possible to talk to the
-+		 * TPM, yet.
-+		 * Return a response of up to 30 '0's.
-+		 */
-+
-+		count = min_t(size_t, count, 30);
-+		memset(buf, 0x0, count);
-+		return count;
-+	}
-+	/*
-+	 * Check whether something is in the responselist and if
-+	 * there's nothing in the list wait for something to appear.
-+	 */
-+
-+	if (!vtpms->current_response) {
-+		spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
-+		interruptible_sleep_on_timeout(&vtpms->resp_wait_queue,
-+		                               1000);
-+		spin_lock_irqsave(&vtpms->resp_list_lock ,flags);
-+	}
++	loff_t ret;
 +
-+	if (vtpms->current_response) {
-+		struct transmission *t = vtpms->current_response;
-+		vtpms->current_response = NULL;
-+		rc = min(count, t->response_len);
-+		memcpy(buf, t->response, rc);
-+		transmission_free(t);
++	mutex_lock(&file->f_dentry->d_inode->i_mutex);
++	switch (orig) {
++		case 0:
++			file->f_pos = offset;
++			ret = file->f_pos;
++			force_successful_syscall_return();
++			break;
++		case 1:
++			file->f_pos += offset;
++			ret = file->f_pos;
++			force_successful_syscall_return();
++			break;
++		default:
++			ret = -EINVAL;
 +	}
-+
-+	spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
-+	return rc;
++	mutex_unlock(&file->f_dentry->d_inode->i_mutex);
++	return ret;
 +}
 +
-+static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
++static int open_mem(struct inode * inode, struct file * filp)
 +{
-+	int rc = 0;
-+	unsigned long flags;
-+	struct transmission *t = transmission_alloc();
-+	struct vtpm_state *vtpms;
-+
-+	vtpms = (struct vtpm_state *)chip_get_private(chip);
++	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
++}
 +
-+	if (!t)
-+		return -ENOMEM;
-+	/*
-+	 * If there's a current request, it must be the
-+	 * previous request that has timed out.
-+	 */
-+	spin_lock_irqsave(&vtpms->req_list_lock, flags);
-+	if (vtpms->current_request != NULL) {
-+		printk("WARNING: Sending although there is a request outstanding.\n"
-+		       "         Previous request must have timed out.\n");
-+		transmission_free(vtpms->current_request);
-+		vtpms->current_request = NULL;
-+	}
-+	spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
++const struct file_operations mem_fops = {
++	.llseek		= memory_lseek,
++	.read		= read_mem,
++	.write		= write_mem,
++	.mmap		= xen_mmap_mem,
++	.open		= open_mem,
++};
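++
++/*
++ * Note: this Xen-aware mem_fops is meant to stand in for the generic one
++ * in drivers/char/mem.c; with ARCH_HAS_DEV_MEM defined the generic driver
++ * is expected to declare mem_fops extern and pick up this definition
++ * (cf. the drivers/char/mem.c changes elsewhere in this series).
++ */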
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/console/Makefile
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/console/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,2 @@
 +
-+	/*
-+	 * Queue the packet if the driver below is not
-+	 * ready, yet, or there is any packet already
-+	 * in the queue.
-+	 * If the driver below is ready, unqueue all
-+	 * packets first before sending our current
-+	 * packet.
-+	 * For each unqueued packet, except for the
-+	 * last (=current) packet, call the function
-+	 * tpm_xen_recv to wait for the response to come
-+	 * back.
-+	 */
-+	if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
-+		if (time_after(jiffies,
-+		               vtpms->disconnect_time + HZ * 10)) {
-+			rc = -ENOENT;
-+		} else {
-+			goto queue_it;
-+		}
-+	} else {
-+		/*
-+		 * Send all queued packets.
-+		 */
-+		if (_vtpm_send_queued(chip) == 0) {
++obj-y	:= console.o xencons_ring.o
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/console/console.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/console/console.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,731 @@
++/******************************************************************************
++ * console.c
++ * 
++ * Virtual console driver.
++ * 
++ * Copyright (c) 2002-2004, K A Fraser.
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
 +
-+			vtpms->current_request = t;
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/errno.h>
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <linux/tty.h>
++#include <linux/tty_flip.h>
++#include <linux/serial.h>
++#include <linux/major.h>
++#include <linux/ptrace.h>
++#include <linux/ioport.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/console.h>
++#include <linux/bootmem.h>
++#include <linux/sysrq.h>
++#include <linux/screen_info.h>
++#include <linux/vt.h>
++#include <asm/io.h>
++#include <asm/irq.h>
++#include <asm/uaccess.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/event_channel.h>
++#include <asm/hypervisor.h>
++#include <xen/evtchn.h>
++#include <xen/xenbus.h>
++#include <xen/xencons.h>
 +
-+			rc = vtpm_vd_send(vtpms->tpm_private,
-+			                  buf,
-+			                  count,
-+			                  t);
-+			/*
-+			 * The generic TPM driver will call
-+			 * the function to receive the response.
-+			 */
-+			if (rc < 0) {
-+				vtpms->current_request = NULL;
-+				goto queue_it;
-+			}
-+		} else {
-+queue_it:
-+			if (!transmission_set_req_buffer(t, buf, count)) {
-+				transmission_free(t);
-+				rc = -ENOMEM;
-+				goto exit;
-+			}
-+			/*
-+			 * An error occurred. Don't event try
-+			 * to send the current request. Just
-+			 * queue it.
-+			 */
-+			spin_lock_irqsave(&vtpms->req_list_lock, flags);
-+			vtpms->flags |= DATAEX_FLAG_QUEUED_ONLY;
-+			list_add_tail(&t->next, &vtpms->queued_requests);
-+			spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
-+		}
-+	}
++/*
++ * Modes:
++ *  'xencons=off'  [XC_OFF]:     Console is disabled.
++ *  'xencons=tty'  [XC_TTY]:     Console attached to '/dev/tty[0-9]+'.
++ *  'xencons=ttyS' [XC_SERIAL]:  Console attached to '/dev/ttyS[0-9]+'.
++ *  'xencons=xvc'  [XC_XVC]:     Console attached to '/dev/xvc0'.
++ *  default:                     XC_XVC
++ * 
++ * NB. In mode XC_TTY, we create dummy consoles for tty2-63. This suppresses
++ * warnings from standard distro startup scripts.
++ */
++static enum {
++	XC_OFF, XC_TTY, XC_SERIAL, XC_XVC
++} xc_mode = XC_XVC;
++static int xc_num = -1;
 +
-+exit:
-+	return rc;
-+}
++/* /dev/xvc0 device number allocated by lanana.org. */
++#define XEN_XVC_MAJOR 204
++#define XEN_XVC_MINOR 191
 +
++#ifdef CONFIG_MAGIC_SYSRQ
++static unsigned long sysrq_requested;
++extern int sysrq_enabled;
++#endif
 +
-+/*
-+ * Send all queued requests.
-+ */
-+static int _vtpm_send_queued(struct tpm_chip *chip)
++static int __init xencons_setup(char *str)
 +{
-+	int rc;
-+	int error = 0;
-+	long flags;
-+	unsigned char buffer[1];
-+	struct vtpm_state *vtpms;
-+	vtpms = (struct vtpm_state *)chip_get_private(chip);
++	char *q;
++	int n;
++	extern int console_use_vt;
 +
-+	spin_lock_irqsave(&vtpms->req_list_lock, flags);
++	console_use_vt = 1;
++	if (!strncmp(str, "ttyS", 4)) {
++		xc_mode = XC_SERIAL;
++		str += 4;
++	} else if (!strncmp(str, "tty", 3)) {
++		xc_mode = XC_TTY;
++		str += 3;
++		console_use_vt = 0;
++	} else if (!strncmp(str, "xvc", 3)) {
++		xc_mode = XC_XVC;
++		str += 3;
++	} else if (!strncmp(str, "off", 3)) {
++		xc_mode = XC_OFF;
++		str += 3;
++	}
 +
-+	while (!list_empty(&vtpms->queued_requests)) {
-+		/*
-+		 * Need to dequeue them.
-+		 * Read the result into a dummy buffer.
-+		 */
-+		struct transmission *qt = (struct transmission *)
-+		                          vtpms->queued_requests.next;
-+		list_del(&qt->next);
-+		vtpms->current_request = qt;
-+		spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
++	n = simple_strtol(str, &q, 10);
++	if (q != str)
++		xc_num = n;
 +
-+		rc = vtpm_vd_send(vtpms->tpm_private,
-+		                  qt->request,
-+		                  qt->request_len,
-+		                  qt);
++	return 1;
++}
++__setup("xencons=", xencons_setup);
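++
++/*
++ * Example (illustrative): booting with "xencons=ttyS0" selects XC_SERIAL
++ * and attaches the console to /dev/ttyS0; the default is equivalent to
++ * "xencons=xvc", i.e. /dev/xvc0.
++ */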
 +
-+		if (rc < 0) {
-+			spin_lock_irqsave(&vtpms->req_list_lock, flags);
-+			if ((qt = vtpms->current_request) != NULL) {
-+				/*
-+				 * requeue it at the beginning
-+				 * of the list
-+				 */
-+				list_add(&qt->next,
-+				         &vtpms->queued_requests);
-+			}
-+			vtpms->current_request = NULL;
-+			error = 1;
-+			break;
-+		}
-+		/*
-+		 * After this point qt is not valid anymore!
-+		 * It is freed when the front-end is delivering
-+		 * the data by calling tpm_recv
-+		 */
-+		/*
-+		 * Receive response into provided dummy buffer
-+		 */
-+		rc = vtpm_recv(chip, buffer, sizeof(buffer));
-+		spin_lock_irqsave(&vtpms->req_list_lock, flags);
++/* The kernel and user-land drivers share a common transmit buffer. */
++static unsigned int wbuf_size = 4096;
++#define WBUF_MASK(_i) ((_i)&(wbuf_size-1))
++static char *wbuf;
++static unsigned int wc, wp; /* write_cons, write_prod */
++
++static int __init xencons_bufsz_setup(char *str)
++{
++	unsigned int goal;
++	goal = simple_strtoul(str, NULL, 0);
++	if (goal) {
++		goal = roundup_pow_of_two(goal);
++		if (wbuf_size < goal)
++			wbuf_size = goal;
 +	}
++	return 1;
++}
++__setup("xencons_bufsz=", xencons_bufsz_setup);
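++
++/*
++ * WBUF_MASK() above relies on wbuf_size remaining a power of two, which
++ * the roundup preserves: e.g. "xencons_bufsz=5000" yields 8192, and the
++ * free-running indices wc/wp then wrap as WBUF_MASK(8193) == 1
++ * (illustrative values).
++ */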
 +
-+	spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
++/* This lock protects accesses to the common transmit buffer. */
++static DEFINE_SPINLOCK(xencons_lock);
 +
-+	return error;
-+}
++/* Common transmit-kick routine. */
++static void __xencons_tx_flush(void);
 +
-+static void vtpm_cancel(struct tpm_chip *chip)
++static struct tty_driver *xencons_driver;
++
++/******************** Kernel console driver ********************************/
++
++static void kcons_write(struct console *c, const char *s, unsigned int count)
 +{
++	int           i = 0;
 +	unsigned long flags;
-+	struct vtpm_state *vtpms = (struct vtpm_state *)chip_get_private(chip);
 +
-+	spin_lock_irqsave(&vtpms->resp_list_lock,flags);
++	spin_lock_irqsave(&xencons_lock, flags);
 +
-+	if (!vtpms->current_response && vtpms->current_request) {
-+		spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
-+		interruptible_sleep_on(&vtpms->resp_wait_queue);
-+		spin_lock_irqsave(&vtpms->resp_list_lock,flags);
-+	}
++	while (i < count) {
++		for (; i < count; i++) {
++			if ((wp - wc) >= (wbuf_size - 1))
++				break;
++			if ((wbuf[WBUF_MASK(wp++)] = s[i]) == '\n')
++				wbuf[WBUF_MASK(wp++)] = '\r';
++		}
 +
-+	if (vtpms->current_response) {
-+		struct transmission *t = vtpms->current_response;
-+		vtpms->current_response = NULL;
-+		transmission_free(t);
++		__xencons_tx_flush();
 +	}
 +
-+	spin_unlock_irqrestore(&vtpms->resp_list_lock,flags);
++	spin_unlock_irqrestore(&xencons_lock, flags);
 +}
 +
-+static u8 vtpm_status(struct tpm_chip *chip)
++static void kcons_write_dom0(struct console *c, const char *s, unsigned int count)
 +{
-+	u8 rc = 0;
-+	unsigned long flags;
-+	struct vtpm_state *vtpms;
-+
-+	vtpms = (struct vtpm_state *)chip_get_private(chip);
 +
-+	spin_lock_irqsave(&vtpms->resp_list_lock, flags);
-+	/*
-+	 * Data are available if:
-+	 *  - there's a current response
-+	 *  - the last packet was queued only (this is fake, but necessary to
-+	 *      get the generic TPM layer to call the receive function.)
-+	 */
-+	if (vtpms->current_response ||
-+	    0 != (vtpms->flags & DATAEX_FLAG_QUEUED_ONLY)) {
-+		rc = STATUS_DATA_AVAIL;
-+	} else if (!vtpms->current_response && !vtpms->current_request) {
-+		rc = STATUS_READY;
++	while (count > 0) {
++		int rc;
++		rc = HYPERVISOR_console_io(CONSOLEIO_write, count, (char *)s);
++		if (rc <= 0)
++			break;
++		count -= rc;
++		s += rc;
 +	}
-+
-+	spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
-+	return rc;
 +}
 +
-+static struct file_operations vtpm_ops = {
-+	.owner = THIS_MODULE,
-+	.llseek = no_llseek,
-+	.open = tpm_open,
-+	.read = tpm_read,
-+	.write = tpm_write,
-+	.release = tpm_release,
-+};
-+
-+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
-+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
-+static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
-+static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
-+static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
-+static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
-+		   NULL);
-+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
-+static DEVICE_ATTR(cancel, S_IWUSR |S_IWGRP, NULL, tpm_store_cancel);
++static struct tty_driver *kcons_device(struct console *c, int *index)
++{
++	*index = 0;
++	return xencons_driver;
++}
 +
-+static struct attribute *vtpm_attrs[] = {
-+	&dev_attr_pubek.attr,
-+	&dev_attr_pcrs.attr,
-+	&dev_attr_enabled.attr,
-+	&dev_attr_active.attr,
-+	&dev_attr_owned.attr,
-+	&dev_attr_temp_deactivated.attr,
-+	&dev_attr_caps.attr,
-+	&dev_attr_cancel.attr,
-+	NULL,
++static struct console kcons_info = {
++	.device	= kcons_device,
++	.flags	= CON_PRINTBUFFER | CON_ENABLED,
++	.index	= -1,
 +};
 +
-+static struct attribute_group vtpm_attr_grp = { .attrs = vtpm_attrs };
-+
-+#define TPM_LONG_TIMEOUT   (10 * 60 * HZ)
++static int __init xen_console_init(void)
++{
++	if (!is_running_on_xen())
++		goto out;
 +
-+static struct tpm_vendor_specific tpm_vtpm = {
-+	.recv = vtpm_recv,
-+	.send = vtpm_send,
-+	.cancel = vtpm_cancel,
-+	.status = vtpm_status,
-+	.req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
-+	.req_complete_val  = STATUS_DATA_AVAIL,
-+	.req_canceled = STATUS_READY,
-+	.attr_group = &vtpm_attr_grp,
-+	.miscdev = {
-+		.fops = &vtpm_ops,
-+	},
-+	.duration = {
-+		TPM_LONG_TIMEOUT,
-+		TPM_LONG_TIMEOUT,
-+		TPM_LONG_TIMEOUT,
-+	},
-+};
++	if (is_initial_xendomain()) {
++		kcons_info.write = kcons_write_dom0;
++	} else {
++		if (!xen_start_info->console.domU.evtchn)
++			goto out;
++		kcons_info.write = kcons_write;
++	}
 +
-+struct tpm_chip *init_vtpm(struct device *dev,
-+                           struct tpm_private *tp)
-+{
-+	long rc;
-+	struct tpm_chip *chip;
-+	struct vtpm_state *vtpms;
++	switch (xc_mode) {
++	case XC_XVC:
++		strcpy(kcons_info.name, "xvc");
++		if (xc_num == -1)
++			xc_num = 0;
++		break;
 +
-+	vtpms = kzalloc(sizeof(struct vtpm_state), GFP_KERNEL);
-+	if (!vtpms)
-+		return ERR_PTR(-ENOMEM);
++	case XC_SERIAL:
++		strcpy(kcons_info.name, "ttyS");
++		if (xc_num == -1)
++			xc_num = 0;
++		break;
 +
-+	vtpm_state_init(vtpms);
-+	vtpms->tpm_private = tp;
++	case XC_TTY:
++		strcpy(kcons_info.name, "tty");
++		if (xc_num == -1)
++			xc_num = 1;
++		break;
 +
-+	chip = tpm_register_hardware(dev, &tpm_vtpm);
-+	if (!chip) {
-+		rc = -ENODEV;
-+		goto err_free_mem;
++	default:
++		goto out;
 +	}
 +
-+	chip_set_private(chip, vtpms);
-+
-+	return chip;
++	wbuf = alloc_bootmem(wbuf_size);
 +
-+err_free_mem:
-+	kfree(vtpms);
++	register_console(&kcons_info);
 +
-+	return ERR_PTR(rc);
++ out:
++	return 0;
 +}
++console_initcall(xen_console_init);
 +
-+void cleanup_vtpm(struct device *dev)
++/*** Useful function for console debugging -- goes straight to Xen. ***/
++asmlinkage int xprintk(const char *fmt, ...)
 +{
-+	struct tpm_chip *chip = dev_get_drvdata(dev);
-+	struct vtpm_state *vtpms = (struct vtpm_state*)chip_get_private(chip);
-+	tpm_remove_hardware(dev);
-+	kfree(vtpms);
-+}
-diff -Nurp pristine-linux-2.6.18.2/drivers/char/tpm/tpm_vtpm.h tmp-linux-2.6-xen.patch/drivers/char/tpm/tpm_vtpm.h
---- pristine-linux-2.6.18.2/drivers/char/tpm/tpm_vtpm.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/char/tpm/tpm_vtpm.h	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,55 @@
-+#ifndef TPM_VTPM_H
-+#define TPM_VTPM_H
-+
-+struct tpm_chip;
-+struct tpm_private;
-+
-+struct vtpm_state {
-+	struct transmission *current_request;
-+	spinlock_t           req_list_lock;
-+	wait_queue_head_t    req_wait_queue;
-+
-+	struct list_head     queued_requests;
-+
-+	struct transmission *current_response;
-+	spinlock_t           resp_list_lock;
-+	wait_queue_head_t    resp_wait_queue;     // processes waiting for responses
-+
-+	u8                   vd_status;
-+	u8                   flags;
-+
-+	unsigned long        disconnect_time;
++	va_list args;
++	int printk_len;
++	static char printk_buf[1024];
 +
-+	/*
-+	 * The following is a private structure of the underlying
-+	 * driver. It is passed as parameter in the send function.
-+	 */
-+	struct tpm_private *tpm_private;
-+};
++	/* Emit the output into the temporary buffer */
++	va_start(args, fmt);
++	printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
++	va_end(args);
 +
++	/* Send the processed output directly to Xen. */
++	kcons_write_dom0(NULL, printk_buf, printk_len);
 +
-+enum vdev_status {
-+	TPM_VD_STATUS_DISCONNECTED = 0x0,
-+	TPM_VD_STATUS_CONNECTED = 0x1
-+};
++	return 0;
++}
 +
-+/* this function is called from tpm_vtpm.c */
-+int vtpm_vd_send(struct tpm_private * tp,
-+                 const u8 * buf, size_t count, void *ptr);
++/*** Forcibly flush console data before dying. ***/
++void xencons_force_flush(void)
++{
++	int sz;
 +
-+/* these functions are offered by tpm_vtpm.c */
-+struct tpm_chip *init_vtpm(struct device *,
-+                           struct tpm_private *);
-+void cleanup_vtpm(struct device *);
-+int vtpm_vd_recv(const struct tpm_chip* chip,
-+                 const unsigned char *buffer, size_t count, void *ptr);
-+void vtpm_vd_status(const struct tpm_chip *, u8 status);
++	/* Emergency console is synchronous, so there's nothing to flush. */
++	if (!is_running_on_xen() ||
++	    is_initial_xendomain() ||
++	    !xen_start_info->console.domU.evtchn)
++		return;
 +
-+static inline struct tpm_private *tpm_private_from_dev(struct device *dev)
-+{
-+	struct tpm_chip *chip = dev_get_drvdata(dev);
-+	struct vtpm_state *vtpms = chip_get_private(chip);
-+	return vtpms->tpm_private;
++	/* Spin until console data is flushed through to the daemon. */
++	while (wc != wp) {
++		int sent = 0;
++		if ((sz = wp - wc) == 0)
++			continue;
++		sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
++		if (sent > 0)
++			wc += sent;
++	}
 +}
 +
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/drivers/char/tpm/tpm_xen.c tmp-linux-2.6-xen.patch/drivers/char/tpm/tpm_xen.c
---- pristine-linux-2.6.18.2/drivers/char/tpm/tpm_xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/char/tpm/tpm_xen.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,720 @@
-+/*
-+ * Copyright (c) 2005, IBM Corporation
-+ *
-+ * Author: Stefan Berger, stefanb at us.ibm.com
-+ * Grant table support: Mahadevan Gomathisankaran
-+ *
-+ * This code has been derived from drivers/xen/netfront/netfront.c
-+ *
-+ * Copyright (c) 2002-2004, K A Fraser
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/errno.h>
-+#include <linux/err.h>
-+#include <linux/interrupt.h>
-+#include <linux/mutex.h>
-+#include <asm/uaccess.h>
-+#include <xen/evtchn.h>
-+#include <xen/interface/grant_table.h>
-+#include <xen/interface/io/tpmif.h>
-+#include <xen/gnttab.h>
-+#include <xen/xenbus.h>
-+#include "tpm.h"
-+#include "tpm_vtpm.h"
 +
-+#undef DEBUG
++void __init dom0_init_screen_info(const struct dom0_vga_console_info *info, size_t size)
++{
++	/* This is drawn from a dump from vgacon:startup in
++	 * standard Linux. */
++	screen_info.orig_video_mode = 3;
++	screen_info.orig_video_isVGA = 1;
++	screen_info.orig_video_lines = 25;
++	screen_info.orig_video_cols = 80;
++	screen_info.orig_video_ega_bx = 3;
++	screen_info.orig_video_points = 16;
++	screen_info.orig_y = screen_info.orig_video_lines - 1;
 +
-+/* local structures */
-+struct tpm_private {
-+	struct tpm_chip *chip;
++	switch (info->video_type) {
++	case XEN_VGATYPE_TEXT_MODE_3:
++		if (size < offsetof(struct dom0_vga_console_info, u.text_mode_3)
++		           + sizeof(info->u.text_mode_3))
++			break;
++		screen_info.orig_video_lines = info->u.text_mode_3.rows;
++		screen_info.orig_video_cols = info->u.text_mode_3.columns;
++		screen_info.orig_x = info->u.text_mode_3.cursor_x;
++		screen_info.orig_y = info->u.text_mode_3.cursor_y;
++		screen_info.orig_video_points =
++			info->u.text_mode_3.font_height;
++		break;
 +
-+	tpmif_tx_interface_t *tx;
-+	atomic_t refcnt;
-+	unsigned int irq;
-+	u8 is_connected;
-+	u8 is_suspended;
++	case XEN_VGATYPE_VESA_LFB:
++		if (size < offsetof(struct dom0_vga_console_info,
++		                    u.vesa_lfb.gbl_caps))
++			break;
++		screen_info.orig_video_isVGA = VIDEO_TYPE_VLFB;
++		screen_info.lfb_width = info->u.vesa_lfb.width;
++		screen_info.lfb_height = info->u.vesa_lfb.height;
++		screen_info.lfb_depth = info->u.vesa_lfb.bits_per_pixel;
++		screen_info.lfb_base = info->u.vesa_lfb.lfb_base;
++		screen_info.lfb_size = info->u.vesa_lfb.lfb_size;
++		screen_info.lfb_linelength = info->u.vesa_lfb.bytes_per_line;
++		screen_info.red_size = info->u.vesa_lfb.red_size;
++		screen_info.red_pos = info->u.vesa_lfb.red_pos;
++		screen_info.green_size = info->u.vesa_lfb.green_size;
++		screen_info.green_pos = info->u.vesa_lfb.green_pos;
++		screen_info.blue_size = info->u.vesa_lfb.blue_size;
++		screen_info.blue_pos = info->u.vesa_lfb.blue_pos;
++		screen_info.rsvd_size = info->u.vesa_lfb.rsvd_size;
++		screen_info.rsvd_pos = info->u.vesa_lfb.rsvd_pos;
++		if (size >= offsetof(struct dom0_vga_console_info,
++		                     u.vesa_lfb.gbl_caps)
++		            + sizeof(info->u.vesa_lfb.gbl_caps))
++			screen_info.capabilities = info->u.vesa_lfb.gbl_caps;
++		if (size >= offsetof(struct dom0_vga_console_info,
++		                     u.vesa_lfb.mode_attrs)
++		            + sizeof(info->u.vesa_lfb.mode_attrs))
++			screen_info.vesa_attributes = info->u.vesa_lfb.mode_attrs;
++		break;
++	}
++}
 +
-+	spinlock_t tx_lock;
 +
-+	struct tx_buffer *tx_buffers[TPMIF_TX_RING_SIZE];
++/******************** User-space console driver (/dev/console) ************/
 +
-+	atomic_t tx_busy;
-+	void *tx_remember;
++#define DRV(_d)         (_d)
++#define DUMMY_TTY(_tty) ((xc_mode == XC_TTY) &&		\
++			 ((_tty)->index != (xc_num - 1)))
 +
-+	domid_t backend_id;
-+	wait_queue_head_t wait_q;
++static struct termios *xencons_termios[MAX_NR_CONSOLES];
++static struct termios *xencons_termios_locked[MAX_NR_CONSOLES];
++static struct tty_struct *xencons_tty;
++static int xencons_priv_irq;
++static char x_char;
 +
-+	struct xenbus_device *dev;
-+	int ring_ref;
-+};
++void xencons_rx(char *buf, unsigned len, struct pt_regs *regs)
++{
++	int           i;
++	unsigned long flags;
 +
-+struct tx_buffer {
-+	unsigned int size;	// available space in data
-+	unsigned int len;	// used space in data
-+	unsigned char *data;	// pointer to a page
-+};
++	spin_lock_irqsave(&xencons_lock, flags);
++	if (xencons_tty == NULL)
++		goto out;
 +
++	for (i = 0; i < len; i++) {
++#ifdef CONFIG_MAGIC_SYSRQ
++		if (sysrq_enabled) {
++			if (buf[i] == '\x0f') { /* ^O */
++				if (!sysrq_requested) {
++					sysrq_requested = jiffies;
++					continue; /* don't print sysrq key */
++				}
++				sysrq_requested = 0;
++			} else if (sysrq_requested) {
++				unsigned long sysrq_timeout =
++					sysrq_requested + HZ*2;
++				sysrq_requested = 0;
++				if (time_before(jiffies, sysrq_timeout)) {
++					spin_unlock_irqrestore(
++						&xencons_lock, flags);
++					handle_sysrq(
++						buf[i], regs, xencons_tty);
++					spin_lock_irqsave(
++						&xencons_lock, flags);
++					continue;
++				}
++			}
++		}
++#endif
++		tty_insert_flip_char(xencons_tty, buf[i], 0);
++	}
++	tty_flip_buffer_push(xencons_tty);
 +
-+/* locally visible variables */
-+static grant_ref_t gref_head;
-+static struct tpm_private *my_priv;
++ out:
++	spin_unlock_irqrestore(&xencons_lock, flags);
++}
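
[Note on the hunk above: the SysRq path arms a two-second window. The first ^O stamps sysrq_requested with jiffies, and the following key is handed to handle_sysrq() only if it arrives before sysrq_requested + HZ*2. time_before() compares via a signed subtraction, so the test stays correct across jiffies wraparound. A standalone sketch of that wrap-safe comparison (the macro mirrors the kernel's definition):

#include <stdio.h>

/* Mirrors the kernel's time_before(): signed difference survives wrap. */
#define time_before(a, b)	((long)((a) - (b)) < 0)

int main(void)
{
	unsigned long now = (unsigned long)-5;	/* five ticks before the wrap */
	unsigned long deadline = now + 10;	/* wraps to a small value */

	/* A naive '<' claims the deadline has already passed. */
	printf("naive:     %d\n", now < deadline);
	printf("wrap-safe: %d\n", time_before(now, deadline));
	return 0;
}
]
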
 +
-+/* local function prototypes */
-+static irqreturn_t tpmif_int(int irq,
-+                             void *tpm_priv,
-+                             struct pt_regs *ptregs);
-+static void tpmif_rx_action(unsigned long unused);
-+static int tpmif_connect(struct xenbus_device *dev,
-+                         struct tpm_private *tp,
-+                         domid_t domid);
-+static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0);
-+static int tpmif_allocate_tx_buffers(struct tpm_private *tp);
-+static void tpmif_free_tx_buffers(struct tpm_private *tp);
-+static void tpmif_set_connected_state(struct tpm_private *tp,
-+                                      u8 newstate);
-+static int tpm_xmit(struct tpm_private *tp,
-+                    const u8 * buf, size_t count, int userbuffer,
-+                    void *remember);
-+static void destroy_tpmring(struct tpm_private *tp);
-+void __exit tpmif_exit(void);
++static void __xencons_tx_flush(void)
++{
++	int sent, sz, work_done = 0;
 +
-+#define DPRINTK(fmt, args...) \
-+    pr_debug("xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
-+#define IPRINTK(fmt, args...) \
-+    printk(KERN_INFO "xen_tpm_fr: " fmt, ##args)
-+#define WPRINTK(fmt, args...) \
-+    printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args)
++	if (x_char) {
++		if (is_initial_xendomain())
++			kcons_write_dom0(NULL, &x_char, 1);
++		else
++			while (x_char)
++				if (xencons_ring_send(&x_char, 1) == 1)
++					break;
++		x_char = 0;
++		work_done = 1;
++	}
 +
-+#define GRANT_INVALID_REF	0
++	while (wc != wp) {
++		sz = wp - wc;
++		if (sz > (wbuf_size - WBUF_MASK(wc)))
++			sz = wbuf_size - WBUF_MASK(wc);
++		if (is_initial_xendomain()) {
++			kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz);
++			wc += sz;
++		} else {
++			sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
++			if (sent == 0)
++				break;
++			wc += sent;
++		}
++		work_done = 1;
++	}
 +
++	if (work_done && (xencons_tty != NULL)) {
++		wake_up_interruptible(&xencons_tty->write_wait);
++		if ((xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
++		    (xencons_tty->ldisc.write_wakeup != NULL))
++			(xencons_tty->ldisc.write_wakeup)(xencons_tty);
++	}
++}
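
[Note on the hunk above: wc and wp are free-running write-consumer/producer counters into the wbuf ring. WBUF_MASK() reduces them modulo the power-of-two buffer size, wp - wc is the queued byte count even after the counters wrap, and the flush clamps each copy to the contiguous run before the physical end of the array. A compact userspace model of the same index scheme (buffer size illustrative; where the real driver drops bytes when the ring is full, this model drains and retries):

#include <stdio.h>

#define WBUF_SIZE	16			/* must be a power of two */
#define WBUF_MASK(i)	((i) & (WBUF_SIZE - 1))

static char wbuf[WBUF_SIZE];
static unsigned int wc, wp;	/* free-running consumer/producer counters */

static int put_char(char c)
{
	if (wp - wc == WBUF_SIZE)	/* full; works across index wrap */
		return 0;
	wbuf[WBUF_MASK(wp++)] = c;
	return 1;
}

static void flush(void)
{
	while (wc != wp) {
		/* Largest contiguous run before the physical wrap point. */
		unsigned int sz = wp - wc;
		if (sz > WBUF_SIZE - WBUF_MASK(wc))
			sz = WBUF_SIZE - WBUF_MASK(wc);
		fwrite(&wbuf[WBUF_MASK(wc)], 1, sz, stdout);
		wc += sz;
	}
}

int main(void)
{
	const char *msg = "hello, xencons ring\n";
	const char *p;

	for (p = msg; *p; p++)
		while (!put_char(*p))
			flush();	/* ring full: drain, then retry */
	flush();
	return 0;
}
]
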
 +
-+static inline int
-+tx_buffer_copy(struct tx_buffer *txb, const u8 *src, int len,
-+               int isuserbuffer)
++void xencons_tx(void)
 +{
-+	int copied = len;
++	unsigned long flags;
 +
-+	if (len > txb->size)
-+		copied = txb->size;
-+	if (isuserbuffer) {
-+		if (copy_from_user(txb->data, src, copied))
-+			return -EFAULT;
-+	} else {
-+		memcpy(txb->data, src, copied);
-+	}
-+	txb->len = len;
-+	return copied;
++	spin_lock_irqsave(&xencons_lock, flags);
++	__xencons_tx_flush();
++	spin_unlock_irqrestore(&xencons_lock, flags);
 +}
 +
-+static inline struct tx_buffer *tx_buffer_alloc(void)
++/* Privileged receive callback and transmit kicker. */
++static irqreturn_t xencons_priv_interrupt(int irq, void *dev_id,
++					  struct pt_regs *regs)
 +{
-+	struct tx_buffer *txb;
++	static char rbuf[16];
++	int         l;
 +
-+	txb = kzalloc(sizeof(struct tx_buffer), GFP_KERNEL);
-+	if (!txb)
-+		return NULL;
++	while ((l = HYPERVISOR_console_io(CONSOLEIO_read, 16, rbuf)) > 0)
++		xencons_rx(rbuf, l, regs);
 +
-+	txb->len = 0;
-+	txb->size = PAGE_SIZE;
-+	txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
-+	if (txb->data == NULL) {
-+		kfree(txb);
-+		txb = NULL;
-+	}
++	xencons_tx();
 +
-+	return txb;
++	return IRQ_HANDLED;
 +}
 +
++static int xencons_write_room(struct tty_struct *tty)
++{
++	return wbuf_size - (wp - wc);
++}
 +
-+static inline void tx_buffer_free(struct tx_buffer *txb)
++static int xencons_chars_in_buffer(struct tty_struct *tty)
 +{
-+	if (txb) {
-+		free_page((long)txb->data);
-+		kfree(txb);
-+	}
++	return wp - wc;
 +}
 +
-+/**************************************************************
-+ Utility function for the tpm_private structure
-+**************************************************************/
-+static void tpm_private_init(struct tpm_private *tp)
++static void xencons_send_xchar(struct tty_struct *tty, char ch)
 +{
-+	spin_lock_init(&tp->tx_lock);
-+	init_waitqueue_head(&tp->wait_q);
-+	atomic_set(&tp->refcnt, 1);
++	unsigned long flags;
++
++	if (DUMMY_TTY(tty))
++		return;
++
++	spin_lock_irqsave(&xencons_lock, flags);
++	x_char = ch;
++	__xencons_tx_flush();
++	spin_unlock_irqrestore(&xencons_lock, flags);
 +}
 +
-+static void tpm_private_put(void)
++static void xencons_throttle(struct tty_struct *tty)
 +{
-+	if (!atomic_dec_and_test(&my_priv->refcnt))
++	if (DUMMY_TTY(tty))
 +		return;
 +
-+	tpmif_free_tx_buffers(my_priv);
-+	kfree(my_priv);
-+	my_priv = NULL;
++	if (I_IXOFF(tty))
++		xencons_send_xchar(tty, STOP_CHAR(tty));
 +}
 +
-+static struct tpm_private *tpm_private_get(void)
++static void xencons_unthrottle(struct tty_struct *tty)
 +{
-+	int err;
++	if (DUMMY_TTY(tty))
++		return;
 +
-+	if (my_priv) {
-+		atomic_inc(&my_priv->refcnt);
-+		return my_priv;
++	if (I_IXOFF(tty)) {
++		if (x_char != 0)
++			x_char = 0;
++		else
++			xencons_send_xchar(tty, START_CHAR(tty));
 +	}
++}
 +
-+	my_priv = kzalloc(sizeof(struct tpm_private), GFP_KERNEL);
-+	if (!my_priv)
-+		return NULL;
++static void xencons_flush_buffer(struct tty_struct *tty)
++{
++	unsigned long flags;
 +
-+	tpm_private_init(my_priv);
-+	err = tpmif_allocate_tx_buffers(my_priv);
-+	if (err < 0)
-+		tpm_private_put();
++	if (DUMMY_TTY(tty))
++		return;
 +
-+	return my_priv;
++	spin_lock_irqsave(&xencons_lock, flags);
++	wc = wp = 0;
++	spin_unlock_irqrestore(&xencons_lock, flags);
 +}
 +
-+/**************************************************************
++static inline int __xencons_put_char(int ch)
++{
++	char _ch = (char)ch;
++	if ((wp - wc) == wbuf_size)
++		return 0;
++	wbuf[WBUF_MASK(wp++)] = _ch;
++	return 1;
++}
 +
-+ The interface to let the tpm plugin register its callback
-+ function and send data to another partition using this module
++static int xencons_write(
++	struct tty_struct *tty,
++	const unsigned char *buf,
++	int count)
++{
++	int i;
++	unsigned long flags;
 +
-+**************************************************************/
++	if (DUMMY_TTY(tty))
++		return count;
 +
-+static DEFINE_MUTEX(suspend_lock);
-+/*
-+ * Send data via this module by calling this function
-+ */
-+int vtpm_vd_send(struct tpm_private *tp,
-+                 const u8 * buf, size_t count, void *ptr)
++	spin_lock_irqsave(&xencons_lock, flags);
++
++	for (i = 0; i < count; i++)
++		if (!__xencons_put_char(buf[i]))
++			break;
++
++	if (i != 0)
++		__xencons_tx_flush();
++
++	spin_unlock_irqrestore(&xencons_lock, flags);
++
++	return i;
++}
++
++static void xencons_put_char(struct tty_struct *tty, u_char ch)
 +{
-+	int sent;
++	unsigned long flags;
 +
-+	mutex_lock(&suspend_lock);
-+	sent = tpm_xmit(tp, buf, count, 0, ptr);
-+	mutex_unlock(&suspend_lock);
++	if (DUMMY_TTY(tty))
++		return;
 +
-+	return sent;
++	spin_lock_irqsave(&xencons_lock, flags);
++	(void)__xencons_put_char(ch);
++	spin_unlock_irqrestore(&xencons_lock, flags);
 +}
 +
-+/**************************************************************
-+ XENBUS support code
-+**************************************************************/
++static void xencons_flush_chars(struct tty_struct *tty)
++{
++	unsigned long flags;
 +
-+static int setup_tpmring(struct xenbus_device *dev,
-+                         struct tpm_private *tp)
++	if (DUMMY_TTY(tty))
++		return;
++
++	spin_lock_irqsave(&xencons_lock, flags);
++	__xencons_tx_flush();
++	spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static void xencons_wait_until_sent(struct tty_struct *tty, int timeout)
 +{
-+	tpmif_tx_interface_t *sring;
-+	int err;
++	unsigned long orig_jiffies = jiffies;
 +
-+	tp->ring_ref = GRANT_INVALID_REF;
++	if (DUMMY_TTY(tty))
++		return;
 +
-+	sring = (void *)__get_free_page(GFP_KERNEL);
-+	if (!sring) {
-+		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
-+		return -ENOMEM;
++	while (DRV(tty->driver)->chars_in_buffer(tty)) {
++		set_current_state(TASK_INTERRUPTIBLE);
++		schedule_timeout(1);
++		if (signal_pending(current))
++			break;
++		if (timeout && time_after(jiffies, orig_jiffies + timeout))
++			break;
 +	}
-+	tp->tx = sring;
 +
-+	err = xenbus_grant_ring(dev, virt_to_mfn(tp->tx));
-+	if (err < 0) {
-+		free_page((unsigned long)sring);
-+		tp->tx = NULL;
-+		xenbus_dev_fatal(dev, err, "allocating grant reference");
-+		goto fail;
-+	}
-+	tp->ring_ref = err;
++	set_current_state(TASK_RUNNING);
++}
 +
-+	err = tpmif_connect(dev, tp, dev->otherend_id);
-+	if (err)
-+		goto fail;
++static int xencons_open(struct tty_struct *tty, struct file *filp)
++{
++	unsigned long flags;
++
++	if (DUMMY_TTY(tty))
++		return 0;
++
++	spin_lock_irqsave(&xencons_lock, flags);
++	tty->driver_data = NULL;
++	if (xencons_tty == NULL)
++		xencons_tty = tty;
++	__xencons_tx_flush();
++	spin_unlock_irqrestore(&xencons_lock, flags);
 +
 +	return 0;
-+fail:
-+	destroy_tpmring(tp);
-+	return err;
 +}
 +
-+
-+static void destroy_tpmring(struct tpm_private *tp)
++static void xencons_close(struct tty_struct *tty, struct file *filp)
 +{
-+	tpmif_set_connected_state(tp, 0);
++	unsigned long flags;
 +
-+	if (tp->ring_ref != GRANT_INVALID_REF) {
-+		gnttab_end_foreign_access(tp->ring_ref, 0,
-+					  (unsigned long)tp->tx);
-+		tp->ring_ref = GRANT_INVALID_REF;
-+		tp->tx = NULL;
++	if (DUMMY_TTY(tty))
++		return;
++
++	mutex_lock(&tty_mutex);
++
++	if (tty->count != 1) {
++		mutex_unlock(&tty_mutex);
++		return;
 +	}
 +
-+	if (tp->irq)
-+		unbind_from_irqhandler(tp->irq, tp);
++	/* Prevent other threads from re-opening this tty. */
++	set_bit(TTY_CLOSING, &tty->flags);
++	mutex_unlock(&tty_mutex);
 +
-+	tp->irq = 0;
++	tty->closing = 1;
++	tty_wait_until_sent(tty, 0);
++	if (DRV(tty->driver)->flush_buffer != NULL)
++		DRV(tty->driver)->flush_buffer(tty);
++	if (tty->ldisc.flush_buffer != NULL)
++		tty->ldisc.flush_buffer(tty);
++	tty->closing = 0;
++	spin_lock_irqsave(&xencons_lock, flags);
++	xencons_tty = NULL;
++	spin_unlock_irqrestore(&xencons_lock, flags);
 +}
 +
++static struct tty_operations xencons_ops = {
++	.open = xencons_open,
++	.close = xencons_close,
++	.write = xencons_write,
++	.write_room = xencons_write_room,
++	.put_char = xencons_put_char,
++	.flush_chars = xencons_flush_chars,
++	.chars_in_buffer = xencons_chars_in_buffer,
++	.send_xchar = xencons_send_xchar,
++	.flush_buffer = xencons_flush_buffer,
++	.throttle = xencons_throttle,
++	.unthrottle = xencons_unthrottle,
++	.wait_until_sent = xencons_wait_until_sent,
++};
 +
-+static int talk_to_backend(struct xenbus_device *dev,
-+                           struct tpm_private *tp)
++static int __init xencons_init(void)
 +{
-+	const char *message = NULL;
-+	int err;
-+	struct xenbus_transaction xbt;
++	int rc;
 +
-+	err = setup_tpmring(dev, tp);
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "setting up ring");
-+		goto out;
-+	}
++	if (!is_running_on_xen())
++		return -ENODEV;
 +
-+again:
-+	err = xenbus_transaction_start(&xbt);
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "starting transaction");
-+		goto destroy_tpmring;
++	if (xc_mode == XC_OFF)
++		return 0;
++
++	if (!is_initial_xendomain()) {
++		rc = xencons_ring_init();
++		if (rc)
++			return rc;
 +	}
 +
-+	err = xenbus_printf(xbt, dev->nodename,
-+	                    "ring-ref","%u", tp->ring_ref);
-+	if (err) {
-+		message = "writing ring-ref";
-+		goto abort_transaction;
++	xencons_driver = alloc_tty_driver((xc_mode == XC_TTY) ?
++					  MAX_NR_CONSOLES : 1);
++	if (xencons_driver == NULL)
++		return -ENOMEM;
++
++	DRV(xencons_driver)->name            = "xencons";
++	DRV(xencons_driver)->major           = TTY_MAJOR;
++	DRV(xencons_driver)->type            = TTY_DRIVER_TYPE_SERIAL;
++	DRV(xencons_driver)->subtype         = SERIAL_TYPE_NORMAL;
++	DRV(xencons_driver)->init_termios    = tty_std_termios;
++	DRV(xencons_driver)->flags           =
++		TTY_DRIVER_REAL_RAW |
++		TTY_DRIVER_RESET_TERMIOS;
++	DRV(xencons_driver)->termios         = xencons_termios;
++	DRV(xencons_driver)->termios_locked  = xencons_termios_locked;
++
++	switch (xc_mode) {
++	case XC_XVC:
++		DRV(xencons_driver)->name        = "xvc";
++		DRV(xencons_driver)->major       = XEN_XVC_MAJOR;
++		DRV(xencons_driver)->minor_start = XEN_XVC_MINOR;
++		DRV(xencons_driver)->name_base   = xc_num;
++		break;
++	case XC_SERIAL:
++		DRV(xencons_driver)->name        = "ttyS";
++		DRV(xencons_driver)->minor_start = 64 + xc_num;
++		DRV(xencons_driver)->name_base   = xc_num;
++		break;
++	default:
++		DRV(xencons_driver)->name        = "tty";
++		DRV(xencons_driver)->minor_start = 1;
++		DRV(xencons_driver)->name_base   = 1;
++		break;
 +	}
 +
-+	err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
-+			    irq_to_evtchn_port(tp->irq));
-+	if (err) {
-+		message = "writing event-channel";
-+		goto abort_transaction;
++	tty_set_operations(xencons_driver, &xencons_ops);
++
++	if ((rc = tty_register_driver(DRV(xencons_driver))) != 0) {
++		printk("WARNING: Failed to register Xen virtual "
++		       "console driver as '%s%d'\n",
++		       DRV(xencons_driver)->name,
++		       DRV(xencons_driver)->name_base);
++		put_tty_driver(xencons_driver);
++		xencons_driver = NULL;
++		return rc;
 +	}
 +
-+	err = xenbus_transaction_end(xbt, 0);
-+	if (err == -EAGAIN)
-+		goto again;
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "completing transaction");
-+		goto destroy_tpmring;
++	if (is_initial_xendomain()) {
++		xencons_priv_irq = bind_virq_to_irqhandler(
++			VIRQ_CONSOLE,
++			0,
++			xencons_priv_interrupt,
++			0,
++			"console",
++			NULL);
++		BUG_ON(xencons_priv_irq < 0);
 +	}
 +
-+	xenbus_switch_state(dev, XenbusStateConnected);
++	printk("Xen virtual console successfully installed as %s%d\n",
++	       DRV(xencons_driver)->name, xc_num);
 +
 +	return 0;
-+
-+abort_transaction:
-+	xenbus_transaction_end(xbt, 1);
-+	if (message)
-+		xenbus_dev_error(dev, err, "%s", message);
-+destroy_tpmring:
-+	destroy_tpmring(tp);
-+out:
-+	return err;
 +}
 +
-+/**
-+ * Callback received when the backend's state changes.
++module_init(xencons_init);
++
++MODULE_LICENSE("Dual BSD/GPL");
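
[Note on the file above: the switch over xc_mode in xencons_init() decides which node the console poses as. XC_XVC registers the dedicated xvc major/minor, XC_SERIAL claims ttyS<n>, and the default case takes over tty1 while DUMMY_TTY() turns every other minor into a no-op. The selection itself happens outside this hunk; assuming the usual xencons= boot parameter this driver family parses (an assumption, not shown in this patch), the mapping corresponds to:

	xencons=xvc	# /dev/xvc0, keeps clear of real serial ports and VTs
	xencons=ttyS	# pose as /dev/ttyS0
	xencons=tty	# take over /dev/tty1; other minors hit DUMMY_TTY()
	xencons=off	# XC_OFF: xencons_init() returns before registering
]
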
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/console/xencons_ring.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/console/xencons_ring.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,143 @@
++/* 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
 + */
-+static void backend_changed(struct xenbus_device *dev,
-+			    enum xenbus_state backend_state)
-+{
-+	struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
-+	DPRINTK("\n");
 +
-+	switch (backend_state) {
-+	case XenbusStateInitialising:
-+	case XenbusStateInitWait:
-+	case XenbusStateInitialised:
-+	case XenbusStateUnknown:
-+		break;
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/errno.h>
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <linux/tty.h>
++#include <linux/tty_flip.h>
++#include <linux/serial.h>
++#include <linux/major.h>
++#include <linux/ptrace.h>
++#include <linux/ioport.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
 +
-+	case XenbusStateConnected:
-+		tpmif_set_connected_state(tp, 1);
-+		break;
++#include <asm/hypervisor.h>
++#include <xen/evtchn.h>
++#include <xen/xencons.h>
++#include <linux/wait.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++#include <linux/err.h>
++#include <xen/interface/io/console.h>
++
++static int xencons_irq;
 +
-+	case XenbusStateClosing:
-+		tpmif_set_connected_state(tp, 0);
-+		xenbus_frontend_closed(dev);
-+		break;
++static inline struct xencons_interface *xencons_interface(void)
++{
++	return mfn_to_virt(xen_start_info->console.domU.mfn);
++}
 +
-+	case XenbusStateClosed:
-+		tpmif_set_connected_state(tp, 0);
-+		if (tp->is_suspended == 0)
-+			device_unregister(&dev->dev);
-+		xenbus_frontend_closed(dev);
-+		break;
-+	}
++static inline void notify_daemon(void)
++{
++	/* Use evtchn: this is called early, before irq is set up. */
++	notify_remote_via_evtchn(xen_start_info->console.domU.evtchn);
 +}
 +
-+static int tpmfront_probe(struct xenbus_device *dev,
-+                          const struct xenbus_device_id *id)
++int xencons_ring_send(const char *data, unsigned len)
 +{
-+	int err;
-+	int handle;
-+	struct tpm_private *tp = tpm_private_get();
++	int sent = 0;
++	struct xencons_interface *intf = xencons_interface();
++	XENCONS_RING_IDX cons, prod;
 +
-+	if (!tp)
-+		return -ENOMEM;
++	cons = intf->out_cons;
++	prod = intf->out_prod;
++	mb();
++	BUG_ON((prod - cons) > sizeof(intf->out));
 +
-+	tp->chip = init_vtpm(&dev->dev, tp);
-+	if (IS_ERR(tp->chip))
-+		return PTR_ERR(tp->chip);
++	while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
++		intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
 +
-+	err = xenbus_scanf(XBT_NIL, dev->nodename,
-+	                   "handle", "%i", &handle);
-+	if (XENBUS_EXIST_ERR(err))
-+		return err;
++	wmb();
++	intf->out_prod = prod;
 +
-+	if (err < 0) {
-+		xenbus_dev_fatal(dev,err,"reading virtual-device");
-+		return err;
-+	}
++	notify_daemon();
 +
-+	tp->dev = dev;
++	return sent;
++}
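
[Note on the hunk above: xencons_ring_send() is the standard single-producer shared-ring sequence. Snapshot both indices, mb() before trusting them, fill only genuinely free slots, wmb() so the bytes are globally visible before the new out_prod is published, then kick the peer through the event channel. A userspace sketch of the producer side, with C11 fences standing in for the kernel barriers (the ring layout is a simplified stand-in for struct xencons_interface):

#include <stdatomic.h>
#include <stddef.h>

#define RING_SIZE	64			/* power of two */
#define MASK(i)		((i) & (RING_SIZE - 1))

struct ring {
	char out[RING_SIZE];
	_Atomic unsigned int out_cons;		/* advanced by the consumer */
	_Atomic unsigned int out_prod;		/* advanced by the producer */
};

static size_t ring_send(struct ring *r, const char *data, size_t len)
{
	unsigned int cons, prod;
	size_t sent = 0;

	cons = atomic_load_explicit(&r->out_cons, memory_order_relaxed);
	prod = atomic_load_explicit(&r->out_prod, memory_order_relaxed);
	atomic_thread_fence(memory_order_acquire);	/* ~mb() */

	/* Fill only slots the consumer has already drained. */
	while (sent < len && prod - cons < RING_SIZE)
		r->out[MASK(prod++)] = data[sent++];

	/* ~wmb(): payload must be visible before the new producer index. */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&r->out_prod, prod, memory_order_relaxed);

	/* The real driver now kicks the peer (notify_daemon() above). */
	return sent;
}

int main(void)
{
	static struct ring r;
	return ring_send(&r, "ping", 4) == 4 ? 0 : 1;
}
]
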
 +
-+	err = talk_to_backend(dev, tp);
-+	if (err) {
-+		tpm_private_put();
-+		return err;
++static irqreturn_t handle_input(int irq, void *unused, struct pt_regs *regs)
++{
++	struct xencons_interface *intf = xencons_interface();
++	XENCONS_RING_IDX cons, prod;
++
++	cons = intf->in_cons;
++	prod = intf->in_prod;
++	mb();
++	BUG_ON((prod - cons) > sizeof(intf->in));
++
++	while (cons != prod) {
++		xencons_rx(intf->in+MASK_XENCONS_IDX(cons,intf->in), 1, regs);
++		cons++;
 +	}
 +
-+	return 0;
-+}
++	mb();
++	intf->in_cons = cons;
 +
++	notify_daemon();
 +
-+static int tpmfront_remove(struct xenbus_device *dev)
-+{
-+	struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
-+	destroy_tpmring(tp);
-+	cleanup_vtpm(&dev->dev);
-+	return 0;
++	xencons_tx();
++
++	return IRQ_HANDLED;
 +}
 +
-+static int tpmfront_suspend(struct xenbus_device *dev)
++int xencons_ring_init(void)
 +{
-+	struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
-+	u32 ctr;
++	int irq;
 +
-+	/* Take the lock, preventing any application from sending. */
-+	mutex_lock(&suspend_lock);
-+	tp->is_suspended = 1;
++	if (xencons_irq)
++		unbind_from_irqhandler(xencons_irq, NULL);
++	xencons_irq = 0;
 +
-+	for (ctr = 0; atomic_read(&tp->tx_busy); ctr++) {
-+		if ((ctr % 10) == 0)
-+			printk("TPM-FE [INFO]: Waiting for outstanding "
-+			       "request.\n");
-+		/* Wait for a request to be responded to. */
-+		interruptible_sleep_on_timeout(&tp->wait_q, 100);
++	if (!is_running_on_xen() ||
++	    is_initial_xendomain() ||
++	    !xen_start_info->console.domU.evtchn)
++		return -ENODEV;
++
++	irq = bind_caller_port_to_irqhandler(
++		xen_start_info->console.domU.evtchn,
++		handle_input, 0, "xencons", NULL);
++	if (irq < 0) {
++		printk(KERN_ERR "XEN console request irq failed %i\n", irq);
++		return irq;
 +	}
 +
-+	return 0;
-+}
++	xencons_irq = irq;
++
++	/* In case we have in-flight data after save/restore... */
++	notify_daemon();
 +
-+static int tpmfront_suspend_finish(struct tpm_private *tp)
-+{
-+	tp->is_suspended = 0;
-+	/* Allow applications to send again. */
-+	mutex_unlock(&suspend_lock);
 +	return 0;
 +}
 +
-+static int tpmfront_suspend_cancel(struct xenbus_device *dev)
++void xencons_resume(void)
 +{
-+	struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
-+	return tpmfront_suspend_finish(tp);
++	(void)xencons_ring_init();
 +}
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/core/Makefile
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/core/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,14 @@
++#
++# Makefile for the linux kernel.
++#
 +
-+static int tpmfront_resume(struct xenbus_device *dev)
++obj-y := evtchn.o gnttab.o features.o reboot.o machine_reboot.o firmware.o
++
++obj-$(CONFIG_PCI)		+= pci.o
++obj-$(CONFIG_PROC_FS)		+= xen_proc.o
++obj-$(CONFIG_SYS_HYPERVISOR)	+= hypervisor_sysfs.o
++obj-$(CONFIG_HOTPLUG_CPU)	+= cpu_hotplug.o
++obj-$(CONFIG_XEN_SYSFS)		+= xen_sysfs.o
++obj-$(CONFIG_XEN_SMPBOOT)	+= smpboot.o
++obj-$(CONFIG_KEXEC)		+= machine_kexec.o
++obj-$(CONFIG_XEN_XENCOMM)	+= xencomm.o
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/core/cpu_hotplug.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/core/cpu_hotplug.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,173 @@
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/notifier.h>
++#include <linux/cpu.h>
++#include <xen/cpu_hotplug.h>
++#include <xen/xenbus.h>
++
++/*
++ * Set of CPUs that remote admin software will allow us to bring online.
++ * Notified to us via xenbus.
++ */
++static cpumask_t xenbus_allowed_cpumask;
++
++/* Set of CPUs that local admin will allow us to bring online. */
++static cpumask_t local_allowed_cpumask = CPU_MASK_ALL;
++
++static int local_cpu_hotplug_request(void)
 +{
-+	struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
-+	destroy_tpmring(tp);
-+	return talk_to_backend(dev, tp);
++	/*
++	 * We assume a CPU hotplug request comes from local admin if it is made
++	 * via a userspace process (i.e., one with a real mm_struct).
++	 */
++	return (current->mm != NULL);
 +}
 +
-+static int tpmif_connect(struct xenbus_device *dev,
-+                         struct tpm_private *tp,
-+                         domid_t domid)
++static void vcpu_hotplug(unsigned int cpu)
 +{
 +	int err;
++	char dir[32], state[32];
 +
-+	tp->backend_id = domid;
++	if ((cpu >= NR_CPUS) || !cpu_possible(cpu))
++		return;
 +
-+	err = bind_listening_port_to_irqhandler(
-+		domid, tpmif_int, SA_SAMPLE_RANDOM, "tpmif", tp);
-+	if (err <= 0) {
-+		WPRINTK("bind_listening_port_to_irqhandler failed "
-+			"(err=%d)\n", err);
-+		return err;
++	sprintf(dir, "cpu/%u", cpu);
++	err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state);
++	if (err != 1) {
++		printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
++		return;
 +	}
-+	tp->irq = err;
-+
-+	return 0;
-+}
-+
-+static struct xenbus_device_id tpmfront_ids[] = {
-+	{ "vtpm" },
-+	{ "" }
-+};
-+
-+static struct xenbus_driver tpmfront = {
-+	.name = "vtpm",
-+	.owner = THIS_MODULE,
-+	.ids = tpmfront_ids,
-+	.probe = tpmfront_probe,
-+	.remove =  tpmfront_remove,
-+	.resume = tpmfront_resume,
-+	.otherend_changed = backend_changed,
-+	.suspend = tpmfront_suspend,
-+	.suspend_cancel = tpmfront_suspend_cancel,
-+};
 +
-+static void __init init_tpm_xenbus(void)
-+{
-+	xenbus_register_frontend(&tpmfront);
++	if (strcmp(state, "online") == 0) {
++		cpu_set(cpu, xenbus_allowed_cpumask);
++		(void)cpu_up(cpu);
++	} else if (strcmp(state, "offline") == 0) {
++		cpu_clear(cpu, xenbus_allowed_cpumask);
++		(void)cpu_down(cpu);
++	} else {
++		printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n",
++		       state, cpu);
++	}
 +}
 +
-+static int tpmif_allocate_tx_buffers(struct tpm_private *tp)
++static void handle_vcpu_hotplug_event(
++	struct xenbus_watch *watch, const char **vec, unsigned int len)
 +{
-+	unsigned int i;
++	unsigned int cpu;
++	char *cpustr;
++	const char *node = vec[XS_WATCH_PATH];
 +
-+	for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
-+		tp->tx_buffers[i] = tx_buffer_alloc();
-+		if (!tp->tx_buffers[i]) {
-+			tpmif_free_tx_buffers(tp);
-+			return -ENOMEM;
-+		}
++	if ((cpustr = strstr(node, "cpu/")) != NULL) {
++		sscanf(cpustr, "cpu/%u", &cpu);
++		vcpu_hotplug(cpu);
 +	}
-+	return 0;
 +}
 +
-+static void tpmif_free_tx_buffers(struct tpm_private *tp)
++static int smpboot_cpu_notify(struct notifier_block *notifier,
++			      unsigned long action, void *hcpu)
 +{
-+	unsigned int i;
++	unsigned int cpu = (long)hcpu;
 +
-+	for (i = 0; i < TPMIF_TX_RING_SIZE; i++)
-+		tx_buffer_free(tp->tx_buffers[i]);
++	/*
++	 * We do this in a callback notifier rather than __cpu_disable()
++	 * because local_cpu_hotplug_request() does not work in the latter
++	 * as it's always executed from within a stopmachine kthread.
++	 */
++	if ((action == CPU_DOWN_PREPARE) && local_cpu_hotplug_request())
++		cpu_clear(cpu, local_allowed_cpumask);
++
++	return NOTIFY_OK;
 +}
 +
-+static void tpmif_rx_action(unsigned long priv)
++static int setup_cpu_watcher(struct notifier_block *notifier,
++			      unsigned long event, void *data)
 +{
-+	struct tpm_private *tp = (struct tpm_private *)priv;
-+	int i = 0;
-+	unsigned int received;
-+	unsigned int offset = 0;
-+	u8 *buffer;
-+	tpmif_tx_request_t *tx = &tp->tx->ring[i].req;
-+
-+	atomic_set(&tp->tx_busy, 0);
-+	wake_up_interruptible(&tp->wait_q);
-+
-+	received = tx->size;
++	unsigned int i;
 +
-+	buffer = kmalloc(received, GFP_ATOMIC);
-+	if (!buffer)
-+		return;
++	static struct xenbus_watch cpu_watch = {
++		.node = "cpu",
++		.callback = handle_vcpu_hotplug_event,
++		.flags = XBWF_new_thread };
++	(void)register_xenbus_watch(&cpu_watch);
 +
-+	for (i = 0; i < TPMIF_TX_RING_SIZE && offset < received; i++) {
-+		struct tx_buffer *txb = tp->tx_buffers[i];
-+		tpmif_tx_request_t *tx;
-+		unsigned int tocopy;
++	if (!is_initial_xendomain()) {
++		for_each_possible_cpu(i)
++			vcpu_hotplug(i);
++		printk(KERN_INFO "Brought up %ld CPUs\n",
++		       (long)num_online_cpus());
++	}
 +
-+		tx = &tp->tx->ring[i].req;
-+		tocopy = tx->size;
-+		if (tocopy > PAGE_SIZE)
-+			tocopy = PAGE_SIZE;
++	return NOTIFY_DONE;
++}
 +
-+		memcpy(&buffer[offset], txb->data, tocopy);
++static int __init setup_vcpu_hotplug_event(void)
++{
++	static struct notifier_block hotplug_cpu = {
++		.notifier_call = smpboot_cpu_notify };
++	static struct notifier_block xsn_cpu = {
++		.notifier_call = setup_cpu_watcher };
 +
-+		gnttab_release_grant_reference(&gref_head, tx->ref);
++	if (!is_running_on_xen())
++		return -ENODEV;
 +
-+		offset += tocopy;
-+	}
++	register_cpu_notifier(&hotplug_cpu);
++	register_xenstore_notifier(&xsn_cpu);
 +
-+	vtpm_vd_recv(tp->chip, buffer, received, tp->tx_remember);
-+	kfree(buffer);
++	return 0;
 +}
 +
++arch_initcall(setup_vcpu_hotplug_event);
 +
-+static irqreturn_t tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs)
++int smp_suspend(void)
 +{
-+	struct tpm_private *tp = tpm_priv;
-+	unsigned long flags;
++	unsigned int cpu;
++	int err;
 +
-+	spin_lock_irqsave(&tp->tx_lock, flags);
-+	tpmif_rx_tasklet.data = (unsigned long)tp;
-+	tasklet_schedule(&tpmif_rx_tasklet);
-+	spin_unlock_irqrestore(&tp->tx_lock, flags);
++	for_each_online_cpu(cpu) {
++		if (cpu == 0)
++			continue;
++		err = cpu_down(cpu);
++		if (err) {
++			printk(KERN_CRIT "Failed to take all CPUs "
++			       "down: %d.\n", err);
++			for_each_possible_cpu(cpu)
++				vcpu_hotplug(cpu);
++			return err;
++		}
++	}
 +
-+	return IRQ_HANDLED;
++	return 0;
 +}
 +
-+
-+static int tpm_xmit(struct tpm_private *tp,
-+                    const u8 * buf, size_t count, int isuserbuffer,
-+                    void *remember)
++void smp_resume(void)
 +{
-+	tpmif_tx_request_t *tx;
-+	TPMIF_RING_IDX i;
-+	unsigned int offset = 0;
++	unsigned int cpu;
 +
-+	spin_lock_irq(&tp->tx_lock);
++	for_each_possible_cpu(cpu)
++		vcpu_hotplug(cpu);
++}
 +
-+	if (unlikely(atomic_read(&tp->tx_busy))) {
-+		printk("tpm_xmit: There's an outstanding request/response "
-+		       "on the way!\n");
-+		spin_unlock_irq(&tp->tx_lock);
-+		return -EBUSY;
-+	}
++int cpu_up_check(unsigned int cpu)
++{
++	int rc = 0;
 +
-+	if (tp->is_connected != 1) {
-+		spin_unlock_irq(&tp->tx_lock);
-+		return -EIO;
++	if (local_cpu_hotplug_request()) {
++		cpu_set(cpu, local_allowed_cpumask);
++		if (!cpu_isset(cpu, xenbus_allowed_cpumask)) {
++			printk("%s: attempt to bring up CPU %u disallowed by "
++			       "remote admin.\n", __FUNCTION__, cpu);
++			rc = -EBUSY;
++		}
++	} else if (!cpu_isset(cpu, local_allowed_cpumask) ||
++		   !cpu_isset(cpu, xenbus_allowed_cpumask)) {
++		rc = -EBUSY;
 +	}
 +
-+	for (i = 0; count > 0 && i < TPMIF_TX_RING_SIZE; i++) {
-+		struct tx_buffer *txb = tp->tx_buffers[i];
-+		int copied;
++	return rc;
++}
 +
-+		if (!txb) {
-+			DPRINTK("txb (i=%d) is NULL. buffers initilized?\n"
-+				"Not transmitting anything!\n", i);
-+			spin_unlock_irq(&tp->tx_lock);
-+			return -EFAULT;
-+		}
++void init_xenbus_allowed_cpumask(void)
++{
++	xenbus_allowed_cpumask = cpu_present_map;
++}
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/core/evtchn.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/core/evtchn.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1129 @@
++/******************************************************************************
++ * evtchn.c
++ * 
++ * Communication via Xen event channels.
++ * 
++ * Copyright (c) 2002-2005, K A Fraser
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
 +
-+		copied = tx_buffer_copy(txb, &buf[offset], count,
-+		                        isuserbuffer);
-+		if (copied < 0) {
-+			/* An error occurred */
-+			spin_unlock_irq(&tp->tx_lock);
-+			return copied;
-+		}
-+		count -= copied;
-+		offset += copied;
++#include <linux/module.h>
++#include <linux/irq.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++#include <linux/kernel_stat.h>
++#include <linux/version.h>
++#include <asm/atomic.h>
++#include <asm/system.h>
++#include <asm/ptrace.h>
++#include <asm/synch_bitops.h>
++#include <xen/evtchn.h>
++#include <xen/interface/event_channel.h>
++#include <xen/interface/physdev.h>
++#include <asm/hypervisor.h>
++#include <linux/mc146818rtc.h> /* RTC_IRQ */
 +
-+		tx = &tp->tx->ring[i].req;
-+		tx->addr = virt_to_machine(txb->data);
-+		tx->size = txb->len;
++/*
++ * This lock protects updates to the following mapping and reference-count
++ * arrays. The lock does not need to be acquired to read the mapping tables.
++ */
++static DEFINE_SPINLOCK(irq_mapping_update_lock);
 +
-+		DPRINTK("First 4 characters sent by TPM-FE are "
-+			"0x%02x 0x%02x 0x%02x 0x%02x\n",
-+		        txb->data[0],txb->data[1],txb->data[2],txb->data[3]);
++/* IRQ <-> event-channel mappings. */
++static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
++	[0 ...  NR_EVENT_CHANNELS-1] = -1 };
 +
-+		/* Get the granttable reference for this page. */
-+		tx->ref = gnttab_claim_grant_reference(&gref_head);
-+		if (tx->ref == -ENOSPC) {
-+			spin_unlock_irq(&tp->tx_lock);
-+			DPRINTK("Grant table claim reference failed in "
-+				"func:%s line:%d file:%s\n",
-+				__FUNCTION__, __LINE__, __FILE__);
-+			return -ENOSPC;
-+		}
-+		gnttab_grant_foreign_access_ref(tx->ref,
-+						tp->backend_id,
-+						virt_to_mfn(txb->data),
-+						0 /*RW*/);
-+		wmb();
-+	}
++/* Packed IRQ information: binding type, sub-type index, and event channel. */
++static u32 irq_info[NR_IRQS];
 +
-+	atomic_set(&tp->tx_busy, 1);
-+	tp->tx_remember = remember;
++/* Binding types. */
++enum {
++	IRQT_UNBOUND,
++	IRQT_PIRQ,
++	IRQT_VIRQ,
++	IRQT_IPI,
++	IRQT_LOCAL_PORT,
++	IRQT_CALLER_PORT,
++	_IRQT_COUNT
++};
 +
-+	mb();
++#define _IRQT_BITS 4
++#define _EVTCHN_BITS 12
++#define _INDEX_BITS (32 - _IRQT_BITS - _EVTCHN_BITS)
 +
-+	notify_remote_via_irq(tp->irq);
++/* Constructor for packed IRQ information. */
++static inline u32 mk_irq_info(u32 type, u32 index, u32 evtchn)
++{
++	BUILD_BUG_ON(_IRQT_COUNT > (1U << _IRQT_BITS));
 +
-+	spin_unlock_irq(&tp->tx_lock);
-+	return offset;
-+}
++	BUILD_BUG_ON(NR_PIRQS > (1U << _INDEX_BITS));
++	BUILD_BUG_ON(NR_VIRQS > (1U << _INDEX_BITS));
++	BUILD_BUG_ON(NR_IPIS > (1U << _INDEX_BITS));
++	BUG_ON(index >> _INDEX_BITS);
 +
++	BUILD_BUG_ON(NR_EVENT_CHANNELS > (1U << _EVTCHN_BITS));
 +
-+static void tpmif_notify_upperlayer(struct tpm_private *tp)
-+{
-+	/* Notify upper layer about the state of the connection to the BE. */
-+	vtpm_vd_status(tp->chip, (tp->is_connected
-+				  ? TPM_VD_STATUS_CONNECTED
-+				  : TPM_VD_STATUS_DISCONNECTED));
++	return ((type << (32 - _IRQT_BITS)) | (index << _EVTCHN_BITS) | evtchn);
 +}
 +
++/* Convenient shorthand for packed representation of an unbound IRQ. */
++#define IRQ_UNBOUND	mk_irq_info(IRQT_UNBOUND, 0, 0)
 +
-+static void tpmif_set_connected_state(struct tpm_private *tp, u8 is_connected)
-+{
-+	/*
-+	 * Don't notify upper layer if we are in suspend mode and
-+	 * should disconnect - assumption is that we will resume
-+	 * The mutex keeps apps from sending.
-+	 */
-+	if (is_connected == 0 && tp->is_suspended == 1)
-+		return;
-+
-+	/*
-+	 * Unlock the mutex if we are connected again
-+	 * after being suspended - now resuming.
-+	 * This also removes the suspend state.
-+	 */
-+	if (is_connected == 1 && tp->is_suspended == 1)
-+		tpmfront_suspend_finish(tp);
++/*
++ * Accessors for packed IRQ information.
++ */
 +
-+	if (is_connected != tp->is_connected) {
-+		tp->is_connected = is_connected;
-+		tpmif_notify_upperlayer(tp);
-+	}
++static inline unsigned int evtchn_from_irq(int irq)
++{
++	return irq_info[irq] & ((1U << _EVTCHN_BITS) - 1);
 +}
 +
++static inline unsigned int index_from_irq(int irq)
++{
++	return (irq_info[irq] >> _EVTCHN_BITS) & ((1U << _INDEX_BITS) - 1);
++}
 +
++static inline unsigned int type_from_irq(int irq)
++{
++	return irq_info[irq] >> (32 - _IRQT_BITS);
++}
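
[Note on the hunk above: mk_irq_info() packs three fields into one u32. The binding type sits in the top _IRQT_BITS (4) bits, the sub-type index in the middle _INDEX_BITS (16) bits, and the event channel in the low _EVTCHN_BITS (12) bits; the BUILD_BUG_ON()s prove each namespace fits its field. A round-trip demo of the constructor and the three accessors (values illustrative):

#include <assert.h>
#include <stdio.h>

#define _IRQT_BITS	4
#define _EVTCHN_BITS	12
#define _INDEX_BITS	(32 - _IRQT_BITS - _EVTCHN_BITS)

enum { IRQT_UNBOUND, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI,
       IRQT_LOCAL_PORT, IRQT_CALLER_PORT };

static unsigned int mk_irq_info(unsigned int type, unsigned int index,
				unsigned int evtchn)
{
	return (type << (32 - _IRQT_BITS)) | (index << _EVTCHN_BITS) | evtchn;
}

int main(void)
{
	/* e.g. a VIRQ binding: sub-type index 2, event channel 37 */
	unsigned int info = mk_irq_info(IRQT_VIRQ, 2, 37);

	unsigned int evtchn = info & ((1U << _EVTCHN_BITS) - 1);
	unsigned int index  = (info >> _EVTCHN_BITS) & ((1U << _INDEX_BITS) - 1);
	unsigned int type   = info >> (32 - _IRQT_BITS);

	assert(type == IRQT_VIRQ && index == 2 && evtchn == 37);
	printf("info=%#010x type=%u index=%u evtchn=%u\n",
	       info, type, index, evtchn);
	return 0;
}
]
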
 +
-+/* =================================================================
-+ * Initialization function.
-+ * =================================================================
-+ */
++/* IRQ <-> VIRQ mapping. */
++DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
 +
++/* IRQ <-> IPI mapping. */
++#ifndef NR_IPIS
++#define NR_IPIS 1
++#endif
++DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]) = {[0 ... NR_IPIS-1] = -1};
 +
-+static int __init tpmif_init(void)
-+{
-+	struct tpm_private *tp;
++/* Reference counts for bindings to IRQs. */
++static int irq_bindcount[NR_IRQS];
 +
-+	if (is_initial_xendomain())
-+		return -EPERM;
++/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
++static DECLARE_BITMAP(pirq_needs_eoi, NR_PIRQS);
 +
-+	tp = tpm_private_get();
-+	if (!tp)
-+		return -ENOMEM;
++#ifdef CONFIG_SMP
 +
-+	IPRINTK("Initialising the vTPM driver.\n");
-+	if (gnttab_alloc_grant_references(TPMIF_TX_RING_SIZE,
-+					  &gref_head) < 0) {
-+		tpm_private_put();
-+		return -EFAULT;
-+	}
++static u8 cpu_evtchn[NR_EVENT_CHANNELS];
++static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
 +
-+	init_tpm_xenbus();
-+	return 0;
++static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
++					   unsigned int idx)
++{
++	return (sh->evtchn_pending[idx] &
++		cpu_evtchn_mask[cpu][idx] &
++		~sh->evtchn_mask[idx]);
 +}
 +
++static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
++{
++	shared_info_t *s = HYPERVISOR_shared_info;
++	int irq = evtchn_to_irq[chn];
 +
-+module_init(tpmif_init);
++	BUG_ON(!test_bit(chn, s->evtchn_mask));
 +
-+MODULE_LICENSE("Dual BSD/GPL");
-diff -Nurp pristine-linux-2.6.18.2/drivers/char/tty_io.c tmp-linux-2.6-xen.patch/drivers/char/tty_io.c
---- pristine-linux-2.6.18.2/drivers/char/tty_io.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/char/tty_io.c	2007-07-30 16:35:11.000000000 +0200
-@@ -130,6 +130,8 @@ LIST_HEAD(tty_drivers);			/* linked list
-    vt.c for deeply disgusting hack reasons */
- DEFINE_MUTEX(tty_mutex);
- 
-+int console_use_vt = 1;
++	if (irq != -1)
++		set_native_irq_info(irq, cpumask_of_cpu(cpu));
 +
- #ifdef CONFIG_UNIX98_PTYS
- extern struct tty_driver *ptm_driver;	/* Unix98 pty masters; for /dev/ptmx */
- extern int pty_limit;		/* Config limit on Unix98 ptys */
-@@ -2483,7 +2485,7 @@ retry_open:
- 		goto got_driver;
- 	}
- #ifdef CONFIG_VT
--	if (device == MKDEV(TTY_MAJOR,0)) {
-+	if (console_use_vt && (device == MKDEV(TTY_MAJOR,0))) {
- 		extern struct tty_driver *console_driver;
- 		driver = console_driver;
- 		index = fg_console;
-@@ -3909,6 +3911,8 @@ static int __init tty_init(void)
- #endif
- 
- #ifdef CONFIG_VT
-+	if (!console_use_vt)
-+		goto out_vt;
- 	cdev_init(&vc0_cdev, &console_fops);
- 	if (cdev_add(&vc0_cdev, MKDEV(TTY_MAJOR, 0), 1) ||
- 	    register_chrdev_region(MKDEV(TTY_MAJOR, 0), 1, "/dev/vc/0") < 0)
-@@ -3916,6 +3920,7 @@ static int __init tty_init(void)
- 	class_device_create(tty_class, NULL, MKDEV(TTY_MAJOR, 0), NULL, "tty0");
- 
- 	vty_init();
-+ out_vt:
- #endif
- 	return 0;
- }
-diff -Nurp pristine-linux-2.6.18.2/drivers/firmware/Kconfig tmp-linux-2.6-xen.patch/drivers/firmware/Kconfig
---- pristine-linux-2.6.18.2/drivers/firmware/Kconfig	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/firmware/Kconfig	2007-07-30 16:35:11.000000000 +0200
-@@ -8,6 +8,7 @@ menu "Firmware Drivers"
- config EDD
- 	tristate "BIOS Enhanced Disk Drive calls determine boot disk"
- 	depends on !IA64
-+	depends on !XEN
- 	help
- 	  Say Y or M here if you want to enable BIOS Enhanced Disk Drive
- 	  Services real mode BIOS calls to determine which disk
-diff -Nurp pristine-linux-2.6.18.2/drivers/ide/ide-lib.c tmp-linux-2.6-xen.patch/drivers/ide/ide-lib.c
---- pristine-linux-2.6.18.2/drivers/ide/ide-lib.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/ide/ide-lib.c	2007-10-14 01:51:15.000000000 +0200
-@@ -408,10 +408,10 @@ void ide_toggle_bounce(ide_drive_t *driv
- {
- 	u64 addr = BLK_BOUNCE_HIGH;	/* dma64_addr_t */
- 
--	if (!PCI_DMA_BUS_IS_PHYS) {
--		addr = BLK_BOUNCE_ANY;
--	} else if (on && drive->media == ide_disk) {
--		if (HWIF(drive)->pci_dev)
-+	if (on && drive->media == ide_disk) {
-+		if (!PCI_DMA_BUS_IS_PHYS)
-+			addr = BLK_BOUNCE_ANY;
-+		else if (HWIF(drive)->pci_dev)
- 			addr = HWIF(drive)->pci_dev->dma_mask;
- 	}
- 
-diff -Nurp pristine-linux-2.6.18.2/drivers/Makefile tmp-linux-2.6-xen.patch/drivers/Makefile
---- pristine-linux-2.6.18.2/drivers/Makefile	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/Makefile	2007-07-30 16:35:11.000000000 +0200
-@@ -31,6 +31,7 @@ obj-y				+= base/ block/ misc/ mfd/ net/
- obj-$(CONFIG_NUBUS)		+= nubus/
- obj-$(CONFIG_ATM)		+= atm/
- obj-$(CONFIG_PPC_PMAC)		+= macintosh/
-+obj-$(CONFIG_XEN)		+= xen/
- obj-$(CONFIG_IDE)		+= ide/
- obj-$(CONFIG_FC4)		+= fc4/
- obj-$(CONFIG_SCSI)		+= scsi/
-diff -Nurp pristine-linux-2.6.18.2/drivers/oprofile/buffer_sync.c tmp-linux-2.6-xen.patch/drivers/oprofile/buffer_sync.c
---- pristine-linux-2.6.18.2/drivers/oprofile/buffer_sync.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/oprofile/buffer_sync.c	2007-10-14 01:51:15.000000000 +0200
-@@ -6,6 +6,10 @@
-  *
-  * @author John Levon <levon at movementarian.org>
-  *
-+ * Modified by Aravind Menon for Xen
-+ * These modifications are:
-+ * Copyright (C) 2005 Hewlett-Packard Co.
-+ *
-  * This is the core of the buffer management. Each
-  * CPU buffer is processed and entered into the
-  * global event buffer. Such processing is necessary
-@@ -38,6 +42,7 @@ static cpumask_t marked_cpus = CPU_MASK_
- static DEFINE_SPINLOCK(task_mortuary);
- static void process_task_mortuary(void);
- 
-+static int cpu_current_domain[NR_CPUS];
- 
- /* Take ownership of the task struct and place it on the
-  * list for processing. Only after two full buffer syncs
-@@ -146,6 +151,11 @@ static void end_sync(void)
- int sync_start(void)
- {
- 	int err;
++	clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
++	set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
++	cpu_evtchn[chn] = cpu;
++}
++
++static void init_evtchn_cpu_bindings(void)
++{
 +	int i;
 +
-+	for (i = 0; i < NR_CPUS; i++) {
-+		cpu_current_domain[i] = COORDINATOR_DOMAIN;
-+	}
- 
- 	start_cpu_work();
- 
-@@ -275,15 +285,31 @@ static void add_cpu_switch(int i)
- 	last_cookie = INVALID_COOKIE;
- }
- 
--static void add_kernel_ctx_switch(unsigned int in_kernel)
-+static void add_cpu_mode_switch(unsigned int cpu_mode)
- {
- 	add_event_entry(ESCAPE_CODE);
--	if (in_kernel)
--		add_event_entry(KERNEL_ENTER_SWITCH_CODE); 
--	else
--		add_event_entry(KERNEL_EXIT_SWITCH_CODE); 
-+	switch (cpu_mode) {
-+	case CPU_MODE_USER:
-+		add_event_entry(USER_ENTER_SWITCH_CODE);
-+		break;
-+	case CPU_MODE_KERNEL:
-+		add_event_entry(KERNEL_ENTER_SWITCH_CODE);
-+		break;
-+	case CPU_MODE_XEN:
-+		add_event_entry(XEN_ENTER_SWITCH_CODE);
-+	  	break;
-+	default:
-+		break;
-+	}
- }
-- 
++	/* By default all event channels notify CPU#0. */
++	for (i = 0; i < NR_IRQS; i++)
++		set_native_irq_info(i, cpumask_of_cpu(0));
 +
-+static void add_domain_switch(unsigned long domain_id)
++	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
++	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
++}
++
++static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
 +{
-+	add_event_entry(ESCAPE_CODE);
-+	add_event_entry(DOMAIN_SWITCH_CODE);
-+	add_event_entry(domain_id);
++	return cpu_evtchn[evtchn];
 +}
 +
- static void
- add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
- {
-@@ -348,9 +374,9 @@ static int add_us_sample(struct mm_struc
-  * for later lookup from userspace.
-  */
- static int
--add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
-+add_sample(struct mm_struct * mm, struct op_sample * s, int cpu_mode)
- {
--	if (in_kernel) {
-+	if (cpu_mode >= CPU_MODE_KERNEL) {
- 		add_sample_entry(s->eip, s->event);
- 		return 1;
- 	} else if (mm) {
-@@ -496,15 +522,21 @@ void sync_buffer(int cpu)
- 	struct mm_struct *mm = NULL;
- 	struct task_struct * new;
- 	unsigned long cookie = 0;
--	int in_kernel = 1;
-+	int cpu_mode = 1;
- 	unsigned int i;
- 	sync_buffer_state state = sb_buffer_start;
- 	unsigned long available;
-+	int domain_switch = 0;
- 
- 	mutex_lock(&buffer_mutex);
-  
- 	add_cpu_switch(cpu);
- 
-+	/* We need to assign the first samples in this CPU buffer to the
-+	   same domain that we were processing at the last sync_buffer */
-+	if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
-+		add_domain_switch(cpu_current_domain[cpu]);
-+	}
- 	/* Remember, only we can modify tail_pos */
- 
- 	available = get_slots(cpu_buf);
-@@ -512,16 +544,18 @@ void sync_buffer(int cpu)
- 	for (i = 0; i < available; ++i) {
- 		struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos];
-  
--		if (is_code(s->eip)) {
--			if (s->event <= CPU_IS_KERNEL) {
--				/* kernel/userspace switch */
--				in_kernel = s->event;
-+		if (is_code(s->eip) && !domain_switch) {
-+			if (s->event <= CPU_MODE_XEN) {
-+				/* xen/kernel/userspace switch */
-+				cpu_mode = s->event;
- 				if (state == sb_buffer_start)
- 					state = sb_sample_start;
--				add_kernel_ctx_switch(s->event);
-+				add_cpu_mode_switch(s->event);
- 			} else if (s->event == CPU_TRACE_BEGIN) {
- 				state = sb_bt_start;
- 				add_trace_begin();
-+			} else if (s->event == CPU_DOMAIN_SWITCH) {
-+					domain_switch = 1;				
- 			} else {
- 				struct mm_struct * oldmm = mm;
- 
-@@ -535,11 +569,21 @@ void sync_buffer(int cpu)
- 				add_user_ctx_switch(new, cookie);
- 			}
- 		} else {
--			if (state >= sb_bt_start &&
--			    !add_sample(mm, s, in_kernel)) {
--				if (state == sb_bt_start) {
--					state = sb_bt_ignore;
--					atomic_inc(&oprofile_stats.bt_lost_no_mapping);
-+			if (domain_switch) {
-+				cpu_current_domain[cpu] = s->eip;
-+				add_domain_switch(s->eip);
-+				domain_switch = 0;
-+			} else {
-+				if (cpu_current_domain[cpu] !=
-+				    COORDINATOR_DOMAIN) {
-+					add_sample_entry(s->eip, s->event);
-+				}
-+				else  if (state >= sb_bt_start &&
-+				    !add_sample(mm, s, cpu_mode)) {
-+					if (state == sb_bt_start) {
-+						state = sb_bt_ignore;
-+						atomic_inc(&oprofile_stats.bt_lost_no_mapping);
-+					}
- 				}
- 			}
- 		}
-@@ -548,6 +592,11 @@ void sync_buffer(int cpu)
- 	}
- 	release_mm(mm);
- 
-+	/* We reset domain to COORDINATOR at each CPU switch */
-+	if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
-+		add_domain_switch(COORDINATOR_DOMAIN);
-+	}
++#else
 +
- 	mark_done(cpu);
- 
- 	mutex_unlock(&buffer_mutex);
-diff -Nurp pristine-linux-2.6.18.2/drivers/oprofile/cpu_buffer.c tmp-linux-2.6-xen.patch/drivers/oprofile/cpu_buffer.c
---- pristine-linux-2.6.18.2/drivers/oprofile/cpu_buffer.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/oprofile/cpu_buffer.c	2007-10-14 01:51:15.000000000 +0200
-@@ -6,6 +6,10 @@
-  *
-  * @author John Levon <levon at movementarian.org>
-  *
-+ * Modified by Aravind Menon for Xen
-+ * These modifications are:
-+ * Copyright (C) 2005 Hewlett-Packard Co.
-+ *
-  * Each CPU has a local buffer that stores PC value/event
-  * pairs. We also log context switches when we notice them.
-  * Eventually each CPU's buffer is processed into the global
-@@ -34,6 +38,8 @@ static void wq_sync_buffer(void *);
- #define DEFAULT_TIMER_EXPIRE (HZ / 10)
- static int work_enabled;
- 
-+static int32_t current_domain = COORDINATOR_DOMAIN;
++static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
++					   unsigned int idx)
++{
++	return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]);
++}
 +
- void free_cpu_buffers(void)
- {
- 	int i;
-@@ -57,7 +63,7 @@ int alloc_cpu_buffers(void)
- 			goto fail;
-  
- 		b->last_task = NULL;
--		b->last_is_kernel = -1;
-+		b->last_cpu_mode = -1;
- 		b->tracing = 0;
- 		b->buffer_size = buffer_size;
- 		b->tail_pos = 0;
-@@ -113,7 +119,7 @@ void cpu_buffer_reset(struct oprofile_cp
- 	 * collected will populate the buffer with proper
- 	 * values to initialize the buffer
- 	 */
--	cpu_buf->last_is_kernel = -1;
-+	cpu_buf->last_cpu_mode = -1;
- 	cpu_buf->last_task = NULL;
- }
- 
-@@ -163,13 +169,13 @@ add_code(struct oprofile_cpu_buffer * bu
-  * because of the head/tail separation of the writer and reader
-  * of the CPU buffer.
-  *
-- * is_kernel is needed because on some architectures you cannot
-+ * cpu_mode is needed because on some architectures you cannot
-  * tell if you are in kernel or user space simply by looking at
-- * pc. We tag this in the buffer by generating kernel enter/exit
-- * events whenever is_kernel changes
-+ * pc. We tag this in the buffer by generating kernel/user (and xen)
-+ *  enter events whenever cpu_mode changes
-  */
- static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
--		      int is_kernel, unsigned long event)
-+		      int cpu_mode, unsigned long event)
- {
- 	struct task_struct * task;
- 
-@@ -180,18 +186,18 @@ static int log_sample(struct oprofile_cp
- 		return 0;
- 	}
- 
--	is_kernel = !!is_kernel;
--
- 	task = current;
- 
- 	/* notice a switch from user->kernel or vice versa */
--	if (cpu_buf->last_is_kernel != is_kernel) {
--		cpu_buf->last_is_kernel = is_kernel;
--		add_code(cpu_buf, is_kernel);
-+	if (cpu_buf->last_cpu_mode != cpu_mode) {
-+		cpu_buf->last_cpu_mode = cpu_mode;
-+		add_code(cpu_buf, cpu_mode);
- 	}
--
-+	
- 	/* notice a task switch */
--	if (cpu_buf->last_task != task) {
-+	/* if not processing other domain samples */
-+	if ((cpu_buf->last_task != task) &&
-+	    (current_domain == COORDINATOR_DOMAIN)) {
- 		cpu_buf->last_task = task;
- 		add_code(cpu_buf, (unsigned long)task);
- 	}
-@@ -275,6 +281,25 @@ void oprofile_add_trace(unsigned long pc
- 	add_sample(cpu_buf, pc, 0);
- }
- 
-+int oprofile_add_domain_switch(int32_t domain_id)
++static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 +{
-+	struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
++}
 +
-+	/* should have space for switching into and out of domain 
-+	   (2 slots each) plus one sample and one cpu mode switch */
-+	if (((nr_available_slots(cpu_buf) < 6) && 
-+	     (domain_id != COORDINATOR_DOMAIN)) ||
-+	    (nr_available_slots(cpu_buf) < 2))
-+		return 0;
++static void init_evtchn_cpu_bindings(void)
++{
++}
 +
-+	add_code(cpu_buf, CPU_DOMAIN_SWITCH);
-+	add_sample(cpu_buf, domain_id, 0);
++static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
++{
++	return 0;
++}
 +
-+	current_domain = domain_id;
++#endif
 +
-+	return 1;
++/* Upcall to generic IRQ layer. */
++#ifdef CONFIG_X86
++extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
++void __init xen_init_IRQ(void);
++void __init init_IRQ(void)
++{
++	irq_ctx_init(0);
++	xen_init_IRQ();
 +}
++#if defined (__i386__)
++static inline void exit_idle(void) {}
++#define IRQ_REG orig_eax
++#elif defined (__x86_64__)
++#include <asm/idle.h>
++#define IRQ_REG orig_rax
++#endif
++#define do_IRQ(irq, regs) do {		\
++	(regs)->IRQ_REG = ~(irq);	\
++	do_IRQ((regs));			\
++} while (0)
++#endif
 +
- /*
-  * This serves to avoid cpu buffer overflow, and makes sure
-  * the task mortuary progresses
-diff -Nurp pristine-linux-2.6.18.2/drivers/oprofile/cpu_buffer.h tmp-linux-2.6-xen.patch/drivers/oprofile/cpu_buffer.h
---- pristine-linux-2.6.18.2/drivers/oprofile/cpu_buffer.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/oprofile/cpu_buffer.h	2007-10-14 01:51:15.000000000 +0200
-@@ -36,7 +36,7 @@ struct oprofile_cpu_buffer {
- 	volatile unsigned long tail_pos;
- 	unsigned long buffer_size;
- 	struct task_struct * last_task;
--	int last_is_kernel;
-+	int last_cpu_mode;
- 	int tracing;
- 	struct op_sample * buffer;
- 	unsigned long sample_received;
-@@ -51,7 +51,10 @@ extern struct oprofile_cpu_buffer cpu_bu
- void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf);
- 
- /* transient events for the CPU buffer -> event buffer */
--#define CPU_IS_KERNEL 1
--#define CPU_TRACE_BEGIN 2
-+#define CPU_MODE_USER           0
-+#define CPU_MODE_KERNEL         1
-+#define CPU_MODE_XEN            2
-+#define CPU_TRACE_BEGIN         3
-+#define CPU_DOMAIN_SWITCH       4
- 
- #endif /* OPROFILE_CPU_BUFFER_H */
-diff -Nurp pristine-linux-2.6.18.2/drivers/oprofile/event_buffer.h tmp-linux-2.6-xen.patch/drivers/oprofile/event_buffer.h
---- pristine-linux-2.6.18.2/drivers/oprofile/event_buffer.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/oprofile/event_buffer.h	2007-10-14 01:51:15.000000000 +0200
-@@ -29,15 +29,20 @@ void wake_up_buffer_waiter(void);
- #define CPU_SWITCH_CODE 		2
- #define COOKIE_SWITCH_CODE 		3
- #define KERNEL_ENTER_SWITCH_CODE	4
--#define KERNEL_EXIT_SWITCH_CODE		5
-+#define USER_ENTER_SWITCH_CODE		5
- #define MODULE_LOADED_CODE		6
- #define CTX_TGID_CODE			7
- #define TRACE_BEGIN_CODE		8
- #define TRACE_END_CODE			9
-+#define XEN_ENTER_SWITCH_CODE		10
-+#define DOMAIN_SWITCH_CODE		11
-  
- #define INVALID_COOKIE ~0UL
- #define NO_COOKIE 0UL
- 
-+/* Constant used to refer to coordinator domain (Xen) */
-+#define COORDINATOR_DOMAIN -1
++/* Xen will never allocate port zero for any purpose. */
++#define VALID_EVTCHN(chn)	((chn) != 0)
 +
- /* add data to the event buffer */
- void add_event_entry(unsigned long data);
-  
-diff -Nurp pristine-linux-2.6.18.2/drivers/oprofile/oprof.c tmp-linux-2.6-xen.patch/drivers/oprofile/oprof.c
---- pristine-linux-2.6.18.2/drivers/oprofile/oprof.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/oprofile/oprof.c	2007-10-14 01:51:15.000000000 +0200
-@@ -5,6 +5,10 @@
-  * @remark Read the file COPYING
-  *
-  * @author John Levon <levon at movementarian.org>
-+ *
-+ * Modified by Aravind Menon for Xen
-+ * These modifications are:
-+ * Copyright (C) 2005 Hewlett-Packard Co.
-  */
- 
- #include <linux/kernel.h>
-@@ -19,7 +23,7 @@
- #include "cpu_buffer.h"
- #include "buffer_sync.h"
- #include "oprofile_stats.h"
-- 
++/*
++ * Force a proper event-channel callback from Xen after clearing the
++ * callback mask. We do this in a very simple manner, by making a call
++ * down into Xen. The pending flag will be checked by Xen on return.
++ */
++void force_evtchn_callback(void)
++{
++	VOID(HYPERVISOR_xen_version(0, NULL));
++}
++/* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */
++EXPORT_SYMBOL(force_evtchn_callback);
 +
- struct oprofile_operations oprofile_ops;
- 
- unsigned long oprofile_started;
-@@ -33,6 +37,32 @@ static DEFINE_MUTEX(start_mutex);
-  */
- static int timer = 0;
- 
-+int oprofile_set_active(int active_domains[], unsigned int adomains)
++static DEFINE_PER_CPU(unsigned int, upcall_count) = { 0 };
++static DEFINE_PER_CPU(unsigned int, last_processed_l1i) = { BITS_PER_LONG - 1 };
++static DEFINE_PER_CPU(unsigned int, last_processed_l2i) = { BITS_PER_LONG - 1 };
++
++/* NB. Interrupts are disabled on entry. */
++asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
 +{
-+	int err;
++	unsigned long       l1, l2;
++	unsigned long       masked_l1, masked_l2;
++	unsigned int        l1i, l2i, port, count;
++	int                 irq;
++	unsigned int        cpu = smp_processor_id();
++	shared_info_t      *s = HYPERVISOR_shared_info;
++	vcpu_info_t        *vcpu_info = &s->vcpu_info[cpu];
 +
-+	if (!oprofile_ops.set_active)
-+		return -EINVAL;
 +
-+	mutex_lock(&start_mutex);
-+	err = oprofile_ops.set_active(active_domains, adomains);
-+	mutex_unlock(&start_mutex);
-+	return err;
-+}
++	do {
++		/* Avoid a callback storm when we reenable delivery. */
++		vcpu_info->evtchn_upcall_pending = 0;
 +
-+int oprofile_set_passive(int passive_domains[], unsigned int pdomains)
-+{
-+	int err;
++		/* Nested invocations bail immediately. */
++		if (unlikely(per_cpu(upcall_count, cpu)++))
++			return;
++
++#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
++		/* Clear master flag /before/ clearing selector flag. */
++		wmb();
++#endif
++		l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
++
++		l1i = per_cpu(last_processed_l1i, cpu);
++		l2i = per_cpu(last_processed_l2i, cpu);
++
++		while (l1 != 0) {
++
++			l1i = (l1i + 1) % BITS_PER_LONG;
++			masked_l1 = l1 & ((~0UL) << l1i);
++
++			if (masked_l1 == 0) { /* if we masked out all events, wrap around to the beginning */
++				l1i = BITS_PER_LONG - 1;
++				l2i = BITS_PER_LONG - 1;
++				continue;
++			}
++			l1i = __ffs(masked_l1);
 +
-+	if (!oprofile_ops.set_passive)
-+		return -EINVAL;
++			do {
++				l2 = active_evtchns(cpu, s, l1i);
 +
-+	mutex_lock(&start_mutex);
-+	err = oprofile_ops.set_passive(passive_domains, pdomains);
-+	mutex_unlock(&start_mutex);
-+	return err;
-+}
++				l2i = (l2i + 1) % BITS_PER_LONG;
++				masked_l2 = l2 & ((~0UL) << l2i);
 +
- int oprofile_setup(void)
- {
- 	int err;
-diff -Nurp pristine-linux-2.6.18.2/drivers/oprofile/oprof.h tmp-linux-2.6-xen.patch/drivers/oprofile/oprof.h
---- pristine-linux-2.6.18.2/drivers/oprofile/oprof.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/oprofile/oprof.h	2007-10-14 01:51:15.000000000 +0200
-@@ -35,5 +35,8 @@ void oprofile_create_files(struct super_
- void oprofile_timer_init(struct oprofile_operations * ops);
- 
- int oprofile_set_backtrace(unsigned long depth);
++				if (masked_l2 == 0) { /* if we masked out all events, move on */
++					l2i = BITS_PER_LONG - 1;
++					break;
++				}
 +
-+int oprofile_set_active(int active_domains[], unsigned int adomains);
-+int oprofile_set_passive(int passive_domains[], unsigned int pdomains);
-  
- #endif /* OPROF_H */
-diff -Nurp pristine-linux-2.6.18.2/drivers/oprofile/oprofile_files.c tmp-linux-2.6-xen.patch/drivers/oprofile/oprofile_files.c
---- pristine-linux-2.6.18.2/drivers/oprofile/oprofile_files.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/oprofile/oprofile_files.c	2007-10-14 01:51:15.000000000 +0200
-@@ -5,15 +5,21 @@
-  * @remark Read the file COPYING
-  *
-  * @author John Levon <levon at movementarian.org>
-+ *
-+ * Modified by Aravind Menon for Xen
-+ * These modifications are:
-+ * Copyright (C) 2005 Hewlett-Packard Co.	
-  */
- 
- #include <linux/fs.h>
- #include <linux/oprofile.h>
-+#include <asm/uaccess.h>
-+#include <linux/ctype.h>
- 
- #include "event_buffer.h"
- #include "oprofile_stats.h"
- #include "oprof.h"
-- 
++				l2i = __ffs(masked_l2);
 +
- unsigned long fs_buffer_size = 131072;
- unsigned long fs_cpu_buffer_size = 8192;
- unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
-@@ -117,11 +123,202 @@ static ssize_t dump_write(struct file * 
- static struct file_operations dump_fops = {
- 	.write		= dump_write,
- };
-- 
++				/* process port */
++				port = (l1i * BITS_PER_LONG) + l2i;
++				if ((irq = evtchn_to_irq[port]) != -1)
++					do_IRQ(irq, regs);
++				else {
++					exit_idle();
++					evtchn_device_upcall(port);
++				}
 +
-+#define TMPBUFSIZE 512
++				/* if this is the final port processed, we'll pick up here+1 next time */
++				per_cpu(last_processed_l1i, cpu) = l1i;
++				per_cpu(last_processed_l2i, cpu) = l2i;
++
++			} while (l2i != BITS_PER_LONG - 1);
++
++			l2 = active_evtchns(cpu, s, l1i);
++			if (l2 == 0) /* we handled all ports, so we can clear the selector bit */
++				l1 &= ~(1UL << l1i);
 +
-+static unsigned int adomains = 0;
-+static int active_domains[MAX_OPROF_DOMAINS + 1];
-+static DEFINE_MUTEX(adom_mutex);
++		}
 +
-+static ssize_t adomain_write(struct file * file, char const __user * buf, 
-+			     size_t count, loff_t * offset)
++		/* If there were nested callbacks then we have more to do. */
++		count = per_cpu(upcall_count, cpu);
++		per_cpu(upcall_count, cpu) = 0;
++	} while (unlikely(count != 1));
++}
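++
++/*
++ * Worked example of the two-level decode above (illustrative only,
++ * assuming BITS_PER_LONG == 32): if Xen sets selector bit l1i == 2
++ * and, within word 2 of evtchn_pending, bit l2i == 5, the upcall
++ * dispatches port (l1i * BITS_PER_LONG) + l2i == (2 * 32) + 5 == 69.
++ * last_processed_{l1i,l2i} then record (2, 5), so the next scan
++ * resumes just past that bit instead of re-favouring low ports.
++ */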
++
++static int find_unbound_irq(void)
 +{
-+	char *tmpbuf;
-+	char *startp, *endp;
-+	int i;
-+	unsigned long val;
-+	ssize_t retval = count;
-+	
-+	if (*offset)
-+		return -EINVAL;	
-+	if (count > TMPBUFSIZE - 1)
-+		return -EINVAL;
++	static int warned;
++	int dynirq, irq;
 +
-+	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
-+		return -ENOMEM;
++	for (dynirq = 0; dynirq < NR_DYNIRQS; dynirq++) {
++		irq = dynirq_to_irq(dynirq);
++		if (irq_bindcount[irq] == 0)
++			return irq;
++	}
 +
-+	if (copy_from_user(tmpbuf, buf, count)) {
-+		kfree(tmpbuf);
-+		return -EFAULT;
++	if (!warned) {
++		warned = 1;
++		printk(KERN_WARNING "No available IRQ to bind to: "
++		       "increase NR_DYNIRQS.\n");
 +	}
-+	tmpbuf[count] = 0;
 +
-+	mutex_lock(&adom_mutex);
++	return -ENOSPC;
++}
 +
-+	startp = tmpbuf;
-+	/* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
-+	for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
-+		val = simple_strtoul(startp, &endp, 0);
-+		if (endp == startp)
-+			break;
-+		while (ispunct(*endp) || isspace(*endp))
-+			endp++;
-+		active_domains[i] = val;
-+		if (active_domains[i] != val)
-+			/* Overflow, force error below */
-+			i = MAX_OPROF_DOMAINS + 1;
-+		startp = endp;
-+	}
-+	/* Force error on trailing junk */
-+	adomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
++static int bind_caller_port_to_irq(unsigned int caller_port)
++{
++	int irq;
 +
-+	kfree(tmpbuf);
++	spin_lock(&irq_mapping_update_lock);
 +
-+	if (adomains > MAX_OPROF_DOMAINS
-+	    || oprofile_set_active(active_domains, adomains)) {
-+		adomains = 0;
-+		retval = -EINVAL;
++	if ((irq = evtchn_to_irq[caller_port]) == -1) {
++		if ((irq = find_unbound_irq()) < 0)
++			goto out;
++
++		evtchn_to_irq[caller_port] = irq;
++		irq_info[irq] = mk_irq_info(IRQT_CALLER_PORT, 0, caller_port);
 +	}
 +
-+	mutex_unlock(&adom_mutex);
-+	return retval;
++	irq_bindcount[irq]++;
++
++ out:
++	spin_unlock(&irq_mapping_update_lock);
++	return irq;
 +}
 +
-+static ssize_t adomain_read(struct file * file, char __user * buf, 
-+			    size_t count, loff_t * offset)
++static int bind_local_port_to_irq(unsigned int local_port)
 +{
-+	char * tmpbuf;
-+	size_t len;
-+	int i;
-+	ssize_t retval;
++	int irq;
 +
-+	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
-+		return -ENOMEM;
++	spin_lock(&irq_mapping_update_lock);
 +
-+	mutex_lock(&adom_mutex);
++	BUG_ON(evtchn_to_irq[local_port] != -1);
 +
-+	len = 0;
-+	for (i = 0; i < adomains; i++)
-+		len += snprintf(tmpbuf + len,
-+				len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
-+				"%u ", active_domains[i]);
-+	WARN_ON(len > TMPBUFSIZE);
-+	if (len != 0 && len <= TMPBUFSIZE)
-+		tmpbuf[len-1] = '\n';
++	if ((irq = find_unbound_irq()) < 0) {
++		struct evtchn_close close = { .port = local_port };
++		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
++			BUG();
++		goto out;
++	}
 +
-+	mutex_unlock(&adom_mutex);
++	evtchn_to_irq[local_port] = irq;
++	irq_info[irq] = mk_irq_info(IRQT_LOCAL_PORT, 0, local_port);
++	irq_bindcount[irq]++;
 +
-+	retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
++ out:
++	spin_unlock(&irq_mapping_update_lock);
++	return irq;
++}
 +
-+	kfree(tmpbuf);
-+	return retval;
++static int bind_listening_port_to_irq(unsigned int remote_domain)
++{
++	struct evtchn_alloc_unbound alloc_unbound;
++	int err;
++
++	alloc_unbound.dom        = DOMID_SELF;
++	alloc_unbound.remote_dom = remote_domain;
++
++	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
++					  &alloc_unbound);
++
++	return err ? : bind_local_port_to_irq(alloc_unbound.port);
 +}
 +
++static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
++					  unsigned int remote_port)
++{
++	struct evtchn_bind_interdomain bind_interdomain;
++	int err;
 +
-+static struct file_operations active_domain_ops = {
-+	.read		= adomain_read,
-+	.write		= adomain_write,
-+};
++	bind_interdomain.remote_dom  = remote_domain;
++	bind_interdomain.remote_port = remote_port;
 +
-+static unsigned int pdomains = 0;
-+static int passive_domains[MAX_OPROF_DOMAINS];
-+static DEFINE_MUTEX(pdom_mutex);
++	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
++					  &bind_interdomain);
 +
-+static ssize_t pdomain_write(struct file * file, char const __user * buf, 
-+			     size_t count, loff_t * offset)
++	return err ? : bind_local_port_to_irq(bind_interdomain.local_port);
++}
++
++static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 +{
-+	char *tmpbuf;
-+	char *startp, *endp;
-+	int i;
-+	unsigned long val;
-+	ssize_t retval = count;
-+	
-+	if (*offset)
-+		return -EINVAL;	
-+	if (count > TMPBUFSIZE - 1)
-+		return -EINVAL;
++	struct evtchn_bind_virq bind_virq;
++	int evtchn, irq;
 +
-+	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
-+		return -ENOMEM;
++	spin_lock(&irq_mapping_update_lock);
 +
-+	if (copy_from_user(tmpbuf, buf, count)) {
-+		kfree(tmpbuf);
-+		return -EFAULT;
-+	}
-+	tmpbuf[count] = 0;
++	if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
++		if ((irq = find_unbound_irq()) < 0)
++			goto out;
 +
-+	mutex_lock(&pdom_mutex);
++		bind_virq.virq = virq;
++		bind_virq.vcpu = cpu;
++		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
++						&bind_virq) != 0)
++			BUG();
++		evtchn = bind_virq.port;
 +
-+	startp = tmpbuf;
-+	/* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
-+	for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
-+		val = simple_strtoul(startp, &endp, 0);
-+		if (endp == startp)
-+			break;
-+		while (ispunct(*endp) || isspace(*endp))
-+			endp++;
-+		passive_domains[i] = val;
-+		if (passive_domains[i] != val)
-+			/* Overflow, force error below */
-+			i = MAX_OPROF_DOMAINS + 1;
-+		startp = endp;
-+	}
-+	/* Force error on trailing junk */
-+	pdomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
++		evtchn_to_irq[evtchn] = irq;
++		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
 +
-+	kfree(tmpbuf);
++		per_cpu(virq_to_irq, cpu)[virq] = irq;
 +
-+	if (pdomains > MAX_OPROF_DOMAINS
-+	    || oprofile_set_passive(passive_domains, pdomains)) {
-+		pdomains = 0;
-+		retval = -EINVAL;
++		bind_evtchn_to_cpu(evtchn, cpu);
 +	}
 +
-+	mutex_unlock(&pdom_mutex);
-+	return retval;
++	irq_bindcount[irq]++;
++
++ out:
++	spin_unlock(&irq_mapping_update_lock);
++	return irq;
 +}
 +
-+static ssize_t pdomain_read(struct file * file, char __user * buf, 
-+			    size_t count, loff_t * offset)
++static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 +{
-+	char * tmpbuf;
-+	size_t len;
-+	int i;
-+	ssize_t retval;
++	struct evtchn_bind_ipi bind_ipi;
++	int evtchn, irq;
 +
-+	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
-+		return -ENOMEM;
++	spin_lock(&irq_mapping_update_lock);
 +
-+	mutex_lock(&pdom_mutex);
++	if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
++		if ((irq = find_unbound_irq()) < 0)
++			goto out;
 +
-+	len = 0;
-+	for (i = 0; i < pdomains; i++)
-+		len += snprintf(tmpbuf + len,
-+				len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
-+				"%u ", passive_domains[i]);
-+	WARN_ON(len > TMPBUFSIZE);
-+	if (len != 0 && len <= TMPBUFSIZE)
-+		tmpbuf[len-1] = '\n';
++		bind_ipi.vcpu = cpu;
++		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
++						&bind_ipi) != 0)
++			BUG();
++		evtchn = bind_ipi.port;
 +
-+	mutex_unlock(&pdom_mutex);
++		evtchn_to_irq[evtchn] = irq;
++		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
 +
-+	retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
++		per_cpu(ipi_to_irq, cpu)[ipi] = irq;
 +
-+	kfree(tmpbuf);
-+	return retval;
++		bind_evtchn_to_cpu(evtchn, cpu);
++	}
++
++	irq_bindcount[irq]++;
++
++ out:
++	spin_unlock(&irq_mapping_update_lock);
++	return irq;
 +}
 +
-+static struct file_operations passive_domain_ops = {
-+	.read		= pdomain_read,
-+	.write		= pdomain_write,
-+};
++static void unbind_from_irq(unsigned int irq)
++{
++	struct evtchn_close close;
++	unsigned int cpu;
++	int evtchn = evtchn_from_irq(irq);
 +
- void oprofile_create_files(struct super_block * sb, struct dentry * root)
- {
- 	oprofilefs_create_file(sb, root, "enable", &enable_fops);
- 	oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
-+	oprofilefs_create_file(sb, root, "active_domains", &active_domain_ops);
-+	oprofilefs_create_file(sb, root, "passive_domains", &passive_domain_ops);
- 	oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
- 	oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
- 	oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
-diff -Nurp pristine-linux-2.6.18.2/drivers/pci/Kconfig tmp-linux-2.6-xen.patch/drivers/pci/Kconfig
---- pristine-linux-2.6.18.2/drivers/pci/Kconfig	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/pci/Kconfig	2007-07-30 16:35:11.000000000 +0200
-@@ -5,6 +5,7 @@ config PCI_MSI
- 	bool "Message Signaled Interrupts (MSI and MSI-X)"
- 	depends on PCI
- 	depends on (X86_LOCAL_APIC && X86_IO_APIC) || IA64
-+	depends on !XEN
- 	help
- 	   This allows device drivers to enable MSI (Message Signaled
- 	   Interrupts).  Message Signaled Interrupts enable a device to
-diff -Nurp pristine-linux-2.6.18.2/drivers/serial/Kconfig tmp-linux-2.6-xen.patch/drivers/serial/Kconfig
---- pristine-linux-2.6.18.2/drivers/serial/Kconfig	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/serial/Kconfig	2007-07-30 16:35:11.000000000 +0200
-@@ -11,6 +11,7 @@ menu "Serial drivers"
- config SERIAL_8250
- 	tristate "8250/16550 and compatible serial support"
- 	depends on (BROKEN || !SPARC)
-+	depends on !XEN_DISABLE_SERIAL
- 	select SERIAL_CORE
- 	---help---
- 	  This selects whether you want to include the driver for the standard
-diff -Nurp pristine-linux-2.6.18.2/drivers/video/console/Kconfig tmp-linux-2.6-xen.patch/drivers/video/console/Kconfig
---- pristine-linux-2.6.18.2/drivers/video/console/Kconfig	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/video/console/Kconfig	2007-07-30 16:35:11.000000000 +0200
-@@ -53,6 +53,7 @@ config VGACON_SOFT_SCROLLBACK_SIZE
- config VIDEO_SELECT
- 	bool "Video mode selection support"
- 	depends on  X86 && VGA_CONSOLE
-+	depends on !XEN
- 	---help---
- 	  This enables support for text mode selection on kernel startup. If
- 	  you want to take advantage of some high-resolution text mode your
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/balloon/balloon.c tmp-linux-2.6-xen.patch/drivers/xen/balloon/balloon.c
---- pristine-linux-2.6.18.2/drivers/xen/balloon/balloon.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/balloon/balloon.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,663 @@
-+/******************************************************************************
-+ * balloon.c
-+ *
-+ * Xen balloon driver - enables returning/claiming memory to/from Xen.
-+ *
-+ * Copyright (c) 2003, B Dragovic
-+ * Copyright (c) 2003-2004, M Williamson, K Fraser
-+ * Copyright (c) 2005 Dan M. Smith, IBM Corporation
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
++	spin_lock(&irq_mapping_update_lock);
 +
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/errno.h>
-+#include <linux/mm.h>
-+#include <linux/mman.h>
-+#include <linux/smp_lock.h>
-+#include <linux/pagemap.h>
-+#include <linux/bootmem.h>
-+#include <linux/highmem.h>
-+#include <linux/vmalloc.h>
-+#include <linux/mutex.h>
-+#include <xen/xen_proc.h>
-+#include <asm/hypervisor.h>
-+#include <xen/balloon.h>
-+#include <xen/interface/memory.h>
-+#include <asm/maddr.h>
-+#include <asm/page.h>
-+#include <asm/pgalloc.h>
-+#include <asm/pgtable.h>
-+#include <asm/uaccess.h>
-+#include <asm/tlb.h>
-+#include <linux/highmem.h>
-+#include <linux/list.h>
-+#include <xen/xenbus.h>
-+#include "common.h"
++	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
++		close.port = evtchn;
++		if ((type_from_irq(irq) != IRQT_CALLER_PORT) &&
++		    HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
++			BUG();
 +
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
++		switch (type_from_irq(irq)) {
++		case IRQT_VIRQ:
++			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
++				[index_from_irq(irq)] = -1;
++			break;
++		case IRQT_IPI:
++			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
++				[index_from_irq(irq)] = -1;
++			break;
++		default:
++			break;
++		}
 +
-+#ifdef CONFIG_PROC_FS
-+static struct proc_dir_entry *balloon_pde;
-+#endif
++		/* Closed ports are implicitly re-bound to VCPU0. */
++		bind_evtchn_to_cpu(evtchn, 0);
 +
-+static DEFINE_MUTEX(balloon_mutex);
++		evtchn_to_irq[evtchn] = -1;
++		irq_info[irq] = IRQ_UNBOUND;
 +
-+/*
-+ * Protects atomic reservation decrease/increase against concurrent increases.
-+ * Also protects non-atomic updates of current_pages and driver_pages, and
-+ * balloon lists.
-+ */
-+DEFINE_SPINLOCK(balloon_lock);
++		/* Zap stats across IRQ changes of use. */
++		for_each_possible_cpu(cpu)
++			kstat_cpu(cpu).irqs[irq] = 0;
++	}
 +
-+struct balloon_stats balloon_stats;
++	spin_unlock(&irq_mapping_update_lock);
++}
 +
-+/* We increase/decrease in batches which fit in a page */
-+static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];
++int bind_caller_port_to_irqhandler(
++	unsigned int caller_port,
++	irqreturn_t (*handler)(int, void *, struct pt_regs *),
++	unsigned long irqflags,
++	const char *devname,
++	void *dev_id)
++{
++	int irq, retval;
 +
-+/* VM /proc information for memory */
-+extern unsigned long totalram_pages;
++	irq = bind_caller_port_to_irq(caller_port);
++	if (irq < 0)
++		return irq;
 +
-+/* List of ballooned pages, threaded through the mem_map array. */
-+static LIST_HEAD(ballooned_pages);
++	retval = request_irq(irq, handler, irqflags, devname, dev_id);
++	if (retval != 0) {
++		unbind_from_irq(irq);
++		return retval;
++	}
 +
-+/* Main work function, always executed in process context. */
-+static void balloon_process(void *unused);
-+static DECLARE_WORK(balloon_worker, balloon_process, NULL);
-+static struct timer_list balloon_timer;
++	return irq;
++}
++EXPORT_SYMBOL_GPL(bind_caller_port_to_irqhandler);
 +
-+/* When ballooning out (allocating memory to return to Xen) we don't really 
-+   want the kernel to try too hard since that can trigger the oom killer. */
-+#define GFP_BALLOON \
-+	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
++int bind_listening_port_to_irqhandler(
++	unsigned int remote_domain,
++	irqreturn_t (*handler)(int, void *, struct pt_regs *),
++	unsigned long irqflags,
++	const char *devname,
++	void *dev_id)
++{
++	int irq, retval;
++
++	irq = bind_listening_port_to_irq(remote_domain);
++	if (irq < 0)
++		return irq;
++
++	retval = request_irq(irq, handler, irqflags, devname, dev_id);
++	if (retval != 0) {
++		unbind_from_irq(irq);
++		return retval;
++	}
++
++	return irq;
++}
++EXPORT_SYMBOL_GPL(bind_listening_port_to_irqhandler);
++
++int bind_interdomain_evtchn_to_irqhandler(
++	unsigned int remote_domain,
++	unsigned int remote_port,
++	irqreturn_t (*handler)(int, void *, struct pt_regs *),
++	unsigned long irqflags,
++	const char *devname,
++	void *dev_id)
++{
++	int irq, retval;
 +
-+#define PAGE_TO_LIST(p) (&(p)->lru)
-+#define LIST_TO_PAGE(l) list_entry((l), struct page, lru)
-+#define UNLIST_PAGE(p)				\
-+	do {					\
-+		list_del(PAGE_TO_LIST(p));	\
-+		PAGE_TO_LIST(p)->next = NULL;	\
-+		PAGE_TO_LIST(p)->prev = NULL;	\
-+	} while(0)
++	irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
++	if (irq < 0)
++		return irq;
 +
-+#define IPRINTK(fmt, args...) \
-+	printk(KERN_INFO "xen_mem: " fmt, ##args)
-+#define WPRINTK(fmt, args...) \
-+	printk(KERN_WARNING "xen_mem: " fmt, ##args)
++	retval = request_irq(irq, handler, irqflags, devname, dev_id);
++	if (retval != 0) {
++		unbind_from_irq(irq);
++		return retval;
++	}
 +
-+/* balloon_append: add the given page to the balloon. */
-+static void balloon_append(struct page *page)
++	return irq;
++}
++EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
++
++int bind_virq_to_irqhandler(
++	unsigned int virq,
++	unsigned int cpu,
++	irqreturn_t (*handler)(int, void *, struct pt_regs *),
++	unsigned long irqflags,
++	const char *devname,
++	void *dev_id)
 +{
-+	/* Lowmem is re-populated first, so highmem pages go at list tail. */
-+	if (PageHighMem(page)) {
-+		list_add_tail(PAGE_TO_LIST(page), &ballooned_pages);
-+		bs.balloon_high++;
-+	} else {
-+		list_add(PAGE_TO_LIST(page), &ballooned_pages);
-+		bs.balloon_low++;
++	int irq, retval;
++
++	irq = bind_virq_to_irq(virq, cpu);
++	if (irq < 0)
++		return irq;
++
++	retval = request_irq(irq, handler, irqflags, devname, dev_id);
++	if (retval != 0) {
++		unbind_from_irq(irq);
++		return retval;
 +	}
++
++	return irq;
 +}
++EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
 +
-+/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
-+static struct page *balloon_retrieve(void)
++int bind_ipi_to_irqhandler(
++	unsigned int ipi,
++	unsigned int cpu,
++	irqreturn_t (*handler)(int, void *, struct pt_regs *),
++	unsigned long irqflags,
++	const char *devname,
++	void *dev_id)
 +{
-+	struct page *page;
-+
-+	if (list_empty(&ballooned_pages))
-+		return NULL;
++	int irq, retval;
 +
-+	page = LIST_TO_PAGE(ballooned_pages.next);
-+	UNLIST_PAGE(page);
++	irq = bind_ipi_to_irq(ipi, cpu);
++	if (irq < 0)
++		return irq;
 +
-+	if (PageHighMem(page))
-+		bs.balloon_high--;
-+	else
-+		bs.balloon_low--;
++	retval = request_irq(irq, handler, irqflags, devname, dev_id);
++	if (retval != 0) {
++		unbind_from_irq(irq);
++		return retval;
++	}
 +
-+	return page;
++	return irq;
 +}
++EXPORT_SYMBOL_GPL(bind_ipi_to_irqhandler);
 +
-+static struct page *balloon_first_page(void)
++void unbind_from_irqhandler(unsigned int irq, void *dev_id)
 +{
-+	if (list_empty(&ballooned_pages))
-+		return NULL;
-+	return LIST_TO_PAGE(ballooned_pages.next);
++	free_irq(irq, dev_id);
++	unbind_from_irq(irq);
 +}
++EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
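++
++/*
++ * Minimal usage sketch (illustrative, not part of the patch itself):
++ * a driver binding a VIRQ on the current CPU via the helpers above,
++ * then tearing it down again. The handler name is hypothetical.
++ *
++ *	static irqreturn_t example_handler(int irq, void *dev_id,
++ *					   struct pt_regs *regs)
++ *	{
++ *		return IRQ_HANDLED;
++ *	}
++ *
++ *	int irq = bind_virq_to_irqhandler(VIRQ_DEBUG, smp_processor_id(),
++ *					  example_handler, 0,
++ *					  "example", NULL);
++ *	if (irq >= 0) {
++ *		...
++ *		unbind_from_irqhandler(irq, NULL);
++ *	}
++ */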
 +
-+static struct page *balloon_next_page(struct page *page)
++#ifdef CONFIG_SMP
++void rebind_evtchn_to_cpu(int port, unsigned int cpu)
 +{
-+	struct list_head *next = PAGE_TO_LIST(page)->next;
-+	if (next == &ballooned_pages)
-+		return NULL;
-+	return LIST_TO_PAGE(next);
++	struct evtchn_bind_vcpu ebv = { .port = port, .vcpu = cpu };
++	int masked;
++
++	masked = test_and_set_evtchn_mask(port);
++	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &ebv) == 0)
++		bind_evtchn_to_cpu(port, cpu);
++	if (!masked)
++		unmask_evtchn(port);
 +}
 +
-+static void balloon_alarm(unsigned long unused)
++static void rebind_irq_to_cpu(unsigned int irq, unsigned int tcpu)
 +{
-+	schedule_work(&balloon_worker);
++	int evtchn = evtchn_from_irq(irq);
++
++	if (VALID_EVTCHN(evtchn))
++		rebind_evtchn_to_cpu(evtchn, tcpu);
 +}
 +
-+static unsigned long current_target(void)
++static void set_affinity_irq(unsigned int irq, cpumask_t dest)
 +{
-+	unsigned long target = min(bs.target_pages, bs.hard_limit);
-+	if (target > (bs.current_pages + bs.balloon_low + bs.balloon_high))
-+		target = bs.current_pages + bs.balloon_low + bs.balloon_high;
-+	return target;
++	unsigned tcpu = first_cpu(dest);
++	rebind_irq_to_cpu(irq, tcpu);
 +}
++#endif
 +
-+static int increase_reservation(unsigned long nr_pages)
++int resend_irq_on_evtchn(unsigned int irq)
 +{
-+	unsigned long  pfn, i, flags;
-+	struct page   *page;
-+	long           rc;
-+	struct xen_memory_reservation reservation = {
-+		.address_bits = 0,
-+		.extent_order = 0,
-+		.domid        = DOMID_SELF
-+	};
-+
-+	if (nr_pages > ARRAY_SIZE(frame_list))
-+		nr_pages = ARRAY_SIZE(frame_list);
-+
-+	balloon_lock(flags);
-+
-+	page = balloon_first_page();
-+	for (i = 0; i < nr_pages; i++) {
-+		BUG_ON(page == NULL);
-+		frame_list[i] = page_to_pfn(page);
-+		page = balloon_next_page(page);
-+	}
-+
-+	set_xen_guest_handle(reservation.extent_start, frame_list);
-+	reservation.nr_extents   = nr_pages;
-+	rc = HYPERVISOR_memory_op(
-+		XENMEM_populate_physmap, &reservation);
-+	if (rc < nr_pages) {
-+		if (rc > 0) {
-+			int ret;
-+
-+			/* We hit the Xen hard limit: reprobe. */
-+			reservation.nr_extents = rc;
-+			ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-+					&reservation);
-+			BUG_ON(ret != rc);
-+		}
-+		if (rc >= 0)
-+			bs.hard_limit = (bs.current_pages + rc -
-+					 bs.driver_pages);
-+		goto out;
-+	}
-+
-+	for (i = 0; i < nr_pages; i++) {
-+		page = balloon_retrieve();
-+		BUG_ON(page == NULL);
-+
-+		pfn = page_to_pfn(page);
-+		BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) &&
-+		       phys_to_machine_mapping_valid(pfn));
++	int masked, evtchn = evtchn_from_irq(irq);
++	shared_info_t *s = HYPERVISOR_shared_info;
 +
-+		set_phys_to_machine(pfn, frame_list[i]);
++	if (!VALID_EVTCHN(evtchn))
++		return 1;
 +
-+#ifdef CONFIG_XEN
-+		/* Link back into the page tables if not highmem. */
-+		if (pfn < max_low_pfn) {
-+			int ret;
-+			ret = HYPERVISOR_update_va_mapping(
-+				(unsigned long)__va(pfn << PAGE_SHIFT),
-+				pfn_pte_ma(frame_list[i], PAGE_KERNEL),
-+				0);
-+			BUG_ON(ret);
-+		}
-+#endif
++	masked = test_and_set_evtchn_mask(evtchn);
++	synch_set_bit(evtchn, s->evtchn_pending);
++	if (!masked)
++		unmask_evtchn(evtchn);
 +
-+		/* Relinquish the page back to the allocator. */
-+		ClearPageReserved(page);
-+		init_page_count(page);
-+		__free_page(page);
-+	}
++	return 1;
++}
 +
-+	bs.current_pages += nr_pages;
-+	totalram_pages = bs.current_pages;
++/*
++ * Interface to generic handling in irq.c
++ */
 +
-+ out:
-+	balloon_unlock(flags);
++static unsigned int startup_dynirq(unsigned int irq)
++{
++	int evtchn = evtchn_from_irq(irq);
 +
++	if (VALID_EVTCHN(evtchn))
++		unmask_evtchn(evtchn);
 +	return 0;
 +}
 +
-+static int decrease_reservation(unsigned long nr_pages)
++static void shutdown_dynirq(unsigned int irq)
 +{
-+	unsigned long  pfn, i, flags;
-+	struct page   *page;
-+	void          *v;
-+	int            need_sleep = 0;
-+	int ret;
-+	struct xen_memory_reservation reservation = {
-+		.address_bits = 0,
-+		.extent_order = 0,
-+		.domid        = DOMID_SELF
-+	};
-+
-+	if (nr_pages > ARRAY_SIZE(frame_list))
-+		nr_pages = ARRAY_SIZE(frame_list);
-+
-+	for (i = 0; i < nr_pages; i++) {
-+		if ((page = alloc_page(GFP_BALLOON)) == NULL) {
-+			nr_pages = i;
-+			need_sleep = 1;
-+			break;
-+		}
-+
-+		pfn = page_to_pfn(page);
-+		frame_list[i] = pfn_to_mfn(pfn);
++	int evtchn = evtchn_from_irq(irq);
 +
-+		if (!PageHighMem(page)) {
-+			v = phys_to_virt(pfn << PAGE_SHIFT);
-+			scrub_pages(v, 1);
-+#ifdef CONFIG_XEN
-+			ret = HYPERVISOR_update_va_mapping(
-+				(unsigned long)v, __pte_ma(0), 0);
-+			BUG_ON(ret);
-+#endif
-+		}
-+#ifdef CONFIG_XEN_SCRUB_PAGES
-+		else {
-+			v = kmap(page);
-+			scrub_pages(v, 1);
-+			kunmap(page);
-+		}
-+#endif
-+	}
++	if (VALID_EVTCHN(evtchn))
++		mask_evtchn(evtchn);
++}
 +
-+#ifdef CONFIG_XEN
-+	/* Ensure that ballooned highmem pages don't have kmaps. */
-+	kmap_flush_unused();
-+	flush_tlb_all();
-+#endif
++static void enable_dynirq(unsigned int irq)
++{
++	int evtchn = evtchn_from_irq(irq);
 +
-+	balloon_lock(flags);
++	if (VALID_EVTCHN(evtchn))
++		unmask_evtchn(evtchn);
++}
 +
-+	/* No more mappings: invalidate P2M and add to balloon. */
-+	for (i = 0; i < nr_pages; i++) {
-+		pfn = mfn_to_pfn(frame_list[i]);
-+		set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
-+		balloon_append(pfn_to_page(pfn));
-+	}
++static void disable_dynirq(unsigned int irq)
++{
++	int evtchn = evtchn_from_irq(irq);
 +
-+	set_xen_guest_handle(reservation.extent_start, frame_list);
-+	reservation.nr_extents   = nr_pages;
-+	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
-+	BUG_ON(ret != nr_pages);
++	if (VALID_EVTCHN(evtchn))
++		mask_evtchn(evtchn);
++}
 +
-+	bs.current_pages -= nr_pages;
-+	totalram_pages = bs.current_pages;
++static void ack_dynirq(unsigned int irq)
++{
++	int evtchn = evtchn_from_irq(irq);
 +
-+	balloon_unlock(flags);
++	move_native_irq(irq);
 +
-+	return need_sleep;
++	if (VALID_EVTCHN(evtchn)) {
++		mask_evtchn(evtchn);
++		clear_evtchn(evtchn);
++	}
 +}
 +
-+/*
-+ * We avoid multiple worker processes conflicting via the balloon mutex.
-+ * We may of course race updates of the target counts (which are protected
-+ * by the balloon lock), or with changes to the Xen hard limit, but we will
-+ * recover from these in time.
-+ */
-+static void balloon_process(void *unused)
++static void end_dynirq(unsigned int irq)
 +{
-+	int need_sleep = 0;
-+	long credit;
-+
-+	mutex_lock(&balloon_mutex);
++	int evtchn = evtchn_from_irq(irq);
 +
-+	do {
-+		credit = current_target() - bs.current_pages;
-+		if (credit > 0)
-+			need_sleep = (increase_reservation(credit) != 0);
-+		if (credit < 0)
-+			need_sleep = (decrease_reservation(-credit) != 0);
++	if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED))
++		unmask_evtchn(evtchn);
++}
 +
-+#ifndef CONFIG_PREEMPT
-+		if (need_resched())
-+			schedule();
++static struct hw_interrupt_type dynirq_type = {
++	.typename = "Dynamic-irq",
++	.startup  = startup_dynirq,
++	.shutdown = shutdown_dynirq,
++	.enable   = enable_dynirq,
++	.disable  = disable_dynirq,
++	.ack      = ack_dynirq,
++	.end      = end_dynirq,
++#ifdef CONFIG_SMP
++	.set_affinity = set_affinity_irq,
 +#endif
-+	} while ((credit != 0) && !need_sleep);
-+
-+	/* Schedule more work if there is some still to be done. */
-+	if (current_target() != bs.current_pages)
-+		mod_timer(&balloon_timer, jiffies + HZ);
-+
-+	mutex_unlock(&balloon_mutex);
-+}
++	.retrigger = resend_irq_on_evtchn,
++};
 +
-+/* Resets the Xen limit, sets new target, and kicks off processing. */
-+void balloon_set_new_target(unsigned long target)
++void evtchn_register_pirq(int irq)
 +{
-+	/* No need for lock. Not read-modify-write updates. */
-+	bs.hard_limit   = ~0UL;
-+	bs.target_pages = target;
-+	schedule_work(&balloon_worker);
++	irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, 0);
 +}
 +
-+static struct xenbus_watch target_watch =
-+{
-+	.node = "memory/target"
-+};
++#ifndef CONFIG_X86_IO_APIC
++#undef IO_APIC_IRQ
++#define IO_APIC_IRQ(irq) ((irq) >= pirq_to_irq(16))
++#endif
 +
-+/* React to a change in the target key */
-+static void watch_target(struct xenbus_watch *watch,
-+			 const char **vec, unsigned int len)
++int evtchn_map_pirq(int irq, int xen_pirq)
 +{
-+	unsigned long long new_target;
-+	int err;
++	if (irq < 0) {
++		static DEFINE_SPINLOCK(irq_alloc_lock);
 +
-+	err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
-+	if (err != 1) {
-+		/* This is ok (for domain0 at least) - so just return */
-+		return;
++		irq = pirq_to_irq(NR_PIRQS - 1);
++		spin_lock(&irq_alloc_lock);
++		do {
++			if (!IO_APIC_IRQ(irq))
++				continue;
++			if (!index_from_irq(irq)) {
++				BUG_ON(type_from_irq(irq) != IRQT_UNBOUND);
++				irq_info[irq] = mk_irq_info(IRQT_PIRQ,
++							    xen_pirq, 0);
++				break;
++			}
++		} while (--irq);
++		spin_unlock(&irq_alloc_lock);
++		if (irq < pirq_to_irq(16))
++			return -ENOSPC;
++	} else if (!xen_pirq) {
++		if (unlikely(type_from_irq(irq) != IRQT_PIRQ))
++			return -EINVAL;
++		irq_info[irq] = IRQ_UNBOUND;
++		return 0;
++	} else if (type_from_irq(irq) != IRQT_PIRQ
++		   || index_from_irq(irq) != xen_pirq) {
++		printk(KERN_ERR "IRQ#%d is already mapped to %d:%u - "
++				"cannot map to PIRQ#%u\n",
++		       irq, type_from_irq(irq), index_from_irq(irq), xen_pirq);
++		return -EINVAL;
 +	}
-+
-+	/* The given memory/target value is in KiB, so it needs converting to
-+	 * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
-+	 */
-+	balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
++	return index_from_irq(irq) ? irq : -EINVAL;
 +}
 +
-+static int balloon_init_watcher(struct notifier_block *notifier,
-+				unsigned long event,
-+				void *data)
++int evtchn_get_xen_pirq(int irq)
 +{
-+	int err;
++	if (!IO_APIC_IRQ(irq))
++		return irq;
++	if (unlikely(type_from_irq(irq) != IRQT_PIRQ))
++		return 0;
++	return index_from_irq(irq);
++}
 +
-+	err = register_xenbus_watch(&target_watch);
-+	if (err)
-+		printk(KERN_ERR "Failed to set balloon watcher\n");
++static inline void pirq_unmask_notify(int pirq)
++{
++	struct physdev_eoi eoi = { .irq = pirq };
++	if (unlikely(test_bit(pirq, pirq_needs_eoi)))
++		VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
++}
 +
-+	return NOTIFY_DONE;
++static inline void pirq_query_unmask(int pirq)
++{
++	struct physdev_irq_status_query irq_status;
++	irq_status.irq = pirq;
++	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
++		irq_status.flags = 0;
++	clear_bit(pirq, pirq_needs_eoi);
++	if (irq_status.flags & XENIRQSTAT_needs_eoi)
++		set_bit(pirq, pirq_needs_eoi);
 +}
 +
-+#ifdef CONFIG_PROC_FS
-+static int balloon_write(struct file *file, const char __user *buffer,
-+			 unsigned long count, void *data)
++/*
++ * On startup, if there is no action associated with the IRQ then we are
++ * probing. In this case we should not share with others as it will confuse us.
++ */
++#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)
++
++static unsigned int startup_pirq(unsigned int irq)
 +{
-+	char memstring[64], *endchar;
-+	unsigned long long target_bytes;
++	struct evtchn_bind_pirq bind_pirq;
++	int evtchn = evtchn_from_irq(irq);
 +
-+	if (!capable(CAP_SYS_ADMIN))
-+		return -EPERM;
++	if (VALID_EVTCHN(evtchn))
++		goto out;
 +
-+	if (count <= 1)
-+		return -EBADMSG; /* runt */
-+	if (count > sizeof(memstring))
-+		return -EFBIG;   /* too long */
++	bind_pirq.pirq = evtchn_get_xen_pirq(irq);
++	/* NB. We are happy to share unless we are probing. */
++	bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
++	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
++		if (!probing_irq(irq))
++			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
++			       irq);
++		return 0;
++	}
++	evtchn = bind_pirq.port;
 +
-+	if (copy_from_user(memstring, buffer, count))
-+		return -EFAULT;
-+	memstring[sizeof(memstring)-1] = '\0';
++	pirq_query_unmask(irq_to_pirq(irq));
 +
-+	target_bytes = memparse(memstring, &endchar);
-+	balloon_set_new_target(target_bytes >> PAGE_SHIFT);
++	evtchn_to_irq[evtchn] = irq;
++	bind_evtchn_to_cpu(evtchn, 0);
++	irq_info[irq] = mk_irq_info(IRQT_PIRQ, bind_pirq.pirq, evtchn);
 +
-+	return count;
++ out:
++	unmask_evtchn(evtchn);
++	pirq_unmask_notify(irq_to_pirq(irq));
++
++	return 0;
 +}
 +
-+static int balloon_read(char *page, char **start, off_t off,
-+			int count, int *eof, void *data)
++static void shutdown_pirq(unsigned int irq)
 +{
-+	int len;
++	struct evtchn_close close;
++	int evtchn = evtchn_from_irq(irq);
 +
-+	len = sprintf(
-+		page,
-+		"Current allocation: %8lu kB\n"
-+		"Requested target:   %8lu kB\n"
-+		"Low-mem balloon:    %8lu kB\n"
-+		"High-mem balloon:   %8lu kB\n"
-+		"Driver pages:       %8lu kB\n"
-+		"Xen hard limit:     ",
-+		PAGES2KB(bs.current_pages), PAGES2KB(bs.target_pages), 
-+		PAGES2KB(bs.balloon_low), PAGES2KB(bs.balloon_high),
-+		PAGES2KB(bs.driver_pages));
++	if (!VALID_EVTCHN(evtchn))
++		return;
 +
-+	if (bs.hard_limit != ~0UL)
-+		len += sprintf(page + len, "%8lu kB\n",
-+			       PAGES2KB(bs.hard_limit));
-+	else
-+		len += sprintf(page + len, "     ??? kB\n");
++	mask_evtchn(evtchn);
 +
-+	*eof = 1;
-+	return len;
++	close.port = evtchn;
++	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
++		BUG();
++
++	bind_evtchn_to_cpu(evtchn, 0);
++	evtchn_to_irq[evtchn] = -1;
++	irq_info[irq] = mk_irq_info(IRQT_PIRQ, index_from_irq(irq), 0);
 +}
-+#endif
 +
-+static struct notifier_block xenstore_notifier;
++static void enable_pirq(unsigned int irq)
++{
++	startup_pirq(irq);
++}
 +
-+static int __init balloon_init(void)
++static void disable_pirq(unsigned int irq)
 +{
-+#if defined(CONFIG_X86) && defined(CONFIG_XEN) 
-+	unsigned long pfn;
-+	struct page *page;
-+#endif
++}
 +
-+	if (!is_running_on_xen())
-+		return -ENODEV;
++static void ack_pirq(unsigned int irq)
++{
++	int evtchn = evtchn_from_irq(irq);
 +
-+	IPRINTK("Initialising balloon driver.\n");
++	move_native_irq(irq);
 +
-+#ifdef CONFIG_XEN
-+	bs.current_pages = min(xen_start_info->nr_pages, max_pfn);
-+	totalram_pages   = bs.current_pages;
-+#else 
-+	bs.current_pages = totalram_pages; 
-+#endif
-+	bs.target_pages  = bs.current_pages;
-+	bs.balloon_low   = 0;
-+	bs.balloon_high  = 0;
-+	bs.driver_pages  = 0UL;
-+	bs.hard_limit    = ~0UL;
++	if (VALID_EVTCHN(evtchn)) {
++		mask_evtchn(evtchn);
++		clear_evtchn(evtchn);
++	}
++}
 +
-+	init_timer(&balloon_timer);
-+	balloon_timer.data = 0;
-+	balloon_timer.function = balloon_alarm;
-+    
-+#ifdef CONFIG_PROC_FS
-+	if ((balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL) {
-+		WPRINTK("Unable to create /proc/xen/balloon.\n");
-+		return -1;
++static void end_pirq(unsigned int irq)
++{
++	int evtchn = evtchn_from_irq(irq);
++
++	if ((irq_desc[irq].status & (IRQ_DISABLED|IRQ_PENDING)) ==
++	    (IRQ_DISABLED|IRQ_PENDING)) {
++		shutdown_pirq(irq);
++	} else if (VALID_EVTCHN(evtchn)) {
++		unmask_evtchn(evtchn);
++		pirq_unmask_notify(irq_to_pirq(irq));
 +	}
++}
 +
-+	balloon_pde->read_proc  = balloon_read;
-+	balloon_pde->write_proc = balloon_write;
++static struct hw_interrupt_type pirq_type = {
++	.typename = "Phys-irq",
++	.startup  = startup_pirq,
++	.shutdown = shutdown_pirq,
++	.enable   = enable_pirq,
++	.disable  = disable_pirq,
++	.ack      = ack_pirq,
++	.end      = end_pirq,
++#ifdef CONFIG_SMP
++	.set_affinity = set_affinity_irq,
 +#endif
-+	balloon_sysfs_init();
++	.retrigger = resend_irq_on_evtchn,
++};
 +
-+#if defined(CONFIG_X86) && defined(CONFIG_XEN) 
-+	/* Initialise the balloon with excess memory space. */
-+	for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
-+		page = pfn_to_page(pfn);
-+		if (!PageReserved(page))
-+			balloon_append(page);
-+	}
-+#endif
++int irq_ignore_unhandled(unsigned int irq)
++{
++	struct physdev_irq_status_query irq_status = { .irq = irq };
 +
-+	target_watch.callback = watch_target;
-+	xenstore_notifier.notifier_call = balloon_init_watcher;
++	if (!is_running_on_xen())
++		return 0;
 +
-+	register_xenstore_notifier(&xenstore_notifier);
-+    
-+	return 0;
++	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
++		return 0;
++	return !!(irq_status.flags & XENIRQSTAT_shared);
 +}
 +
-+subsys_initcall(balloon_init);
-+
-+static void balloon_exit(void) 
++void notify_remote_via_irq(int irq)
 +{
-+    /* XXX - release balloon here */
-+    return; 
-+}
++	int evtchn = evtchn_from_irq(irq);
 +
-+module_exit(balloon_exit); 
++	if (VALID_EVTCHN(evtchn))
++		notify_remote_via_evtchn(evtchn);
++}
++EXPORT_SYMBOL_GPL(notify_remote_via_irq);
 +
-+void balloon_update_driver_allowance(long delta)
++int irq_to_evtchn_port(int irq)
 +{
-+	unsigned long flags;
-+
-+	balloon_lock(flags);
-+	bs.driver_pages += delta;
-+	balloon_unlock(flags);
++	return evtchn_from_irq(irq);
 +}
++EXPORT_SYMBOL_GPL(irq_to_evtchn_port);
 +
-+#ifdef CONFIG_XEN
-+static int dealloc_pte_fn(
-+	pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
++void mask_evtchn(int port)
 +{
-+	unsigned long mfn = pte_mfn(*pte);
-+	int ret;
-+	struct xen_memory_reservation reservation = {
-+		.nr_extents   = 1,
-+		.extent_order = 0,
-+		.domid        = DOMID_SELF
-+	};
-+	set_xen_guest_handle(reservation.extent_start, &mfn);
-+	set_pte_at(&init_mm, addr, pte, __pte_ma(0));
-+	set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY);
-+	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
-+	BUG_ON(ret != 1);
-+	return 0;
++	shared_info_t *s = HYPERVISOR_shared_info;
++	synch_set_bit(port, s->evtchn_mask);
 +}
-+#endif
++EXPORT_SYMBOL_GPL(mask_evtchn);
 +
-+struct page **alloc_empty_pages_and_pagevec(int nr_pages)
++void unmask_evtchn(int port)
 +{
-+	unsigned long vaddr, flags;
-+	struct page *page, **pagevec;
-+	int i, ret;
++	shared_info_t *s = HYPERVISOR_shared_info;
++	unsigned int cpu = smp_processor_id();
++	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
 +
-+	pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL);
-+	if (pagevec == NULL)
-+		return NULL;
++	BUG_ON(!irqs_disabled());
 +
-+	for (i = 0; i < nr_pages; i++) {
-+		page = pagevec[i] = alloc_page(GFP_KERNEL);
-+		if (page == NULL)
-+			goto err;
++	/* Slow path (hypercall) if this is a non-local port. */
++	if (unlikely(cpu != cpu_from_evtchn(port))) {
++		struct evtchn_unmask unmask = { .port = port };
++		VOID(HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask));
++		return;
++	}
 +
-+		vaddr = (unsigned long)page_address(page);
++	synch_clear_bit(port, s->evtchn_mask);
 +
-+		scrub_pages(vaddr, 1);
++	/* Did we miss an interrupt 'edge'? Re-fire if so. */
++	if (synch_test_bit(port, s->evtchn_pending) &&
++	    !synch_test_and_set_bit(port / BITS_PER_LONG,
++				    &vcpu_info->evtchn_pending_sel))
++		vcpu_info->evtchn_upcall_pending = 1;
++}
++EXPORT_SYMBOL_GPL(unmask_evtchn);
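++
++/*
++ * Worked example of the edge-recovery check above (assuming
++ * BITS_PER_LONG == 32): unmasking port 69 while its pending bit is
++ * already set re-raises selector bit 69 / 32 == 2 plus the per-VCPU
++ * upcall flag, so the event is redelivered instead of being lost.
++ */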
 +
-+		balloon_lock(flags);
++void disable_all_local_evtchn(void)
++{
++	unsigned i, cpu = smp_processor_id();
++	shared_info_t *s = HYPERVISOR_shared_info;
 +
-+		if (xen_feature(XENFEAT_auto_translated_physmap)) {
-+			unsigned long gmfn = page_to_pfn(page);
-+			struct xen_memory_reservation reservation = {
-+				.nr_extents   = 1,
-+				.extent_order = 0,
-+				.domid        = DOMID_SELF
-+			};
-+			set_xen_guest_handle(reservation.extent_start, &gmfn);
-+			ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-+						   &reservation);
-+			if (ret == 1)
-+				ret = 0; /* success */
-+		} else {
-+#ifdef CONFIG_XEN
-+			ret = apply_to_page_range(&init_mm, vaddr, PAGE_SIZE,
-+						  dealloc_pte_fn, NULL);
-+#else
-+			/* Cannot handle non-auto translate mode. */
-+			ret = 1;
-+#endif
-+		}
++	for (i = 0; i < NR_EVENT_CHANNELS; ++i)
++		if (cpu_from_evtchn(i) == cpu)
++			synch_set_bit(i, &s->evtchn_mask[0]);
++}
 +
-+		if (ret != 0) {
-+			balloon_unlock(flags);
-+			__free_page(page);
-+			goto err;
-+		}
++static void restore_cpu_virqs(unsigned int cpu)
++{
++	struct evtchn_bind_virq bind_virq;
++	int virq, irq, evtchn;
 +
-+		totalram_pages = --bs.current_pages;
++	for (virq = 0; virq < NR_VIRQS; virq++) {
++		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
++			continue;
 +
-+		balloon_unlock(flags);
-+	}
++		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));
 +
-+ out:
-+	schedule_work(&balloon_worker);
-+#ifdef CONFIG_XEN
-+	flush_tlb_all();
-+#endif
-+	return pagevec;
++		/* Get a new binding from Xen. */
++		bind_virq.virq = virq;
++		bind_virq.vcpu = cpu;
++		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
++						&bind_virq) != 0)
++			BUG();
++		evtchn = bind_virq.port;
 +
-+ err:
-+	balloon_lock(flags);
-+	while (--i >= 0)
-+		balloon_append(pagevec[i]);
-+	balloon_unlock(flags);
-+	kfree(pagevec);
-+	pagevec = NULL;
-+	goto out;
++		/* Record the new mapping. */
++		evtchn_to_irq[evtchn] = irq;
++		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
++		bind_evtchn_to_cpu(evtchn, cpu);
++
++		/* Ready for use. */
++		unmask_evtchn(evtchn);
++	}
 +}
 +
-+void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
++static void restore_cpu_ipis(unsigned int cpu)
 +{
-+	unsigned long flags;
-+	int i;
++	struct evtchn_bind_ipi bind_ipi;
++	int ipi, irq, evtchn;
 +
-+	if (pagevec == NULL)
-+		return;
++	for (ipi = 0; ipi < NR_IPIS; ipi++) {
++		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
++			continue;
 +
-+	balloon_lock(flags);
-+	for (i = 0; i < nr_pages; i++) {
-+		BUG_ON(page_count(pagevec[i]) != 1);
-+		balloon_append(pagevec[i]);
-+	}
-+	balloon_unlock(flags);
++		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));
 +
-+	kfree(pagevec);
++		/* Get a new binding from Xen. */
++		bind_ipi.vcpu = cpu;
++		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
++						&bind_ipi) != 0)
++			BUG();
++		evtchn = bind_ipi.port;
 +
-+	schedule_work(&balloon_worker);
++		/* Record the new mapping. */
++		evtchn_to_irq[evtchn] = irq;
++		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
++		bind_evtchn_to_cpu(evtchn, cpu);
++
++		/* Ready for use. */
++		unmask_evtchn(evtchn);
++	}
 +}
 +
-+void balloon_release_driver_page(struct page *page)
++void irq_resume(void)
 +{
-+	unsigned long flags;
++	unsigned int cpu, pirq, irq, evtchn;
 +
-+	balloon_lock(flags);
-+	balloon_append(page);
-+	bs.driver_pages--;
-+	balloon_unlock(flags);
++	init_evtchn_cpu_bindings();
 +
-+	schedule_work(&balloon_worker);
-+}
++	/* New event-channel space is not 'live' yet. */
++	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
++		mask_evtchn(evtchn);
 +
-+EXPORT_SYMBOL_GPL(balloon_update_driver_allowance);
-+EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec);
-+EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec);
-+EXPORT_SYMBOL_GPL(balloon_release_driver_page);
++	/* Check that no PIRQs are still bound. */
++	for (pirq = 0; pirq < NR_PIRQS; pirq++)
++		BUG_ON(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);
 +
-+MODULE_LICENSE("Dual BSD/GPL");
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/balloon/common.h tmp-linux-2.6-xen.patch/drivers/xen/balloon/common.h
---- pristine-linux-2.6.18.2/drivers/xen/balloon/common.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/balloon/common.h	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,58 @@
-+/******************************************************************************
-+ * balloon/common.h
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
++	/* No IRQ <-> event-channel mappings. */
++	for (irq = 0; irq < NR_IRQS; irq++)
++		irq_info[irq] &= ~((1U << _EVTCHN_BITS) - 1);
++	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
++		evtchn_to_irq[evtchn] = -1;
 +
-+#ifndef __XEN_BALLOON_COMMON_H__
-+#define __XEN_BALLOON_COMMON_H__
++	for_each_possible_cpu(cpu) {
++		restore_cpu_virqs(cpu);
++		restore_cpu_ipis(cpu);
++	}
 +
-+#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
++}
 +
-+struct balloon_stats {
-+	/* We aim for 'current allocation' == 'target allocation'. */
-+	unsigned long current_pages;
-+	unsigned long target_pages;
-+	/* We may hit the hard limit in Xen. If we do then we remember it. */
-+	unsigned long hard_limit;
-+	/*
-+	 * Drivers may alter the memory reservation independently, but they
-+	 * must inform the balloon driver so we avoid hitting the hard limit.
-+	 */
-+	unsigned long driver_pages;
-+	/* Number of pages in high- and low-memory balloons. */
-+	unsigned long balloon_low;
-+	unsigned long balloon_high;
-+};
++void __init xen_init_IRQ(void)
++{
++	unsigned int i;
 +
-+extern struct balloon_stats balloon_stats;
-+#define bs balloon_stats
++	init_evtchn_cpu_bindings();
 +
-+int balloon_sysfs_init(void);
-+void balloon_sysfs_exit(void);
++	/* No event channels are 'live' right now. */
++	for (i = 0; i < NR_EVENT_CHANNELS; i++)
++		mask_evtchn(i);
 +
-+void balloon_set_new_target(unsigned long target);
++	/* No IRQ -> event-channel mappings. */
++	for (i = 0; i < NR_IRQS; i++)
++		irq_info[i] = IRQ_UNBOUND;
 +
-+#endif /* __XEN_BALLOON_COMMON_H__ */
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/balloon/Makefile tmp-linux-2.6-xen.patch/drivers/xen/balloon/Makefile
---- pristine-linux-2.6.18.2/drivers/xen/balloon/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/balloon/Makefile	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,2 @@
++	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
++	for (i = 0; i < NR_DYNIRQS; i++) {
++		irq_bindcount[dynirq_to_irq(i)] = 0;
 +
-+obj-y := balloon.o sysfs.o
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/balloon/sysfs.c tmp-linux-2.6-xen.patch/drivers/xen/balloon/sysfs.c
---- pristine-linux-2.6.18.2/drivers/xen/balloon/sysfs.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/balloon/sysfs.c	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,170 @@
++		irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
++		irq_desc[dynirq_to_irq(i)].action = NULL;
++		irq_desc[dynirq_to_irq(i)].depth = 1;
++		irq_desc[dynirq_to_irq(i)].chip = &dynirq_type;
++	}
++
++	/* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
++	for (i = 0; i < NR_PIRQS; i++) {
++		irq_bindcount[pirq_to_irq(i)] = 1;
++
++#ifdef RTC_IRQ
++		/* If not domain 0, force our RTC driver to fail its probe. */
++		if ((i == RTC_IRQ) && !is_initial_xendomain())
++			continue;
++#endif
++
++		irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
++		irq_desc[pirq_to_irq(i)].action = NULL;
++		irq_desc[pirq_to_irq(i)].depth = 1;
++		irq_desc[pirq_to_irq(i)].chip = &pirq_type;
++	}
++}
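++
++/*
++ * Layout note (illustrative): dynirq_to_irq() and pirq_to_irq() place
++ * dynamic and physical IRQs in disjoint slices of the flat irq_desc[]
++ * space; the two loops above simply seed each slice with its
++ * interrupt chip and a disabled, depth-1 initial state.
++ */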
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/core/features.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/core/features.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,34 @@
 +/******************************************************************************
-+ * balloon/sysfs.c
++ * features.c
 + *
-+ * Xen balloon driver - sysfs interfaces.
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
++ * Xen feature flags.
++ *
++ * Copyright (c) 2006, Ian Campbell, XenSource Inc.
 + */
-+
-+#include <linux/capability.h>
-+#include <linux/errno.h>
-+#include <linux/stat.h>
-+#include <linux/string.h>
-+#include <linux/sysdev.h>
-+#include "common.h"
++#include <linux/types.h>
++#include <linux/cache.h>
++#include <linux/module.h>
++#include <asm/hypervisor.h>
++#include <xen/features.h>
 +
 +#ifdef HAVE_XEN_PLATFORM_COMPAT_H
 +#include <xen/platform-compat.h>
 +#endif
 +
-+#define BALLOON_CLASS_NAME "memory"
-+
-+#define BALLOON_SHOW(name, format, args...)			\
-+	static ssize_t show_##name(struct sys_device *dev,	\
-+				   char *buf)			\
-+	{							\
-+		return sprintf(buf, format, ##args);		\
-+	}							\
-+	static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)
-+
-+BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(bs.current_pages));
-+BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(bs.balloon_low));
-+BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(bs.balloon_high));
-+BALLOON_SHOW(hard_limit_kb,
-+	     (bs.hard_limit!=~0UL) ? "%lu\n" : "???\n",
-+	     (bs.hard_limit!=~0UL) ? PAGES2KB(bs.hard_limit) : 0);
-+BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(bs.driver_pages));
-+
-+static ssize_t show_target_kb(struct sys_device *dev, char *buf)
-+{
-+	return sprintf(buf, "%lu\n", PAGES2KB(bs.target_pages));
-+}
++u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly;
++/* Not a GPL symbol: used in ubiquitous macros, so GPL-only would be too restrictive. */
++EXPORT_SYMBOL(xen_features);
 +
-+static ssize_t store_target_kb(struct sys_device *dev,
-+			       const char *buf,
-+			       size_t count)
++void setup_xen_features(void)
 +{
-+	char memstring[64], *endchar;
-+	unsigned long long target_bytes;
++	xen_feature_info_t fi;
++	int i, j;
 +
-+	if (!capable(CAP_SYS_ADMIN))
-+		return -EPERM;
-+	
-+	if (count <= 1)
-+		return -EBADMSG; /* runt */
-+	if (count > sizeof(memstring))
-+		return -EFBIG;   /* too long */
-+	strcpy(memstring, buf);
-+	
-+	target_bytes = memparse(memstring, &endchar);
-+	balloon_set_new_target(target_bytes >> PAGE_SHIFT);
-+	
-+	return count;
++	for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) {
++		fi.submap_idx = i;
++		if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
++			break;
++		for (j = 0; j < 32; j++)
++			xen_features[i * 32 + j] = !!(fi.submap & (1 << j));
++	}
 +}
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/core/firmware.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/core/firmware.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,74 @@
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/edd.h>
++#include <video/edid.h>
++#include <xen/interface/platform.h>
++#include <asm/hypervisor.h>
 +
-+static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR,
-+		   show_target_kb, store_target_kb);
-+
-+static struct sysdev_attribute *balloon_attrs[] = {
-+	&attr_target_kb,
-+};
-+
-+static struct attribute *balloon_info_attrs[] = {
-+	&attr_current_kb.attr,
-+	&attr_low_kb.attr,
-+	&attr_high_kb.attr,
-+	&attr_hard_limit_kb.attr,
-+	&attr_driver_kb.attr,
-+	NULL
-+};
-+
-+static struct attribute_group balloon_info_group = {
-+	.name = "info",
-+	.attrs = balloon_info_attrs,
-+};
-+
-+static struct sysdev_class balloon_sysdev_class = {
-+	set_kset_name(BALLOON_CLASS_NAME),
-+};
++#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
++void __init copy_edd(void)
++{
++	int ret;
++	struct xen_platform_op op;
 +
-+static struct sys_device balloon_sysdev;
++	if (!is_initial_xendomain())
++		return;
 +
-+static int register_balloon(struct sys_device *sysdev)
-+{
-+	int i, error;
++	op.cmd = XENPF_firmware_info;
 +
-+	error = sysdev_class_register(&balloon_sysdev_class);
-+	if (error)
-+		return error;
++	op.u.firmware_info.type = XEN_FW_DISK_INFO;
++	for (op.u.firmware_info.index = 0;
++	     edd.edd_info_nr < EDDMAXNR;
++	     op.u.firmware_info.index++) {
++		struct edd_info *info = edd.edd_info + edd.edd_info_nr;
++
++		info->params.length = sizeof(info->params);
++		set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
++				     &info->params);
++		ret = HYPERVISOR_platform_op(&op);
++		if (ret)
++			break;
 +
-+	sysdev->id = 0;
-+	sysdev->cls = &balloon_sysdev_class;
++#define C(x) info->x = op.u.firmware_info.u.disk_info.x
++		C(device);
++		C(version);
++		C(interface_support);
++		C(legacy_max_cylinder);
++		C(legacy_max_head);
++		C(legacy_sectors_per_track);
++#undef C
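++		/*
++		 * (Illustrative note: the C() helper above simply mirrors
++		 * each disk_info field that Xen returned into the current
++		 * edd_info slot; it is #undef'd at once so the short name
++		 * does not leak past this loop.)
++		 */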
 +
-+	error = sysdev_register(sysdev);
-+	if (error) {
-+		sysdev_class_unregister(&balloon_sysdev_class);
-+		return error;
++		edd.edd_info_nr++;
 +	}
 +
-+	for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) {
-+		error = sysdev_create_file(sysdev, balloon_attrs[i]);
-+		if (error)
-+			goto fail;
++	op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
++	for (op.u.firmware_info.index = 0;
++	     edd.mbr_signature_nr < EDD_MBR_SIG_MAX;
++	     op.u.firmware_info.index++) {
++		ret = HYPERVISOR_platform_op(&op);
++		if (ret)
++			break;
++		edd.mbr_signature[edd.mbr_signature_nr++] =
++			op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
 +	}
-+
-+	error = sysfs_create_group(&sysdev->kobj, &balloon_info_group);
-+	if (error)
-+		goto fail;
-+	
-+	return 0;
-+
-+ fail:
-+	while (--i >= 0)
-+		sysdev_remove_file(sysdev, balloon_attrs[i]);
-+	sysdev_unregister(sysdev);
-+	sysdev_class_unregister(&balloon_sysdev_class);
-+	return error;
 +}
++#endif /* CONFIG_EDD || CONFIG_EDD_MODULE */
 +
-+static void unregister_balloon(struct sys_device *sysdev)
++void __init copy_edid(void)
 +{
-+	int i;
-+
-+	sysfs_remove_group(&sysdev->kobj, &balloon_info_group);
-+	for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++)
-+		sysdev_remove_file(sysdev, balloon_attrs[i]);
-+	sysdev_unregister(sysdev);
-+	sysdev_class_unregister(&balloon_sysdev_class);
-+}
++#if defined(CONFIG_FIRMWARE_EDID) && defined(CONFIG_X86)
++	struct xen_platform_op op;
 +
-+int balloon_sysfs_init(void)
-+{
-+	return register_balloon(&balloon_sysdev);
-+}
++	if (!is_initial_xendomain())
++		return;
 +
-+void balloon_sysfs_exit(void)
-+{
-+	unregister_balloon(&balloon_sysdev);
++	op.cmd = XENPF_firmware_info;
++	op.u.firmware_info.index = 0;
++	op.u.firmware_info.type = XEN_FW_VBEDDC_INFO;
++	set_xen_guest_handle(op.u.firmware_info.u.vbeddc_info.edid,
++			     edid_info.dummy);
++	if (HYPERVISOR_platform_op(&op) != 0)
++		memset(edid_info.dummy, 0x13, sizeof(edid_info.dummy));
++#endif
 +}
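
copy_edd() and copy_edid() above share one pattern: fill a xen_platform_op
with a firmware-info type and index, then call HYPERVISOR_platform_op()
until it reports no more entries. The C(x) helper is a plain copy-by-name
idiom; a self-contained sketch of that idiom with hypothetical types
(illustrative only, not part of the patch):

	/* Illustrative only: the copy-by-name idiom from copy_edd(),
	 * with hypothetical source and destination types. */
	struct src_info { int device; int version; };
	struct dst_info { int device; int version; };

	static void copy_info(struct dst_info *d, const struct src_info *s)
	{
	#define C(x) d->x = s->x
		C(device);
		C(version);
	#undef C
	}
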
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/blkback/blkback.c tmp-linux-2.6-xen.patch/drivers/xen/blkback/blkback.c
---- pristine-linux-2.6.18.2/drivers/xen/blkback/blkback.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/blkback/blkback.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,617 @@
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/core/gnttab.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/core/gnttab.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,769 @@
 +/******************************************************************************
-+ * arch/xen/drivers/blkif/backend/main.c
-+ * 
-+ * Back-end of the driver for virtual block devices. This portion of the
-+ * driver exports a 'unified' block-device interface that can be accessed
-+ * by any operating system that implements a compatible front end. A 
-+ * reference front-end implementation can be found in:
-+ *  arch/xen/drivers/blkif/frontend
-+ * 
-+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
-+ * Copyright (c) 2005, Christopher Clark
-+ * 
++ * gnttab.c
++ *
++ * Granting foreign access to our memory reservation.
++ *
++ * Copyright (c) 2005-2006, Christopher Clark
++ * Copyright (c) 2004-2005, K A Fraser
++ *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License version 2
 + * as published by the Free Software Foundation; or, when distributed
 + * separately from the Linux kernel or incorporated into other
 + * software packages, subject to the following license:
-+ * 
++ *
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this source file (the "Software"), to deal in the Software without
 + * restriction, including without limitation the rights to use, copy, modify,
 + * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 + * and to permit persons to whom the Software is furnished to do so, subject to
 + * the following conditions:
-+ * 
++ *
 + * The above copyright notice and this permission notice shall be included in
 + * all copies or substantial portions of the Software.
-+ * 
++ *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
@@ -51276,3050 +87244,2845 @@
 + * IN THE SOFTWARE.
 + */
 +
-+#include <linux/spinlock.h>
-+#include <linux/kthread.h>
-+#include <linux/list.h>
-+#include <xen/balloon.h>
-+#include <asm/hypervisor.h>
-+#include "common.h"
-+
-+/*
-+ * These are rather arbitrary. They are fairly large because adjacent requests
-+ * pulled from a communication ring are quite likely to end up being part of
-+ * the same scatter/gather request at the disc.
-+ * 
-+ * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
-+ * 
-+ * This will increase the chances of being able to write whole tracks.
-+ * 64 should be enough to keep us competitive with Linux.
-+ */
-+static int blkif_reqs = 64;
-+module_param_named(reqs, blkif_reqs, int, 0);
-+MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
-+
-+/* Run-time switchable: /sys/module/blkback/parameters/ */
-+static unsigned int log_stats = 0;
-+static unsigned int debug_lvl = 0;
-+module_param(log_stats, int, 0644);
-+module_param(debug_lvl, int, 0644);
-+
-+/*
-+ * Each outstanding request that we've passed to the lower device layers has a 
-+ * 'pending_req' allocated to it. Each buffer_head that completes decrements 
-+ * the pendcnt towards zero. When it hits zero, the specified domain has a 
-+ * response queued for it, with the saved 'id' passed back.
-+ */
-+typedef struct {
-+	blkif_t       *blkif;
-+	u64            id;
-+	int            nr_pages;
-+	atomic_t       pendcnt;
-+	unsigned short operation;
-+	int            status;
-+	struct list_head free_list;
-+} pending_req_t;
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/seqlock.h>
++#include <xen/interface/xen.h>
++#include <xen/gnttab.h>
++#include <asm/pgtable.h>
++#include <asm/uaccess.h>
++#include <asm/synch_bitops.h>
++#include <asm/io.h>
++#include <xen/interface/memory.h>
++#include <xen/driver_util.h>
++#include <asm/gnttab_dma.h>
 +
-+static pending_req_t *pending_reqs;
-+static struct list_head pending_free;
-+static DEFINE_SPINLOCK(pending_free_lock);
-+static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
 +
-+#define BLKBACK_INVALID_HANDLE (~0)
++/* External tools reserve the first few grant-table entries. */
++#define NR_RESERVED_ENTRIES 8
++#define GNTTAB_LIST_END 0xffffffff
++#define ENTRIES_PER_GRANT_FRAME (PAGE_SIZE / sizeof(grant_entry_t))
 +
-+static struct page **pending_pages;
-+static grant_handle_t *pending_grant_handles;
++static grant_ref_t **gnttab_list;
++static unsigned int nr_grant_frames;
++static unsigned int boot_max_nr_grant_frames;
++static int gnttab_free_count;
++static grant_ref_t gnttab_free_head;
++static DEFINE_SPINLOCK(gnttab_list_lock);
 +
-+static inline int vaddr_pagenr(pending_req_t *req, int seg)
-+{
-+	return (req - pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
-+}
++static struct grant_entry *shared;
 +
-+static inline unsigned long vaddr(pending_req_t *req, int seg)
-+{
-+	unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
-+	return (unsigned long)pfn_to_kaddr(pfn);
-+}
++static struct gnttab_free_callback *gnttab_free_callback_list;
 +
-+#define pending_handle(_req, _seg) \
-+	(pending_grant_handles[vaddr_pagenr(_req, _seg)])
++static int gnttab_expand(unsigned int req_entries);
 +
++#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
++#define gnttab_entry(entry) (gnttab_list[(entry) / RPP][(entry) % RPP])
 +
-+static int do_block_io_op(blkif_t *blkif);
-+static void dispatch_rw_block_io(blkif_t *blkif,
-+				 blkif_request_t *req,
-+				 pending_req_t *pending_req);
-+static void make_response(blkif_t *blkif, u64 id,
-+			  unsigned short op, int st);
++#define nr_freelist_frames(grant_frames)				\
++	(((grant_frames) * ENTRIES_PER_GRANT_FRAME + RPP - 1) / RPP)
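
gnttab_entry() above implements a two-level free list: gnttab_list[] holds
page-sized arrays of grant_ref_t, and an entry index splits into a page
number and a slot within that page. A worked sketch of the arithmetic
(illustrative only, not part of the patch), assuming 4 KiB pages and 4-byte
grant_ref_t so that RPP == 1024:

	/* Illustrative arithmetic only: entry 2500 lands at
	 * gnttab_list[2][452], since 2500 / 1024 == 2 and
	 * 2500 % 1024 == 452. */
	#define RPP_EXAMPLE (4096 / 4)	/* refs per 4 KiB page */

	static unsigned int example_page(unsigned int entry)
	{
		return entry / RPP_EXAMPLE;	/* which free-list page */
	}

	static unsigned int example_slot(unsigned int entry)
	{
		return entry % RPP_EXAMPLE;	/* slot within that page */
	}
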
 +
-+/******************************************************************
-+ * misc small helpers
-+ */
-+static pending_req_t* alloc_req(void)
++static int get_free_entries(int count)
 +{
-+	pending_req_t *req = NULL;
 +	unsigned long flags;
++	int ref, rc;
++	grant_ref_t head;
 +
-+	spin_lock_irqsave(&pending_free_lock, flags);
-+	if (!list_empty(&pending_free)) {
-+		req = list_entry(pending_free.next, pending_req_t, free_list);
-+		list_del(&req->free_list);
++	spin_lock_irqsave(&gnttab_list_lock, flags);
++
++	if ((gnttab_free_count < count) &&
++	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
++		spin_unlock_irqrestore(&gnttab_list_lock, flags);
++		return rc;
 +	}
-+	spin_unlock_irqrestore(&pending_free_lock, flags);
-+	return req;
-+}
 +
-+static void free_req(pending_req_t *req)
-+{
-+	unsigned long flags;
-+	int was_empty;
++	ref = head = gnttab_free_head;
++	gnttab_free_count -= count;
++	while (count-- > 1)
++		head = gnttab_entry(head);
++	gnttab_free_head = gnttab_entry(head);
++	gnttab_entry(head) = GNTTAB_LIST_END;
 +
-+	spin_lock_irqsave(&pending_free_lock, flags);
-+	was_empty = list_empty(&pending_free);
-+	list_add(&req->free_list, &pending_free);
-+	spin_unlock_irqrestore(&pending_free_lock, flags);
-+	if (was_empty)
-+		wake_up(&pending_free_wq);
-+}
++	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 +
-+static void unplug_queue(blkif_t *blkif)
-+{
-+	if (blkif->plug == NULL)
-+		return;
-+	if (blkif->plug->unplug_fn)
-+		blkif->plug->unplug_fn(blkif->plug);
-+	blk_put_queue(blkif->plug);
-+	blkif->plug = NULL;
++	return ref;
 +}
 +
-+static void plug_queue(blkif_t *blkif, struct bio *bio)
-+{
-+	request_queue_t *q = bdev_get_queue(bio->bi_bdev);
-+
-+	if (q == blkif->plug)
-+		return;
-+	unplug_queue(blkif);
-+	blk_get_queue(q);
-+	blkif->plug = q;
-+}
++#define get_free_entry() get_free_entries(1)
 +
-+static void fast_flush_area(pending_req_t *req)
++static void do_free_callbacks(void)
 +{
-+	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+	unsigned int i, invcount = 0;
-+	grant_handle_t handle;
-+	int ret;
++	struct gnttab_free_callback *callback, *next;
 +
-+	for (i = 0; i < req->nr_pages; i++) {
-+		handle = pending_handle(req, i);
-+		if (handle == BLKBACK_INVALID_HANDLE)
-+			continue;
-+		gnttab_set_unmap_op(&unmap[i], vaddr(req, i), GNTMAP_host_map,
-+				    handle);
-+		pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
-+		invcount++;
-+	}
++	callback = gnttab_free_callback_list;
++	gnttab_free_callback_list = NULL;
 +
-+	ret = HYPERVISOR_grant_table_op(
-+		GNTTABOP_unmap_grant_ref, unmap, invcount);
-+	BUG_ON(ret);
++	while (callback != NULL) {
++		next = callback->next;
++		if (gnttab_free_count >= callback->count) {
++			callback->next = NULL;
++			callback->fn(callback->arg);
++		} else {
++			callback->next = gnttab_free_callback_list;
++			gnttab_free_callback_list = callback;
++		}
++		callback = next;
++	}
 +}
 +
-+/******************************************************************
-+ * SCHEDULER FUNCTIONS
-+ */
-+
-+static void print_stats(blkif_t *blkif)
++static inline void check_free_callbacks(void)
 +{
-+	printk(KERN_DEBUG "%s: oo %3d  |  rd %4d  |  wr %4d  |  br %4d\n",
-+	       current->comm, blkif->st_oo_req,
-+	       blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
-+	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
-+	blkif->st_rd_req = 0;
-+	blkif->st_wr_req = 0;
-+	blkif->st_oo_req = 0;
++	if (unlikely(gnttab_free_callback_list))
++		do_free_callbacks();
 +}
 +
-+int blkif_schedule(void *arg)
++static void put_free_entry(grant_ref_t ref)
 +{
-+	blkif_t *blkif = arg;
-+
-+	blkif_get(blkif);
-+
-+	if (debug_lvl)
-+		printk(KERN_DEBUG "%s: started\n", current->comm);
-+
-+	while (!kthread_should_stop()) {
-+		if (try_to_freeze())
-+			continue;
-+
-+		wait_event_interruptible(
-+			blkif->wq,
-+			blkif->waiting_reqs || kthread_should_stop());
-+		wait_event_interruptible(
-+			pending_free_wq,
-+			!list_empty(&pending_free) || kthread_should_stop());
-+
-+		blkif->waiting_reqs = 0;
-+		smp_mb(); /* clear flag *before* checking for work */
-+
-+		if (do_block_io_op(blkif))
-+			blkif->waiting_reqs = 1;
-+		unplug_queue(blkif);
-+
-+		if (log_stats && time_after(jiffies, blkif->st_print))
-+			print_stats(blkif);
-+	}
-+
-+	if (log_stats)
-+		print_stats(blkif);
-+	if (debug_lvl)
-+		printk(KERN_DEBUG "%s: exiting\n", current->comm);
-+
-+	blkif->xenblkd = NULL;
-+	blkif_put(blkif);
-+
-+	return 0;
++	unsigned long flags;
++	spin_lock_irqsave(&gnttab_list_lock, flags);
++	gnttab_entry(ref) = gnttab_free_head;
++	gnttab_free_head = ref;
++	gnttab_free_count++;
++	check_free_callbacks();
++	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 +}
 +
-+/******************************************************************
-+ * COMPLETION CALLBACK -- Called as bh->b_end_io()
++/*
++ * Public grant-issuing interface functions
 + */
 +
-+static void __end_block_io_op(pending_req_t *pending_req, int error)
-+{
-+	/* An error fails the entire request. */
-+	if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
-+	    (error == -EOPNOTSUPP)) {
-+		DPRINTK("blkback: write barrier op failed, not supported\n");
-+		blkback_barrier(XBT_NIL, pending_req->blkif->be, 0);
-+		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
-+	} else if (error) {
-+		DPRINTK("Buffer not up-to-date at end of operation, "
-+			"error=%d\n", error);
-+		pending_req->status = BLKIF_RSP_ERROR;
-+	}
-+
-+	if (atomic_dec_and_test(&pending_req->pendcnt)) {
-+		fast_flush_area(pending_req);
-+		make_response(pending_req->blkif, pending_req->id,
-+			      pending_req->operation, pending_req->status);
-+		blkif_put(pending_req->blkif);
-+		free_req(pending_req);
-+	}
-+}
-+
-+static int end_block_io_op(struct bio *bio, unsigned int done, int error)
++int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
++				int flags)
 +{
-+	if (bio->bi_size != 0)
-+		return 1;
-+	__end_block_io_op(bio->bi_private, error);
-+	bio_put(bio);
-+	return error;
-+}
++	int ref;
 +
++	if (unlikely((ref = get_free_entry()) < 0))
++		return -ENOSPC;
 +
-+/******************************************************************************
-+ * NOTIFICATION FROM GUEST OS.
-+ */
++	shared[ref].frame = frame;
++	shared[ref].domid = domid;
++	wmb();
++	BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing));
++	shared[ref].flags = GTF_permit_access | flags;
 +
-+static void blkif_notify_work(blkif_t *blkif)
-+{
-+	blkif->waiting_reqs = 1;
-+	wake_up(&blkif->wq);
++	return ref;
 +}
++EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
 +
-+irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
++				     unsigned long frame, int flags)
 +{
-+	blkif_notify_work(dev_id);
-+	return IRQ_HANDLED;
++	shared[ref].frame = frame;
++	shared[ref].domid = domid;
++	wmb();
++	BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing));
++	shared[ref].flags = GTF_permit_access | flags;
 +}
++EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
 +
 +
-+
-+/******************************************************************
-+ * DOWNWARD CALLS -- These interface with the block-device layer proper.
-+ */
-+
-+static int do_block_io_op(blkif_t *blkif)
++int gnttab_query_foreign_access(grant_ref_t ref)
 +{
-+	blkif_back_rings_t *blk_rings = &blkif->blk_rings;
-+	blkif_request_t req;
-+	pending_req_t *pending_req;
-+	RING_IDX rc, rp;
-+	int more_to_do = 0;
-+
-+	rc = blk_rings->common.req_cons;
-+	rp = blk_rings->common.sring->req_prod;
-+	rmb(); /* Ensure we see queued requests up to 'rp'. */
++	u16 nflags;
 +
-+	while ((rc != rp)) {
++	nflags = shared[ref].flags;
 +
-+		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
-+			break;
++	return (nflags & (GTF_reading|GTF_writing));
++}
++EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
 +
-+		pending_req = alloc_req();
-+		if (NULL == pending_req) {
-+			blkif->st_oo_req++;
-+			more_to_do = 1;
-+			break;
-+		}
++int gnttab_end_foreign_access_ref(grant_ref_t ref)
++{
++	u16 flags, nflags;
 +
-+		switch (blkif->blk_protocol) {
-+		case BLKIF_PROTOCOL_NATIVE:
-+			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
-+			break;
-+		case BLKIF_PROTOCOL_X86_32:
-+			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
-+			break;
-+		case BLKIF_PROTOCOL_X86_64:
-+			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
-+			break;
-+		default:
-+			BUG();
++	nflags = shared[ref].flags;
++	do {
++		if ((flags = nflags) & (GTF_reading|GTF_writing)) {
++			printk(KERN_DEBUG "WARNING: g.e. still in use!\n");
++			return 0;
 +		}
-+		blk_rings->common.req_cons = ++rc; /* before make_response() */
++	} while ((nflags = synch_cmpxchg_subword(&shared[ref].flags, flags, 0)) !=
++		 flags);
 +
-+		switch (req.operation) {
-+		case BLKIF_OP_READ:
-+			blkif->st_rd_req++;
-+			dispatch_rw_block_io(blkif, &req, pending_req);
-+			break;
-+		case BLKIF_OP_WRITE_BARRIER:
-+			blkif->st_br_req++;
-+			/* fall through */
-+		case BLKIF_OP_WRITE:
-+			blkif->st_wr_req++;
-+			dispatch_rw_block_io(blkif, &req, pending_req);
-+			break;
-+		default:
-+			DPRINTK("error: unknown block io operation [%d]\n",
-+				req.operation);
-+			make_response(blkif, req.id, req.operation,
-+				      BLKIF_RSP_ERROR);
-+			free_req(pending_req);
-+			break;
-+		}
-+	}
-+	return more_to_do;
++	return 1;
 +}
++EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
 +
-+static void dispatch_rw_block_io(blkif_t *blkif,
-+				 blkif_request_t *req,
-+				 pending_req_t *pending_req)
++void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page)
 +{
-+	extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
-+	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+	struct phys_req preq;
-+	struct { 
-+		unsigned long buf; unsigned int nsec;
-+	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+	unsigned int nseg;
-+	struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+	int ret, i, nbio = 0;
-+	int operation;
-+
-+	switch (req->operation) {
-+	case BLKIF_OP_READ:
-+		operation = READ;
-+		break;
-+	case BLKIF_OP_WRITE:
-+		operation = WRITE;
-+		break;
-+	case BLKIF_OP_WRITE_BARRIER:
-+		operation = WRITE_BARRIER;
-+		break;
-+	default:
-+		operation = 0; /* make gcc happy */
-+		BUG();
-+	}
-+
-+	/* Check that number of segments is sane. */
-+	nseg = req->nr_segments;
-+	if (unlikely(nseg == 0) || 
-+	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
-+		DPRINTK("Bad number of segments in request (%d)\n", nseg);
-+		goto fail_response;
++	if (gnttab_end_foreign_access_ref(ref)) {
++		put_free_entry(ref);
++		if (page != 0)
++			free_page(page);
++	} else {
++		/* XXX This needs to be fixed so that the ref and page are
++		   placed on a list to be freed up later. */
++		printk(KERN_DEBUG
++		       "WARNING: leaking g.e. and page still in use!\n");
 +	}
++}
++EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
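
gnttab_grant_foreign_access() and gnttab_end_foreign_access() above bracket
the usual lifetime of a shared page. A hedged sketch of that lifecycle
(illustrative only, not part of the patch; share_one_page() and its caller
are hypothetical), granting a frame read-only and then revoking the grant:

	/* Illustrative sketch, not part of the patch: grant one page to
	 * a peer domain read-only, then revoke the grant. */
	#include <xen/gnttab.h>

	static int share_one_page(domid_t peer, void *addr)
	{
		int ref = gnttab_grant_foreign_access(peer, virt_to_mfn(addr),
						      GTF_readonly);
		if (ref < 0)
			return ref;	/* -ENOSPC: free list exhausted */

		/* ... hand 'ref' to the peer, which maps the frame ... */

		gnttab_end_foreign_access(ref, 0);	/* 0: keep the page */
		return 0;
	}
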
 +
-+	preq.dev           = req->handle;
-+	preq.sector_number = req->sector_number;
-+	preq.nr_sects      = 0;
-+
-+	pending_req->blkif     = blkif;
-+	pending_req->id        = req->id;
-+	pending_req->operation = req->operation;
-+	pending_req->status    = BLKIF_RSP_OKAY;
-+	pending_req->nr_pages  = nseg;
-+
-+	for (i = 0; i < nseg; i++) {
-+		uint32_t flags;
-+
-+		seg[i].nsec = req->seg[i].last_sect -
-+			req->seg[i].first_sect + 1;
-+
-+		if ((req->seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
-+		    (req->seg[i].last_sect < req->seg[i].first_sect))
-+			goto fail_response;
-+		preq.nr_sects += seg[i].nsec;
-+
-+		flags = GNTMAP_host_map;
-+		if (operation != READ)
-+			flags |= GNTMAP_readonly;
-+		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
-+				  req->seg[i].gref, blkif->domid);
-+	}
++int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
++{
++	int ref;
 +
-+	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
-+	BUG_ON(ret);
++	if (unlikely((ref = get_free_entry()) < 0))
++		return -ENOSPC;
++	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
 +
-+	for (i = 0; i < nseg; i++) {
-+		if (unlikely(map[i].status != 0)) {
-+			DPRINTK("invalid buffer -- could not remap it\n");
-+			map[i].handle = BLKBACK_INVALID_HANDLE;
-+			ret |= 1;
-+		}
++	return ref;
++}
++EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
 +
-+		pending_handle(pending_req, i) = map[i].handle;
++void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
++				       unsigned long pfn)
++{
++	shared[ref].frame = pfn;
++	shared[ref].domid = domid;
++	wmb();
++	shared[ref].flags = GTF_accept_transfer;
++}
++EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
 +
-+		if (ret)
-+			continue;
++unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
++{
++	unsigned long frame;
++	u16           flags;
 +
-+		set_phys_to_machine(__pa(vaddr(
-+			pending_req, i)) >> PAGE_SHIFT,
-+			FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
-+		seg[i].buf  = map[i].dev_bus_addr | 
-+			(req->seg[i].first_sect << 9);
++	/*
++	 * If a transfer has not even started yet, try to reclaim the grant
++	 * reference and return failure (== 0).
++	 */
++	while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
++		if (synch_cmpxchg_subword(&shared[ref].flags, flags, 0) == flags)
++			return 0;
++		cpu_relax();
 +	}
 +
-+	if (ret)
-+		goto fail_flush;
-+
-+	if (vbd_translate(&preq, blkif, operation) != 0) {
-+		DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n", 
-+			operation == READ ? "read" : "write",
-+			preq.sector_number,
-+			preq.sector_number + preq.nr_sects, preq.dev);
-+		goto fail_flush;
++	/* If a transfer is in progress then wait until it is completed. */
++	while (!(flags & GTF_transfer_completed)) {
++		flags = shared[ref].flags;
++		cpu_relax();
 +	}
 +
-+	for (i = 0; i < nseg; i++) {
-+		if (((int)preq.sector_number|(int)seg[i].nsec) &
-+		    ((bdev_hardsect_size(preq.bdev) >> 9) - 1)) {
-+			DPRINTK("Misaligned I/O request from domain %d",
-+				blkif->domid);
-+			goto fail_put_bio;
-+		}
++	/* Read the frame number /after/ reading completion status. */
++	rmb();
++	frame = shared[ref].frame;
++	BUG_ON(frame == 0);
 +
-+		while ((bio == NULL) ||
-+		       (bio_add_page(bio,
-+				     virt_to_page(vaddr(pending_req, i)),
-+				     seg[i].nsec << 9,
-+				     seg[i].buf & ~PAGE_MASK) == 0)) {
-+			bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
-+			if (unlikely(bio == NULL))
-+				goto fail_put_bio;
++	return frame;
++}
++EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
 +
-+			bio->bi_bdev    = preq.bdev;
-+			bio->bi_private = pending_req;
-+			bio->bi_end_io  = end_block_io_op;
-+			bio->bi_sector  = preq.sector_number;
-+		}
++unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
++{
++	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
++	put_free_entry(ref);
++	return frame;
++}
++EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
 +
-+		preq.sector_number += seg[i].nsec;
++void gnttab_free_grant_reference(grant_ref_t ref)
++{
++	put_free_entry(ref);
++}
++EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
++
++void gnttab_free_grant_references(grant_ref_t head)
++{
++	grant_ref_t ref;
++	unsigned long flags;
++	int count = 1;
++	if (head == GNTTAB_LIST_END)
++		return;
++	spin_lock_irqsave(&gnttab_list_lock, flags);
++	ref = head;
++	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
++		ref = gnttab_entry(ref);
++		count++;
 +	}
++	gnttab_entry(ref) = gnttab_free_head;
++	gnttab_free_head = head;
++	gnttab_free_count += count;
++	check_free_callbacks();
++	spin_unlock_irqrestore(&gnttab_list_lock, flags);
++}
++EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
 +
-+	plug_queue(blkif, bio);
-+	atomic_set(&pending_req->pendcnt, nbio);
-+	blkif_get(blkif);
++int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
++{
++	int h = get_free_entries(count);
 +
-+	for (i = 0; i < nbio; i++)
-+		submit_bio(operation, biolist[i]);
++	if (h < 0)
++		return -ENOSPC;
 +
-+	if (operation == READ)
-+		blkif->st_rd_sect += preq.nr_sects;
-+	else if (operation == WRITE)
-+		blkif->st_wr_sect += preq.nr_sects;
++	*head = h;
 +
-+	return;
++	return 0;
++}
++EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
 +
-+ fail_put_bio:
-+	for (i = 0; i < (nbio-1); i++)
-+		bio_put(biolist[i]);
-+ fail_flush:
-+	fast_flush_area(pending_req);
-+ fail_response:
-+	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
-+	free_req(pending_req);
-+} 
++int gnttab_empty_grant_references(const grant_ref_t *private_head)
++{
++	return (*private_head == GNTTAB_LIST_END);
++}
++EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
++
++int gnttab_claim_grant_reference(grant_ref_t *private_head)
++{
++	grant_ref_t g = *private_head;
++	if (unlikely(g == GNTTAB_LIST_END))
++		return -ENOSPC;
++	*private_head = gnttab_entry(g);
++	return g;
++}
++EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
 +
++void gnttab_release_grant_reference(grant_ref_t *private_head,
++				    grant_ref_t release)
++{
++	gnttab_entry(release) = *private_head;
++	*private_head = release;
++}
++EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
 +
++void gnttab_request_free_callback(struct gnttab_free_callback *callback,
++				  void (*fn)(void *), void *arg, u16 count)
++{
++	unsigned long flags;
++	spin_lock_irqsave(&gnttab_list_lock, flags);
++	if (callback->next)
++		goto out;
++	callback->fn = fn;
++	callback->arg = arg;
++	callback->count = count;
++	callback->next = gnttab_free_callback_list;
++	gnttab_free_callback_list = callback;
++	check_free_callbacks();
++out:
++	spin_unlock_irqrestore(&gnttab_list_lock, flags);
++}
++EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
 +
-+/******************************************************************
-+ * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
-+ */
++void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
++{
++	struct gnttab_free_callback **pcb;
++	unsigned long flags;
 +
++	spin_lock_irqsave(&gnttab_list_lock, flags);
++	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
++		if (*pcb == callback) {
++			*pcb = callback->next;
++			break;
++		}
++	}
++	spin_unlock_irqrestore(&gnttab_list_lock, flags);
++}
++EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
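
The callback hooks above let a driver park a retry for when
get_free_entries() has reported -ENOSPC: do_free_callbacks() re-runs each
pending callback once at least 'count' references are free again. A minimal
sketch of that pattern (illustrative only, not part of the patch; my_cb and
my_retry are hypothetical driver state):

	/* Illustrative sketch, not part of the patch: defer and retry an
	 * allocation once the grant free list refills. */
	#include <xen/gnttab.h>

	static struct gnttab_free_callback my_cb;	/* hypothetical */

	static void my_retry(void *arg)
	{
		/* Re-attempt the allocation that previously failed. */
	}

	static void alloc_or_defer(void)
	{
		grant_ref_t head;

		if (gnttab_alloc_grant_references(8, &head) == -ENOSPC)
			/* Runs once at least 8 references are free again. */
			gnttab_request_free_callback(&my_cb, my_retry, NULL, 8);
	}
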
 +
-+static void make_response(blkif_t *blkif, u64 id,
-+			  unsigned short op, int st)
++static int grow_gnttab_list(unsigned int more_frames)
 +{
-+	blkif_response_t  resp;
-+	unsigned long     flags;
-+	blkif_back_rings_t *blk_rings = &blkif->blk_rings;
-+	int more_to_do = 0;
-+	int notify;
++	unsigned int new_nr_grant_frames, extra_entries, i;
++	unsigned int nr_glist_frames, new_nr_glist_frames;
 +
-+	resp.id        = id;
-+	resp.operation = op;
-+	resp.status    = st;
++	new_nr_grant_frames = nr_grant_frames + more_frames;
++	extra_entries       = more_frames * ENTRIES_PER_GRANT_FRAME;
 +
-+	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
-+	/* Place on the response ring for the relevant domain. */
-+	switch (blkif->blk_protocol) {
-+	case BLKIF_PROTOCOL_NATIVE:
-+		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
-+		       &resp, sizeof(resp));
-+		break;
-+	case BLKIF_PROTOCOL_X86_32:
-+		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
-+		       &resp, sizeof(resp));
-+		break;
-+	case BLKIF_PROTOCOL_X86_64:
-+		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
-+		       &resp, sizeof(resp));
-+		break;
-+	default:
-+		BUG();
++	nr_glist_frames = nr_freelist_frames(nr_grant_frames);
++	new_nr_glist_frames = nr_freelist_frames(new_nr_grant_frames);
++	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
++		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
++		if (!gnttab_list[i])
++			goto grow_nomem;
 +	}
-+	blk_rings->common.rsp_prod_pvt++;
-+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
-+	if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
-+		/*
-+		 * Tail check for pending requests. Allows frontend to avoid
-+		 * notifications if requests are already in flight (lower
-+		 * overheads and promotes batching).
-+		 */
-+		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
 +
-+	} else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
-+		more_to_do = 1;
-+	}
++	for (i = ENTRIES_PER_GRANT_FRAME * nr_grant_frames;
++	     i < ENTRIES_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++)
++		gnttab_entry(i) = i + 1;
 +
-+	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
++	gnttab_entry(i) = gnttab_free_head;
++	gnttab_free_head = ENTRIES_PER_GRANT_FRAME * nr_grant_frames;
++	gnttab_free_count += extra_entries;
 +
-+	if (more_to_do)
-+		blkif_notify_work(blkif);
-+	if (notify)
-+		notify_remote_via_irq(blkif->irq);
++	nr_grant_frames = new_nr_grant_frames;
++
++	check_free_callbacks();
++
++	return 0;
++
++grow_nomem:
++	for ( ; i >= nr_glist_frames; i--)
++		free_page((unsigned long) gnttab_list[i]);
++	return -ENOMEM;
 +}
 +
-+static int __init blkif_init(void)
++static unsigned int __max_nr_grant_frames(void)
 +{
-+	int i, mmap_pages;
-+
-+	if (!is_running_on_xen())
-+		return -ENODEV;
++	struct gnttab_query_size query;
++	int rc;
 +
-+	mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
++	query.dom = DOMID_SELF;
 +
-+	pending_reqs          = kmalloc(sizeof(pending_reqs[0]) *
-+					blkif_reqs, GFP_KERNEL);
-+	pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
-+					mmap_pages, GFP_KERNEL);
-+	pending_pages         = alloc_empty_pages_and_pagevec(mmap_pages);
++	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
++	if ((rc < 0) || (query.status != GNTST_okay))
++		return 4; /* Legacy max supported number of frames */
 +
-+	if (!pending_reqs || !pending_grant_handles || !pending_pages)
-+		goto out_of_memory;
++	return query.max_nr_frames;
++}
 +
-+	for (i = 0; i < mmap_pages; i++)
-+		pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
++static inline unsigned int max_nr_grant_frames(void)
++{
++	unsigned int xen_max = __max_nr_grant_frames();
 +
-+	blkif_interface_init();
++	if (xen_max > boot_max_nr_grant_frames)
++		return boot_max_nr_grant_frames;
++	return xen_max;
++}
 +
-+	memset(pending_reqs, 0, sizeof(pending_reqs));
-+	INIT_LIST_HEAD(&pending_free);
++#ifdef CONFIG_XEN
 +
-+	for (i = 0; i < blkif_reqs; i++)
-+		list_add_tail(&pending_reqs[i].free_list, &pending_free);
++static DEFINE_SEQLOCK(gnttab_dma_lock);
 +
-+	blkif_xenbus_init();
++#ifdef CONFIG_X86
++static int map_pte_fn(pte_t *pte, struct page *pmd_page,
++		      unsigned long addr, void *data)
++{
++	unsigned long **frames = (unsigned long **)data;
 +
++	set_pte_at(&init_mm, addr, pte, pfn_pte_ma((*frames)[0], PAGE_KERNEL));
++	(*frames)++;
 +	return 0;
++}
 +
-+ out_of_memory:
-+	kfree(pending_reqs);
-+	kfree(pending_grant_handles);
-+	free_empty_pages_and_pagevec(pending_pages, mmap_pages);
-+	printk("%s: out of memory\n", __FUNCTION__);
-+	return -ENOMEM;
++static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
++			unsigned long addr, void *data)
++{
++
++	return 0;
 +}
 +
-+module_init(blkif_init);
++void *arch_gnttab_alloc_shared(unsigned long *frames)
++{
++	struct vm_struct *area;
++	area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames());
++	BUG_ON(area == NULL);
++	return area->addr;
++}
++#endif /* CONFIG_X86 */
 +
-+MODULE_LICENSE("Dual BSD/GPL");
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/blkback/common.h tmp-linux-2.6-xen.patch/drivers/xen/blkback/common.h
---- pristine-linux-2.6.18.2/drivers/xen/blkback/common.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/blkback/common.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,139 @@
-+/* 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
++static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
++{
++	struct gnttab_setup_table setup;
++	unsigned long *frames;
++	unsigned int nr_gframes = end_idx + 1;
++	int rc;
 +
-+#ifndef __BLKIF__BACKEND__COMMON_H__
-+#define __BLKIF__BACKEND__COMMON_H__
++	frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
++	if (!frames)
++		return -ENOMEM;
 +
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/interrupt.h>
-+#include <linux/slab.h>
-+#include <linux/blkdev.h>
-+#include <linux/vmalloc.h>
-+#include <linux/wait.h>
-+#include <asm/io.h>
-+#include <asm/setup.h>
-+#include <asm/pgalloc.h>
-+#include <xen/evtchn.h>
-+#include <asm/hypervisor.h>
-+#include <xen/blkif.h>
-+#include <xen/gnttab.h>
-+#include <xen/driver_util.h>
-+#include <xen/xenbus.h>
++	setup.dom        = DOMID_SELF;
++	setup.nr_frames  = nr_gframes;
++	set_xen_guest_handle(setup.frame_list, frames);
 +
-+#define DPRINTK(_f, _a...)			\
-+	pr_debug("(file=%s, line=%d) " _f,	\
-+		 __FILE__ , __LINE__ , ## _a )
++	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
++	if (rc == -ENOSYS) {
++		kfree(frames);
++		return -ENOSYS;
++	}
 +
-+struct vbd {
-+	blkif_vdev_t   handle;      /* what the domain refers to this vbd as */
-+	unsigned char  readonly;    /* Non-zero -> read-only */
-+	unsigned char  type;        /* VDISK_xxx */
-+	u32            pdevice;     /* phys device that this vbd maps to */
-+	struct block_device *bdev;
-+};
++	BUG_ON(rc || setup.status);
 +
-+struct backend_info;
++	if (shared == NULL)
++		shared = arch_gnttab_alloc_shared(frames);
 +
-+typedef struct blkif_st {
-+	/* Unique identifier for this interface. */
-+	domid_t           domid;
-+	unsigned int      handle;
-+	/* Physical parameters of the comms window. */
-+	unsigned int      irq;
-+	/* Comms information. */
-+	enum blkif_protocol blk_protocol;
-+	blkif_back_rings_t blk_rings;
-+	struct vm_struct *blk_ring_area;
-+	/* The VBD attached to this interface. */
-+	struct vbd        vbd;
-+	/* Back pointer to the backend_info. */
-+	struct backend_info *be;
-+	/* Private fields. */
-+	spinlock_t       blk_ring_lock;
-+	atomic_t         refcnt;
++#ifdef CONFIG_X86
++	rc = apply_to_page_range(&init_mm, (unsigned long)shared,
++				 PAGE_SIZE * nr_gframes,
++				 map_pte_fn, &frames);
++	BUG_ON(rc);
++	frames -= nr_gframes; /* adjust after map_pte_fn() */
++#endif /* CONFIG_X86 */
 +
-+	wait_queue_head_t   wq;
-+	struct task_struct  *xenblkd;
-+	unsigned int        waiting_reqs;
-+	request_queue_t     *plug;
++	kfree(frames);
 +
-+	/* statistics */
-+	unsigned long       st_print;
-+	int                 st_rd_req;
-+	int                 st_wr_req;
-+	int                 st_oo_req;
-+	int                 st_br_req;
-+	int                 st_rd_sect;
-+	int                 st_wr_sect;
++	return 0;
++}
 +
-+	wait_queue_head_t waiting_to_free;
++static void gnttab_page_free(struct page *page)
++{
++	ClearPageForeign(page);
++	gnttab_reset_grant_page(page);
++	put_page(page);
++}
 +
-+	grant_handle_t shmem_handle;
-+	grant_ref_t    shmem_ref;
-+} blkif_t;
++/*
++ * Must not be called with IRQs off.  This should only be used on the
++ * slow path.
++ *
++ * Copy a foreign granted page to local memory.
++ */
++int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep)
++{
++	struct gnttab_unmap_and_replace unmap;
++	mmu_update_t mmu;
++	struct page *page;
++	struct page *new_page;
++	void *new_addr;
++	void *addr;
++	paddr_t pfn;
++	maddr_t mfn;
++	maddr_t new_mfn;
++	int err;
 +
-+blkif_t *blkif_alloc(domid_t domid);
-+void blkif_disconnect(blkif_t *blkif);
-+void blkif_free(blkif_t *blkif);
-+int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn);
++	page = *pagep;
++	if (!get_page_unless_zero(page))
++		return -ENOENT;
 +
-+#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
-+#define blkif_put(_b)					\
-+	do {						\
-+		if (atomic_dec_and_test(&(_b)->refcnt))	\
-+			wake_up(&(_b)->waiting_to_free);\
-+	} while (0)
++	err = -ENOMEM;
++	new_page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
++	if (!new_page)
++		goto out;
 +
-+/* Create a vbd. */
-+int vbd_create(blkif_t *blkif, blkif_vdev_t vdevice, unsigned major,
-+	       unsigned minor, int readonly);
-+void vbd_free(struct vbd *vbd);
++	new_addr = page_address(new_page);
++	addr = page_address(page);
++	memcpy(new_addr, addr, PAGE_SIZE);
 +
-+unsigned long long vbd_size(struct vbd *vbd);
-+unsigned int vbd_info(struct vbd *vbd);
-+unsigned long vbd_secsize(struct vbd *vbd);
++	pfn = page_to_pfn(page);
++	mfn = pfn_to_mfn(pfn);
++	new_mfn = virt_to_mfn(new_addr);
 +
-+struct phys_req {
-+	unsigned short       dev;
-+	unsigned short       nr_sects;
-+	struct block_device *bdev;
-+	blkif_sector_t       sector_number;
-+};
++	write_seqlock(&gnttab_dma_lock);
 +
-+int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation);
++	/* Make seq visible before checking page_mapped. */
++	smp_mb();
 +
-+void blkif_interface_init(void);
++	/* Has the page been DMA-mapped? */
++	if (unlikely(page_mapped(page))) {
++		write_sequnlock(&gnttab_dma_lock);
++		put_page(new_page);
++		err = -EBUSY;
++		goto out;
++	}
 +
-+void blkif_xenbus_init(void);
++	if (!xen_feature(XENFEAT_auto_translated_physmap))
++		set_phys_to_machine(pfn, new_mfn);
 +
-+irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
-+int blkif_schedule(void *arg);
++	gnttab_set_replace_op(&unmap, (unsigned long)addr,
++			      (unsigned long)new_addr, ref);
 +
-+int blkback_barrier(struct xenbus_transaction xbt,
-+		    struct backend_info *be, int state);
++	err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace,
++					&unmap, 1);
++	BUG_ON(err);
++	BUG_ON(unmap.status);
 +
-+#endif /* __BLKIF__BACKEND__COMMON_H__ */
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/blkback/interface.c tmp-linux-2.6-xen.patch/drivers/xen/blkback/interface.c
---- pristine-linux-2.6.18.2/drivers/xen/blkback/interface.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/blkback/interface.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,181 @@
-+/******************************************************************************
-+ * arch/xen/drivers/blkif/backend/interface.c
-+ * 
-+ * Block-device interface management.
-+ * 
-+ * Copyright (c) 2004, Keir Fraser
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
++	write_sequnlock(&gnttab_dma_lock);
 +
-+#include "common.h"
-+#include <xen/evtchn.h>
-+#include <linux/kthread.h>
++	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++		set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY);
 +
-+static kmem_cache_t *blkif_cachep;
++		mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
++		mmu.val = pfn;
++		err = HYPERVISOR_mmu_update(&mmu, 1, NULL, DOMID_SELF);
++		BUG_ON(err);
++	}
 +
-+blkif_t *blkif_alloc(domid_t domid)
-+{
-+	blkif_t *blkif;
++	new_page->mapping = page->mapping;
++	new_page->index = page->index;
++	set_bit(PG_foreign, &new_page->flags);
++	*pagep = new_page;
 +
-+	blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
-+	if (!blkif)
-+		return ERR_PTR(-ENOMEM);
++	SetPageForeign(page, gnttab_page_free);
++	page->mapping = NULL;
 +
-+	memset(blkif, 0, sizeof(*blkif));
-+	blkif->domid = domid;
-+	spin_lock_init(&blkif->blk_ring_lock);
-+	atomic_set(&blkif->refcnt, 1);
-+	init_waitqueue_head(&blkif->wq);
-+	blkif->st_print = jiffies;
-+	init_waitqueue_head(&blkif->waiting_to_free);
++out:
++	put_page(page);
++	return err;
++}
++EXPORT_SYMBOL_GPL(gnttab_copy_grant_page);
 +
-+	return blkif;
++void gnttab_reset_grant_page(struct page *page)
++{
++	init_page_count(page);
++	reset_page_mapcount(page);
 +}
++EXPORT_SYMBOL_GPL(gnttab_reset_grant_page);
 +
-+static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
++/*
++ * Keep track of foreign pages marked as PageForeign so that we don't
++ * return them to the remote domain prematurely.
++ *
++ * PageForeign pages are pinned down by increasing their mapcount.
++ *
++ * All other pages are simply returned as is.
++ */
++void __gnttab_dma_map_page(struct page *page)
 +{
-+	struct gnttab_map_grant_ref op;
++	unsigned int seq;
 +
-+	gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
-+			  GNTMAP_host_map, shared_page, blkif->domid);
++	if (!is_running_on_xen() || !PageForeign(page))
++		return;
 +
-+	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-+		BUG();
++	do {
++		seq = read_seqbegin(&gnttab_dma_lock);
 +
-+	if (op.status) {
-+		DPRINTK(" Grant table operation failure !\n");
-+		return op.status;
-+	}
++		if (gnttab_dma_local_pfn(page))
++			break;
 +
-+	blkif->shmem_ref = shared_page;
-+	blkif->shmem_handle = op.handle;
++		atomic_set(&page->_mapcount, 0);
 +
-+	return 0;
++		/* Make _mapcount visible before read_seqretry. */
++		smp_mb();
++	} while (unlikely(read_seqretry(&gnttab_dma_lock, seq)));
 +}
 +
-+static void unmap_frontend_page(blkif_t *blkif)
++int gnttab_resume(void)
 +{
-+	struct gnttab_unmap_grant_ref op;
-+
-+	gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
-+			    GNTMAP_host_map, blkif->shmem_handle);
-+
-+	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
-+		BUG();
++	if (max_nr_grant_frames() < nr_grant_frames)
++		return -ENOSYS;
++	return gnttab_map(0, nr_grant_frames - 1);
 +}
 +
-+int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
++int gnttab_suspend(void)
 +{
-+	int err;
++#ifdef CONFIG_X86
++	apply_to_page_range(&init_mm, (unsigned long)shared,
++			    PAGE_SIZE * nr_grant_frames,
++			    unmap_pte_fn, NULL);
++#endif
++	return 0;
++}
 +
-+	/* Already connected through? */
-+	if (blkif->irq)
-+		return 0;
++#else /* !CONFIG_XEN */
 +
-+	if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
-+		return -ENOMEM;
++#include <platform-pci.h>
 +
-+	err = map_frontend_page(blkif, shared_page);
-+	if (err) {
-+		free_vm_area(blkif->blk_ring_area);
-+		return err;
-+	}
++static unsigned long resume_frames;
 +
-+	switch (blkif->blk_protocol) {
-+	case BLKIF_PROTOCOL_NATIVE:
-+	{
-+		blkif_sring_t *sring;
-+		sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
-+		BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
-+		break;
-+	}
-+	case BLKIF_PROTOCOL_X86_32:
-+	{
-+		blkif_x86_32_sring_t *sring_x86_32;
-+		sring_x86_32 = (blkif_x86_32_sring_t *)blkif->blk_ring_area->addr;
-+		BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
-+		break;
-+	}
-+	case BLKIF_PROTOCOL_X86_64:
-+	{
-+		blkif_x86_64_sring_t *sring_x86_64;
-+		sring_x86_64 = (blkif_x86_64_sring_t *)blkif->blk_ring_area->addr;
-+		BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
-+		break;
-+	}
-+	default:
-+		BUG();
-+	}
++static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
++{
++	struct xen_add_to_physmap xatp;
++	unsigned int i = end_idx;
 +
-+	err = bind_interdomain_evtchn_to_irqhandler(
-+		blkif->domid, evtchn, blkif_be_int, 0, "blkif-backend", blkif);
-+	if (err < 0)
-+	{
-+		unmap_frontend_page(blkif);
-+		free_vm_area(blkif->blk_ring_area);
-+		blkif->blk_rings.common.sring = NULL;
-+		return err;
-+	}
-+	blkif->irq = err;
++	/* Loop backwards, so that the first hypercall has the largest index,
++	 * ensuring that the table will grow only once.
++	 */
++	do {
++		xatp.domid = DOMID_SELF;
++		xatp.idx = i;
++		xatp.space = XENMAPSPACE_grant_table;
++		xatp.gpfn = (resume_frames >> PAGE_SHIFT) + i;
++		if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
++			BUG();
++	} while (i-- > start_idx);
 +
 +	return 0;
 +}
 +
-+void blkif_disconnect(blkif_t *blkif)
++int gnttab_resume(void)
 +{
-+	if (blkif->xenblkd) {
-+		kthread_stop(blkif->xenblkd);
-+		blkif->xenblkd = NULL;
-+	}
-+
-+	atomic_dec(&blkif->refcnt);
-+	wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
-+	atomic_inc(&blkif->refcnt);
++	unsigned int max_nr_gframes, nr_gframes;
 +
-+	if (blkif->irq) {
-+		unbind_from_irqhandler(blkif->irq, blkif);
-+		blkif->irq = 0;
-+	}
++	nr_gframes = nr_grant_frames;
++	max_nr_gframes = max_nr_grant_frames();
++	if (max_nr_gframes < nr_gframes)
++		return -ENOSYS;
 +
-+	if (blkif->blk_rings.common.sring) {
-+		unmap_frontend_page(blkif);
-+		free_vm_area(blkif->blk_ring_area);
-+		blkif->blk_rings.common.sring = NULL;
++	if (!resume_frames) {
++		resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
++		shared = ioremap(resume_frames, PAGE_SIZE * max_nr_gframes);
++		if (shared == NULL) {
++			printk(KERN_ERR "failed to ioremap gnttab shared frames\n");
++			return -ENOMEM;
++		}
 +	}
-+}
 +
-+void blkif_free(blkif_t *blkif)
-+{
-+	if (!atomic_dec_and_test(&blkif->refcnt))
-+		BUG();
-+	kmem_cache_free(blkif_cachep, blkif);
-+}
++	gnttab_map(0, nr_gframes - 1);
 +
-+void __init blkif_interface_init(void)
-+{
-+	blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t), 
-+					 0, 0, NULL, NULL);
++	return 0;
 +}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/blkback/Makefile tmp-linux-2.6-xen.patch/drivers/xen/blkback/Makefile
---- pristine-linux-2.6.18.2/drivers/xen/blkback/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/blkback/Makefile	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,3 @@
-+obj-$(CONFIG_XEN_BLKDEV_BACKEND) := blkbk.o
 +
-+blkbk-y	:= blkback.o xenbus.o interface.o vbd.o
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/blkback/vbd.c tmp-linux-2.6-xen.patch/drivers/xen/blkback/vbd.c
---- pristine-linux-2.6.18.2/drivers/xen/blkback/vbd.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/blkback/vbd.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,118 @@
-+/******************************************************************************
-+ * blkback/vbd.c
-+ * 
-+ * Routines for managing virtual block devices (VBDs).
-+ * 
-+ * Copyright (c) 2003-2005, Keir Fraser & Steve Hand
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
++#endif /* !CONFIG_XEN */
 +
-+#include "common.h"
++static int gnttab_expand(unsigned int req_entries)
++{
++	int rc;
++	unsigned int cur, extra;
 +
-+#define vbd_sz(_v)   ((_v)->bdev->bd_part ?				\
-+	(_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity)
++	cur = nr_grant_frames;
++	extra = ((req_entries + (ENTRIES_PER_GRANT_FRAME-1)) /
++		 ENTRIES_PER_GRANT_FRAME);
++	if (cur + extra > max_nr_grant_frames())
++		return -ENOSPC;
 +
-+unsigned long long vbd_size(struct vbd *vbd)
-+{
-+	return vbd_sz(vbd);
-+}
++	if ((rc = gnttab_map(cur, cur + extra - 1)) == 0)
++		rc = grow_gnttab_list(extra);
 +
-+unsigned int vbd_info(struct vbd *vbd)
-+{
-+	return vbd->type | (vbd->readonly?VDISK_READONLY:0);
++	return rc;
 +}
 +
-+unsigned long vbd_secsize(struct vbd *vbd)
++int __devinit gnttab_init(void)
 +{
-+	return bdev_hardsect_size(vbd->bdev);
-+}
++	int i;
++	unsigned int max_nr_glist_frames, nr_glist_frames;
++	unsigned int nr_init_grefs;
 +
-+int vbd_create(blkif_t *blkif, blkif_vdev_t handle, unsigned major,
-+	       unsigned minor, int readonly)
-+{
-+	struct vbd *vbd;
-+	struct block_device *bdev;
++	if (!is_running_on_xen())
++		return -ENODEV;
 +
-+	vbd = &blkif->vbd;
-+	vbd->handle   = handle; 
-+	vbd->readonly = readonly;
-+	vbd->type     = 0;
++	nr_grant_frames = 1;
++	boot_max_nr_grant_frames = __max_nr_grant_frames();
 +
-+	vbd->pdevice  = MKDEV(major, minor);
++	/* Determine the maximum number of frames required for the
++	 * grant reference free list on the current hypervisor.
++	 */
++	max_nr_glist_frames = nr_freelist_frames(boot_max_nr_grant_frames);
 +
-+	bdev = open_by_devnum(vbd->pdevice,
-+			      vbd->readonly ? FMODE_READ : FMODE_WRITE);
++	gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
++			      GFP_KERNEL);
++	if (gnttab_list == NULL)
++		return -ENOMEM;
 +
-+	if (IS_ERR(bdev)) {
-+		DPRINTK("vbd_creat: device %08x could not be opened.\n",
-+			vbd->pdevice);
-+		return -ENOENT;
++	nr_glist_frames = nr_freelist_frames(nr_grant_frames);
++	for (i = 0; i < nr_glist_frames; i++) {
++		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
++		if (gnttab_list[i] == NULL)
++			goto ini_nomem;
 +	}
 +
-+	vbd->bdev = bdev;
++	if (gnttab_resume() < 0)
++		return -ENODEV;
 +
-+	if (vbd->bdev->bd_disk == NULL) {
-+		DPRINTK("vbd_creat: device %08x doesn't exist.\n",
-+			vbd->pdevice);
-+		vbd_free(vbd);
-+		return -ENOENT;
-+	}
++	nr_init_grefs = nr_grant_frames * ENTRIES_PER_GRANT_FRAME;
 +
-+	if (vbd->bdev->bd_disk->flags & GENHD_FL_CD)
-+		vbd->type |= VDISK_CDROM;
-+	if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
-+		vbd->type |= VDISK_REMOVABLE;
++	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
++		gnttab_entry(i) = i + 1;
++
++	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
++	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
++	gnttab_free_head  = NR_RESERVED_ENTRIES;
 +
-+	DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
-+		handle, blkif->domid);
 +	return 0;
++
++ ini_nomem:
++	for (i--; i >= 0; i--)
++		free_page((unsigned long)gnttab_list[i]);
++	kfree(gnttab_list);
++	return -ENOMEM;
 +}
 +
-+void vbd_free(struct vbd *vbd)
++#ifdef CONFIG_XEN
++core_initcall(gnttab_init);
++#endif
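
Taken together, frontend drivers typically drive this file as follows:
reserve a batch of references at setup, claim one per outstanding request,
release it on completion, and return the whole batch at teardown. A hedged
sketch (illustrative only, not part of the patch; the batch size is
hypothetical):

	/* Illustrative sketch, not part of the patch: batch reservation
	 * as block/net frontends commonly do. */
	#include <xen/gnttab.h>

	#define MY_BATCH 16	/* hypothetical batch size */

	static int use_grant_batch(void)
	{
		grant_ref_t head;
		int i, ref;

		if (gnttab_alloc_grant_references(MY_BATCH, &head) < 0)
			return -ENOSPC;

		for (i = 0; i < MY_BATCH; i++) {
			ref = gnttab_claim_grant_reference(&head);
			if (ref < 0)
				break;	/* private list exhausted */
			/* ... use with gnttab_grant_foreign_access_ref() ... */
			gnttab_release_grant_reference(&head, ref);
		}

		gnttab_free_grant_references(head);
		return 0;
	}
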
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/core/hypervisor_sysfs.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/core/hypervisor_sysfs.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,57 @@
++/*
++ *  copyright (c) 2006 IBM Corporation
++ *  Authored by: Mike D. Day <ncmike at us.ibm.com>
++ *
++ *  This program is free software; you can redistribute it and/or modify
++ *  it under the terms of the GNU General Public License version 2 as
++ *  published by the Free Software Foundation.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/kobject.h>
++#include <xen/hypervisor_sysfs.h>
++#include <asm/hypervisor.h>
++
++static ssize_t hyp_sysfs_show(struct kobject *kobj,
++			      struct attribute *attr,
++			      char *buffer)
 +{
-+	if (vbd->bdev)
-+		blkdev_put(vbd->bdev);
-+	vbd->bdev = NULL;
++	struct hyp_sysfs_attr *hyp_attr;
++	hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr);
++	if (hyp_attr->show)
++		return hyp_attr->show(hyp_attr, buffer);
++	return 0;
 +}
 +
-+int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation)
++static ssize_t hyp_sysfs_store(struct kobject *kobj,
++			       struct attribute *attr,
++			       const char *buffer,
++			       size_t len)
 +{
-+	struct vbd *vbd = &blkif->vbd;
-+	int rc = -EACCES;
++	struct hyp_sysfs_attr *hyp_attr;
++	hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr);
++	if (hyp_attr->store)
++		return hyp_attr->store(hyp_attr, buffer, len);
++	return 0;
++}
 +
-+	if ((operation != READ) && vbd->readonly)
-+		goto out;
++static struct sysfs_ops hyp_sysfs_ops = {
++	.show = hyp_sysfs_show,
++	.store = hyp_sysfs_store,
++};
 +
-+	if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
-+		goto out;
++static struct kobj_type hyp_sysfs_kobj_type = {
++	.sysfs_ops = &hyp_sysfs_ops,
++};
 +
-+	req->dev  = vbd->pdevice;
-+	req->bdev = vbd->bdev;
-+	rc = 0;
++static int __init hypervisor_subsys_init(void)
++{
++	if (!is_running_on_xen())
++		return -ENODEV;
 +
-+ out:
-+	return rc;
++	hypervisor_subsys.kset.kobj.ktype = &hyp_sysfs_kobj_type;
++	return 0;
 +}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/blkback/xenbus.c tmp-linux-2.6-xen.patch/drivers/xen/blkback/xenbus.c
---- pristine-linux-2.6.18.2/drivers/xen/blkback/xenbus.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/blkback/xenbus.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,533 @@
-+/*  Xenbus code for blkif backend
-+    Copyright (C) 2005 Rusty Russell <rusty at rustcorp.com.au>
-+    Copyright (C) 2005 XenSource Ltd
 +
-+    This program is free software; you can redistribute it and/or modify
-+    it under the terms of the GNU General Public License as published by
-+    the Free Software Foundation; either version 2 of the License, or
-+    (at your option) any later version.
++device_initcall(hypervisor_subsys_init);
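
hyp_sysfs_show() and hyp_sysfs_store() above recover the per-attribute
handler with container_of(), so an attribute only needs a struct
hyp_sysfs_attr with .attr and .show/.store filled in. A hedged sketch
(illustrative only, not part of the patch; the full struct layout lives in
<xen/hypervisor_sysfs.h>, and the attribute name and payload are
hypothetical):

	/* Illustrative sketch, not part of the patch: a read-only
	 * attribute served by the dispatch functions above. */
	#include <linux/kernel.h>
	#include <xen/hypervisor_sysfs.h>

	static ssize_t example_show(struct hyp_sysfs_attr *attr, char *buffer)
	{
		return sprintf(buffer, "example\n");	/* hypothetical */
	}

	static struct hyp_sysfs_attr example_attr = {
		.attr = { .name = "example", .mode = 0444 },
		.show = example_show,
	};
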
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/core/machine_kexec.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/core/machine_kexec.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,207 @@
++/*
++ * drivers/xen/core/machine_kexec.c
++ * handle transition of Linux booting another kernel
++ */
 +
-+    This program is distributed in the hope that it will be useful,
-+    but WITHOUT ANY WARRANTY; without even the implied warranty of
-+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+    GNU General Public License for more details.
++#include <linux/kexec.h>
++#include <xen/interface/kexec.h>
++#include <linux/mm.h>
++#include <linux/bootmem.h>
 +
-+    You should have received a copy of the GNU General Public License
-+    along with this program; if not, write to the Free Software
-+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-+*/
++extern void machine_kexec_setup_load_arg(xen_kexec_image_t *xki,
++					 struct kimage *image);
++extern int machine_kexec_setup_resources(struct resource *hypervisor,
++					 struct resource *phys_cpus,
++					 int nr_phys_cpus);
++extern void machine_kexec_register_resources(struct resource *res);
 +
-+#include <stdarg.h>
-+#include <linux/module.h>
-+#include <linux/kthread.h>
-+#include "common.h"
++static int __initdata xen_max_nr_phys_cpus;
++static struct resource xen_hypervisor_res;
++static struct resource *xen_phys_cpus;
 +
-+#undef DPRINTK
-+#define DPRINTK(fmt, args...)				\
-+	pr_debug("blkback/xenbus (%s:%d) " fmt ".\n",	\
-+		 __FUNCTION__, __LINE__, ##args)
++size_t vmcoreinfo_size_xen;
++unsigned long paddr_vmcoreinfo_xen;
 +
-+struct backend_info
++void __init xen_machine_kexec_setup_resources(void)
 +{
-+	struct xenbus_device *dev;
-+	blkif_t *blkif;
-+	struct xenbus_watch backend_watch;
-+	unsigned major;
-+	unsigned minor;
-+	char *mode;
-+};
++	xen_kexec_range_t range;
++	struct resource *res;
++	int k = 0;
 +
-+static void connect(struct backend_info *);
-+static int connect_ring(struct backend_info *);
-+static void backend_changed(struct xenbus_watch *, const char **,
-+			    unsigned int);
++	if (!is_initial_xendomain())
++		return;
 +
-+static int blkback_name(blkif_t *blkif, char *buf)
-+{
-+	char *devpath, *devname;
-+	struct xenbus_device *dev = blkif->be->dev;
++	/* determine maximum number of physical cpus */
 +
-+	devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
-+	if (IS_ERR(devpath)) 
-+		return PTR_ERR(devpath);
-+	
-+	if ((devname = strstr(devpath, "/dev/")) != NULL)
-+		devname += strlen("/dev/");
-+	else
-+		devname  = devpath;
++	while (1) {
++		memset(&range, 0, sizeof(range));
++		range.range = KEXEC_RANGE_MA_CPU;
++		range.nr = k;
 +
-+	snprintf(buf, TASK_COMM_LEN, "blkback.%d.%s", blkif->domid, devname);
-+	kfree(devpath);
-+	
-+	return 0;
-+}
++		if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
++			break;
 +
-+static void update_blkif_status(blkif_t *blkif)
-+{ 
-+	int err;
-+	char name[TASK_COMM_LEN];
++		k++;
++	}
 +
-+	/* Not ready to connect? */
-+	if (!blkif->irq || !blkif->vbd.bdev)
++	if (k == 0)
 +		return;
 +
-+	/* Already connected? */
-+	if (blkif->be->dev->state == XenbusStateConnected)
-+		return;
++	xen_max_nr_phys_cpus = k;
 +
-+	/* Attempt to connect: exit if we fail to. */
-+	connect(blkif->be);
-+	if (blkif->be->dev->state != XenbusStateConnected)
-+		return;
++	/* allocate xen_phys_cpus */
 +
-+	err = blkback_name(blkif, name);
-+	if (err) {
-+		xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
-+		return;
-+	}
++	xen_phys_cpus = alloc_bootmem_low(k * sizeof(struct resource));
++	BUG_ON(xen_phys_cpus == NULL);
 +
-+	blkif->xenblkd = kthread_run(blkif_schedule, blkif, name);
-+	if (IS_ERR(blkif->xenblkd)) {
-+		err = PTR_ERR(blkif->xenblkd);
-+		blkif->xenblkd = NULL;
-+		xenbus_dev_error(blkif->be->dev, err, "start xenblkd");
++	/* fill in xen_phys_cpus with per-cpu crash note information */
++
++	for (k = 0; k < xen_max_nr_phys_cpus; k++) {
++		memset(&range, 0, sizeof(range));
++		range.range = KEXEC_RANGE_MA_CPU;
++		range.nr = k;
++
++		if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
++			goto err;
++
++		res = xen_phys_cpus + k;
++
++		memset(res, 0, sizeof(*res));
++		res->name = "Crash note";
++		res->start = range.start;
++		res->end = range.start + range.size - 1;
++		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
 +	}
-+}
 +
++	/* fill in xen_hypervisor_res with hypervisor machine address range */
 +
-+/****************************************************************
-+ *  sysfs interface for VBD I/O requests
-+ */
++	memset(&range, 0, sizeof(range));
++	range.range = KEXEC_RANGE_MA_XEN;
 +
-+#define VBD_SHOW(name, format, args...)					\
-+	static ssize_t show_##name(struct device *_dev,			\
-+				   struct device_attribute *attr,	\
-+				   char *buf)				\
-+	{								\
-+		struct xenbus_device *dev = to_xenbus_device(_dev);	\
-+		struct backend_info *be = dev->dev.driver_data;		\
-+									\
-+		return sprintf(buf, format, ##args);			\
-+	}								\
-+	static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
++	if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
++		goto err;
 +
-+VBD_SHOW(oo_req,  "%d\n", be->blkif->st_oo_req);
-+VBD_SHOW(rd_req,  "%d\n", be->blkif->st_rd_req);
-+VBD_SHOW(wr_req,  "%d\n", be->blkif->st_wr_req);
-+VBD_SHOW(br_req,  "%d\n", be->blkif->st_br_req);
-+VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
-+VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);
++	xen_hypervisor_res.name = "Hypervisor code and data";
++	xen_hypervisor_res.start = range.start;
++	xen_hypervisor_res.end = range.start + range.size - 1;
++	xen_hypervisor_res.flags = IORESOURCE_BUSY | IORESOURCE_MEM;
 +
-+static struct attribute *vbdstat_attrs[] = {
-+	&dev_attr_oo_req.attr,
-+	&dev_attr_rd_req.attr,
-+	&dev_attr_wr_req.attr,
-+	&dev_attr_br_req.attr,
-+	&dev_attr_rd_sect.attr,
-+	&dev_attr_wr_sect.attr,
-+	NULL
-+};
++	/* fill in crashk_res if range is reserved by hypervisor */
 +
-+static struct attribute_group vbdstat_group = {
-+	.name = "statistics",
-+	.attrs = vbdstat_attrs,
-+};
++	memset(&range, 0, sizeof(range));
++	range.range = KEXEC_RANGE_MA_CRASH;
++
++	if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
++		goto err;
++
++	if (range.size) {
++		crashk_res.start = range.start;
++		crashk_res.end = range.start + range.size - 1;
++	}
 +
-+VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
-+VBD_SHOW(mode, "%s\n", be->mode);
++	/* get physical address of vmcoreinfo */
++	memset(&range, 0, sizeof(range));
++	range.range = KEXEC_RANGE_MA_VMCOREINFO;
 +
-+int xenvbd_sysfs_addif(struct xenbus_device *dev)
-+{
-+	int error;
-+	
-+	error = device_create_file(&dev->dev, &dev_attr_physical_device);
-+ 	if (error)
-+		goto fail1;
++	if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
++		return;
 +
-+	error = device_create_file(&dev->dev, &dev_attr_mode);
-+	if (error)
-+		goto fail2;
++	if (range.size) {
++		paddr_vmcoreinfo_xen = range.start;
++		vmcoreinfo_size_xen = range.size;
++	}
 +
-+	error = sysfs_create_group(&dev->dev.kobj, &vbdstat_group);
-+	if (error)
-+		goto fail3;
++	if (machine_kexec_setup_resources(&xen_hypervisor_res, xen_phys_cpus,
++					  xen_max_nr_phys_cpus))
++		goto err;
 +
-+	return 0;
++	return;
 +
-+fail3:	sysfs_remove_group(&dev->dev.kobj, &vbdstat_group);
-+fail2:	device_remove_file(&dev->dev, &dev_attr_mode);
-+fail1:	device_remove_file(&dev->dev, &dev_attr_physical_device);
-+	return error;
++ err:
++	/*
++	 * It isn't possible to free xen_phys_cpus this early in the
++	 * boot. Failure at this stage is unexpected and the amount of
++	 * memory is small, so we tolerate the potential leak.
++	 */
++	xen_max_nr_phys_cpus = 0;
++	return;
 +}
 +
-+void xenvbd_sysfs_delif(struct xenbus_device *dev)
++void __init xen_machine_kexec_register_resources(struct resource *res)
 +{
-+	sysfs_remove_group(&dev->dev.kobj, &vbdstat_group);
-+	device_remove_file(&dev->dev, &dev_attr_mode);
-+	device_remove_file(&dev->dev, &dev_attr_physical_device);
++	request_resource(res, &xen_hypervisor_res);
++	machine_kexec_register_resources(res);
 +}
 +
-+static int blkback_remove(struct xenbus_device *dev)
++static void setup_load_arg(xen_kexec_image_t *xki, struct kimage *image)
 +{
-+	struct backend_info *be = dev->dev.driver_data;
-+
-+	DPRINTK("");
-+
-+	if (be->major || be->minor)
-+		xenvbd_sysfs_delif(dev);
++	machine_kexec_setup_load_arg(xki, image);
 +
-+	if (be->backend_watch.node) {
-+		unregister_xenbus_watch(&be->backend_watch);
-+		kfree(be->backend_watch.node);
-+		be->backend_watch.node = NULL;
-+	}
++	xki->indirection_page = image->head;
++	xki->start_address = image->start;
++}
 +
-+	if (be->blkif) {
-+		blkif_disconnect(be->blkif);
-+		vbd_free(&be->blkif->vbd);
-+		blkif_free(be->blkif);
-+		be->blkif = NULL;
-+	}
++/*
++ * Load the image into xen so xen can kdump itself
++ * This might have been done in prepare, but prepare
++ * is currently called too early. It might make sense
++ * to move prepare, but for now, just add an extra hook.
++ */
++int xen_machine_kexec_load(struct kimage *image)
++{
++	xen_kexec_load_t xkl;
 +
-+	kfree(be);
-+	dev->dev.driver_data = NULL;
-+	return 0;
++	memset(&xkl, 0, sizeof(xkl));
++	xkl.type = image->type;
++	setup_load_arg(&xkl.image, image);
++	return HYPERVISOR_kexec_op(KEXEC_CMD_kexec_load, &xkl);
 +}
 +
-+int blkback_barrier(struct xenbus_transaction xbt,
-+		    struct backend_info *be, int state)
++/*
++ * Unload the image that was stored by machine_kexec_load()
++ * This might have been done in machine_kexec_cleanup() but it
++ * is called too late, and it's possible Xen could try to kdump
++ * using resources that have been freed.
++ */
++void xen_machine_kexec_unload(struct kimage *image)
 +{
-+	struct xenbus_device *dev = be->dev;
-+	int err;
-+
-+	err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
-+			    "%d", state);
-+	if (err)
-+		xenbus_dev_fatal(dev, err, "writing feature-barrier");
++	xen_kexec_load_t xkl;
 +
-+	return err;
++	memset(&xkl, 0, sizeof(xkl));
++	xkl.type = image->type;
++	WARN_ON(HYPERVISOR_kexec_op(KEXEC_CMD_kexec_unload, &xkl));
 +}
 +
-+/**
-+ * Entry point to this code when a new device is created.  Allocate the basic
-+ * structures, and watch the store waiting for the hotplug scripts to tell us
-+ * the device's physical major and minor numbers.  Switch to InitWait.
++/*
++ * Do not allocate memory (or fail in any way) in machine_kexec().
++ * We are past the point of no return, committed to rebooting now.
++ *
++ * This has the hypervisor move to the preferred reboot CPU,
++ * stop all CPUs and kexec. That is, it combines machine_shutdown()
++ * and machine_kexec() in Linux kexec terms.
 + */
-+static int blkback_probe(struct xenbus_device *dev,
-+			 const struct xenbus_device_id *id)
++NORET_TYPE void machine_kexec(struct kimage *image)
 +{
-+	int err;
-+	struct backend_info *be = kzalloc(sizeof(struct backend_info),
-+					  GFP_KERNEL);
-+	if (!be) {
-+		xenbus_dev_fatal(dev, -ENOMEM,
-+				 "allocating backend structure");
-+		return -ENOMEM;
-+	}
-+	be->dev = dev;
-+	dev->dev.driver_data = be;
++	xen_kexec_exec_t xke;
 +
-+	be->blkif = blkif_alloc(dev->otherend_id);
-+	if (IS_ERR(be->blkif)) {
-+		err = PTR_ERR(be->blkif);
-+		be->blkif = NULL;
-+		xenbus_dev_fatal(dev, err, "creating block interface");
-+		goto fail;
-+	}
++	memset(&xke, 0, sizeof(xke));
++	xke.type = image->type;
++	VOID(HYPERVISOR_kexec_op(KEXEC_CMD_kexec, &xke));
++	panic("KEXEC_CMD_kexec hypercall should not return\n");
++}
 +
-+	/* setup back pointer */
-+	be->blkif->be = be;
++void machine_shutdown(void)
++{
++	/* do nothing */
++}
 +
-+	err = xenbus_watch_path2(dev, dev->nodename, "physical-device",
-+				 &be->backend_watch, backend_changed);
-+	if (err)
-+		goto fail;
 +
-+	err = xenbus_switch_state(dev, XenbusStateInitWait);
-+	if (err)
-+		goto fail;
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
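One recurring detail in the file above: every (start, size) pair returned by KEXEC_CMD_kexec_get_range is converted into a struct resource whose end is inclusive, hence range.start + range.size - 1. A self-contained sketch of that conversion, with illustrative stand-in types rather than the kernel's:

    #include <stdio.h>

    /* Stand-ins for xen_kexec_range_t and struct resource. */
    struct range { unsigned long start, size; };
    struct res   { unsigned long start, end; };

    static struct res to_resource(struct range r)
    {
            struct res out;
            out.start = r.start;
            out.end = r.start + r.size - 1;   /* inclusive end, as in the patch */
            return out;
    }

    int main(void)
    {
            /* a made-up 8 MiB crash window, for illustration only */
            struct range crash = { 0x1000000UL, 0x800000UL };
            struct res r = to_resource(crash);
            printf("%#lx-%#lx\n", r.start, r.end);   /* 0x1000000-0x17fffff */
            return 0;
    }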
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/core/machine_reboot.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/core/machine_reboot.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,247 @@
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/unistd.h>
++#include <linux/module.h>
++#include <linux/reboot.h>
++#include <linux/sysrq.h>
++#include <linux/stringify.h>
++#include <linux/stop_machine.h>
++#include <asm/irq.h>
++#include <asm/mmu_context.h>
++#include <xen/evtchn.h>
++#include <asm/hypervisor.h>
++#include <xen/xenbus.h>
++#include <linux/cpu.h>
++#include <xen/gnttab.h>
++#include <xen/xencons.h>
++#include <xen/cpu_hotplug.h>
++#include <xen/interface/vcpu.h>
 +
-+	return 0;
++#if defined(__i386__) || defined(__x86_64__)
 +
-+fail:
-+	DPRINTK("failed");
-+	blkback_remove(dev);
-+	return err;
++/*
++ * Power off function, if any
++ */
++void (*pm_power_off)(void);
++EXPORT_SYMBOL(pm_power_off);
++
++void machine_emergency_restart(void)
++{
++	/* We really want to get pending console data out before we die. */
++	xencons_force_flush();
++	HYPERVISOR_shutdown(SHUTDOWN_reboot);
 +}
 +
++void machine_restart(char * __unused)
++{
++	machine_emergency_restart();
++}
 +
-+/**
-+ * Callback received when the hotplug scripts have placed the physical-device
-+ * node.  Read it and the mode node, and create a vbd.  If the frontend is
-+ * ready, connect.
-+ */
-+static void backend_changed(struct xenbus_watch *watch,
-+			    const char **vec, unsigned int len)
++void machine_halt(void)
 +{
-+	int err;
-+	unsigned major;
-+	unsigned minor;
-+	struct backend_info *be
-+		= container_of(watch, struct backend_info, backend_watch);
-+	struct xenbus_device *dev = be->dev;
++	machine_power_off();
++}
 +
-+	DPRINTK("");
++void machine_power_off(void)
++{
++	/* We really want to get pending console data out before we die. */
++	xencons_force_flush();
++	if (pm_power_off)
++		pm_power_off();
++	HYPERVISOR_shutdown(SHUTDOWN_poweroff);
++}
 +
-+	err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
-+			   &major, &minor);
-+	if (XENBUS_EXIST_ERR(err)) {
-+		/* Since this watch will fire once immediately after it is
-+		   registered, we expect this.  Ignore it, and wait for the
-+		   hotplug scripts. */
-+		return;
-+	}
-+	if (err != 2) {
-+		xenbus_dev_fatal(dev, err, "reading physical-device");
-+		return;
-+	}
++int reboot_thru_bios = 0;	/* for dmi_scan.c */
++EXPORT_SYMBOL(machine_restart);
++EXPORT_SYMBOL(machine_halt);
++EXPORT_SYMBOL(machine_power_off);
 +
-+	if ((be->major || be->minor) &&
-+	    ((be->major != major) || (be->minor != minor))) {
-+		printk(KERN_WARNING
-+		       "blkback: changing physical device (from %x:%x to "
-+		       "%x:%x) not supported.\n", be->major, be->minor,
-+		       major, minor);
-+		return;
-+	}
++static void pre_suspend(void)
++{
++	HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
++	WARN_ON(HYPERVISOR_update_va_mapping(fix_to_virt(FIX_SHARED_INFO),
++					     __pte_ma(0), 0));
 +
-+	be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL);
-+	if (IS_ERR(be->mode)) {
-+		err = PTR_ERR(be->mode);
-+		be->mode = NULL;
-+		xenbus_dev_fatal(dev, err, "reading mode");
-+		return;
-+	}
++	xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
++	xen_start_info->console.domU.mfn =
++		mfn_to_pfn(xen_start_info->console.domU.mfn);
++}
 +
-+	if (be->major == 0 && be->minor == 0) {
-+		/* Front end dir is a number, which is used as the handle. */
++static void post_suspend(int suspend_cancelled)
++{
++	int i, j, k, fpp;
++	unsigned long shinfo_mfn;
++	extern unsigned long max_pfn;
++	extern unsigned long *pfn_to_mfn_frame_list_list;
++	extern unsigned long *pfn_to_mfn_frame_list[];
 +
-+		char *p = strrchr(dev->otherend, '/') + 1;
-+		long handle = simple_strtoul(p, NULL, 0);
++	if (suspend_cancelled) {
++		xen_start_info->store_mfn =
++			pfn_to_mfn(xen_start_info->store_mfn);
++		xen_start_info->console.domU.mfn =
++			pfn_to_mfn(xen_start_info->console.domU.mfn);
++	} else {
++#ifdef CONFIG_SMP
++		cpu_initialized_map = cpu_online_map;
++#endif
++	}
 +
-+		be->major = major;
-+		be->minor = minor;
++	shinfo_mfn = xen_start_info->shared_info >> PAGE_SHIFT;
++	if (HYPERVISOR_update_va_mapping(fix_to_virt(FIX_SHARED_INFO),
++					 pfn_pte_ma(shinfo_mfn, PAGE_KERNEL),
++					 0))
++		BUG();
++	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
 +
-+		err = vbd_create(be->blkif, handle, major, minor,
-+				 (NULL == strchr(be->mode, 'w')));
-+		if (err) {
-+			be->major = be->minor = 0;
-+			xenbus_dev_fatal(dev, err, "creating vbd structure");
-+			return;
-+		}
++	memset(empty_zero_page, 0, PAGE_SIZE);
 +
-+		err = xenvbd_sysfs_addif(dev);
-+		if (err) {
-+			vbd_free(&be->blkif->vbd);
-+			be->major = be->minor = 0;
-+			xenbus_dev_fatal(dev, err, "creating sysfs entries");
-+			return;
++	fpp = PAGE_SIZE/sizeof(unsigned long);
++	for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
++		if ((j % fpp) == 0) {
++			k++;
++			pfn_to_mfn_frame_list_list[k] =
++				virt_to_mfn(pfn_to_mfn_frame_list[k]);
++			j = 0;
 +		}
-+
-+		/* We're potentially connected now */
-+		update_blkif_status(be->blkif);
++		pfn_to_mfn_frame_list[k][j] =
++			virt_to_mfn(&phys_to_machine_mapping[i]);
 +	}
++	HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
++	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
++		virt_to_mfn(pfn_to_mfn_frame_list_list);
 +}
 +
++#else /* !(defined(__i386__) || defined(__x86_64__)) */
 +
-+/**
-+ * Callback received when the frontend's state changes.
-+ */
-+static void frontend_changed(struct xenbus_device *dev,
-+			     enum xenbus_state frontend_state)
-+{
-+	struct backend_info *be = dev->dev.driver_data;
-+	int err;
-+
-+	DPRINTK("%s", xenbus_strstate(frontend_state));
-+
-+	switch (frontend_state) {
-+	case XenbusStateInitialising:
-+		if (dev->state == XenbusStateClosed) {
-+			printk(KERN_INFO "%s: %s: prepare for reconnect\n",
-+			       __FUNCTION__, dev->nodename);
-+			xenbus_switch_state(dev, XenbusStateInitWait);
-+		}
-+		break;
++#ifndef HAVE_XEN_PRE_SUSPEND
++#define xen_pre_suspend()	((void)0)
++#endif
 +
-+	case XenbusStateInitialised:
-+	case XenbusStateConnected:
-+		/* Ensure we connect even when two watches fire in 
-+		   close succession and we miss the intermediate value
-+		   of frontend_state. */
-+		if (dev->state == XenbusStateConnected)
-+			break;
++#ifndef HAVE_XEN_POST_SUSPEND
++#define xen_post_suspend(x)	((void)0)
++#endif
 +
-+		err = connect_ring(be);
-+		if (err)
-+			break;
-+		update_blkif_status(be->blkif);
-+		break;
++#define switch_idle_mm()	((void)0)
++#define mm_pin_all()		((void)0)
++#define pre_suspend()		xen_pre_suspend()
++#define post_suspend(x)		xen_post_suspend(x)
 +
-+	case XenbusStateClosing:
-+		blkif_disconnect(be->blkif);
-+		xenbus_switch_state(dev, XenbusStateClosing);
-+		break;
++#endif
 +
-+	case XenbusStateClosed:
-+		xenbus_switch_state(dev, XenbusStateClosed);
-+		if (xenbus_dev_is_online(dev))
-+			break;
-+		/* fall through if not online */
-+	case XenbusStateUnknown:
-+		device_unregister(&dev->dev);
-+		break;
++struct suspend {
++	int fast_suspend;
++	void (*resume_notifier)(int);
++};
 +
-+	default:
-+		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
-+				 frontend_state);
-+		break;
-+	}
-+}
++static int take_machine_down(void *_suspend)
++{
++	struct suspend *suspend = _suspend;
++	int suspend_cancelled, err;
++	extern void time_resume(void);
 +
++	if (suspend->fast_suspend) {
++		BUG_ON(!irqs_disabled());
++	} else {
++		BUG_ON(irqs_disabled());
 +
-+/* ** Connection ** */
++		for (;;) {
++			err = smp_suspend();
++			if (err)
++				return err;
 +
++			xenbus_suspend();
++			preempt_disable();
 +
-+/**
-+ * Write the physical details regarding the block device to the store, and
-+ * switch to Connected state.
-+ */
-+static void connect(struct backend_info *be)
-+{
-+	struct xenbus_transaction xbt;
-+	int err;
-+	struct xenbus_device *dev = be->dev;
++			if (num_online_cpus() == 1)
++				break;
 +
-+	DPRINTK("%s", dev->otherend);
++			preempt_enable();
++			xenbus_suspend_cancel();
++		}
 +
-+	/* Supply the information about the device the frontend needs */
-+again:
-+	err = xenbus_transaction_start(&xbt);
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "starting transaction");
-+		return;
++		local_irq_disable();
 +	}
 +
-+	err = blkback_barrier(xbt, be, 1);
-+	if (err)
-+		goto abort;
++	mm_pin_all();
++	gnttab_suspend();
++	pre_suspend();
 +
-+	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
-+			    vbd_size(&be->blkif->vbd));
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "writing %s/sectors",
-+				 dev->nodename);
-+		goto abort;
-+	}
++	/*
++	 * This hypercall returns 1 if suspend was cancelled or the domain was
++	 * merely checkpointed, and 0 if it is resuming in a new domain.
++	 */
++	suspend_cancelled = HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
 +
-+	/* FIXME: use a typename instead */
-+	err = xenbus_printf(xbt, dev->nodename, "info", "%u",
-+			    vbd_info(&be->blkif->vbd));
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "writing %s/info",
-+				 dev->nodename);
-+		goto abort;
-+	}
-+	err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
-+			    vbd_secsize(&be->blkif->vbd));
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "writing %s/sector-size",
-+				 dev->nodename);
-+		goto abort;
++	suspend->resume_notifier(suspend_cancelled);
++	post_suspend(suspend_cancelled);
++	gnttab_resume();
++	if (!suspend_cancelled) {
++		irq_resume();
++#ifdef __x86_64__
++		/*
++		 * Older versions of Xen do not save/restore the user %cr3.
++		 * We do it here just in case, but there's no need if we are
++		 * in fast-suspend mode as that implies a new enough Xen.
++		 */
++		if (!suspend->fast_suspend)
++			xen_new_user_pt(__pa(__user_pgd(
++				current->active_mm->pgd)));
++#endif
 +	}
++	time_resume();
 +
-+	err = xenbus_transaction_end(xbt, 0);
-+	if (err == -EAGAIN)
-+		goto again;
-+	if (err)
-+		xenbus_dev_fatal(dev, err, "ending transaction");
-+
-+	err = xenbus_switch_state(dev, XenbusStateConnected);
-+	if (err)
-+		xenbus_dev_fatal(dev, err, "switching to Connected state",
-+				 dev->nodename);
++	if (!suspend->fast_suspend)
++		local_irq_enable();
 +
-+	return;
-+ abort:
-+	xenbus_transaction_end(xbt, 1);
++	return suspend_cancelled;
 +}
 +
-+
-+static int connect_ring(struct backend_info *be)
++int __xen_suspend(int fast_suspend, void (*resume_notifier)(int))
 +{
-+	struct xenbus_device *dev = be->dev;
-+	unsigned long ring_ref;
-+	unsigned int evtchn;
-+	char protocol[64] = "";
-+	int err;
++	int err, suspend_cancelled;
++	struct suspend suspend;
 +
-+	DPRINTK("%s", dev->otherend);
++	BUG_ON(smp_processor_id() != 0);
++	BUG_ON(in_interrupt());
 +
-+	err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu", &ring_ref,
-+			    "event-channel", "%u", &evtchn, NULL);
-+	if (err) {
-+		xenbus_dev_fatal(dev, err,
-+				 "reading %s/ring-ref and event-channel",
-+				 dev->otherend);
-+		return err;
++#if defined(__i386__) || defined(__x86_64__)
++	if (xen_feature(XENFEAT_auto_translated_physmap)) {
++		printk(KERN_WARNING "Cannot suspend in "
++		       "auto_translated_physmap mode.\n");
++		return -EOPNOTSUPP;
 +	}
++#endif
 +
-+	be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
-+	err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
-+			    "%63s", protocol, NULL);
-+	if (err)
-+		strcpy(protocol, "unspecified, assuming native");
-+	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
-+		be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
-+	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
-+		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
-+	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
-+		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
-+	else {
-+		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
-+		return -1;
++	/* If we are definitely UP then 'slow mode' is actually faster. */
++	if (num_possible_cpus() == 1)
++		fast_suspend = 0;
++
++	suspend.fast_suspend = fast_suspend;
++	suspend.resume_notifier = resume_notifier;
++
++	if (fast_suspend) {
++		xenbus_suspend();
++		err = stop_machine_run(take_machine_down, &suspend, 0);
++		if (err < 0)
++			xenbus_suspend_cancel();
++	} else {
++		err = take_machine_down(&suspend);
 +	}
-+	printk(KERN_INFO
-+	       "blkback: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
-+	       ring_ref, evtchn, be->blkif->blk_protocol, protocol);
 +
-+	/* Map the shared frame, irq etc. */
-+	err = blkif_map(be->blkif, ring_ref, evtchn);
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
-+				 ring_ref, evtchn);
++	if (err < 0)
 +		return err;
++
++	suspend_cancelled = err;
++	if (!suspend_cancelled) {
++		xencons_resume();
++		xenbus_resume();
++	} else {
++		xenbus_suspend_cancel();
 +	}
 +
++	if (!fast_suspend)
++		smp_resume();
++
 +	return 0;
 +}
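A point worth noting in pre_suspend() and post_suspend() above: machine frame numbers (MFNs) are not stable across save/restore, so before the suspend hypercall the store and console MFNs are rewritten as pseudo-physical frame numbers (PFNs). If the suspend is cancelled, the translation is simply inverted; on a real resume the unchanged PFNs are re-translated in the new domain. A toy model of that bookkeeping, with invented translation tables rather than Xen's:

    #include <assert.h>

    /* Invented 8-entry pfn<->mfn tables; inverse permutations of each other. */
    static unsigned long pfn_of_mfn[8] = { 0, 3, 1, 2, 7, 6, 5, 4 };
    static unsigned long mfn_of_pfn[8] = { 0, 2, 3, 1, 7, 6, 5, 4 };

    int main(void)
    {
            unsigned long store_mfn = 3;
            int suspend_cancelled = 1;

            store_mfn = pfn_of_mfn[store_mfn];          /* pre_suspend(): mfn -> pfn */
            if (suspend_cancelled)
                    store_mfn = mfn_of_pfn[store_mfn];  /* post_suspend(1): pfn -> mfn */
            assert(store_mfn == 3);   /* round-trips when the suspend is cancelled */
            return 0;
    }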
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/core/pci.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/core/pci.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,67 @@
++/*
++ * vim:shiftwidth=8:noexpandtab
++ */
 +
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/pci.h>
++#include <xen/interface/physdev.h>
 +
-+/* ** Driver Registration ** */
++static int (*pci_bus_probe)(struct device *dev);
++static int (*pci_bus_remove)(struct device *dev);
 +
++static int pci_bus_probe_wrapper(struct device *dev)
++{
++	int r;
++	struct pci_dev *pci_dev = to_pci_dev(dev);
++	struct physdev_manage_pci manage_pci;
++	manage_pci.bus = pci_dev->bus->number;
++	manage_pci.devfn = pci_dev->devfn;
++
++	r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add, &manage_pci);
++	if (r && r != -ENOSYS)
++		return r;
 +
-+static struct xenbus_device_id blkback_ids[] = {
-+	{ "vbd" },
-+	{ "" }
-+};
++	r = pci_bus_probe(dev);
++	if (r) {
++		int ret;
 +
++		ret = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
++					    &manage_pci);
++		WARN_ON(ret && ret != -ENOSYS);
++	}
 +
-+static struct xenbus_driver blkback = {
-+	.name = "vbd",
-+	.owner = THIS_MODULE,
-+	.ids = blkback_ids,
-+	.probe = blkback_probe,
-+	.remove = blkback_remove,
-+	.otherend_changed = frontend_changed
-+};
++	return r;
++}
 +
++static int pci_bus_remove_wrapper(struct device *dev)
++{
++	int r;
++	struct pci_dev *pci_dev = to_pci_dev(dev);
++	struct physdev_manage_pci manage_pci;
++	manage_pci.bus = pci_dev->bus->number;
++	manage_pci.devfn = pci_dev->devfn;
 +
-+void blkif_xenbus_init(void)
++	r = pci_bus_remove(dev);
++	/* dev and pci_dev are no longer valid!! */
++
++	WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
++		&manage_pci));
++	return r;
++}
++
++static int __init hook_pci_bus(void)
 +{
-+	xenbus_register_backend(&blkback);
++	if (!is_running_on_xen() || !is_initial_xendomain())
++		return 0;
++
++	pci_bus_probe = pci_bus_type.probe;
++	pci_bus_type.probe = pci_bus_probe_wrapper;
++
++	pci_bus_remove = pci_bus_type.remove;
++	pci_bus_type.remove = pci_bus_remove_wrapper;
++
++	return 0;
 +}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/blkfront/blkfront.c tmp-linux-2.6-xen.patch/drivers/xen/blkfront/blkfront.c
---- pristine-linux-2.6.18.2/drivers/xen/blkfront/blkfront.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/blkfront/blkfront.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,912 @@
-+/******************************************************************************
-+ * blkfront.c
-+ * 
-+ * XenLinux virtual block-device driver.
-+ * 
-+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
-+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
-+ * Copyright (c) 2004, Christian Limpach
-+ * Copyright (c) 2004, Andrew Warfield
-+ * Copyright (c) 2005, Christopher Clark
-+ * Copyright (c) 2005, XenSource Ltd
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
 +
++core_initcall(hook_pci_bus);
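hook_pci_bus() above is a wrap-and-chain hook: the original pci_bus_type probe and remove methods are saved in file-scope pointers, and wrappers are installed that bracket each call with PHYSDEVOP_manage_pci_add/remove hypercalls, rolling the add back if the real probe fails. A self-contained model of the same pattern, with invented names standing in for the PCI core and the hypercalls:

    #include <stdio.h>

    struct bus { int (*probe)(int dev); };

    static int (*orig_probe)(int dev);   /* saved original, as in hook_pci_bus() */

    static int real_probe(int dev)
    {
            printf("probing device %d\n", dev);
            return 0;
    }

    static int probe_wrapper(int dev)
    {
            int r;

            printf("notify hypervisor: add %d\n", dev);      /* manage_pci_add */
            r = orig_probe(dev);                             /* chain through */
            if (r)
                    printf("notify hypervisor: remove %d\n", dev);  /* roll back */
            return r;
    }

    int main(void)
    {
            struct bus pci = { real_probe };

            orig_probe = pci.probe;          /* save, then replace */
            pci.probe = probe_wrapper;
            return pci.probe(7);
    }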
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/core/reboot.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/core/reboot.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,335 @@
++#define __KERNEL_SYSCALLS__
 +#include <linux/version.h>
-+#include "block.h"
-+#include <linux/cdrom.h>
-+#include <linux/sched.h>
-+#include <linux/interrupt.h>
-+#include <scsi/scsi.h>
-+#include <xen/evtchn.h>
-+#include <xen/xenbus.h>
-+#include <xen/interface/grant_table.h>
-+#include <xen/interface/io/protocols.h>
-+#include <xen/gnttab.h>
++#include <linux/kernel.h>
++#include <linux/unistd.h>
++#include <linux/module.h>
++#include <linux/reboot.h>
++#include <linux/sysrq.h>
 +#include <asm/hypervisor.h>
-+#include <asm/maddr.h>
++#include <xen/xenbus.h>
++#include <xen/evtchn.h>
++#include <linux/kmod.h>
++#include <linux/slab.h>
++#include <linux/workqueue.h>
 +
 +#ifdef HAVE_XEN_PLATFORM_COMPAT_H
 +#include <xen/platform-compat.h>
 +#endif
 +
-+#define BLKIF_STATE_DISCONNECTED 0
-+#define BLKIF_STATE_CONNECTED    1
-+#define BLKIF_STATE_SUSPENDED    2
++MODULE_LICENSE("Dual BSD/GPL");
 +
-+#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
-+    (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
-+#define GRANT_INVALID_REF	0
++#define SHUTDOWN_INVALID  -1
++#define SHUTDOWN_POWEROFF  0
++#define SHUTDOWN_SUSPEND   2
++#define SHUTDOWN_RESUMING  3
++#define SHUTDOWN_HALT      4
 +
-+static void connect(struct blkfront_info *);
-+static void blkfront_closing(struct xenbus_device *);
-+static int blkfront_remove(struct xenbus_device *);
-+static int talk_to_backend(struct xenbus_device *, struct blkfront_info *);
-+static int setup_blkring(struct xenbus_device *, struct blkfront_info *);
++/* Ignore multiple shutdown requests. */
++static int shutting_down = SHUTDOWN_INVALID;
 +
-+static void kick_pending_request_queues(struct blkfront_info *);
++/* Was last suspend request cancelled? */
++static int suspend_cancelled;
 +
-+static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs);
-+static void blkif_restart_queue(void *arg);
-+static void blkif_recover(struct blkfront_info *);
-+static void blkif_completion(struct blk_shadow *);
-+static void blkif_free(struct blkfront_info *, int);
++/* Can we leave APs online when we suspend? */
++static int fast_suspend;
 +
++static void __shutdown_handler(void *unused);
++static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
 +
-+/**
-+ * Entry point to this code when a new device is created.  Allocate the basic
-+ * structures and the ring buffer for communication with the backend, and
-+ * inform the backend of the appropriate details for those.  Switch to
-+ * Initialised state.
-+ */
-+static int blkfront_probe(struct xenbus_device *dev,
-+			  const struct xenbus_device_id *id)
++static int setup_suspend_evtchn(void);
++
++int __xen_suspend(int fast_suspend, void (*resume_notifier)(int));
++
++static int shutdown_process(void *__unused)
 +{
-+	int err, vdevice, i;
-+	struct blkfront_info *info;
++	static char *envp[] = { "HOME=/", "TERM=linux",
++				"PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
++	static char *poweroff_argv[] = { "/sbin/poweroff", NULL };
 +
-+	/* FIXME: Use dynamic device id if this is not set. */
-+	err = xenbus_scanf(XBT_NIL, dev->nodename,
-+			   "virtual-device", "%i", &vdevice);
-+	if (err != 1) {
-+		xenbus_dev_fatal(dev, err, "reading virtual-device");
-+		return err;
-+	}
++	extern asmlinkage long sys_reboot(int magic1, int magic2,
++					  unsigned int cmd, void *arg);
 +
-+	info = kzalloc(sizeof(*info), GFP_KERNEL);
-+	if (!info) {
-+		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
-+		return -ENOMEM;
++	if ((shutting_down == SHUTDOWN_POWEROFF) ||
++	    (shutting_down == SHUTDOWN_HALT)) {
++		if (call_usermodehelper("/sbin/poweroff", poweroff_argv,
++					envp, 0) < 0) {
++#ifdef CONFIG_XEN
++			sys_reboot(LINUX_REBOOT_MAGIC1,
++				   LINUX_REBOOT_MAGIC2,
++				   LINUX_REBOOT_CMD_POWER_OFF,
++				   NULL);
++#endif /* CONFIG_XEN */
++		}
 +	}
 +
-+	info->xbdev = dev;
-+	info->vdevice = vdevice;
-+	info->connected = BLKIF_STATE_DISCONNECTED;
-+	INIT_WORK(&info->work, blkif_restart_queue, (void *)info);
++	shutting_down = SHUTDOWN_INVALID; /* could try again */
 +
-+	for (i = 0; i < BLK_RING_SIZE; i++)
-+		info->shadow[i].req.id = i+1;
-+	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
++	return 0;
++}
 +
-+	/* Front end dir is a number, which is used as the id. */
-+	info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0);
-+	dev->dev.driver_data = info;
++static void xen_resume_notifier(int _suspend_cancelled)
++{
++	int old_state = xchg(&shutting_down, SHUTDOWN_RESUMING);
++	BUG_ON(old_state != SHUTDOWN_SUSPEND);
++	suspend_cancelled = _suspend_cancelled;
++}
 +
-+	err = talk_to_backend(dev, info);
++static int xen_suspend(void *__unused)
++{
++	int err, old_state;
++
++	daemonize("suspend");
++	err = set_cpus_allowed(current, cpumask_of_cpu(0));
 +	if (err) {
-+		kfree(info);
-+		dev->dev.driver_data = NULL;
-+		return err;
++		printk(KERN_ERR "Xen suspend can't run on CPU0 (%d)\n", err);
++		goto fail;
++	}
++
++	do {
++		err = __xen_suspend(fast_suspend, xen_resume_notifier);
++		if (err) {
++			printk(KERN_ERR "Xen suspend failed (%d)\n", err);
++			goto fail;
++		}
++		if (!suspend_cancelled)
++			setup_suspend_evtchn();
++		old_state = cmpxchg(
++			&shutting_down, SHUTDOWN_RESUMING, SHUTDOWN_INVALID);
++	} while (old_state == SHUTDOWN_SUSPEND);
++
++	switch (old_state) {
++	case SHUTDOWN_INVALID:
++	case SHUTDOWN_SUSPEND:
++		BUG();
++	case SHUTDOWN_RESUMING:
++		break;
++	default:
++		schedule_work(&shutdown_work);
++		break;
 +	}
 +
 +	return 0;
-+}
 +
++ fail:
++	old_state = xchg(&shutting_down, SHUTDOWN_INVALID);
++	BUG_ON(old_state != SHUTDOWN_SUSPEND);
++	return 0;
++}
 +
-+/**
-+ * We are reconnecting to the backend, due to a suspend/resume, or a backend
-+ * driver restart.  We tear down our blkif structure and recreate it, but
-+ * leave the device-layer structures intact so that this is transparent to the
-+ * rest of the kernel.
-+ */
-+static int blkfront_resume(struct xenbus_device *dev)
++static void switch_shutdown_state(int new_state)
 +{
-+	struct blkfront_info *info = dev->dev.driver_data;
-+	int err;
++	int prev_state, old_state = SHUTDOWN_INVALID;
 +
-+	DPRINTK("blkfront_resume: %s\n", dev->nodename);
-+
-+	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
++	/* We only drive shutdown_state into an active state. */
++	if (new_state == SHUTDOWN_INVALID)
++		return;
 +
-+	err = talk_to_backend(dev, info);
-+	if (info->connected == BLKIF_STATE_SUSPENDED && !err)
-+		blkif_recover(info);
++	do {
++		/* We drop this transition if already in an active state. */
++		if ((old_state != SHUTDOWN_INVALID) &&
++		    (old_state != SHUTDOWN_RESUMING))
++			return;
++		/* Attempt to transition. */
++		prev_state = old_state;
++		old_state = cmpxchg(&shutting_down, old_state, new_state);
++	} while (old_state != prev_state);
 +
-+	return err;
++	/* Either we kick off the work, or we leave it to xen_suspend(). */
++	if (old_state == SHUTDOWN_INVALID)
++		schedule_work(&shutdown_work);
++	else
++		BUG_ON(old_state != SHUTDOWN_RESUMING);
 +}
 +
++static void __shutdown_handler(void *unused)
++{
++	int err;
 +
-+/* Common code used when first setting up, and when resuming. */
-+static int talk_to_backend(struct xenbus_device *dev,
-+			   struct blkfront_info *info)
++	err = kernel_thread((shutting_down == SHUTDOWN_SUSPEND) ?
++			    xen_suspend : shutdown_process,
++			    NULL, CLONE_FS | CLONE_FILES);
++
++	if (err < 0) {
++		printk(KERN_WARNING "Error creating shutdown process (%d): "
++		       "retrying...\n", -err);
++		schedule_delayed_work(&shutdown_work, HZ/2);
++	}
++}
++
++static void shutdown_handler(struct xenbus_watch *watch,
++			     const char **vec, unsigned int len)
 +{
-+	const char *message = NULL;
++	extern void ctrl_alt_del(void);
++	char *str;
 +	struct xenbus_transaction xbt;
-+	int err;
++	int err, new_state = SHUTDOWN_INVALID;
 +
-+	/* Create shared ring, alloc event channel. */
-+	err = setup_blkring(dev, info);
-+	if (err)
-+		goto out;
++	if ((shutting_down != SHUTDOWN_INVALID) &&
++	    (shutting_down != SHUTDOWN_RESUMING))
++		return;
 +
-+again:
++ again:
 +	err = xenbus_transaction_start(&xbt);
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "starting transaction");
-+		goto destroy_blkring;
-+	}
++	if (err)
++		return;
 +
-+	err = xenbus_printf(xbt, dev->nodename,
-+			    "ring-ref","%u", info->ring_ref);
-+	if (err) {
-+		message = "writing ring-ref";
-+		goto abort_transaction;
++	str = (char *)xenbus_read(xbt, "control", "shutdown", NULL);
++	/* Ignore read errors and empty reads. */
++	if (XENBUS_IS_ERR_READ(str)) {
++		xenbus_transaction_end(xbt, 1);
++		return;
 +	}
-+	err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
-+			    irq_to_evtchn_port(info->irq));
-+	if (err) {
-+		message = "writing event-channel";
-+		goto abort_transaction;
++
++	xenbus_write(xbt, "control", "shutdown", "");
++
++	err = xenbus_transaction_end(xbt, 0);
++	if (err == -EAGAIN) {
++		kfree(str);
++		goto again;
 +	}
-+	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
-+			    XEN_IO_PROTO_ABI_NATIVE);
-+	if (err) {
-+		message = "writing protocol";
-+		goto abort_transaction;
++
++	if (strcmp(str, "poweroff") == 0)
++		new_state = SHUTDOWN_POWEROFF;
++	else if (strcmp(str, "reboot") == 0)
++		ctrl_alt_del();
++	else if (strcmp(str, "suspend") == 0)
++		new_state = SHUTDOWN_SUSPEND;
++	else if (strcmp(str, "halt") == 0)
++		new_state = SHUTDOWN_HALT;
++	else
++		printk("Ignoring shutdown request: %s\n", str);
++
++	switch_shutdown_state(new_state);
++
++	kfree(str);
++}
++
++static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
++			  unsigned int len)
++{
++	char sysrq_key = '\0';
++	struct xenbus_transaction xbt;
++	int err;
++
++ again:
++	err = xenbus_transaction_start(&xbt);
++	if (err)
++		return;
++	if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) {
++		printk(KERN_ERR "Unable to read sysrq code in "
++		       "control/sysrq\n");
++		xenbus_transaction_end(xbt, 1);
++		return;
 +	}
 +
++	if (sysrq_key != '\0')
++		xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
++
 +	err = xenbus_transaction_end(xbt, 0);
-+	if (err) {
-+		if (err == -EAGAIN)
-+			goto again;
-+		xenbus_dev_fatal(dev, err, "completing transaction");
-+		goto destroy_blkring;
-+	}
++	if (err == -EAGAIN)
++		goto again;
 +
-+	xenbus_switch_state(dev, XenbusStateInitialised);
++#ifdef CONFIG_MAGIC_SYSRQ
++	if (sysrq_key != '\0')
++		handle_sysrq(sysrq_key, NULL, NULL);
++#endif
++}
 +
-+	return 0;
++static struct xenbus_watch shutdown_watch = {
++	.node = "control/shutdown",
++	.callback = shutdown_handler
++};
 +
-+ abort_transaction:
-+	xenbus_transaction_end(xbt, 1);
-+	if (message)
-+		xenbus_dev_fatal(dev, err, "%s", message);
-+ destroy_blkring:
-+	blkif_free(info, 0);
-+ out:
-+	return err;
++static struct xenbus_watch sysrq_watch = {
++	.node = "control/sysrq",
++	.callback = sysrq_handler
++};
++
++static irqreturn_t suspend_int(int irq, void *dev_id, struct pt_regs *ptregs)
++{
++	switch_shutdown_state(SHUTDOWN_SUSPEND);
++	return IRQ_HANDLED;
 +}
 +
++static int setup_suspend_evtchn(void)
++{
++	static int irq;
++	int port;
++	char portstr[16];
 +
-+static int setup_blkring(struct xenbus_device *dev,
-+			 struct blkfront_info *info)
++	if (irq > 0)
++		unbind_from_irqhandler(irq, NULL);
++
++	irq = bind_listening_port_to_irqhandler(0, suspend_int, 0, "suspend",
++						NULL);
++	if (irq <= 0)
++		return -1;
++
++	port = irq_to_evtchn_port(irq);
++	printk(KERN_INFO "suspend: event channel %d\n", port);
++	sprintf(portstr, "%d", port);
++	xenbus_write(XBT_NIL, "device/suspend", "event-channel", portstr);
++
++	return 0;
++}
++
++static int setup_shutdown_watcher(void)
 +{
-+	blkif_sring_t *sring;
 +	int err;
 +
-+	info->ring_ref = GRANT_INVALID_REF;
++	xenbus_scanf(XBT_NIL, "control",
++		     "platform-feature-multiprocessor-suspend",
++		     "%d", &fast_suspend);
 +
-+	sring = (blkif_sring_t *)__get_free_page(GFP_KERNEL);
-+	if (!sring) {
-+		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
-+		return -ENOMEM;
++	err = register_xenbus_watch(&shutdown_watch);
++	if (err) {
++		printk(KERN_ERR "Failed to set shutdown watcher\n");
++		return err;
 +	}
-+	SHARED_RING_INIT(sring);
-+	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
 +
-+	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
-+	if (err < 0) {
-+		free_page((unsigned long)sring);
-+		info->ring.sring = NULL;
-+		goto fail;
++	err = register_xenbus_watch(&sysrq_watch);
++	if (err) {
++		printk(KERN_ERR "Failed to set sysrq watcher\n");
++		return err;
 +	}
-+	info->ring_ref = err;
 +
-+	err = bind_listening_port_to_irqhandler(
-+		dev->otherend_id, blkif_int, SA_SAMPLE_RANDOM, "blkif", info);
-+	if (err <= 0) {
-+		xenbus_dev_fatal(dev, err,
-+				 "bind_listening_port_to_irqhandler");
-+		goto fail;
++	/* suspend event channel */
++	err = setup_suspend_evtchn();
++	if (err) {
++		printk(KERN_ERR "Failed to register suspend event channel\n");
++		return err;
 +	}
-+	info->irq = err;
 +
 +	return 0;
-+fail:
-+	blkif_free(info, 0);
-+	return err;
 +}
 +
++#ifdef CONFIG_XEN
 +
-+/**
-+ * Callback received when the backend's state changes.
-+ */
-+static void backend_changed(struct xenbus_device *dev,
-+			    enum xenbus_state backend_state)
++static int shutdown_event(struct notifier_block *notifier,
++			  unsigned long event,
++			  void *data)
 +{
-+	struct blkfront_info *info = dev->dev.driver_data;
-+	struct block_device *bd;
++	setup_shutdown_watcher();
++	return NOTIFY_DONE;
++}
 +
-+	DPRINTK("blkfront:backend_changed.\n");
++static int __init setup_shutdown_event(void)
++{
++	static struct notifier_block xenstore_notifier = {
++		.notifier_call = shutdown_event
++	};
++	register_xenstore_notifier(&xenstore_notifier);
 +
-+	switch (backend_state) {
-+	case XenbusStateInitialising:
-+	case XenbusStateInitWait:
-+	case XenbusStateInitialised:
-+	case XenbusStateUnknown:
-+	case XenbusStateClosed:
-+		break;
++	return 0;
++}
 +
-+	case XenbusStateConnected:
-+		connect(info);
-+		break;
++subsys_initcall(setup_shutdown_event);
 +
-+	case XenbusStateClosing:
-+		bd = bdget(info->dev);
-+		if (bd == NULL)
-+			xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
++#else /* !defined(CONFIG_XEN) */
 +
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
-+		down(&bd->bd_sem);
-+#else
-+		mutex_lock(&bd->bd_mutex);
-+#endif
-+		if (info->users > 0)
-+			xenbus_dev_error(dev, -EBUSY,
-+					 "Device in use; refusing to close");
-+		else
-+			blkfront_closing(dev);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
-+		up(&bd->bd_sem);
-+#else
-+		mutex_unlock(&bd->bd_mutex);
-+#endif
-+		bdput(bd);
-+		break;
-+	}
++int xen_reboot_init(void)
++{
++	return setup_shutdown_watcher();
 +}
 +
++#endif /* !defined(CONFIG_XEN) */
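switch_shutdown_state() above is a small lock-free state machine: a transition is attempted only out of SHUTDOWN_INVALID or SHUTDOWN_RESUMING, and the cmpxchg() loop retries until it either wins the race or observes that a shutdown is already in flight. A userspace sketch of the same loop, using GCC's __sync_val_compare_and_swap in place of the kernel's cmpxchg():

    #include <stdio.h>

    enum { INVALID = -1, POWEROFF = 0, SUSPEND = 2, RESUMING = 3, HALT = 4 };

    static int shutting_down = INVALID;

    /* Returns 1 if we performed the transition, 0 if it was dropped. */
    static int switch_state(int new_state)
    {
            int prev, old = INVALID;

            do {
                    if (old != INVALID && old != RESUMING)
                            return 0;    /* already mid-shutdown: drop it */
                    prev = old;
                    old = __sync_val_compare_and_swap(&shutting_down,
                                                      old, new_state);
            } while (old != prev);
            return 1;
    }

    int main(void)
    {
            printf("%d\n", switch_state(SUSPEND));    /* 1: INVALID -> SUSPEND */
            printf("%d\n", switch_state(POWEROFF));   /* 0: dropped, already active */
            return 0;
    }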
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/core/smpboot.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/core/smpboot.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,464 @@
++/*
++ *	Xen SMP booting functions
++ *
++ *	See arch/i386/kernel/smpboot.c for copyright and credits for derived
++ *	portions of this file.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/kernel_stat.h>
++#include <linux/smp_lock.h>
++#include <linux/irq.h>
++#include <linux/bootmem.h>
++#include <linux/notifier.h>
++#include <linux/cpu.h>
++#include <linux/percpu.h>
++#include <asm/desc.h>
++#include <asm/arch_hooks.h>
++#include <asm/pgalloc.h>
++#include <xen/evtchn.h>
++#include <xen/interface/vcpu.h>
++#include <xen/cpu_hotplug.h>
++#include <xen/xenbus.h>
++
++extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
++extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);
 +
-+/* ** Connection ** */
++extern int local_setup_timer(unsigned int cpu);
++extern void local_teardown_timer(unsigned int cpu);
 +
++extern void hypervisor_callback(void);
++extern void failsafe_callback(void);
++extern void system_call(void);
++extern void smp_trap_init(trap_info_t *);
 +
-+/*
-+ * Invoked when the backend is finally 'ready' (and has produced
-+ * the details about the physical device - #sectors, size, etc).
-+ */
-+static void connect(struct blkfront_info *info)
-+{
-+	unsigned long long sectors;
-+	unsigned long sector_size;
-+	unsigned int binfo;
-+	int err;
++/* Number of siblings per CPU package */
++int smp_num_siblings = 1;
 +
-+	if ((info->connected == BLKIF_STATE_CONNECTED) ||
-+	    (info->connected == BLKIF_STATE_SUSPENDED) )
-+		return;
++cpumask_t cpu_online_map;
++EXPORT_SYMBOL(cpu_online_map);
++cpumask_t cpu_possible_map;
++EXPORT_SYMBOL(cpu_possible_map);
++cpumask_t cpu_initialized_map;
 +
-+	DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend);
++struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
++EXPORT_SYMBOL(cpu_data);
 +
-+	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-+			    "sectors", "%Lu", &sectors,
-+			    "info", "%u", &binfo,
-+			    "sector-size", "%lu", &sector_size,
-+			    NULL);
-+	if (err) {
-+		xenbus_dev_fatal(info->xbdev, err,
-+				 "reading backend fields at %s",
-+				 info->xbdev->otherend);
-+		return;
-+	}
++#ifdef CONFIG_HOTPLUG_CPU
++DEFINE_PER_CPU(int, cpu_state) = { 0 };
++#endif
 +
-+	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-+			    "feature-barrier", "%lu", &info->feature_barrier,
-+			    NULL);
-+	if (err)
-+		info->feature_barrier = 0;
++static DEFINE_PER_CPU(int, resched_irq);
++static DEFINE_PER_CPU(int, callfunc_irq);
++static char resched_name[NR_CPUS][15];
++static char callfunc_name[NR_CPUS][15];
 +
-+	err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info);
-+	if (err) {
-+		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
-+				 info->xbdev->otherend);
-+		return;
-+	}
++u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
 +
-+	(void)xenbus_switch_state(info->xbdev, XenbusStateConnected);
++cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
++cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
++EXPORT_SYMBOL(cpu_core_map);
 +
-+	/* Kick pending requests. */
-+	spin_lock_irq(&blkif_io_lock);
-+	info->connected = BLKIF_STATE_CONNECTED;
-+	kick_pending_request_queues(info);
-+	spin_unlock_irq(&blkif_io_lock);
++#if defined(__i386__)
++u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = 0xff };
++EXPORT_SYMBOL(x86_cpu_to_apicid);
++#elif !defined(CONFIG_X86_IO_APIC)
++unsigned int maxcpus = NR_CPUS;
++#endif
 +
-+	add_disk(info->gd);
++void __init prefill_possible_map(void)
++{
++	int i, rc;
 +
-+	info->is_ready = 1;
++	for_each_possible_cpu(i)
++	    if (i != smp_processor_id())
++		return;
++
++	for (i = 0; i < NR_CPUS; i++) {
++		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
++		if (rc >= 0)
++			cpu_set(i, cpu_possible_map);
++	}
 +}
 +
-+/**
-+ * Handle the change of state of the backend to Closing.  We must delete our
-+ * device-layer structures now, to ensure that writes are flushed through to
-+ * the backend.  Once this is done, we can switch to Closed in
-+ * acknowledgement.
-+ */
-+static void blkfront_closing(struct xenbus_device *dev)
++void __init smp_alloc_memory(void)
 +{
-+	struct blkfront_info *info = dev->dev.driver_data;
-+	unsigned long flags;
++}
 +
-+	DPRINTK("blkfront_closing: %s removed\n", dev->nodename);
++static inline void
++set_cpu_sibling_map(unsigned int cpu)
++{
++	cpu_data[cpu].phys_proc_id = cpu;
++	cpu_data[cpu].cpu_core_id  = 0;
 +
-+	if (info->rq == NULL)
-+		goto out;
++	cpu_sibling_map[cpu] = cpumask_of_cpu(cpu);
++	cpu_core_map[cpu]    = cpumask_of_cpu(cpu);
 +
-+	spin_lock_irqsave(&blkif_io_lock, flags);
-+	/* No more blkif_request(). */
-+	blk_stop_queue(info->rq);
-+	/* No more gnttab callback work. */
-+	gnttab_cancel_free_callback(&info->callback);
-+	spin_unlock_irqrestore(&blkif_io_lock, flags);
++	cpu_data[cpu].booted_cores = 1;
++}
 +
-+	/* Flush gnttab callback work. Must be done with no locks held. */
-+	flush_scheduled_work();
++static void
++remove_siblinginfo(unsigned int cpu)
++{
++	cpu_data[cpu].phys_proc_id = BAD_APICID;
++	cpu_data[cpu].cpu_core_id  = BAD_APICID;
 +
-+	xlvbd_del(info);
++	cpus_clear(cpu_sibling_map[cpu]);
++	cpus_clear(cpu_core_map[cpu]);
 +
-+ out:
-+	xenbus_frontend_closed(dev);
++	cpu_data[cpu].booted_cores = 0;
 +}
 +
-+
-+static int blkfront_remove(struct xenbus_device *dev)
++static int __cpuinit xen_smp_intr_init(unsigned int cpu)
 +{
-+	struct blkfront_info *info = dev->dev.driver_data;
++	int rc;
 +
-+	DPRINTK("blkfront_remove: %s removed\n", dev->nodename);
++	per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) = -1;
 +
-+	blkif_free(info, 0);
++	sprintf(resched_name[cpu], "resched%u", cpu);
++	rc = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR,
++				    cpu,
++				    smp_reschedule_interrupt,
++				    SA_INTERRUPT,
++				    resched_name[cpu],
++				    NULL);
++	if (rc < 0)
++		goto fail;
++	per_cpu(resched_irq, cpu) = rc;
 +
-+	kfree(info);
++	sprintf(callfunc_name[cpu], "callfunc%u", cpu);
++	rc = bind_ipi_to_irqhandler(CALL_FUNCTION_VECTOR,
++				    cpu,
++				    smp_call_function_interrupt,
++				    SA_INTERRUPT,
++				    callfunc_name[cpu],
++				    NULL);
++	if (rc < 0)
++		goto fail;
++	per_cpu(callfunc_irq, cpu) = rc;
++
++	if ((cpu != 0) && ((rc = local_setup_timer(cpu)) != 0))
++		goto fail;
 +
 +	return 0;
++
++ fail:
++	if (per_cpu(resched_irq, cpu) >= 0)
++		unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
++	if (per_cpu(callfunc_irq, cpu) >= 0)
++		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
++	return rc;
 +}
 +
++#ifdef CONFIG_HOTPLUG_CPU
++static void xen_smp_intr_exit(unsigned int cpu)
++{
++	if (cpu != 0)
++		local_teardown_timer(cpu);
 +
-+static inline int GET_ID_FROM_FREELIST(
-+	struct blkfront_info *info)
++	unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
++	unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
++}
++#endif
++
++void __cpuinit cpu_bringup(void)
 +{
-+	unsigned long free = info->shadow_free;
-+	BUG_ON(free > BLK_RING_SIZE);
-+	info->shadow_free = info->shadow[free].req.id;
-+	info->shadow[free].req.id = 0x0fffffee; /* debug */
-+	return free;
++	cpu_init();
++	identify_cpu(cpu_data + smp_processor_id());
++	touch_softlockup_watchdog();
++	preempt_disable();
++	local_irq_enable();
 +}
 +
-+static inline void ADD_ID_TO_FREELIST(
-+	struct blkfront_info *info, unsigned long id)
++static void __cpuinit cpu_bringup_and_idle(void)
 +{
-+	info->shadow[id].req.id  = info->shadow_free;
-+	info->shadow[id].request = 0;
-+	info->shadow_free = id;
++	cpu_bringup();
++	cpu_idle();
 +}
 +
-+static inline void flush_requests(struct blkfront_info *info)
++static void __cpuinit cpu_initialize_context(unsigned int cpu)
 +{
-+	int notify;
++	/* vcpu_guest_context_t is too large to allocate on the stack.
++	 * Hence we allocate statically and protect it with a lock */
++	static vcpu_guest_context_t ctxt;
++	static DEFINE_SPINLOCK(ctxt_lock);
 +
-+	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);
++	struct task_struct *idle = idle_task(cpu);
++#ifdef __x86_64__
++	struct desc_ptr *gdt_descr = &cpu_gdt_descr[cpu];
++#else
++	struct Xgt_desc_struct *gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
++#endif
 +
-+	if (notify)
-+		notify_remote_via_irq(info->irq);
-+}
++	if (cpu_test_and_set(cpu, cpu_initialized_map))
++		return;
 +
-+static void kick_pending_request_queues(struct blkfront_info *info)
-+{
-+	if (!RING_FULL(&info->ring)) {
-+		/* Re-enable calldowns. */
-+		blk_start_queue(info->rq);
-+		/* Kick things off immediately. */
-+		do_blkif_request(info->rq);
-+	}
-+}
++	spin_lock(&ctxt_lock);
 +
-+static void blkif_restart_queue(void *arg)
-+{
-+	struct blkfront_info *info = (struct blkfront_info *)arg;
-+	spin_lock_irq(&blkif_io_lock);
-+	if (info->connected == BLKIF_STATE_CONNECTED)
-+		kick_pending_request_queues(info);
-+	spin_unlock_irq(&blkif_io_lock);
-+}
++	memset(&ctxt, 0, sizeof(ctxt));
 +
-+static void blkif_restart_queue_callback(void *arg)
-+{
-+	struct blkfront_info *info = (struct blkfront_info *)arg;
-+	schedule_work(&info->work);
-+}
++	ctxt.flags = VGCF_IN_KERNEL;
++	ctxt.user_regs.ds = __USER_DS;
++	ctxt.user_regs.es = __USER_DS;
++	ctxt.user_regs.fs = 0;
++	ctxt.user_regs.gs = 0;
++	ctxt.user_regs.ss = __KERNEL_DS;
++	ctxt.user_regs.eip = (unsigned long)cpu_bringup_and_idle;
++	ctxt.user_regs.eflags = X86_EFLAGS_IF | 0x1000; /* IOPL_RING1 */
 +
-+int blkif_open(struct inode *inode, struct file *filep)
-+{
-+	struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
-+	info->users++;
-+	return 0;
-+}
++	memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
 +
++	smp_trap_init(ctxt.trap_ctxt);
 +
-+int blkif_release(struct inode *inode, struct file *filep)
-+{
-+	struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
-+	info->users--;
-+	if (info->users == 0) {
-+		/* Check whether we have been instructed to close.  We will
-+		   have ignored this request initially, as the device was
-+		   still mounted. */
-+		struct xenbus_device * dev = info->xbdev;
-+		enum xenbus_state state = xenbus_read_driver_state(dev->otherend);
++	ctxt.ldt_ents = 0;
 +
-+		if (state == XenbusStateClosing)
-+			blkfront_closing(dev);
-+	}
-+	return 0;
-+}
++	ctxt.gdt_frames[0] = virt_to_mfn(gdt_descr->address);
++	ctxt.gdt_ents      = gdt_descr->size / 8;
 +
++#ifdef __i386__
++	ctxt.user_regs.cs = __KERNEL_CS;
++	ctxt.user_regs.esp = idle->thread.esp0 - sizeof(struct pt_regs);
 +
-+int blkif_ioctl(struct inode *inode, struct file *filep,
-+		unsigned command, unsigned long argument)
-+{
-+	int i;
++	ctxt.kernel_ss = __KERNEL_DS;
++	ctxt.kernel_sp = idle->thread.esp0;
 +
-+	DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
-+		      command, (long)argument, inode->i_rdev);
++	ctxt.event_callback_cs     = __KERNEL_CS;
++	ctxt.event_callback_eip    = (unsigned long)hypervisor_callback;
++	ctxt.failsafe_callback_cs  = __KERNEL_CS;
++	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
 +
-+	switch (command) {
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
-+	case HDIO_GETGEO: {
-+		struct block_device *bd = inode->i_bdev;
-+		struct hd_geometry geo;
-+		int ret;
++	ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
++#else /* __x86_64__ */
++	ctxt.user_regs.cs = __KERNEL_CS;
++	ctxt.user_regs.esp = idle->thread.rsp0 - sizeof(struct pt_regs);
 +
-+                if (!argument)
-+                        return -EINVAL;
++	ctxt.kernel_ss = __KERNEL_DS;
++	ctxt.kernel_sp = idle->thread.rsp0;
 +
-+		geo.start = get_start_sect(bd);
-+		ret = blkif_getgeo(bd, &geo);
-+		if (ret)
-+			return ret;
++	ctxt.event_callback_eip    = (unsigned long)hypervisor_callback;
++	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
++	ctxt.syscall_callback_eip  = (unsigned long)system_call;
 +
-+		if (copy_to_user((struct hd_geometry __user *)argument, &geo,
-+				 sizeof(geo)))
-+                        return -EFAULT;
++	ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(init_level4_pgt));
 +
-+                return 0;
-+	}
++	ctxt.gs_base_kernel = (unsigned long)(cpu_pda(cpu));
 +#endif
-+	case CDROMMULTISESSION:
-+		DPRINTK("FIXME: support multisession CDs later\n");
-+		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
-+			if (put_user(0, (char __user *)(argument + i)))
-+				return -EFAULT;
-+		return 0;
 +
-+	default:
-+		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
-+		  command);*/
-+		return -EINVAL; /* same return as native Linux */
-+	}
++	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt))
++		BUG();
 +
-+	return 0;
++	spin_unlock(&ctxt_lock);
 +}
 +
-+
-+int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
++void __init smp_prepare_cpus(unsigned int max_cpus)
 +{
-+	/* We don't have real geometry info, but let's at least return
-+	   values consistent with the size of the device */
-+	sector_t nsect = get_capacity(bd->bd_disk);
-+	sector_t cylinders = nsect;
++	unsigned int cpu;
++	struct task_struct *idle;
++	int apicid, acpiid;
++	struct vcpu_get_physid cpu_id;
++#ifdef __x86_64__
++	struct desc_ptr *gdt_descr;
++#else
++	struct Xgt_desc_struct *gdt_descr;
++#endif
 +
-+	hg->heads = 0xff;
-+	hg->sectors = 0x3f;
-+	sector_div(cylinders, hg->heads * hg->sectors);
-+	hg->cylinders = cylinders;
-+	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
-+		hg->cylinders = 0xffff;
-+	return 0;
-+}
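++	/* Seed the APIC/ACPI ID tables with the boot VCPU's physical IDs
++	 * when the hypervisor exposes them via VCPUOP_get_physid. */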
++	apicid = 0;
++	if (HYPERVISOR_vcpu_op(VCPUOP_get_physid, 0, &cpu_id) == 0) {
++		apicid = xen_vcpu_physid_to_x86_apicid(cpu_id.phys_id);
++		acpiid = xen_vcpu_physid_to_x86_acpiid(cpu_id.phys_id);
++#ifdef CONFIG_ACPI
++		if (acpiid != 0xff)
++			x86_acpiid_to_apicid[acpiid] = apicid;
++#endif
++	}
++	boot_cpu_data.apicid = apicid;
++	cpu_data[0] = boot_cpu_data;
 +
++	cpu_2_logical_apicid[0] = apicid;
++	x86_cpu_to_apicid[0] = apicid;
 +
-+/*
-+ * blkif_queue_request
-+ *
-+ * request block io
-+ *
-+ * id: for guest use only.
-+ * operation: BLKIF_OP_{READ,WRITE,PROBE}
-+ * buffer: buffer to read/write into. this should be a
-+ *   virtual address in the guest os.
-+ */
-+static int blkif_queue_request(struct request *req)
-+{
-+	struct blkfront_info *info = req->rq_disk->private_data;
-+	unsigned long buffer_mfn;
-+	blkif_request_t *ring_req;
-+	struct bio *bio;
-+	struct bio_vec *bvec;
-+	int idx;
-+	unsigned long id;
-+	unsigned int fsect, lsect;
-+	int ref;
-+	grant_ref_t gref_head;
++	current_thread_info()->cpu = 0;
 +
-+	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
-+		return 1;
++	for (cpu = 0; cpu < NR_CPUS; cpu++) {
++		cpus_clear(cpu_sibling_map[cpu]);
++		cpus_clear(cpu_core_map[cpu]);
++	}
 +
-+	if (gnttab_alloc_grant_references(
-+		BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
-+		gnttab_request_free_callback(
-+			&info->callback,
-+			blkif_restart_queue_callback,
-+			info,
-+			BLKIF_MAX_SEGMENTS_PER_REQUEST);
-+		return 1;
++	set_cpu_sibling_map(0);
++
++	if (xen_smp_intr_init(0))
++		BUG();
++
++	cpu_initialized_map = cpumask_of_cpu(0);
++
++	/* Restrict the possible_map according to max_cpus. */
++	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
++		for (cpu = NR_CPUS-1; !cpu_isset(cpu, cpu_possible_map); cpu--)
++			continue;
++		cpu_clear(cpu, cpu_possible_map);
 +	}
 +
-+	/* Fill out a communications ring structure. */
-+	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
-+	id = GET_ID_FROM_FREELIST(info);
-+	info->shadow[id].request = (unsigned long)req;
++	for_each_possible_cpu (cpu) {
++		if (cpu == 0)
++			continue;
 +
-+	ring_req->id = id;
-+	ring_req->sector_number = (blkif_sector_t)req->sector;
-+	ring_req->handle = info->handle;
++#ifdef __x86_64__
++		gdt_descr = &cpu_gdt_descr[cpu];
++#else
++		gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
++#endif
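++		/* Each CPU gets a private, zeroed GDT page, copied from the
++		 * boot GDT and made read-only so the hypervisor will accept
++		 * it (skipped when writable descriptor tables are allowed). */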
++		gdt_descr->address = get_zeroed_page(GFP_KERNEL);
++		if (unlikely(!gdt_descr->address)) {
++			printk(KERN_CRIT "CPU%d failed to allocate GDT\n",
++			       cpu);
++			continue;
++		}
++		gdt_descr->size = GDT_SIZE;
++		memcpy((void *)gdt_descr->address, cpu_gdt_table, GDT_SIZE);
++		make_page_readonly(
++			(void *)gdt_descr->address,
++			XENFEAT_writable_descriptor_tables);
 +
-+	ring_req->operation = rq_data_dir(req) ?
-+		BLKIF_OP_WRITE : BLKIF_OP_READ;
-+	if (blk_barrier_rq(req))
-+		ring_req->operation = BLKIF_OP_WRITE_BARRIER;
++		apicid = cpu;
++		if (HYPERVISOR_vcpu_op(VCPUOP_get_physid, cpu, &cpu_id) == 0) {
++			apicid = xen_vcpu_physid_to_x86_apicid(cpu_id.phys_id);
++			acpiid = xen_vcpu_physid_to_x86_acpiid(cpu_id.phys_id);
++#ifdef CONFIG_ACPI
++			if (acpiid != 0xff)
++				x86_acpiid_to_apicid[acpiid] = apicid;
++#endif
++		}
++		cpu_data[cpu] = boot_cpu_data;
++		cpu_data[cpu].apicid = apicid;
 +
-+	ring_req->nr_segments = 0;
-+	rq_for_each_bio (bio, req) {
-+		bio_for_each_segment (bvec, bio, idx) {
-+			BUG_ON(ring_req->nr_segments
-+			       == BLKIF_MAX_SEGMENTS_PER_REQUEST);
-+			buffer_mfn = page_to_phys(bvec->bv_page) >> PAGE_SHIFT;
-+			fsect = bvec->bv_offset >> 9;
-+			lsect = fsect + (bvec->bv_len >> 9) - 1;
-+			/* install a grant reference. */
-+			ref = gnttab_claim_grant_reference(&gref_head);
-+			BUG_ON(ref == -ENOSPC);
++		cpu_2_logical_apicid[cpu] = apicid;
++		x86_cpu_to_apicid[cpu] = apicid;
 +
-+			gnttab_grant_foreign_access_ref(
-+				ref,
-+				info->xbdev->otherend_id,
-+				buffer_mfn,
-+				rq_data_dir(req) );
++		idle = fork_idle(cpu);
++		if (IS_ERR(idle))
++			panic("failed fork for CPU %d", cpu);
 +
-+			info->shadow[id].frame[ring_req->nr_segments] =
-+				mfn_to_pfn(buffer_mfn);
++#ifdef __x86_64__
++		cpu_pda(cpu)->pcurrent = idle;
++		cpu_pda(cpu)->cpunumber = cpu;
++		clear_ti_thread_flag(idle->thread_info, TIF_FORK);
++#endif
 +
-+			ring_req->seg[ring_req->nr_segments] =
-+				(struct blkif_request_segment) {
-+					.gref       = ref,
-+					.first_sect = fsect,
-+					.last_sect  = lsect };
++		irq_ctx_init(cpu);
 +
-+			ring_req->nr_segments++;
-+		}
++#ifdef CONFIG_HOTPLUG_CPU
++		if (is_initial_xendomain())
++			cpu_set(cpu, cpu_present_map);
++#else
++		cpu_set(cpu, cpu_present_map);
++#endif
 +	}
 +
-+	info->ring.req_prod_pvt++;
++	init_xenbus_allowed_cpumask();
 +
-+	/* Keep a private copy so we can reissue requests when recovering. */
-+	info->shadow[id].req = *ring_req;
++#ifdef CONFIG_X86_IO_APIC
++	/*
++	 * Here we can be sure that there is an IO-APIC in the system. Let's
++	 * go and set it up:
++	 */
++	if (!skip_ioapic_setup && nr_ioapics)
++		setup_IO_APIC();
++#endif
++}
++
++void __devinit smp_prepare_boot_cpu(void)
++{
++	prefill_possible_map();
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++
++/*
++ * Initialize cpu_present_map late to skip SMP boot code in init/main.c.
++ * But do it early enough to catch critical for_each_present_cpu() loops
++ * in i386-specific code.
++ */
++static int __init initialize_cpu_present_map(void)
++{
++	cpu_present_map = cpu_possible_map;
++	return 0;
++}
++core_initcall(initialize_cpu_present_map);
++
++int __cpu_disable(void)
++{
++	cpumask_t map = cpu_online_map;
++	unsigned int cpu = smp_processor_id();
++
++	if (cpu == 0)
++		return -EBUSY;
++
++	remove_siblinginfo(cpu);
 +
-+	gnttab_free_grant_references(gref_head);
++	cpu_clear(cpu, map);
++	fixup_irqs(map);
++	cpu_clear(cpu, cpu_online_map);
 +
 +	return 0;
 +}
 +
-+/*
-+ * do_blkif_request
-+ *  read a block; request is in a request queue
-+ */
-+void do_blkif_request(request_queue_t *rq)
++void __cpu_die(unsigned int cpu)
 +{
-+	struct blkfront_info *info = NULL;
-+	struct request *req;
-+	int queued;
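++	/* Sleep in 100ms steps until the hypervisor reports the VCPU as
++	 * down, then release its per-CPU interrupt resources. */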
++	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
++		current->state = TASK_UNINTERRUPTIBLE;
++		schedule_timeout(HZ/10);
++	}
 +
-+	DPRINTK("Entered do_blkif_request\n");
++	xen_smp_intr_exit(cpu);
 +
-+	queued = 0;
++	if (num_online_cpus() == 1)
++		alternatives_smp_switch(0);
++}
 +
-+	while ((req = elv_next_request(rq)) != NULL) {
-+		info = req->rq_disk->private_data;
-+		if (!blk_fs_request(req)) {
-+			end_request(req, 0);
-+			continue;
-+		}
++#endif /* CONFIG_HOTPLUG_CPU */
 +
-+		if (RING_FULL(&info->ring))
-+			goto wait;
++int __cpuinit __cpu_up(unsigned int cpu)
++{
++	int rc;
 +
-+		DPRINTK("do_blk_req %p: cmd %p, sec %llx, "
-+			"(%u/%li) buffer:%p [%s]\n",
-+			req, req->cmd, (long long)req->sector,
-+			req->current_nr_sectors,
-+			req->nr_sectors, req->buffer,
-+			rq_data_dir(req) ? "write" : "read");
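++	/* Ordering matters below: the context and sibling map must exist
++	 * before the CPU is marked online and VCPUOP_up starts it running. */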
++	rc = cpu_up_check(cpu);
++	if (rc)
++		return rc;
 +
++	cpu_initialize_context(cpu);
 +
-+		blkdev_dequeue_request(req);
-+		if (blkif_queue_request(req)) {
-+			blk_requeue_request(rq, req);
-+		wait:
-+			/* Avoid pointless unplugs. */
-+			blk_stop_queue(rq);
-+			break;
-+		}
++	if (num_online_cpus() == 1)
++		alternatives_smp_switch(1);
 +
-+		queued++;
-+	}
++	/* This must be done before setting cpu_online_map */
++	set_cpu_sibling_map(cpu);
++	wmb();
 +
-+	if (queued != 0)
-+		flush_requests(info);
-+}
++	rc = xen_smp_intr_init(cpu);
++	if (rc) {
++		remove_siblinginfo(cpu);
++		return rc;
++	}
 +
++	cpu_set(cpu, cpu_online_map);
 +
-+static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
-+{
-+	struct request *req;
-+	blkif_response_t *bret;
-+	RING_IDX i, rp;
-+	unsigned long flags;
-+	struct blkfront_info *info = (struct blkfront_info *)dev_id;
-+	int uptodate;
++	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
++	BUG_ON(rc);
 +
-+	spin_lock_irqsave(&blkif_io_lock, flags);
++	return 0;
++}
 +
-+	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
-+		spin_unlock_irqrestore(&blkif_io_lock, flags);
-+		return IRQ_HANDLED;
-+	}
++void __init smp_cpus_done(unsigned int max_cpus)
++{
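++	/* Nothing to finalize: each CPU was fully brought up in __cpu_up(). */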
++}
 +
-+ again:
-+	rp = info->ring.sring->rsp_prod;
-+	rmb(); /* Ensure we see queued responses up to 'rp'. */
++#ifndef CONFIG_X86_LOCAL_APIC
++int setup_profiling_timer(unsigned int multiplier)
++{
++	return -EINVAL;
++}
++#endif
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/core/xen_proc.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/core/xen_proc.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,23 @@
 +
-+	for (i = info->ring.rsp_cons; i != rp; i++) {
-+		unsigned long id;
-+		int ret;
++#include <linux/module.h>
++#include <linux/proc_fs.h>
++#include <xen/xen_proc.h>
 +
-+		bret = RING_GET_RESPONSE(&info->ring, i);
-+		id   = bret->id;
-+		req  = (struct request *)info->shadow[id].request;
++static struct proc_dir_entry *xen_base;
 +
-+		blkif_completion(&info->shadow[id]);
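++/* Create an entry under /proc/xen, making the directory on first use. */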
++struct proc_dir_entry *create_xen_proc_entry(const char *name, mode_t mode)
++{
++	if ( xen_base == NULL )
++		if ( (xen_base = proc_mkdir("xen", &proc_root)) == NULL )
++			panic("Couldn't create /proc/xen");
++	return create_proc_entry(name, mode, xen_base);
++}
 +
-+		ADD_ID_TO_FREELIST(info, id);
++EXPORT_SYMBOL_GPL(create_xen_proc_entry); 
 +
-+		uptodate = (bret->status == BLKIF_RSP_OKAY);
-+		switch (bret->operation) {
-+		case BLKIF_OP_WRITE_BARRIER:
-+			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
-+				printk("blkfront: %s: write barrier op failed\n",
-+				       info->gd->disk_name);
-+				uptodate = -EOPNOTSUPP;
-+				info->feature_barrier = 0;
-+			        xlvbd_barrier(info);
-+			}
-+			/* fall through */
-+		case BLKIF_OP_READ:
-+		case BLKIF_OP_WRITE:
-+			if (unlikely(bret->status != BLKIF_RSP_OKAY))
-+				DPRINTK("Bad return from blkdev data "
-+					"request: %x\n", bret->status);
++void remove_xen_proc_entry(const char *name)
++{
++	remove_proc_entry(name, xen_base);
++}
 +
-+			ret = end_that_request_first(req, uptodate,
-+				req->hard_nr_sectors);
-+			BUG_ON(ret);
-+			end_that_request_last(req, uptodate);
-+			break;
-+		default:
-+			BUG();
-+		}
-+	}
++EXPORT_SYMBOL_GPL(remove_xen_proc_entry); 
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/core/xen_sysfs.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/core/xen_sysfs.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,420 @@
++/*
++ *  copyright (c) 2006 IBM Corporation
++ *  Authored by: Mike D. Day <ncmike@us.ibm.com>
++ *
++ *  This program is free software; you can redistribute it and/or modify
++ *  it under the terms of the GNU General Public License version 2 as
++ *  published by the Free Software Foundation.
++ */
 +
-+	info->ring.rsp_cons = i;
++#include <linux/err.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <asm/hypervisor.h>
++#include <xen/features.h>
++#include <xen/hypervisor_sysfs.h>
++#include <xen/xenbus.h>
++#include <xen/interface/kexec.h>
 +
-+	if (i != info->ring.req_prod_pvt) {
-+		int more_to_do;
-+		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
-+		if (more_to_do)
-+			goto again;
-+	} else
-+		info->ring.sring->rsp_event = i + 1;
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Mike D. Day <ncmike@us.ibm.com>");
 +
-+	kick_pending_request_queues(info);
++static ssize_t type_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++	return sprintf(buffer, "xen\n");
++}
 +
-+	spin_unlock_irqrestore(&blkif_io_lock, flags);
++HYPERVISOR_ATTR_RO(type);
 +
-+	return IRQ_HANDLED;
++static int __init xen_sysfs_type_init(void)
++{
++	return sysfs_create_file(&hypervisor_subsys.kset.kobj, &type_attr.attr);
 +}
 +
-+static void blkif_free(struct blkfront_info *info, int suspend)
++static void xen_sysfs_type_destroy(void)
 +{
-+	/* Prevent new requests being issued until we fix things up. */
-+	spin_lock_irq(&blkif_io_lock);
-+	info->connected = suspend ?
-+		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
-+	/* No more blkif_request(). */
-+	if (info->rq)
-+		blk_stop_queue(info->rq);
-+	/* No more gnttab callback work. */
-+	gnttab_cancel_free_callback(&info->callback);
-+	spin_unlock_irq(&blkif_io_lock);
-+
-+	/* Flush gnttab callback work. Must be done with no locks held. */
-+	flush_scheduled_work();
-+
-+	/* Free resources associated with old device channel. */
-+	if (info->ring_ref != GRANT_INVALID_REF) {
-+		gnttab_end_foreign_access(info->ring_ref, 0,
-+					  (unsigned long)info->ring.sring);
-+		info->ring_ref = GRANT_INVALID_REF;
-+		info->ring.sring = NULL;
-+	}
-+	if (info->irq)
-+		unbind_from_irqhandler(info->irq, info);
-+	info->irq = 0;
++	sysfs_remove_file(&hypervisor_subsys.kset.kobj, &type_attr.attr);
 +}
 +
-+static void blkif_completion(struct blk_shadow *s)
++/* xen version attributes */
++static ssize_t major_show(struct hyp_sysfs_attr *attr, char *buffer)
 +{
-+	int i;
-+	for (i = 0; i < s->req.nr_segments; i++)
-+		gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
++	int version = HYPERVISOR_xen_version(XENVER_version, NULL);
++	if (version)
++		return sprintf(buffer, "%d\n", version >> 16);
++	return -ENODEV;
 +}
 +
-+static void blkif_recover(struct blkfront_info *info)
-+{
-+	int i;
-+	blkif_request_t *req;
-+	struct blk_shadow *copy;
-+	int j;
++HYPERVISOR_ATTR_RO(major);
 +
-+	/* Stage 1: Make a safe copy of the shadow state. */
-+	copy = kmalloc(sizeof(info->shadow), GFP_KERNEL | __GFP_NOFAIL);
-+	memcpy(copy, info->shadow, sizeof(info->shadow));
++static ssize_t minor_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++	int version = HYPERVISOR_xen_version(XENVER_version, NULL);
++	if (version)
++		return sprintf(buffer, "%d\n", version & 0xff);
++	return -ENODEV;
++}
 +
-+	/* Stage 2: Set up free list. */
-+	memset(&info->shadow, 0, sizeof(info->shadow));
-+	for (i = 0; i < BLK_RING_SIZE; i++)
-+		info->shadow[i].req.id = i+1;
-+	info->shadow_free = info->ring.req_prod_pvt;
-+	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
++HYPERVISOR_ATTR_RO(minor);
 +
-+	/* Stage 3: Find pending requests and requeue them. */
-+	for (i = 0; i < BLK_RING_SIZE; i++) {
-+		/* Not in use? */
-+		if (copy[i].request == 0)
-+			continue;
++static ssize_t extra_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++	int ret = -ENOMEM;
++	char *extra;
 +
-+		/* Grab a request slot and copy shadow state into it. */
-+		req = RING_GET_REQUEST(
-+			&info->ring, info->ring.req_prod_pvt);
-+		*req = copy[i].req;
++	extra = kmalloc(XEN_EXTRAVERSION_LEN, GFP_KERNEL);
++	if (extra) {
++		ret = HYPERVISOR_xen_version(XENVER_extraversion, extra);
++		if (!ret)
++			ret = sprintf(buffer, "%s\n", extra);
++		kfree(extra);
++	}
 +
-+		/* We get a new request id, and must reset the shadow state. */
-+		req->id = GET_ID_FROM_FREELIST(info);
-+		memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));
++	return ret;
++}
 +
-+		/* Rewrite any grant references invalidated by susp/resume. */
-+		for (j = 0; j < req->nr_segments; j++)
-+			gnttab_grant_foreign_access_ref(
-+				req->seg[j].gref,
-+				info->xbdev->otherend_id,
-+				pfn_to_mfn(info->shadow[req->id].frame[j]),
-+				rq_data_dir(
-+					(struct request *)
-+					info->shadow[req->id].request));
-+		info->shadow[req->id].req = *req;
++HYPERVISOR_ATTR_RO(extra);
 +
-+		info->ring.req_prod_pvt++;
-+	}
++static struct attribute *version_attrs[] = {
++	&major_attr.attr,
++	&minor_attr.attr,
++	&extra_attr.attr,
++	NULL
++};
 +
-+	kfree(copy);
++static struct attribute_group version_group = {
++	.name = "version",
++	.attrs = version_attrs,
++};
 +
-+	(void)xenbus_switch_state(info->xbdev, XenbusStateConnected);
++static int __init xen_sysfs_version_init(void)
++{
++	return sysfs_create_group(&hypervisor_subsys.kset.kobj,
++				  &version_group);
++}
 +
-+	spin_lock_irq(&blkif_io_lock);
++static void xen_sysfs_version_destroy(void)
++{
++	sysfs_remove_group(&hypervisor_subsys.kset.kobj, &version_group);
++}
 +
-+	/* Now safe for us to use the shared ring */
-+	info->connected = BLKIF_STATE_CONNECTED;
++/* UUID */
 +
-+	/* Send off requeued requests */
-+	flush_requests(info);
++static ssize_t uuid_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++	char *vm, *val;
++	int ret;
++	extern int xenstored_ready;
 +
-+	/* Kick any other new requests queued since we resumed */
-+	kick_pending_request_queues(info);
++	if (!xenstored_ready)
++		return -EBUSY;
 +
-+	spin_unlock_irq(&blkif_io_lock);
++	vm = xenbus_read(XBT_NIL, "vm", "", NULL);
++	if (IS_ERR(vm))
++		return PTR_ERR(vm);
++	val = xenbus_read(XBT_NIL, vm, "uuid", NULL);
++	kfree(vm);
++	if (IS_ERR(val))
++		return PTR_ERR(val);
++	ret = sprintf(buffer, "%s\n", val);
++	kfree(val);
++	return ret;
 +}
 +
-+int blkfront_is_ready(struct xenbus_device *dev)
-+{
-+	struct blkfront_info *info = dev->dev.driver_data;
++HYPERVISOR_ATTR_RO(uuid);
 +
-+	return info->is_ready;
++static int __init xen_sysfs_uuid_init(void)
++{
++	return sysfs_create_file(&hypervisor_subsys.kset.kobj, &uuid_attr.attr);
 +}
 +
++static void xen_sysfs_uuid_destroy(void)
++{
++	sysfs_remove_file(&hypervisor_subsys.kset.kobj, &uuid_attr.attr);
++}
 +
-+/* ** Driver Registration ** */
++/* xen compilation attributes */
 +
++static ssize_t compiler_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++	int ret = -ENOMEM;
++	struct xen_compile_info *info;
 +
-+static struct xenbus_device_id blkfront_ids[] = {
-+	{ "vbd" },
-+	{ "" }
-+};
-+MODULE_ALIAS("xen:vbd");
++	info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
++	if (info) {
++		ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
++		if (!ret)
++			ret = sprintf(buffer, "%s\n", info->compiler);
++		kfree(info);
++	}
 +
-+static struct xenbus_driver blkfront = {
-+	.name = "vbd",
-+	.owner = THIS_MODULE,
-+	.ids = blkfront_ids,
-+	.probe = blkfront_probe,
-+	.remove = blkfront_remove,
-+	.resume = blkfront_resume,
-+	.otherend_changed = backend_changed,
-+	.is_ready = blkfront_is_ready,
-+};
++	return ret;
++}
 +
++HYPERVISOR_ATTR_RO(compiler);
 +
-+static int __init xlblk_init(void)
++static ssize_t compiled_by_show(struct hyp_sysfs_attr *attr, char *buffer)
 +{
-+	if (!is_running_on_xen())
-+		return -ENODEV;
++	int ret = -ENOMEM;
++	struct xen_compile_info *info;
 +
-+	return xenbus_register_frontend(&blkfront);
++	info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
++	if (info) {
++		ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
++		if (!ret)
++			ret = sprintf(buffer, "%s\n", info->compile_by);
++		kfree(info);
++	}
++
++	return ret;
 +}
-+module_init(xlblk_init);
 +
++HYPERVISOR_ATTR_RO(compiled_by);
 +
-+static void xlblk_exit(void)
++static ssize_t compile_date_show(struct hyp_sysfs_attr *attr, char *buffer)
 +{
-+	return xenbus_unregister_driver(&blkfront);
-+}
-+module_exit(xlblk_exit);
++	int ret = -ENOMEM;
++	struct xen_compile_info *info;
 +
-+MODULE_LICENSE("Dual BSD/GPL");
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/blkfront/block.h tmp-linux-2.6-xen.patch/drivers/xen/blkfront/block.h
---- pristine-linux-2.6.18.2/drivers/xen/blkfront/block.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/blkfront/block.h	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,143 @@
-+/******************************************************************************
-+ * block.h
-+ * 
-+ * Shared definitions between all levels of XenLinux Virtual block devices.
-+ * 
-+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
-+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
-+ * Copyright (c) 2004-2005, Christian Limpach
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
++	info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
++	if (info) {
++		ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
++		if (!ret)
++			ret = sprintf(buffer, "%s\n", info->compile_date);
++		kfree(info);
++	}
 +
-+#ifndef __XEN_DRIVERS_BLOCK_H__
-+#define __XEN_DRIVERS_BLOCK_H__
++	return ret;
++}
 +
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/fs.h>
-+#include <linux/hdreg.h>
-+#include <linux/blkdev.h>
-+#include <linux/major.h>
-+#include <asm/hypervisor.h>
-+#include <xen/xenbus.h>
-+#include <xen/gnttab.h>
-+#include <xen/interface/xen.h>
-+#include <xen/interface/io/blkif.h>
-+#include <xen/interface/io/ring.h>
-+#include <asm/io.h>
-+#include <asm/atomic.h>
-+#include <asm/uaccess.h>
++HYPERVISOR_ATTR_RO(compile_date);
 +
-+#define DPRINTK(_f, _a...) pr_debug(_f, ## _a)
++static struct attribute *xen_compile_attrs[] = {
++	&compiler_attr.attr,
++	&compiled_by_attr.attr,
++	&compile_date_attr.attr,
++	NULL
++};
 +
-+#if 0
-+#define DPRINTK_IOCTL(_f, _a...) printk(KERN_ALERT _f, ## _a)
-+#else
-+#define DPRINTK_IOCTL(_f, _a...) ((void)0)
-+#endif
++static struct attribute_group xen_compilation_group = {
++	.name = "compilation",
++	.attrs = xen_compile_attrs,
++};
 +
-+struct xlbd_type_info
++static int __init xen_compilation_init(void)
 +{
-+	int partn_shift;
-+	int disks_per_major;
-+	char *devname;
-+	char *diskname;
-+};
++	return sysfs_create_group(&hypervisor_subsys.kset.kobj,
++				  &xen_compilation_group);
++}
 +
-+struct xlbd_major_info
++static void xen_compilation_destroy(void)
 +{
-+	int major;
-+	int index;
-+	int usage;
-+	struct xlbd_type_info *type;
-+};
++	sysfs_remove_group(&hypervisor_subsys.kset.kobj,
++			   &xen_compilation_group);
++}
 +
-+struct blk_shadow {
-+	blkif_request_t req;
-+	unsigned long request;
-+	unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+};
++/* xen properties info */
++
++static ssize_t capabilities_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++	int ret = -ENOMEM;
++	char *caps;
++
++	caps = kmalloc(XEN_CAPABILITIES_INFO_LEN, GFP_KERNEL);
++	if (caps) {
++		ret = HYPERVISOR_xen_version(XENVER_capabilities, caps);
++		if (!ret)
++			ret = sprintf(buffer, "%s\n", caps);
++		kfree(caps);
++	}
 +
-+#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
++	return ret;
++}
 +
-+/*
-+ * We have one of these per vbd, whether ide, scsi or 'other'.  They
-+ * hang in private_data off the gendisk structure. We may end up
-+ * putting all kinds of interesting stuff here :-)
-+ */
-+struct blkfront_info
++HYPERVISOR_ATTR_RO(capabilities);
++
++static ssize_t changeset_show(struct hyp_sysfs_attr *attr, char *buffer)
 +{
-+	struct xenbus_device *xbdev;
-+	dev_t dev;
-+ 	struct gendisk *gd;
-+	int vdevice;
-+	blkif_vdev_t handle;
-+	int connected;
-+	int ring_ref;
-+	blkif_front_ring_t ring;
-+	unsigned int irq;
-+	struct xlbd_major_info *mi;
-+	request_queue_t *rq;
-+	struct work_struct work;
-+	struct gnttab_free_callback callback;
-+	struct blk_shadow shadow[BLK_RING_SIZE];
-+	unsigned long shadow_free;
-+	int feature_barrier;
-+	int is_ready;
++	int ret = -ENOMEM;
++	char *cset;
 +
-+	/**
-+	 * The number of people holding this device open.  We won't allow a
-+	 * hot-unplug unless this is 0.
-+	 */
-+	int users;
-+};
++	cset = kmalloc(XEN_CHANGESET_INFO_LEN, GFP_KERNEL);
++	if (cset) {
++		ret = HYPERVISOR_xen_version(XENVER_changeset, cset);
++		if (!ret)
++			ret = sprintf(buffer, "%s\n", cset);
++		kfree(cset);
++	}
 +
-+extern spinlock_t blkif_io_lock;
++	return ret;
++}
 +
-+extern int blkif_open(struct inode *inode, struct file *filep);
-+extern int blkif_release(struct inode *inode, struct file *filep);
-+extern int blkif_ioctl(struct inode *inode, struct file *filep,
-+		       unsigned command, unsigned long argument);
-+extern int blkif_getgeo(struct block_device *, struct hd_geometry *);
-+extern int blkif_check(dev_t dev);
-+extern int blkif_revalidate(dev_t dev);
-+extern void do_blkif_request (request_queue_t *rq);
++HYPERVISOR_ATTR_RO(changeset);
 +
-+/* Virtual block-device subsystem. */
-+/* Note that xlvbd_add doesn't call add_disk for you: you're expected
-+   to call add_disk on info->gd once the disk is properly connected
-+   up. */
-+int xlvbd_add(blkif_sector_t capacity, int device,
-+	      u16 vdisk_info, u16 sector_size, struct blkfront_info *info);
-+void xlvbd_del(struct blkfront_info *info);
-+int xlvbd_barrier(struct blkfront_info *info);
++static ssize_t virtual_start_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++	int ret = -ENOMEM;
++	struct xen_platform_parameters *parms;
 +
-+#endif /* __XEN_DRIVERS_BLOCK_H__ */
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/blkfront/Makefile tmp-linux-2.6-xen.patch/drivers/xen/blkfront/Makefile
---- pristine-linux-2.6.18.2/drivers/xen/blkfront/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/blkfront/Makefile	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,5 @@
++	parms = kmalloc(sizeof(struct xen_platform_parameters), GFP_KERNEL);
++	if (parms) {
++		ret = HYPERVISOR_xen_version(XENVER_platform_parameters,
++					     parms);
++		if (!ret)
++			ret = sprintf(buffer, "%lx\n", parms->virt_start);
++		kfree(parms);
++	}
 +
-+obj-$(CONFIG_XEN_BLKDEV_FRONTEND)	:= xenblk.o
++	return ret;
++}
 +
-+xenblk-objs := blkfront.o vbd.o
++HYPERVISOR_ATTR_RO(virtual_start);
 +
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/blkfront/vbd.c tmp-linux-2.6-xen.patch/drivers/xen/blkfront/vbd.c
---- pristine-linux-2.6.18.2/drivers/xen/blkfront/vbd.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/blkfront/vbd.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,375 @@
-+/******************************************************************************
-+ * vbd.c
-+ * 
-+ * XenLinux virtual block-device driver (xvd).
-+ * 
-+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
-+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
-+ * Copyright (c) 2004-2005, Christian Limpach
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
++static ssize_t pagesize_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++	int ret;
 +
-+#include "block.h"
-+#include <linux/blkdev.h>
-+#include <linux/list.h>
++	ret = HYPERVISOR_xen_version(XENVER_pagesize, NULL);
++	if (ret > 0)
++		ret = sprintf(buffer, "%x\n", ret);
 +
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
++	return ret;
++}
 +
-+#define BLKIF_MAJOR(dev) ((dev)>>8)
-+#define BLKIF_MINOR(dev) ((dev) & 0xff)
++HYPERVISOR_ATTR_RO(pagesize);
 +
-+/*
-+ * For convenience we distinguish between ide, scsi and 'other' (i.e.,
-+ * potentially combinations of the two) in the naming scheme and in a few other
-+ * places.
-+ */
++/* eventually there will be several more features to export */
++static ssize_t xen_feature_show(int index, char *buffer)
++{
++	int ret = -ENOMEM;
++	struct xen_feature_info *info;
 +
-+#define NUM_IDE_MAJORS 10
-+#define NUM_SCSI_MAJORS 17
-+#define NUM_VBD_MAJORS 1
++	info = kmalloc(sizeof(struct xen_feature_info), GFP_KERNEL);
++	if (info) {
++		info->submap_idx = index;
++		ret = HYPERVISOR_xen_version(XENVER_get_features, info);
++		if (!ret)
++			ret = sprintf(buffer, "%d\n", info->submap);
++		kfree(info);
++	}
 +
-+static struct xlbd_type_info xlbd_ide_type = {
-+	.partn_shift = 6,
-+	.disks_per_major = 2,
-+	.devname = "ide",
-+	.diskname = "hd",
-+};
++	return ret;
++}
 +
-+static struct xlbd_type_info xlbd_scsi_type = {
-+	.partn_shift = 4,
-+	.disks_per_major = 16,
-+	.devname = "sd",
-+	.diskname = "sd",
-+};
++static ssize_t writable_pt_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++	return xen_feature_show(XENFEAT_writable_page_tables, buffer);
++}
 +
-+static struct xlbd_type_info xlbd_vbd_type = {
-+	.partn_shift = 4,
-+	.disks_per_major = 16,
-+	.devname = "xvd",
-+	.diskname = "xvd",
++HYPERVISOR_ATTR_RO(writable_pt);
++
++static struct attribute *xen_properties_attrs[] = {
++	&capabilities_attr.attr,
++	&changeset_attr.attr,
++	&virtual_start_attr.attr,
++	&pagesize_attr.attr,
++	&writable_pt_attr.attr,
++	NULL
 +};
 +
-+static struct xlbd_major_info *major_info[NUM_IDE_MAJORS + NUM_SCSI_MAJORS +
-+					 NUM_VBD_MAJORS];
++static struct attribute_group xen_properties_group = {
++	.name = "properties",
++	.attrs = xen_properties_attrs,
++};
 +
-+#define XLBD_MAJOR_IDE_START	0
-+#define XLBD_MAJOR_SCSI_START	(NUM_IDE_MAJORS)
-+#define XLBD_MAJOR_VBD_START	(NUM_IDE_MAJORS + NUM_SCSI_MAJORS)
++static int __init xen_properties_init(void)
++{
++	return sysfs_create_group(&hypervisor_subsys.kset.kobj,
++				  &xen_properties_group);
++}
 +
-+#define XLBD_MAJOR_IDE_RANGE	XLBD_MAJOR_IDE_START ... XLBD_MAJOR_SCSI_START - 1
-+#define XLBD_MAJOR_SCSI_RANGE	XLBD_MAJOR_SCSI_START ... XLBD_MAJOR_VBD_START - 1
-+#define XLBD_MAJOR_VBD_RANGE	XLBD_MAJOR_VBD_START ... XLBD_MAJOR_VBD_START + NUM_VBD_MAJORS - 1
++static void xen_properties_destroy(void)
++{
++	sysfs_remove_group(&hypervisor_subsys.kset.kobj,
++			   &xen_properties_group);
++}
 +
-+/* Information about our VBDs. */
-+#define MAX_VBDS 64
-+static LIST_HEAD(vbds_list);
++#ifdef CONFIG_KEXEC
 +
-+static struct block_device_operations xlvbd_block_fops =
++static ssize_t vmcoreinfo_show(struct hyp_sysfs_attr *attr, char *page)
 +{
-+	.owner = THIS_MODULE,
-+	.open = blkif_open,
-+	.release = blkif_release,
-+	.ioctl  = blkif_ioctl,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+	.getgeo = blkif_getgeo
-+#endif
-+};
++	extern size_t vmcoreinfo_size_xen;
++	extern unsigned long paddr_vmcoreinfo_xen;
 +
-+DEFINE_SPINLOCK(blkif_io_lock);
++	return sprintf(page, "%lx %zx\n",
++		paddr_vmcoreinfo_xen, vmcoreinfo_size_xen);
++}
 +
-+static struct xlbd_major_info *
-+xlbd_alloc_major_info(int major, int minor, int index)
-+{
-+	struct xlbd_major_info *ptr;
++HYPERVISOR_ATTR_RO(vmcoreinfo);
 +
-+	ptr = kzalloc(sizeof(struct xlbd_major_info), GFP_KERNEL);
-+	if (ptr == NULL)
-+		return NULL;
++static int __init xen_sysfs_vmcoreinfo_init(void)
++{
++	return sysfs_create_file(&hypervisor_subsys.kset.kobj,
++				 &vmcoreinfo_attr.attr);
++}
 +
-+	ptr->major = major;
++static void xen_sysfs_vmcoreinfo_destroy(void)
++{
++	sysfs_remove_file(&hypervisor_subsys.kset.kobj, &vmcoreinfo_attr.attr);
++}
 +
-+	switch (index) {
-+	case XLBD_MAJOR_IDE_RANGE:
-+		ptr->type = &xlbd_ide_type;
-+		ptr->index = index - XLBD_MAJOR_IDE_START;
-+		break;
-+	case XLBD_MAJOR_SCSI_RANGE:
-+		ptr->type = &xlbd_scsi_type;
-+		ptr->index = index - XLBD_MAJOR_SCSI_START;
-+		break;
-+	case XLBD_MAJOR_VBD_RANGE:
-+		ptr->type = &xlbd_vbd_type;
-+		ptr->index = index - XLBD_MAJOR_VBD_START;
-+		break;
-+	}
++#else
 +
-+	if (register_blkdev(ptr->major, ptr->type->devname)) {
-+		kfree(ptr);
-+		return NULL;
-+	}
++#define xen_sysfs_vmcoreinfo_init()	0
++#define xen_sysfs_vmcoreinfo_destroy()	((void)0)
 +
-+	printk("xen-vbd: registered block device major %i\n", ptr->major);
-+	major_info[index] = ptr;
-+	return ptr;
-+}
++#endif
 +
-+static struct xlbd_major_info *
-+xlbd_get_major_info(int vdevice)
++static int __init hyper_sysfs_init(void)
 +{
-+	struct xlbd_major_info *mi;
-+	int major, minor, index;
++	int ret;
 +
-+	major = BLKIF_MAJOR(vdevice);
-+	minor = BLKIF_MINOR(vdevice);
++	if (!is_running_on_xen())
++		return -ENODEV;
 +
-+	switch (major) {
-+	case IDE0_MAJOR: index = 0; break;
-+	case IDE1_MAJOR: index = 1; break;
-+	case IDE2_MAJOR: index = 2; break;
-+	case IDE3_MAJOR: index = 3; break;
-+	case IDE4_MAJOR: index = 4; break;
-+	case IDE5_MAJOR: index = 5; break;
-+	case IDE6_MAJOR: index = 6; break;
-+	case IDE7_MAJOR: index = 7; break;
-+	case IDE8_MAJOR: index = 8; break;
-+	case IDE9_MAJOR: index = 9; break;
-+	case SCSI_DISK0_MAJOR: index = 10; break;
-+	case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR:
-+		index = 11 + major - SCSI_DISK1_MAJOR;
-+		break;
-+        case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR:
-+                index = 18 + major - SCSI_DISK8_MAJOR;
-+                break;
-+        case SCSI_CDROM_MAJOR: index = 26; break;
-+        default: index = 27; break;
-+	}
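++	/* Register each attribute group in turn; the labels below unwind
++	 * whatever was already registered if a later step fails. */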
++	ret = xen_sysfs_type_init();
++	if (ret)
++		goto out;
++	ret = xen_sysfs_version_init();
++	if (ret)
++		goto version_out;
++	ret = xen_compilation_init();
++	if (ret)
++		goto comp_out;
++	ret = xen_sysfs_uuid_init();
++	if (ret)
++		goto uuid_out;
++	ret = xen_properties_init();
++	if (ret)
++		goto prop_out;
++	ret = xen_sysfs_vmcoreinfo_init();
++	if (!ret)
++		goto out;
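++	/* Note the inverted test: success on the final step jumps straight
++	 * to 'out', while failure falls through into the unwind path. */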
 +
-+	mi = ((major_info[index] != NULL) ? major_info[index] :
-+	      xlbd_alloc_major_info(major, minor, index));
-+	if (mi)
-+		mi->usage++;
-+	return mi;
++	xen_properties_destroy();
++prop_out:
++	xen_sysfs_uuid_destroy();
++uuid_out:
++	xen_compilation_destroy();
++comp_out:
++	xen_sysfs_version_destroy();
++version_out:
++	xen_sysfs_type_destroy();
++out:
++	return ret;
 +}
 +
-+static void
-+xlbd_put_major_info(struct xlbd_major_info *mi)
++static void __exit hyper_sysfs_exit(void)
 +{
-+	mi->usage--;
-+	/* XXX: release major if 0 */
++	xen_sysfs_vmcoreinfo_destroy();
++	xen_properties_destroy();
++	xen_compilation_destroy();
++	xen_sysfs_uuid_destroy();
++	xen_sysfs_version_destroy();
++	xen_sysfs_type_destroy();
++
 +}
 +
-+static int
-+xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
-+{
-+	request_queue_t *rq;
++module_init(hyper_sysfs_init);
++module_exit(hyper_sysfs_exit);
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/core/xencomm.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/core/xencomm.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,229 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ * 
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
++ */
 +
-+	rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
-+	if (rq == NULL)
-+		return -1;
++#include <linux/gfp.h>
++#include <linux/mm.h>
++#include <asm/page.h>
++#include <xen/xencomm.h>
++#include <xen/interface/xen.h>
++#ifdef __ia64__
++#include <asm/xen/xencomm.h>	/* for is_kern_addr() */
++#endif
 +
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
-+	elevator_init(rq, "noop");
-+#else
-+	elevator_init(rq, &elevator_noop);
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
 +#endif
 +
-+	/* Hard sector size and max sectors impersonate the equiv. hardware. */
-+	blk_queue_hardsect_size(rq, sector_size);
-+	blk_queue_max_sectors(rq, 512);
++static int xencomm_init(struct xencomm_desc *desc,
++			void *buffer, unsigned long bytes)
++{
++	unsigned long recorded = 0;
++	int i = 0;
 +
-+	/* Each segment in a request is up to an aligned page in size. */
-+	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
-+	blk_queue_max_segment_size(rq, PAGE_SIZE);
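++	/* Record the physical address of each page-sized (or partial-page)
++	 * chunk of the buffer in the descriptor. */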
++	while ((recorded < bytes) && (i < desc->nr_addrs)) {
++		unsigned long vaddr = (unsigned long)buffer + recorded;
++		unsigned long paddr;
++		int offset;
++		int chunksz;
 +
-+	/* Ensure a merged request will fit in a single I/O ring slot. */
-+	blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
-+	blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
++		offset = vaddr % PAGE_SIZE; /* handle partial pages */
++		chunksz = min(PAGE_SIZE - offset, bytes - recorded);
 +
-+	/* Make sure buffer addresses are sector-aligned. */
-+	blk_queue_dma_alignment(rq, 511);
++		paddr = xencomm_vtop(vaddr);
++		if (paddr == ~0UL) {
++			printk("%s: couldn't translate vaddr %lx\n",
++			       __func__, vaddr);
++			return -EINVAL;
++		}
 +
-+	/* Make sure we don't use bounce buffers. */
-+	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
++		desc->address[i++] = paddr;
++		recorded += chunksz;
++	}
 +
-+	gd->queue = rq;
++	if (recorded < bytes) {
++		printk("%s: could only translate %lu of %lu bytes\n",
++		       __func__, recorded, bytes);
++		return -ENOSPC;
++	}
++
++	/* mark remaining addresses invalid (just for safety) */
++	while (i < desc->nr_addrs)
++		desc->address[i++] = XENCOMM_INVALID;
++
++	desc->magic = XENCOMM_MAGIC;
 +
 +	return 0;
 +}
 +
-+static int
-+xlvbd_alloc_gendisk(int minor, blkif_sector_t capacity, int vdevice,
-+		    u16 vdisk_info, u16 sector_size,
-+		    struct blkfront_info *info)
++static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask,
++					  void *buffer, unsigned long bytes)
 +{
-+	struct gendisk *gd;
-+	struct xlbd_major_info *mi;
-+	int nr_minors = 1;
-+	int err = -ENODEV;
-+	unsigned int offset;
++	struct xencomm_desc *desc;
++	unsigned long buffer_ulong = (unsigned long)buffer;
++	unsigned long start = buffer_ulong & PAGE_MASK;
++	unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK;
++	unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT;
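++	/* start/end span whole pages, so nr_addrs counts every page the
++	 * buffer touches, including partial first and last pages. */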
++	unsigned long size = sizeof(*desc) +
++		sizeof(desc->address[0]) * nr_addrs;
++
++	/*
++	 * The slab allocator returns at least a sizeof(void*)-aligned
++	 * pointer. When sizeof(*desc) > sizeof(void*), struct xencomm_desc
++	 * might cross a page boundary.
++	 */
++	if (sizeof(*desc) > sizeof(void*)) {
++		unsigned long order = get_order(size);
++		desc = (struct xencomm_desc *)__get_free_pages(gfp_mask,
++							       order);
++		if (desc == NULL)
++			return NULL;
 +
-+	BUG_ON(info->gd != NULL);
-+	BUG_ON(info->mi != NULL);
-+	BUG_ON(info->rq != NULL);
++		desc->nr_addrs =
++			((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) /
++			sizeof(*desc->address);
++	} else {
++		desc = kmalloc(size, gfp_mask);
++		if (desc == NULL)
++			return NULL;
 +
-+	mi = xlbd_get_major_info(vdevice);
-+	if (mi == NULL)
-+		goto out;
-+	info->mi = mi;
++		desc->nr_addrs = nr_addrs;
++	}
++	return desc;
++}
 +
-+	if ((minor & ((1 << mi->type->partn_shift) - 1)) == 0)
-+		nr_minors = 1 << mi->type->partn_shift;
++void xencomm_free(struct xencomm_handle *desc)
++{
++	if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) {
++		struct xencomm_desc *desc__ = (struct xencomm_desc*)desc;
++		if (sizeof(*desc__) > sizeof(void*)) {
++			unsigned long size = sizeof(*desc__) +
++				sizeof(desc__->address[0]) * desc__->nr_addrs;
++			unsigned long order = get_order(size);
++			free_pages((unsigned long)__va(desc), order);
++		} else
++			kfree(__va(desc));
++	}
++}
 +
-+	gd = alloc_disk(nr_minors);
-+	if (gd == NULL)
-+		goto out;
++static int xencomm_create(void *buffer, unsigned long bytes, struct xencomm_desc **ret, gfp_t gfp_mask)
++{
++	struct xencomm_desc *desc;
++	int rc;
 +
-+	offset =  mi->index * mi->type->disks_per_major +
-+			(minor >> mi->type->partn_shift);
-+	if (nr_minors > 1) {
-+		if (offset < 26) {
-+			sprintf(gd->disk_name, "%s%c",
-+				 mi->type->diskname, 'a' + offset );
-+		}
-+		else {
-+			sprintf(gd->disk_name, "%s%c%c",
-+				mi->type->diskname,
-+				'a' + ((offset/26)-1), 'a' + (offset%26) );
-+		}
-+	}
-+	else {
-+		if (offset < 26) {
-+			sprintf(gd->disk_name, "%s%c%d",
-+				mi->type->diskname,
-+				'a' + offset,
-+				minor & ((1 << mi->type->partn_shift) - 1));
-+		}
-+		else {
-+			sprintf(gd->disk_name, "%s%c%c%d",
-+				mi->type->diskname,
-+				'a' + ((offset/26)-1), 'a' + (offset%26),
-+				minor & ((1 << mi->type->partn_shift) - 1));
-+		}
++	pr_debug("%s: %p[%ld]\n", __func__, buffer, bytes);
++
++	if (bytes == 0) {
++		/* don't create a descriptor; Xen recognizes NULL. */
++		BUG_ON(buffer != NULL);
++		*ret = NULL;
++		return 0;
 +	}
 +
-+	gd->major = mi->major;
-+	gd->first_minor = minor;
-+	gd->fops = &xlvbd_block_fops;
-+	gd->private_data = info;
-+	gd->driverfs_dev = &(info->xbdev->dev);
-+	set_capacity(gd, capacity);
++	BUG_ON(buffer == NULL); /* 'bytes' is non-zero */
 +
-+	if (xlvbd_init_blk_queue(gd, sector_size)) {
-+		del_gendisk(gd);
-+		goto out;
++	desc = xencomm_alloc(gfp_mask, buffer, bytes);
++	if (!desc) {
++		printk("%s failure\n", "xencomm_alloc");
++		return -ENOMEM;
 +	}
 +
-+	info->rq = gd->queue;
-+	info->gd = gd;
++	rc = xencomm_init(desc, buffer, bytes);
++	if (rc) {
++		printk("%s failure: %d\n", "xencomm_init", rc);
++		xencomm_free((struct xencomm_handle *)__pa(desc));
++		return rc;
++	}
 +
-+	if (info->feature_barrier)
-+		xlvbd_barrier(info);
++	*ret = desc;
++	return 0;
++}
 +
-+	if (vdisk_info & VDISK_READONLY)
-+		set_disk_ro(gd, 1);
++/* Check whether an address is physically contiguous: kernel memory outside the VMALLOC region. */
++static int is_phys_contiguous(unsigned long addr)
++{
++	if (!is_kernel_addr(addr))
++		return 0;
 +
-+	if (vdisk_info & VDISK_REMOVABLE)
-+		gd->flags |= GENHD_FL_REMOVABLE;
++	return (addr < VMALLOC_START) || (addr >= VMALLOC_END);
++}
 +
-+	if (vdisk_info & VDISK_CDROM)
-+		gd->flags |= GENHD_FL_CD;
++static struct xencomm_handle *xencomm_create_inline(void *ptr)
++{
++	unsigned long paddr;
 +
-+	return 0;
++	BUG_ON(!is_phys_contiguous((unsigned long)ptr));
 +
-+ out:
-+	if (mi)
-+		xlbd_put_major_info(mi);
-+	info->mi = NULL;
-+	return err;
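++	/* No descriptor is needed: hand Xen the physical address itself,
++	 * tagged with XENCOMM_INLINE_FLAG. */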
++	paddr = (unsigned long)xencomm_pa(ptr);
++	BUG_ON(paddr & XENCOMM_INLINE_FLAG);
++	return (struct xencomm_handle *)(paddr | XENCOMM_INLINE_FLAG);
 +}
 +
-+int
-+xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info,
-+	  u16 sector_size, struct blkfront_info *info)
++/* "mini" routine, for stack-based communications: */
++static int xencomm_create_mini(void *buffer,
++	unsigned long bytes, struct xencomm_mini *xc_desc,
++	struct xencomm_desc **ret)
 +{
-+	struct block_device *bd;
-+	int err = 0;
++	int rc = 0;
++	struct xencomm_desc *desc;
++	BUG_ON(((unsigned long)xc_desc) % sizeof(*xc_desc) != 0);
 +
-+	info->dev = MKDEV(BLKIF_MAJOR(vdevice), BLKIF_MINOR(vdevice));
++	desc = (void *)xc_desc; 
 +
-+	bd = bdget(info->dev);
-+	if (bd == NULL)
-+		return -ENODEV;
++	desc->nr_addrs = XENCOMM_MINI_ADDRS;
 +
-+	err = xlvbd_alloc_gendisk(BLKIF_MINOR(vdevice), capacity, vdevice,
-+				  vdisk_info, sector_size, info);
++	if (!(rc = xencomm_init(desc, buffer, bytes)))
++		*ret = desc;
 +
-+	bdput(bd);
-+	return err;
++	return rc;
 +}
 +
-+void
-+xlvbd_del(struct blkfront_info *info)
++struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes)
 +{
-+	if (info->mi == NULL)
-+		return;
++	int rc;
++	struct xencomm_desc *desc;
 +
-+	BUG_ON(info->gd == NULL);
-+	del_gendisk(info->gd);
-+	put_disk(info->gd);
-+	info->gd = NULL;
++	if (is_phys_contiguous((unsigned long)ptr))
++		return xencomm_create_inline(ptr);
 +
-+	xlbd_put_major_info(info->mi);
-+	info->mi = NULL;
++	rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL);
 +
-+	BUG_ON(info->rq == NULL);
-+	blk_cleanup_queue(info->rq);
-+	info->rq = NULL;
++	if (rc || desc == NULL)
++		return NULL;
++
++	return xencomm_pa(desc);
 +}
 +
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+int
-+xlvbd_barrier(struct blkfront_info *info)
++struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes, 
++			struct xencomm_mini *xc_desc)
 +{
-+	int err;
++	int rc;
++	struct xencomm_desc *desc = NULL;
 +
-+	err = blk_queue_ordered(info->rq,
-+		info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE, NULL);
-+	if (err)
-+		return err;
-+	printk("blkfront: %s: barriers %s\n",
-+	       info->gd->disk_name, info->feature_barrier ? "enabled" : "disabled");
-+	return 0;
-+}
-+#else
-+int
-+xlvbd_barrier(struct blkfront_info *info)
-+{
-+	printk("blkfront: %s: barriers disabled\n", info->gd->disk_name);
-+	return -ENOSYS;
++	if (is_phys_contiguous((unsigned long)ptr))
++		return xencomm_create_inline(ptr);
++
++	rc = xencomm_create_mini(ptr, bytes, xc_desc,
++				&desc);
++
++	if (rc)
++		return NULL;
++ 
++	return xencomm_pa(desc);
 +}
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/blktap/blktap.c tmp-linux-2.6-xen.patch/drivers/xen/blktap/blktap.c
---- pristine-linux-2.6.18.2/drivers/xen/blktap/blktap.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/blktap/blktap.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,1632 @@
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/evtchn/Makefile
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/evtchn/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,2 @@
++
++obj-y	:= evtchn.o
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/evtchn/evtchn.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/evtchn/evtchn.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,560 @@
 +/******************************************************************************
-+ * drivers/xen/blktap/blktap.c
++ * evtchn.c
 + * 
-+ * Back-end driver for user level virtual block devices. This portion of the
-+ * driver exports a 'unified' block-device interface that can be accessed
-+ * by any operating system that implements a compatible front end. Requests
-+ * are remapped to a user-space memory region.
-+ *
-+ * Based on the blkback driver code.
++ * Driver for receiving and demuxing event-channel signals.
++ * 
++ * Copyright (c) 2004-2005, K A Fraser
++ * Multi-process extensions Copyright (c) 2004, Steven Smith
 + * 
-+ * Copyright (c) 2004-2005, Andrew Warfield and Julian Chesterfield
-+ *
-+ * Clean ups and fix ups:
-+ *    Copyright (c) 2006, Steven Rostedt - Red Hat, Inc.
-+ *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License version 2
 + * as published by the Free Software Foundation; or, when distributed
@@ -54345,2614 +90108,2890 @@
 + * IN THE SOFTWARE.
 + */
 +
-+#include <linux/spinlock.h>
-+#include <linux/kthread.h>
-+#include <linux/list.h>
-+#include <asm/hypervisor.h>
-+#include "common.h"
-+#include <xen/balloon.h>
-+#include <xen/driver_util.h>
++#include <linux/module.h>
 +#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/errno.h>
 +#include <linux/fs.h>
-+#include <linux/mm.h>
 +#include <linux/errno.h>
++#include <linux/miscdevice.h>
 +#include <linux/major.h>
-+#include <linux/gfp.h>
++#include <linux/proc_fs.h>
++#include <linux/stat.h>
 +#include <linux/poll.h>
-+#include <asm/tlbflush.h>
++#include <linux/irq.h>
++#include <linux/init.h>
++#include <linux/gfp.h>
++#include <linux/mutex.h>
++#include <linux/cpu.h>
++#include <xen/evtchn.h>
++#include <xen/public/evtchn.h>
 +
-+#define MAX_TAP_DEV 256     /*the maximum number of tapdisk ring devices    */
-+#define MAX_DEV_NAME 100    /*the max tapdisk ring device name e.g. blktap0 */
++struct per_user_data {
++	/* Notification ring, accessed via /dev/xen/evtchn. */
++#define EVTCHN_RING_SIZE     (PAGE_SIZE / sizeof(evtchn_port_t))
++#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
++	evtchn_port_t *ring;
++	unsigned int ring_cons, ring_prod, ring_overflow;
++	struct mutex ring_cons_mutex; /* protect against concurrent readers */
 +
-+/*
-+ * The maximum number of requests that can be outstanding at any time
-+ * is determined by 
-+ *
-+ *   [mmap_alloc * MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST] 
-+ *
-+ * where mmap_alloc < MAX_DYNAMIC_MEM.
-+ *
-+ * TODO:
-+ * mmap_alloc is initialised to 2 and should be adjustable on the fly via
-+ * sysfs.
-+ */
-+#define BLK_RING_SIZE		__RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
-+#define MAX_DYNAMIC_MEM		BLK_RING_SIZE
-+#define MAX_PENDING_REQS	BLK_RING_SIZE
-+#define MMAP_PAGES (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
-+#define MMAP_VADDR(_start, _req,_seg)                                   \
-+        (_start +                                                       \
-+         ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +        \
-+         ((_seg) * PAGE_SIZE))
-+static int blkif_reqs = MAX_PENDING_REQS;
-+static int mmap_pages = MMAP_PAGES;
++	/* Processes wait on this queue when ring is empty. */
++	wait_queue_head_t evtchn_wait;
++	struct fasync_struct *evtchn_async_queue;
 +
-+#define RING_PAGES 1 /* BLKTAP - immediately before the mmap area, we
-+		      * have a bunch of pages reserved for shared
-+		      * memory rings.
-+		      */
++	int bind_cpu;
++	int nr_event_wrong_delivery;
++};
 +
-+/*Data struct handed back to userspace for tapdisk device to VBD mapping*/
-+typedef struct domid_translate {
-+	unsigned short domid;
-+	unsigned short busid;
-+} domid_translate_t ;
++/* Who's bound to each port? */
++static struct per_user_data *port_user[NR_EVENT_CHANNELS];
++static spinlock_t port_user_lock;
 +
-+/*Data struct associated with each of the tapdisk devices*/
-+typedef struct tap_blkif {
-+	struct vm_area_struct *vma;   /*Shared memory area                   */
-+	unsigned long rings_vstart;   /*Kernel memory mapping                */
-+	unsigned long user_vstart;    /*User memory mapping                  */
-+	unsigned long dev_inuse;      /*One process opens device at a time.  */
-+	unsigned long dev_pending;    /*In process of being opened           */
-+	unsigned long ring_ok;        /*make this ring->state                */
-+	blkif_front_ring_t ufe_ring;  /*Rings up to user space.              */
-+	wait_queue_head_t wait;       /*for poll                             */
-+	unsigned long mode;           /*current switching mode               */
-+	int minor;                    /*Minor number for tapdisk device      */
-+	pid_t pid;                    /*tapdisk process id                   */
-+	enum { RUNNING, CLEANSHUTDOWN } status; /*Detect a clean userspace 
-+						  shutdown                   */
-+	unsigned long *idx_map;       /*Record the user ring id to kern 
-+					[req id, idx] tuple                  */
-+	blkif_t *blkif;               /*Associate blkif with tapdev          */
-+	struct domid_translate trans; /*Translation from domid to bus.       */
-+} tap_blkif_t;
++void evtchn_device_upcall(int port)
++{
++	struct per_user_data *u;
 +
-+static struct tap_blkif *tapfds[MAX_TAP_DEV];
-+static int blktap_next_minor;
++	spin_lock(&port_user_lock);
 +
-+static int __init set_blkif_reqs(char *str)
-+{
-+	get_option(&str, &blkif_reqs);
-+	return 1;
++	mask_evtchn(port);
++	clear_evtchn(port);
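++	/* The port stays masked until user space re-enables it; clearing
++	 * the pending bit lets the next notification through. */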
++
++	if ((u = port_user[port]) != NULL) {
++		if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
++			u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port;
++			wmb(); /* Ensure ring contents visible */
++			if (u->ring_cons == u->ring_prod++) {
++				wake_up_interruptible(&u->evtchn_wait);
++				kill_fasync(&u->evtchn_async_queue,
++					    SIGIO, POLL_IN);
++			}
++		} else {
++			u->ring_overflow = 1;
++		}
++	}
++
++	spin_unlock(&port_user_lock);
 +}
-+__setup("blkif_reqs=", set_blkif_reqs);
 +
-+/* Run-time switchable: /sys/module/blktap/parameters/ */
-+static unsigned int log_stats = 0;
-+static unsigned int debug_lvl = 0;
-+module_param(log_stats, int, 0644);
-+module_param(debug_lvl, int, 0644);
++static void evtchn_check_wrong_delivery(struct per_user_data *u)
++{
++	evtchn_port_t port;
++	unsigned int current_cpu = smp_processor_id();
 +
-+/*
-+ * Each outstanding request that we've passed to the lower device layers has a 
-+ * 'pending_req' allocated to it. Each buffer_head that completes decrements 
-+ * the pendcnt towards zero. When it hits zero, the specified domain has a 
-+ * response queued for it, with the saved 'id' passed back.
-+ */
-+typedef struct {
-+	blkif_t       *blkif;
-+	u64            id;
-+	unsigned short mem_idx;
-+	int            nr_pages;
-+	atomic_t       pendcnt;
-+	unsigned short operation;
-+	int            status;
-+	struct list_head free_list;
-+	int            inuse;
-+} pending_req_t;
++	/* Delivered to correct CPU? All is good. */
++	if (u->bind_cpu == current_cpu) {
++		u->nr_event_wrong_delivery = 0;
++		return;
++	}
 +
-+static pending_req_t *pending_reqs[MAX_PENDING_REQS];
-+static struct list_head pending_free;
-+static DEFINE_SPINLOCK(pending_free_lock);
-+static DECLARE_WAIT_QUEUE_HEAD (pending_free_wq);
-+static int alloc_pending_reqs;
++	/* Tolerate up to 100 consecutive misdeliveries. */
++	if (++u->nr_event_wrong_delivery < 100)
++		return;
 +
-+typedef unsigned int PEND_RING_IDX;
++	spin_lock_irq(&port_user_lock);
 +
-+static inline int MASK_PEND_IDX(int i) { 
-+	return (i & (MAX_PENDING_REQS-1));
-+}
++	for (port = 0; port < NR_EVENT_CHANNELS; port++)
++		if (port_user[port] == u)
++			rebind_evtchn_to_cpu(port, current_cpu);
 +
-+static inline unsigned int RTN_PEND_IDX(pending_req_t *req, int idx) {
-+	return (req - pending_reqs[idx]);
++	u->bind_cpu = current_cpu;
++	u->nr_event_wrong_delivery = 0;
++
++	spin_unlock_irq(&port_user_lock);
 +}
 +
-+#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
++static ssize_t evtchn_read(struct file *file, char __user *buf,
++			   size_t count, loff_t *ppos)
++{
++	int rc;
++	unsigned int c, p, bytes1 = 0, bytes2 = 0;
++	struct per_user_data *u = file->private_data;
 +
-+#define BLKBACK_INVALID_HANDLE (~0)
++	/* Whole number of ports. */
++	count &= ~(sizeof(evtchn_port_t)-1);
 +
-+static struct page **foreign_pages[MAX_DYNAMIC_MEM];
-+static inline unsigned long idx_to_kaddr(
-+	unsigned int mmap_idx, unsigned int req_idx, unsigned int sg_idx)
-+{
-+	unsigned int arr_idx = req_idx*BLKIF_MAX_SEGMENTS_PER_REQUEST + sg_idx;
-+	unsigned long pfn = page_to_pfn(foreign_pages[mmap_idx][arr_idx]);
-+	return (unsigned long)pfn_to_kaddr(pfn);
-+}
++	if (count == 0)
++		return 0;
 +
-+static unsigned short mmap_alloc = 0;
-+static unsigned short mmap_lock = 0;
-+static unsigned short mmap_inuse = 0;
++	if (count > PAGE_SIZE)
++		count = PAGE_SIZE;
 +
-+/******************************************************************
-+ * GRANT HANDLES
-+ */
++	for (;;) {
++		mutex_lock(&u->ring_cons_mutex);
 +
-+/* When using grant tables to map a frame for device access then the
-+ * handle returned must be used to unmap the frame. This is needed to
-+ * drop the ref count on the frame.
-+ */
-+struct grant_handle_pair
-+{
-+        grant_handle_t kernel;
-+        grant_handle_t user;
-+};
-+#define INVALID_GRANT_HANDLE	0xFFFF
++		rc = -EFBIG;
++		if (u->ring_overflow)
++			goto unlock_out;
 +
-+static struct grant_handle_pair 
-+    pending_grant_handles[MAX_DYNAMIC_MEM][MMAP_PAGES];
-+#define pending_handle(_id, _idx, _i) \
-+    (pending_grant_handles[_id][((_idx) * BLKIF_MAX_SEGMENTS_PER_REQUEST) \
-+    + (_i)])
++		if ((c = u->ring_cons) != (p = u->ring_prod))
++			break;
 +
++		mutex_unlock(&u->ring_cons_mutex);
 +
-+static int blktap_read_ufe_ring(tap_blkif_t *info); /*local prototypes*/
++		if (file->f_flags & O_NONBLOCK)
++			return -EAGAIN;
 +
-+#define BLKTAP_MINOR 0  /*/dev/xen/blktap has a dynamic major */
-+#define BLKTAP_DEV_DIR  "/dev/xen"
++		rc = wait_event_interruptible(
++			u->evtchn_wait, u->ring_cons != u->ring_prod);
++		if (rc)
++			return rc;
++	}
 +
-+static int blktap_major;
++	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
++	if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
++		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
++			sizeof(evtchn_port_t);
++		bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t);
++	} else {
++		bytes1 = (p - c) * sizeof(evtchn_port_t);
++		bytes2 = 0;
++	}
 +
-+/* blktap IOCTLs: */
-+#define BLKTAP_IOCTL_KICK_FE         1
-+#define BLKTAP_IOCTL_KICK_BE         2 /* currently unused */
-+#define BLKTAP_IOCTL_SETMODE         3
-+#define BLKTAP_IOCTL_SENDPID	     4
-+#define BLKTAP_IOCTL_NEWINTF	     5
-+#define BLKTAP_IOCTL_MINOR	     6
-+#define BLKTAP_IOCTL_MAJOR	     7
-+#define BLKTAP_QUERY_ALLOC_REQS      8
-+#define BLKTAP_IOCTL_FREEINTF        9
-+#define BLKTAP_IOCTL_PRINT_IDXS      100  
++	/* Truncate chunks according to caller's maximum byte count. */
++	if (bytes1 > count) {
++		bytes1 = count;
++		bytes2 = 0;
++	} else if ((bytes1 + bytes2) > count) {
++		bytes2 = count - bytes1;
++	}
 +
-+/* blktap switching modes: (Set with BLKTAP_IOCTL_SETMODE)             */
-+#define BLKTAP_MODE_PASSTHROUGH      0x00000000  /* default            */
-+#define BLKTAP_MODE_INTERCEPT_FE     0x00000001
-+#define BLKTAP_MODE_INTERCEPT_BE     0x00000002  /* unimp.             */
++	rc = -EFAULT;
++	rmb(); /* Ensure that we see the port before we copy it. */
++	if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
++	    ((bytes2 != 0) &&
++	     copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
++		goto unlock_out;
++	
++	evtchn_check_wrong_delivery(u);
 +
-+#define BLKTAP_MODE_INTERPOSE \
-+           (BLKTAP_MODE_INTERCEPT_FE | BLKTAP_MODE_INTERCEPT_BE)
++	u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
++	rc = bytes1 + bytes2;
 +
++ unlock_out:
++	mutex_unlock(&u->ring_cons_mutex);
++	return rc;
++}
 +
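A worked example of the two-chunk split in evtchn_read() above, assuming the usual definitions EVTCHN_RING_SIZE = PAGE_SIZE / sizeof(evtchn_port_t) = 1024 and 4-byte ports: with c = 1020 and p = 1030, (c ^ p) & 1024 is non-zero, so the pending ports straddle the ring wrap. Then bytes1 = (1024 - (1020 & 1023)) * 4 = 16 copies the four ports at the tail of the ring, and bytes2 = (1030 & 1023) * 4 = 24 copies the six that wrapped to the head; ten ports in total, matching p - c.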
-+static inline int BLKTAP_MODE_VALID(unsigned long arg)
++static ssize_t evtchn_write(struct file *file, const char __user *buf,
++			    size_t count, loff_t *ppos)
 +{
-+	return ((arg == BLKTAP_MODE_PASSTHROUGH ) ||
-+		(arg == BLKTAP_MODE_INTERCEPT_FE) ||
-+                (arg == BLKTAP_MODE_INTERPOSE   ));
-+}
++	int rc, i;
++	evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
++	struct per_user_data *u = file->private_data;
 +
-+/* Requests passing through the tap to userspace are re-assigned an ID.
-+ * We must record a mapping between the BE [IDX,ID] tuple and the userspace
-+ * ring ID. 
-+ */
++	if (kbuf == NULL)
++		return -ENOMEM;
 +
-+static inline unsigned long MAKE_ID(domid_t fe_dom, PEND_RING_IDX idx)
-+{
-+        return ((fe_dom << 16) | MASK_PEND_IDX(idx));
++	/* Whole number of ports. */
++	count &= ~(sizeof(evtchn_port_t)-1);
++
++	rc = 0;
++	if (count == 0)
++		goto out;
++
++	if (count > PAGE_SIZE)
++		count = PAGE_SIZE;
++
++	rc = -EFAULT;
++	if (copy_from_user(kbuf, buf, count) != 0)
++		goto out;
++
++	spin_lock_irq(&port_user_lock);
++	for (i = 0; i < (count/sizeof(evtchn_port_t)); i++)
++		if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u))
++			unmask_evtchn(kbuf[i]);
++	spin_unlock_irq(&port_user_lock);
++
++	rc = count;
++
++ out:
++	free_page((unsigned long)kbuf);
++	return rc;
 +}
 +
-+extern inline PEND_RING_IDX ID_TO_IDX(unsigned long id)
++static unsigned int next_bind_cpu(cpumask_t map)
 +{
-+        return (PEND_RING_IDX)(id & 0x0000ffff);
++	static unsigned int bind_cpu;
++	bind_cpu = next_cpu(bind_cpu, map);
++	if (bind_cpu >= NR_CPUS)
++		bind_cpu = first_cpu(map);
++	return bind_cpu;
 +}
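A concrete illustration of next_bind_cpu() above: with CPUs {0, 2, 3} online and the static bind_cpu currently 0, successive calls return 2, then 3, then next_cpu() runs past NR_CPUS and the cursor wraps to first_cpu(map) = 0; a plain round-robin over the online map. The static cursor is safe because every caller in this file already holds port_user_lock.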
 +
-+extern inline int ID_TO_MIDX(unsigned long id)
++static void evtchn_bind_to_user(struct per_user_data *u, int port)
 +{
-+        return (int)(id >> 16);
-+}
++	spin_lock_irq(&port_user_lock);
 +
-+#define INVALID_REQ 0xdead0000
++	BUG_ON(port_user[port] != NULL);
++	port_user[port] = u;
 +
-+/*TODO: Convert to a free list*/
-+static inline int GET_NEXT_REQ(unsigned long *idx_map)
-+{
-+	int i;
-+	for (i = 0; i < MAX_PENDING_REQS; i++)
-+		if (idx_map[i] == INVALID_REQ)
-+			return i;
++	if (u->bind_cpu == -1)
++		u->bind_cpu = next_bind_cpu(cpu_online_map);
 +
-+	return INVALID_REQ;
-+}
++	rebind_evtchn_to_cpu(port, u->bind_cpu);
 +
-+static inline int OFFSET_TO_USR_IDX(int offset)
-+{
-+	return offset / BLKIF_MAX_SEGMENTS_PER_REQUEST;
++	unmask_evtchn(port);
++
++	spin_unlock_irq(&port_user_lock);
 +}
 +
-+static inline int OFFSET_TO_SEG(int offset)
++static long evtchn_ioctl(struct file *file,
++			 unsigned int cmd, unsigned long arg)
 +{
-+	return offset % BLKIF_MAX_SEGMENTS_PER_REQUEST;
-+}
++	int rc;
++	struct per_user_data *u = file->private_data;
++	void __user *uarg = (void __user *) arg;
 +
++	switch (cmd) {
++	case IOCTL_EVTCHN_BIND_VIRQ: {
++		struct ioctl_evtchn_bind_virq bind;
++		struct evtchn_bind_virq bind_virq;
 +
-+#define BLKTAP_INVALID_HANDLE(_g) \
-+    (((_g->kernel) == INVALID_GRANT_HANDLE) &&  \
-+     ((_g->user) == INVALID_GRANT_HANDLE))
++		rc = -EFAULT;
++		if (copy_from_user(&bind, uarg, sizeof(bind)))
++			break;
 +
-+#define BLKTAP_INVALIDATE_HANDLE(_g) do {       \
-+    (_g)->kernel = INVALID_GRANT_HANDLE; (_g)->user = INVALID_GRANT_HANDLE; \
-+    } while(0)
++		bind_virq.virq = bind.virq;
++		bind_virq.vcpu = 0;
++		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
++						 &bind_virq);
++		if (rc != 0)
++			break;
 +
++		rc = bind_virq.port;
++		evtchn_bind_to_user(u, rc);
++		break;
++	}
 +
-+/******************************************************************
-+ * BLKTAP VM OPS
-+ */
++	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
++		struct ioctl_evtchn_bind_interdomain bind;
++		struct evtchn_bind_interdomain bind_interdomain;
 +
-+static struct page *blktap_nopage(struct vm_area_struct *vma,
-+				  unsigned long address,
-+				  int *type)
-+{
-+	/*
-+	 * if the page has not been mapped in by the driver then return
-+	 * NOPAGE_SIGBUS to the domain.
-+	 */
++		rc = -EFAULT;
++		if (copy_from_user(&bind, uarg, sizeof(bind)))
++			break;
 +
-+	return NOPAGE_SIGBUS;
-+}
++		bind_interdomain.remote_dom  = bind.remote_domain;
++		bind_interdomain.remote_port = bind.remote_port;
++		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
++						 &bind_interdomain);
++		if (rc != 0)
++			break;
 +
-+static pte_t blktap_clear_pte(struct vm_area_struct *vma,
-+			      unsigned long uvaddr,
-+			      pte_t *ptep, int is_fullmm)
-+{
-+	pte_t copy;
-+	tap_blkif_t *info;
-+	int offset, seg, usr_idx, pending_idx, mmap_idx;
-+	unsigned long uvstart = vma->vm_start + (RING_PAGES << PAGE_SHIFT);
-+	unsigned long kvaddr;
-+	struct page **map;
-+	struct page *pg;
-+	struct grant_handle_pair *khandle;
-+	struct gnttab_unmap_grant_ref unmap[2];
-+	int count = 0;
++		rc = bind_interdomain.local_port;
++		evtchn_bind_to_user(u, rc);
++		break;
++	}
 +
-+	/*
-+	 * If the address is before the start of the grant mapped region or
-+	 * if vm_file is NULL (meaning mmap failed and we have nothing to do)
-+	 */
-+	if (uvaddr < uvstart || vma->vm_file == NULL)
-+		return ptep_get_and_clear_full(vma->vm_mm, uvaddr, 
-+					       ptep, is_fullmm);
++	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
++		struct ioctl_evtchn_bind_unbound_port bind;
++		struct evtchn_alloc_unbound alloc_unbound;
 +
-+	info = vma->vm_file->private_data;
-+	map = vma->vm_private_data;
++		rc = -EFAULT;
++		if (copy_from_user(&bind, uarg, sizeof(bind)))
++			break;
 +
-+	/* TODO Should these be changed to if statements? */
-+	BUG_ON(!info);
-+	BUG_ON(!info->idx_map);
-+	BUG_ON(!map);
++		alloc_unbound.dom        = DOMID_SELF;
++		alloc_unbound.remote_dom = bind.remote_domain;
++		rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
++						 &alloc_unbound);
++		if (rc != 0)
++			break;
 +
-+	offset = (int) ((uvaddr - uvstart) >> PAGE_SHIFT);
-+	usr_idx = OFFSET_TO_USR_IDX(offset);
-+	seg = OFFSET_TO_SEG(offset);
++		rc = alloc_unbound.port;
++		evtchn_bind_to_user(u, rc);
++		break;
++	}
++
++	case IOCTL_EVTCHN_UNBIND: {
++		struct ioctl_evtchn_unbind unbind;
++		struct evtchn_close close;
++		int ret;
++
++		rc = -EFAULT;
++		if (copy_from_user(&unbind, uarg, sizeof(unbind)))
++			break;
++
++		rc = -EINVAL;
++		if (unbind.port >= NR_EVENT_CHANNELS)
++			break;
 +
-+	pending_idx = MASK_PEND_IDX(ID_TO_IDX(info->idx_map[usr_idx]));
-+	mmap_idx = ID_TO_MIDX(info->idx_map[usr_idx]);
++		spin_lock_irq(&port_user_lock);
++    
++		rc = -ENOTCONN;
++		if (port_user[unbind.port] != u) {
++			spin_unlock_irq(&port_user_lock);
++			break;
++		}
 +
-+	kvaddr = idx_to_kaddr(mmap_idx, pending_idx, seg);
-+	pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
-+	ClearPageReserved(pg);
-+	map[offset + RING_PAGES] = NULL;
++		port_user[unbind.port] = NULL;
++		mask_evtchn(unbind.port);
++		rebind_evtchn_to_cpu(unbind.port, 0);
 +
-+	khandle = &pending_handle(mmap_idx, pending_idx, seg);
++		spin_unlock_irq(&port_user_lock);
 +
-+	if (khandle->kernel != INVALID_GRANT_HANDLE) {
-+		gnttab_set_unmap_op(&unmap[count], kvaddr, 
-+				    GNTMAP_host_map, khandle->kernel);
-+		count++;
++		close.port = unbind.port;
++		ret = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
++		BUG_ON(ret);
 +
-+		set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT, 
-+				    INVALID_P2M_ENTRY);
++		rc = 0;
++		break;
 +	}
 +
-+	if (khandle->user != INVALID_GRANT_HANDLE) {
-+		BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
++	case IOCTL_EVTCHN_NOTIFY: {
++		struct ioctl_evtchn_notify notify;
 +
-+		copy = *ptep;
-+		gnttab_set_unmap_op(&unmap[count], virt_to_machine(ptep), 
-+				    GNTMAP_host_map 
-+				    | GNTMAP_application_map 
-+				    | GNTMAP_contains_pte,
-+				    khandle->user);
-+		count++;
-+	} else {
-+		BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap));
++		rc = -EFAULT;
++		if (copy_from_user(&notify, uarg, sizeof(notify)))
++			break;
 +
-+		/* USING SHADOW PAGE TABLES. */
-+		copy = ptep_get_and_clear_full(vma->vm_mm, uvaddr, ptep,
-+					       is_fullmm);
++		if (notify.port >= NR_EVENT_CHANNELS) {
++			rc = -EINVAL;
++		} else if (port_user[notify.port] != u) {
++			rc = -ENOTCONN;
++		} else {
++			notify_remote_via_evtchn(notify.port);
++			rc = 0;
++		}
++		break;
 +	}
 +
-+	if (count) {
-+		BLKTAP_INVALIDATE_HANDLE(khandle);
-+		if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
-+					      unmap, count))
-+			BUG();
++	case IOCTL_EVTCHN_RESET: {
++		/* Initialise the ring to empty. Clear errors. */
++		mutex_lock(&u->ring_cons_mutex);
++		spin_lock_irq(&port_user_lock);
++		u->ring_cons = u->ring_prod = u->ring_overflow = 0;
++		spin_unlock_irq(&port_user_lock);
++		mutex_unlock(&u->ring_cons_mutex);
++		rc = 0;
++		break;
 +	}
 +
-+	return copy;
-+}
-+
-+struct vm_operations_struct blktap_vm_ops = {
-+	nopage:   blktap_nopage,
-+	zap_pte:  blktap_clear_pte,
-+};
++	default:
++		rc = -ENOSYS;
++		break;
++	}
 +
-+/******************************************************************
-+ * BLKTAP FILE OPS
-+ */
-+ 
-+/*Function Declarations*/
-+static tap_blkif_t *get_next_free_dev(void);
-+static int blktap_open(struct inode *inode, struct file *filp);
-+static int blktap_release(struct inode *inode, struct file *filp);
-+static int blktap_mmap(struct file *filp, struct vm_area_struct *vma);
-+static int blktap_ioctl(struct inode *inode, struct file *filp,
-+                        unsigned int cmd, unsigned long arg);
-+static unsigned int blktap_poll(struct file *file, poll_table *wait);
++	return rc;
++}
 +
-+static const struct file_operations blktap_fops = {
-+	.owner   = THIS_MODULE,
-+	.poll    = blktap_poll,
-+	.ioctl   = blktap_ioctl,
-+	.open    = blktap_open,
-+	.release = blktap_release,
-+	.mmap    = blktap_mmap,
-+};
++static unsigned int evtchn_poll(struct file *file, poll_table *wait)
++{
++	unsigned int mask = POLLOUT | POLLWRNORM;
++	struct per_user_data *u = file->private_data;
 +
++	poll_wait(file, &u->evtchn_wait, wait);
++	if (u->ring_cons != u->ring_prod)
++		mask |= POLLIN | POLLRDNORM;
++	if (u->ring_overflow)
++		mask = POLLERR;
++	return mask;
++}
 +
-+static tap_blkif_t *get_next_free_dev(void)
++static int evtchn_fasync(int fd, struct file *filp, int on)
 +{
-+	struct class *class;
-+	tap_blkif_t *info;
-+	int minor;
++	struct per_user_data *u = filp->private_data;
++	return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
++}
 +
-+	/*
-+	 * This is called only from the ioctl, which
-+	 * means we should always have interrupts enabled.
-+	 */
-+	BUG_ON(irqs_disabled());
++static int evtchn_open(struct inode *inode, struct file *filp)
++{
++	struct per_user_data *u;
 +
-+	spin_lock_irq(&pending_free_lock);
++	if ((u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL)
++		return -ENOMEM;
 +
-+	/* tapfds[0] is always NULL */
++	memset(u, 0, sizeof(*u));
++	init_waitqueue_head(&u->evtchn_wait);
 +
-+	for (minor = 1; minor < blktap_next_minor; minor++) {
-+		info = tapfds[minor];
-+		/* we could have failed a previous attempt. */
-+		if (!info ||
-+		    ((info->dev_inuse == 0) &&
-+		     (info->dev_pending == 0)) ) {
-+			info->dev_pending = 1;
-+			goto found;
-+		}
++	u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
++	if (u->ring == NULL) {
++		kfree(u);
++		return -ENOMEM;
 +	}
-+	info = NULL;
-+	minor = -1;
-+
-+	/*
-+	 * We didn't find free device. If we can still allocate
-+	 * more, then we grab the next device minor that is
-+	 * available.  This is done while we are still under
-+	 * the protection of the pending_free_lock.
-+	 */
-+	if (blktap_next_minor < MAX_TAP_DEV)
-+		minor = blktap_next_minor++;
-+found:
-+	spin_unlock_irq(&pending_free_lock);
 +
-+	if (!info && minor > 0) {
-+		info = kzalloc(sizeof(*info), GFP_KERNEL);
-+		if (unlikely(!info)) {
-+			/*
-+			 * If we failed here, try to put back
-+			 * the next minor number. But if one
-+			 * was just taken, then we just lose this
-+			 * minor.  We can try to allocate this
-+			 * minor again later.
-+			 */
-+			spin_lock_irq(&pending_free_lock);
-+			if (blktap_next_minor == minor+1)
-+				blktap_next_minor--;
-+			spin_unlock_irq(&pending_free_lock);
-+			goto out;
-+		}
++	mutex_init(&u->ring_cons_mutex);
 +
-+		info->minor = minor;
-+		/*
-+		 * Make sure that we have a minor before others can
-+		 * see us.
-+		 */
-+		wmb();
-+		tapfds[minor] = info;
++	filp->private_data = u;
 +
-+		if ((class = get_xen_class()) != NULL)
-+			class_device_create(class, NULL,
-+					    MKDEV(blktap_major, minor), NULL,
-+					    "blktap%d", minor);
-+	}
++	u->bind_cpu = -1;
 +
-+out:
-+	return info;
++	return 0;
 +}
 +
-+int dom_to_devid(domid_t domid, int xenbus_id, blkif_t *blkif) 
++static int evtchn_release(struct inode *inode, struct file *filp)
 +{
-+	tap_blkif_t *info;
 +	int i;
++	struct per_user_data *u = filp->private_data;
++	struct evtchn_close close;
 +
-+	for (i = 1; i < blktap_next_minor; i++) {
-+		info = tapfds[i];
-+		if ( info &&
-+		     (info->trans.domid == domid) &&
-+		     (info->trans.busid == xenbus_id) ) {
-+			info->blkif = blkif;
-+			info->status = RUNNING;
-+			return i;
-+		}
-+	}
-+	return -1;
-+}
++	spin_lock_irq(&port_user_lock);
 +
-+void signal_tapdisk(int idx) 
-+{
-+	tap_blkif_t *info;
-+	struct task_struct *ptask;
++	free_page((unsigned long)u->ring);
 +
-+	info = tapfds[idx];
-+	if ((idx < 0) || (idx > MAX_TAP_DEV) || !info)
-+		return;
++	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
++		int ret;
++		if (port_user[i] != u)
++			continue;
 +
-+	if (info->pid > 0) {
-+		ptask = find_task_by_pid(info->pid);
-+		if (ptask)
-+			info->status = CLEANSHUTDOWN;
++		port_user[i] = NULL;
++		mask_evtchn(i);
++		rebind_evtchn_to_cpu(i, 0);
++
++		close.port = i;
++		ret = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
++		BUG_ON(ret);
 +	}
-+	info->blkif = NULL;
 +
-+	return;
++	spin_unlock_irq(&port_user_lock);
++
++	kfree(u);
++
++	return 0;
 +}
 +
-+static int blktap_open(struct inode *inode, struct file *filp)
-+{
-+	blkif_sring_t *sring;
-+	int idx = iminor(inode) - BLKTAP_MINOR;
-+	tap_blkif_t *info;
-+	int i;
-+	
-+	/* ctrl device, treat differently */
-+	if (!idx)
-+		return 0;
++static const struct file_operations evtchn_fops = {
++	.owner   = THIS_MODULE,
++	.read    = evtchn_read,
++	.write   = evtchn_write,
++	.unlocked_ioctl = evtchn_ioctl,
++	.poll    = evtchn_poll,
++	.fasync  = evtchn_fasync,
++	.open    = evtchn_open,
++	.release = evtchn_release,
++};
 +
-+	info = tapfds[idx];
++static struct miscdevice evtchn_miscdev = {
++	.minor        = MISC_DYNAMIC_MINOR,
++	.name         = "evtchn",
++	.fops         = &evtchn_fops,
++};
 +
-+	if ((idx < 0) || (idx > MAX_TAP_DEV) || !info) {
-+		WPRINTK("Unable to open device /dev/xen/blktap%d\n",
-+			idx);
-+		return -ENODEV;
++static int __cpuinit evtchn_cpu_notify(struct notifier_block *nfb,
++			unsigned long action, void *hcpu)
++{
++	int hotcpu = (unsigned long)hcpu;
++	cpumask_t map = cpu_online_map;
++	int port, newcpu;
++	struct per_user_data *u;
++
++	switch (action) {
++	case CPU_DOWN_PREPARE:
++		cpu_clear(hotcpu, map);
++		spin_lock_irq(&port_user_lock);
++		for (port = 0; port < NR_EVENT_CHANNELS; port++) {
++			if ((u = port_user[port]) != NULL && 
++			    u->bind_cpu == hotcpu &&
++			    (newcpu = next_bind_cpu(map)) < NR_CPUS) {
++				rebind_evtchn_to_cpu(port, newcpu);
++				u->bind_cpu = newcpu;
++			}
++		}
++		spin_unlock_irq(&port_user_lock);
++		break;
++	default:
++		return NOTIFY_DONE;
 +	}
++	return NOTIFY_OK;
++}
 +
-+	DPRINTK("Opening device /dev/xen/blktap%d\n",idx);
-+	
-+	/*Only one process can access device at a time*/
-+	if (test_and_set_bit(0, &info->dev_inuse))
-+		return -EBUSY;
++static struct notifier_block __cpuinitdata evtchn_cpu_nfb = {
++	.notifier_call = evtchn_cpu_notify
++};
 +
-+	info->dev_pending = 0;
-+	    
-+	/* Allocate the fe ring. */
-+	sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
-+	if (sring == NULL)
-+		goto fail_nomem;
++static int __init evtchn_init(void)
++{
++	int err;
 +
-+	SetPageReserved(virt_to_page(sring));
-+    
-+	SHARED_RING_INIT(sring);
-+	FRONT_RING_INIT(&info->ufe_ring, sring, PAGE_SIZE);
-+	
-+	filp->private_data = info;
-+	info->vma = NULL;
++	if (!is_running_on_xen())
++		return -ENODEV;
 +
-+	info->idx_map = kmalloc(sizeof(unsigned long) * MAX_PENDING_REQS, 
-+				GFP_KERNEL);
-+	
-+	if (info->idx_map == NULL)
-+		goto fail_nomem;
++	spin_lock_init(&port_user_lock);
++	memset(port_user, 0, sizeof(port_user));
 +
-+	if (idx > 0) {
-+		init_waitqueue_head(&info->wait);
-+		for (i = 0; i < MAX_PENDING_REQS; i++) 
-+			info->idx_map[i] = INVALID_REQ;
++	/* Create '/dev/misc/evtchn'. */
++	err = misc_register(&evtchn_miscdev);
++	if (err != 0) {
++		printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
++		return err;
 +	}
 +
-+	DPRINTK("Tap open: device /dev/xen/blktap%d\n",idx);
-+	return 0;
++	register_cpu_notifier(&evtchn_cpu_nfb);
 +
-+ fail_nomem:
-+	return -ENOMEM;
++	printk("Event-channel device installed.\n");
++
++	return 0;
 +}
 +
-+static int blktap_release(struct inode *inode, struct file *filp)
++static void __exit evtchn_cleanup(void)
 +{
-+	tap_blkif_t *info = filp->private_data;
-+	
-+	/* check for control device */
-+	if (!info)
-+		return 0;
++	misc_deregister(&evtchn_miscdev);
++	unregister_cpu_notifier(&evtchn_cpu_nfb);
++}
 +
-+	info->dev_inuse = 0;
-+	DPRINTK("Freeing device [/dev/xen/blktap%d]\n",info->minor);
++module_init(evtchn_init);
++module_exit(evtchn_cleanup);
 +
-+	/* Free the ring page. */
-+	ClearPageReserved(virt_to_page(info->ufe_ring.sring));
-+	free_page((unsigned long) info->ufe_ring.sring);
++MODULE_LICENSE("Dual BSD/GPL");
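To make the read/write protocol above concrete, here is a minimal, hypothetical userspace consumer. It leans only on semantics visible in this file: read() returns whole evtchn_port_t values for ports that fired, and write() re-unmasks the listed ports (see evtchn_write() above). The device node path and the local port typedef are assumptions, and binding a port first via the IOCTL_EVTCHN_BIND_* calls is omitted for brevity; this is a sketch, not part of the patch.

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>

typedef uint32_t evtchn_port_t;	/* assumed 32-bit, matching the kernel ABI */

int main(void)
{
	evtchn_port_t ports[8];
	ssize_t n;
	int i, fd = open("/dev/xen/evtchn", O_RDWR);	/* path depends on udev */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	for (;;) {
		/* Blocks until a bound port fires; the driver rounds the
		 * byte count down to a whole number of ports. */
		n = read(fd, ports, sizeof(ports));
		if (n < 0) {
			perror("read");
			break;
		}
		for (i = 0; i < (int)(n / sizeof(ports[0])); i++)
			printf("port %u fired\n", (unsigned)ports[i]);
		/* Writing the ports back unmasks them again, mirroring
		 * evtchn_write() above. */
		if (write(fd, ports, n) != n) {
			perror("write");
			break;
		}
	}
	close(fd);
	return 0;
}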
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/fbfront/Makefile
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/fbfront/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,2 @@
++obj-$(CONFIG_XEN_FRAMEBUFFER)	:= xenfb.o
++obj-$(CONFIG_XEN_KEYBOARD)	+= xenkbd.o
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/fbfront/xenfb.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/fbfront/xenfb.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,887 @@
++/*
++ * linux/drivers/video/xenfb.c -- Xen para-virtual frame buffer device
++ *
++ * Copyright (C) 2005-2006 Anthony Liguori <aliguori at us.ibm.com>
++ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru at redhat.com>
++ *
++ *  Based on linux/drivers/video/q40fb.c
++ *
++ *  This file is subject to the terms and conditions of the GNU General Public
++ *  License. See the file COPYING in the main directory of this archive for
++ *  more details.
++ */
 +
-+	/* Clear any active mappings and free foreign map table */
-+	if (info->vma) {
-+		zap_page_range(
-+			info->vma, info->vma->vm_start, 
-+			info->vma->vm_end - info->vma->vm_start, NULL);
++/*
++ * TODO:
++ *
++ * Switch to grant tables when they become capable of dealing with the
++ * frame buffer.
++ */
 +
-+		kfree(info->vma->vm_private_data);
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/fb.h>
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#include <linux/mm.h>
++#include <linux/mutex.h>
++#include <asm/hypervisor.h>
++#include <xen/evtchn.h>
++#include <xen/interface/io/fbif.h>
++#include <xen/interface/io/protocols.h>
++#include <xen/xenbus.h>
++#include <linux/kthread.h>
 +
-+		info->vma = NULL;
-+	}
++struct xenfb_mapping
++{
++	struct list_head	link;
++	struct vm_area_struct	*vma;
++	atomic_t		map_refs;
++	int			faults;
++	struct xenfb_info	*info;
++};
 +
-+	if (info->idx_map) {
-+		kfree(info->idx_map);
-+		info->idx_map = NULL;
-+	}
++struct xenfb_info
++{
++	struct task_struct	*kthread;
++	wait_queue_head_t	wq;
 +
-+	if ( (info->status != CLEANSHUTDOWN) && (info->blkif != NULL) ) {
-+		if (info->blkif->xenblkd != NULL) {
-+			kthread_stop(info->blkif->xenblkd);
-+			info->blkif->xenblkd = NULL;
-+		}
-+		info->status = CLEANSHUTDOWN;
-+	}
++	unsigned char		*fb;
++	struct fb_info		*fb_info;
++	struct timer_list	refresh;
++	int			dirty;
++	int			x1, y1, x2, y2;	/* dirty rectangle,
++						   protected by dirty_lock */
++	spinlock_t		dirty_lock;
++	struct mutex		mm_lock;
++	int			nr_pages;
++	struct page		**pages;
++	struct list_head	mappings; /* protected by mm_lock */
 +
-+	return 0;
-+}
++	int			irq;
++	struct xenfb_page	*page;
++	unsigned long 		*mfns;
++	int			update_wanted; /* XENFB_TYPE_UPDATE wanted */
++	int			feature_resize; /* Backend has resize feature */
++	struct xenfb_resize	resize;
++	int			resize_dpy;
++	spinlock_t		resize_lock;
 +
++	struct xenbus_device	*xbdev;
++};
 +
-+/* Note on mmap:
-+ * We need to map pages to user space in a way that will allow the block
-+ * subsystem set up direct IO to them.  This couldn't be done before, because
-+ * there isn't really a sane way to translate a user virtual address down to a 
-+ * physical address when the page belongs to another domain.
++/*
++ * There are three locks:
++ *    spinlock resize_lock protecting resize_dpy and resize
++ *    spinlock dirty_lock protecting the dirty rectangle
++ *    mutex mm_lock protecting mappings.
 + *
-+ * My first approach was to map the page in to kernel memory, add an entry
-+ * for it in the physical frame list (using alloc_lomem_region as in blkback)
-+ * and then attempt to map that page up to user space.  This is disallowed
-+ * by xen though, which realizes that we don't really own the machine frame
-+ * underlying the physical page.
++ * How the dirty and mapping locks work together
 + *
-+ * The new approach is to provide explicit support for this in xen linux.
-+ * The VMA now has a flag, VM_FOREIGN, to indicate that it contains pages
-+ * mapped from other vms.  vma->vm_private_data is set up as a mapping 
-+ * from pages to actual page structs.  There is a new clause in get_user_pages
-+ * that does the right thing for this sort of mapping.
++ * The problem is that dirty rectangle and mappings aren't
++ * independent: the dirty rectangle must cover all faulted pages in
++ * mappings.  We need to prove that our locking maintains this
++ * invariant.
++ *
++ * There are several kinds of critical regions:
++ *
++ * 1. Holding only dirty_lock: xenfb_refresh().  May run in
++ *    interrupts.  Extends the dirty rectangle.  Trivially preserves
++ *    invariant.
++ *
++ * 2. Holding only mm_lock: xenfb_mmap() and xenfb_vm_close().  Touch
++ *    only mappings.  The former creates unfaulted pages.  Preserves
++ *    invariant.  The latter removes pages.  Preserves invariant.
++ *
++ * 3. Holding both locks: xenfb_vm_nopage().  Extends the dirty
++ *    rectangle and updates mappings consistently.  Preserves
++ *    invariant.
++ *
++ * 4. The ugliest one: xenfb_update_screen().  Clear the dirty
++ *    rectangle and update mappings consistently.
++ *
++ *    We can't simply hold both locks, because zap_page_range() cannot
++ *    be called with a spinlock held.
++ *
++ *    Therefore, we first clear the dirty rectangle with both locks
++ *    held.  Then we unlock dirty_lock and update the mappings.
++ *    Critical regions that hold only dirty_lock may interfere with
++ *    that.  This can only be region 1: xenfb_refresh().  But that
++ *    just extends the dirty rectangle, which can't harm the
++ *    invariant.
++ *
++ * But FIXME: the invariant is too weak.  It misses that the fault
++ * record in mappings must be consistent with the mapping of pages in
++ * the associated address space!  do_no_page() updates the PTE after
++ * xenfb_vm_nopage() returns, i.e. outside the critical region.  This
++ * allows the following race:
++ *
++ * X writes to some address in the Xen frame buffer
++ * Fault - call do_no_page()
++ *     call xenfb_vm_nopage()
++ *         grab mm_lock
++ *         map->faults++;
++ *         release mm_lock
++ *     return back to do_no_page()
++ * (preempted, or SMP)
++ * Xen worker thread runs.
++ *      grab mm_lock
++ *      look at mappings
++ *          find this mapping, zaps its pages (but page not in pte yet)
++ *          clear map->faults
++ *      releases mm_lock
++ * (back to X process)
++ *     put page in X's pte
++ *
++ * Oh well, we won't be updating the writes to this page anytime soon.
 + */
-+static int blktap_mmap(struct file *filp, struct vm_area_struct *vma)
-+{
-+	int size;
-+	struct page **map;
-+	int i;
-+	tap_blkif_t *info = filp->private_data;
++#define MB_ (1024*1024)
++#define XENFB_DEFAULT_FB_LEN (XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8)
++
++enum {KPARAM_MEM, KPARAM_WIDTH, KPARAM_HEIGHT, KPARAM_CNT};
++static int video[KPARAM_CNT] = {2, XENFB_WIDTH, XENFB_HEIGHT};
++module_param_array(video, int, NULL, 0);
++MODULE_PARM_DESC(video,
++		"Size of video memory in MB and width,height in pixels, default = (2,800,600)");
 +
-+	if (info == NULL) {
-+		WPRINTK("blktap: mmap, retrieving idx failed\n");
-+		return -ENOMEM;
-+	}
-+	
-+	vma->vm_flags |= VM_RESERVED;
-+	vma->vm_ops = &blktap_vm_ops;
++static int xenfb_fps = 20;
 +
-+	size = vma->vm_end - vma->vm_start;
-+	if (size != ((mmap_pages + RING_PAGES) << PAGE_SHIFT)) {
-+		WPRINTK("you _must_ map exactly %d pages!\n",
-+		       mmap_pages + RING_PAGES);
-+		return -EAGAIN;
-+	}
++static int xenfb_remove(struct xenbus_device *);
++static void xenfb_init_shared_page(struct xenfb_info *, struct fb_info *);
++static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
++static void xenfb_disconnect_backend(struct xenfb_info *);
 +
-+	size >>= PAGE_SHIFT;
-+	info->rings_vstart = vma->vm_start;
-+	info->user_vstart  = info->rings_vstart + (RING_PAGES << PAGE_SHIFT);
-+    
-+	/* Map the ring pages to the start of the region and reserve it. */
-+	if (remap_pfn_range(vma, vma->vm_start, 
-+			    __pa(info->ufe_ring.sring) >> PAGE_SHIFT, 
-+			    PAGE_SIZE, vma->vm_page_prot)) {
-+		WPRINTK("Mapping user ring failed!\n");
-+		goto fail;
-+	}
++static void xenfb_send_event(struct xenfb_info *info,
++		union xenfb_out_event *event)
++{
++	__u32 prod;
 +
-+	/* Mark this VM as containing foreign pages, and set up mappings. */
-+	map = kzalloc(((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)
-+		      * sizeof(struct page_struct*),
-+		      GFP_KERNEL);
-+	if (map == NULL) {
-+		WPRINTK("Couldn't alloc VM_FOREIGN map.\n");
-+		goto fail;
-+	}
++	prod = info->page->out_prod;
++	/* caller ensures !xenfb_queue_full() */
++	mb();			/* ensure ring space available */
++	XENFB_OUT_RING_REF(info->page, prod) = *event;
++	wmb();			/* ensure ring contents visible */
++	info->page->out_prod = prod + 1;
 +
-+	for (i = 0; i < ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); i++)
-+		map[i] = NULL;
-+    
-+	vma->vm_private_data = map;
-+	vma->vm_flags |= VM_FOREIGN;
-+	vma->vm_flags |= VM_DONTCOPY;
++	notify_remote_via_irq(info->irq);
++}
 +
-+#ifdef CONFIG_X86
-+	vma->vm_mm->context.has_foreign_mappings = 1;
-+#endif
++static void xenfb_do_update(struct xenfb_info *info,
++			    int x, int y, int w, int h)
++{
++	union xenfb_out_event event;
 +
-+	info->vma = vma;
-+	info->ring_ok = 1;
-+	return 0;
-+ fail:
-+	/* Clear any active mappings. */
-+	zap_page_range(vma, vma->vm_start, 
-+		       vma->vm_end - vma->vm_start, NULL);
++	memset(&event, 0, sizeof(event));
++	event.type = XENFB_TYPE_UPDATE;
++	event.update.x = x;
++	event.update.y = y;
++	event.update.width = w;
++	event.update.height = h;
 +
-+	return -ENOMEM;
++	/* caller ensures !xenfb_queue_full() */
++	xenfb_send_event(info, &event);
 +}
 +
-+
-+static int blktap_ioctl(struct inode *inode, struct file *filp,
-+                        unsigned int cmd, unsigned long arg)
++static void xenfb_do_resize(struct xenfb_info *info)
 +{
-+	tap_blkif_t *info = filp->private_data;
++	union xenfb_out_event event;
 +
-+	switch(cmd) {
-+	case BLKTAP_IOCTL_KICK_FE: 
-+	{
-+		/* There are fe messages to process. */
-+		return blktap_read_ufe_ring(info);
-+	}
-+	case BLKTAP_IOCTL_SETMODE:
-+	{
-+		if (info) {
-+			if (BLKTAP_MODE_VALID(arg)) {
-+				info->mode = arg;
-+				/* XXX: may need to flush rings here. */
-+				DPRINTK("blktap: set mode to %lx\n", 
-+				       arg);
-+				return 0;
-+			}
-+		}
-+		return 0;
-+	}
-+	case BLKTAP_IOCTL_PRINT_IDXS:
-+        {
-+		if (info) {
-+			printk("User Rings: \n-----------\n");
-+			printk("UF: rsp_cons: %2d, req_prod_prv: %2d "
-+				"| req_prod: %2d, rsp_prod: %2d\n",
-+				info->ufe_ring.rsp_cons,
-+				info->ufe_ring.req_prod_pvt,
-+				info->ufe_ring.sring->req_prod,
-+				info->ufe_ring.sring->rsp_prod);
-+		}
-+            	return 0;
-+        }
-+	case BLKTAP_IOCTL_SENDPID:
-+	{
-+		if (info) {
-+			info->pid = (pid_t)arg;
-+			DPRINTK("blktap: pid received %d\n", 
-+			       info->pid);
-+		}
-+		return 0;
-+	}
-+	case BLKTAP_IOCTL_NEWINTF:
-+	{		
-+		uint64_t val = (uint64_t)arg;
-+		domid_translate_t *tr = (domid_translate_t *)&val;
++	memset(&event, 0, sizeof(event));
++	event.resize = info->resize;
 +
-+		DPRINTK("NEWINTF Req for domid %d and bus id %d\n", 
-+		       tr->domid, tr->busid);
-+		info = get_next_free_dev();
-+		if (!info) {
-+			WPRINTK("Error initialising /dev/xen/blktap - "
-+				"No more devices\n");
-+			return -1;
-+		}
-+		info->trans.domid = tr->domid;
-+		info->trans.busid = tr->busid;
-+		return info->minor;
-+	}
-+	case BLKTAP_IOCTL_FREEINTF:
-+	{
-+		unsigned long dev = arg;
-+		unsigned long flags;
++	/* caller ensures !xenfb_queue_full() */
++	xenfb_send_event(info, &event);
++}
 +
-+		info = tapfds[dev];
++static int xenfb_queue_full(struct xenfb_info *info)
++{
++	__u32 cons, prod;
 +
-+		if ((dev > MAX_TAP_DEV) || !info)
-+			return 0; /* should this be an error? */
++	prod = info->page->out_prod;
++	cons = info->page->out_cons;
++	return prod - cons == XENFB_OUT_RING_LEN;
++}
 +
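The full test above works because out_prod and out_cons are free-running 32-bit counters rather than pre-masked offsets, so unsigned subtraction yields the number of queued entries even across the 2^32 wrap. A standalone sketch of that property (RING_LEN is a stand-in for the real XENFB_OUT_RING_LEN from fbif.h):

#include <assert.h>
#include <stdint.h>

#define RING_LEN 32u	/* stand-in value, not the real ring length */

int main(void)
{
	uint32_t cons = 0xfffffff0u;		/* consumer just below the wrap */
	uint32_t prod = cons + RING_LEN;	/* producer index has wrapped */

	/* Unsigned subtraction still counts the queued entries... */
	assert(prod - cons == RING_LEN);	/* ...so the ring reads as full */
	return 0;
}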
-+		spin_lock_irqsave(&pending_free_lock, flags);
-+		if (info->dev_pending)
-+			info->dev_pending = 0;
-+		spin_unlock_irqrestore(&pending_free_lock, flags);
++static void xenfb_update_screen(struct xenfb_info *info)
++{
++	unsigned long flags;
++	int y1, y2, x1, x2;
++	struct xenfb_mapping *map;
 +
-+		return 0;
-+	}
-+	case BLKTAP_IOCTL_MINOR:
-+	{
-+		unsigned long dev = arg;
++	if (!info->update_wanted)
++		return;
++	if (xenfb_queue_full(info))
++		return;
 +
-+		info = tapfds[dev];
++	mutex_lock(&info->mm_lock);
 +
-+		if ((dev > MAX_TAP_DEV) || !info)
-+			return -EINVAL;
++	spin_lock_irqsave(&info->dirty_lock, flags);
++	y1 = info->y1;
++	y2 = info->y2;
++	x1 = info->x1;
++	x2 = info->x2;
++	info->x1 = info->y1 = INT_MAX;
++	info->x2 = info->y2 = 0;
++	spin_unlock_irqrestore(&info->dirty_lock, flags);
 +
-+		return info->minor;
++	list_for_each_entry(map, &info->mappings, link) {
++		if (!map->faults)
++			continue;
++		zap_page_range(map->vma, map->vma->vm_start,
++			       map->vma->vm_end - map->vma->vm_start, NULL);
++		map->faults = 0;
 +	}
-+	case BLKTAP_IOCTL_MAJOR:
-+		return blktap_major;
 +
-+	case BLKTAP_QUERY_ALLOC_REQS:
-+	{
-+		WPRINTK("BLKTAP_QUERY_ALLOC_REQS ioctl: %d/%d\n",
-+		       alloc_pending_reqs, blkif_reqs);
-+		return (alloc_pending_reqs/blkif_reqs) * 100;
-+	}
++	mutex_unlock(&info->mm_lock);
++
++	if (x2 < x1 || y2 < y1) {
++		printk("xenfb_update_screen bogus rect %d %d %d %d\n",
++		       x1, x2, y1, y2);
++		WARN_ON(1);
 +	}
-+	return -ENOIOCTLCMD;
++	xenfb_do_update(info, x1, y1, x2 - x1, y2 - y1);
 +}
 +
-+static unsigned int blktap_poll(struct file *filp, poll_table *wait)
++static void xenfb_handle_resize_dpy(struct xenfb_info *info)
 +{
-+	tap_blkif_t *info = filp->private_data;
-+	
-+	/* do not work on the control device */
-+	if (!info)
-+		return 0;
++	unsigned long flags;
 +
-+	poll_wait(filp, &info->wait, wait);
-+	if (info->ufe_ring.req_prod_pvt != info->ufe_ring.sring->req_prod) {
-+		RING_PUSH_REQUESTS(&info->ufe_ring);
-+		return POLLIN | POLLRDNORM;
++	spin_lock_irqsave(&info->resize_lock, flags);
++	if (info->resize_dpy) {
++		if (!xenfb_queue_full(info)) {
++			info->resize_dpy = 0;
++			xenfb_do_resize(info);
++		}
 +	}
-+	return 0;
++	spin_unlock_irqrestore(&info->resize_lock, flags);
 +}
 +
-+void blktap_kick_user(int idx)
++static int xenfb_thread(void *data)
 +{
-+	tap_blkif_t *info;
-+
-+	info = tapfds[idx];
-+
-+	if ((idx < 0) || (idx > MAX_TAP_DEV) || !info)
-+		return;
-+
-+	wake_up_interruptible(&info->wait);
++	struct xenfb_info *info = data;
 +
-+	return;
++	while (!kthread_should_stop()) {
++		xenfb_handle_resize_dpy(info);
++		if (info->dirty) {
++			info->dirty = 0;
++			xenfb_update_screen(info);
++		}
++		wait_event_interruptible(info->wq,
++			kthread_should_stop() || info->dirty);
++		try_to_freeze();
++	}
++	return 0;
 +}
 +
-+static int do_block_io_op(blkif_t *blkif);
-+static void dispatch_rw_block_io(blkif_t *blkif,
-+				 blkif_request_t *req,
-+				 pending_req_t *pending_req);
-+static void make_response(blkif_t *blkif, u64 id,
-+                          unsigned short op, int st);
-+
-+/******************************************************************
-+ * misc small helpers
-+ */
-+static int req_increase(void)
++static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
++			   unsigned blue, unsigned transp,
++			   struct fb_info *info)
 +{
-+	int i, j;
-+
-+	if (mmap_alloc >= MAX_PENDING_REQS || mmap_lock) 
-+		return -EINVAL;
++	u32 v;
 +
-+	pending_reqs[mmap_alloc]  = kzalloc(sizeof(pending_req_t)
-+					    * blkif_reqs, GFP_KERNEL);
-+	foreign_pages[mmap_alloc] = alloc_empty_pages_and_pagevec(mmap_pages);
++	if (regno > info->cmap.len)
++		return 1;
 +
-+	if (!pending_reqs[mmap_alloc] || !foreign_pages[mmap_alloc])
-+		goto out_of_memory;
++	red   >>= (16 - info->var.red.length);
++	green >>= (16 - info->var.green.length);
++	blue  >>= (16 - info->var.blue.length);
 +
-+	DPRINTK("%s: reqs=%d, pages=%d\n",
-+		__FUNCTION__, blkif_reqs, mmap_pages);
++	v = (red << info->var.red.offset) |
++	    (green << info->var.green.offset) |
++	    (blue << info->var.blue.offset);
 +
-+	for (i = 0; i < MAX_PENDING_REQS; i++) {
-+		list_add_tail(&pending_reqs[mmap_alloc][i].free_list, 
-+			      &pending_free);
-+		pending_reqs[mmap_alloc][i].mem_idx = mmap_alloc;
-+		for (j = 0; j < BLKIF_MAX_SEGMENTS_PER_REQUEST; j++)
-+			BLKTAP_INVALIDATE_HANDLE(&pending_handle(mmap_alloc, 
-+								 i, j));
++	/* FIXME is this sane?  check against xxxfb_setcolreg()!  */
++	switch (info->var.bits_per_pixel) {
++	case 16:
++	case 24:
++	case 32:
++		((u32 *)info->pseudo_palette)[regno] = v;
++		break;
 +	}
-+
-+	mmap_alloc++;
-+	DPRINTK("# MMAPs increased to %d\n",mmap_alloc);
++	
 +	return 0;
++}
 +
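Worked through with the truecolor layout that xenfb_probe() installs below (fb_bitfield {offset, length}: red {16, 8}, green {8, 8}, blue {0, 8}): a saturated red of 0xffff is shifted right by 16 - 8 = 8 to give 0xff, lands at bit offset 16, and the palette entry becomes v = 0x00ff0000, exactly the XRGB8888 pixel value a 32 bpp frame buffer expects.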
-+ out_of_memory:
-+	free_empty_pages_and_pagevec(foreign_pages[mmap_alloc], mmap_pages);
-+	kfree(pending_reqs[mmap_alloc]);
-+	WPRINTK("%s: out of memory\n", __FUNCTION__);
-+	return -ENOMEM;
++static void xenfb_timer(unsigned long data)
++{
++	struct xenfb_info *info = (struct xenfb_info *)data;
++	wake_up(&info->wq);
 +}
 +
-+static void mmap_req_del(int mmap)
++static void __xenfb_refresh(struct xenfb_info *info,
++			    int x1, int y1, int w, int h)
 +{
-+	BUG_ON(!spin_is_locked(&pending_free_lock));
++	int y2, x2;
 +
-+	kfree(pending_reqs[mmap]);
-+	pending_reqs[mmap] = NULL;
++	y2 = y1 + h;
++	x2 = x1 + w;
 +
-+	free_empty_pages_and_pagevec(foreign_pages[mmap_alloc], mmap_pages);
-+	foreign_pages[mmap] = NULL;
++	if (info->y1 > y1)
++		info->y1 = y1;
++	if (info->y2 < y2)
++		info->y2 = y2;
++	if (info->x1 > x1)
++		info->x1 = x1;
++	if (info->x2 < x2)
++		info->x2 = x2;
++	info->dirty = 1;
 +
-+	mmap_lock = 0;
-+	DPRINTK("# MMAPs decreased to %d\n",mmap_alloc);
-+	mmap_alloc--;
++	if (timer_pending(&info->refresh))
++		return;
++
++	mod_timer(&info->refresh, jiffies + HZ/xenfb_fps);
 +}
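The timer arithmetic above throttles update traffic: with xenfb_fps = 20 the flush is scheduled HZ/20 jiffies out (50 ms regardless of HZ), and the timer_pending() check means refreshes arriving inside that window only grow the dirty rectangle instead of re-arming the timer, so at most roughly 20 coalesced updates per second reach xenfb_thread().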
 +
-+static pending_req_t* alloc_req(void)
++static void xenfb_refresh(struct xenfb_info *info,
++			  int x1, int y1, int w, int h)
 +{
-+	pending_req_t *req = NULL;
 +	unsigned long flags;
 +
-+	spin_lock_irqsave(&pending_free_lock, flags);
-+
-+	if (!list_empty(&pending_free)) {
-+		req = list_entry(pending_free.next, pending_req_t, free_list);
-+		list_del(&req->free_list);
-+	}
++	spin_lock_irqsave(&info->dirty_lock, flags);
++	__xenfb_refresh(info, x1, y1, w, h);
++	spin_unlock_irqrestore(&info->dirty_lock, flags);
++}
 +
-+	if (req) {
-+		req->inuse = 1;
-+		alloc_pending_reqs++;
-+	}
-+	spin_unlock_irqrestore(&pending_free_lock, flags);
++static void xenfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
++{
++	struct xenfb_info *info = p->par;
 +
-+	return req;
++	cfb_fillrect(p, rect);
++	xenfb_refresh(info, rect->dx, rect->dy, rect->width, rect->height);
 +}
 +
-+static void free_req(pending_req_t *req)
++static void xenfb_imageblit(struct fb_info *p, const struct fb_image *image)
 +{
-+	unsigned long flags;
-+	int was_empty;
-+
-+	spin_lock_irqsave(&pending_free_lock, flags);
++	struct xenfb_info *info = p->par;
 +
-+	alloc_pending_reqs--;
-+	req->inuse = 0;
-+	if (mmap_lock && (req->mem_idx == mmap_alloc-1)) {
-+		mmap_inuse--;
-+		if (mmap_inuse == 0) mmap_req_del(mmap_alloc-1);
-+		spin_unlock_irqrestore(&pending_free_lock, flags);
-+		return;
-+	}
-+	was_empty = list_empty(&pending_free);
-+	list_add(&req->free_list, &pending_free);
++	cfb_imageblit(p, image);
++	xenfb_refresh(info, image->dx, image->dy, image->width, image->height);
++}
 +
-+	spin_unlock_irqrestore(&pending_free_lock, flags);
++static void xenfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
++{
++	struct xenfb_info *info = p->par;
 +
-+	if (was_empty)
-+		wake_up(&pending_free_wq);
++	cfb_copyarea(p, area);
++	xenfb_refresh(info, area->dx, area->dy, area->width, area->height);
 +}
 +
-+static void fast_flush_area(pending_req_t *req, int k_idx, int u_idx,
-+			    int tapidx)
++static void xenfb_vm_open(struct vm_area_struct *vma)
 +{
-+	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
-+	unsigned int i, invcount = 0;
-+	struct grant_handle_pair *khandle;
-+	uint64_t ptep;
-+	int ret, mmap_idx;
-+	unsigned long kvaddr, uvaddr;
-+	tap_blkif_t *info;
-+	
++	struct xenfb_mapping *map = vma->vm_private_data;
++	atomic_inc(&map->map_refs);
++}
 +
-+	info = tapfds[tapidx];
++static void xenfb_vm_close(struct vm_area_struct *vma)
++{
++	struct xenfb_mapping *map = vma->vm_private_data;
++	struct xenfb_info *info = map->info;
 +
-+	if ((tapidx < 0) || (tapidx > MAX_TAP_DEV) || !info) {
-+		WPRINTK("fast_flush: Couldn't get info!\n");
-+		return;
++	mutex_lock(&info->mm_lock);
++	if (atomic_dec_and_test(&map->map_refs)) {
++		list_del(&map->link);
++		kfree(map);
 +	}
++	mutex_unlock(&info->mm_lock);
++}
 +
-+	if (info->vma != NULL &&
-+	    xen_feature(XENFEAT_auto_translated_physmap)) {
-+		down_write(&info->vma->vm_mm->mmap_sem);
-+		zap_page_range(info->vma, 
-+			       MMAP_VADDR(info->user_vstart, u_idx, 0), 
-+			       req->nr_pages << PAGE_SHIFT, NULL);
-+		up_write(&info->vma->vm_mm->mmap_sem);
-+		return;
-+	}
++static struct page *xenfb_vm_nopage(struct vm_area_struct *vma,
++				    unsigned long vaddr, int *type)
++{
++	struct xenfb_mapping *map = vma->vm_private_data;
++	struct xenfb_info *info = map->info;
++	int pgnr = (vaddr - vma->vm_start) >> PAGE_SHIFT;
++	unsigned long flags;
++	struct page *page;
++	int y1, y2;
 +
-+	mmap_idx = req->mem_idx;
++	if (pgnr >= info->nr_pages)
++		return NOPAGE_SIGBUS;
 +
-+	for (i = 0; i < req->nr_pages; i++) {
-+		kvaddr = idx_to_kaddr(mmap_idx, k_idx, i);
-+		uvaddr = MMAP_VADDR(info->user_vstart, u_idx, i);
++	mutex_lock(&info->mm_lock);
++	spin_lock_irqsave(&info->dirty_lock, flags);
++	page = info->pages[pgnr];
++	get_page(page);
++	map->faults++;
 +
-+		khandle = &pending_handle(mmap_idx, k_idx, i);
++	y1 = pgnr * PAGE_SIZE / info->fb_info->fix.line_length;
++	y2 = (pgnr * PAGE_SIZE + PAGE_SIZE - 1) / info->fb_info->fix.line_length;
++	if (y2 > info->fb_info->var.yres)
++		y2 = info->fb_info->var.yres;
++	__xenfb_refresh(info, 0, y1, info->fb_info->var.xres, y2 - y1);
++	spin_unlock_irqrestore(&info->dirty_lock, flags);
++	mutex_unlock(&info->mm_lock);
 +
-+		if (khandle->kernel != INVALID_GRANT_HANDLE) {
-+			gnttab_set_unmap_op(&unmap[invcount],
-+					    idx_to_kaddr(mmap_idx, k_idx, i),
-+					    GNTMAP_host_map, khandle->kernel);
-+			invcount++;
++	if (type)
++		*type = VM_FAULT_MINOR;
 +
-+			set_phys_to_machine(
-+				__pa(idx_to_kaddr(mmap_idx, k_idx, i))
-+				>> PAGE_SHIFT, INVALID_P2M_ENTRY);
-+		}
++	return page;
++}
 +
-+		if (khandle->user != INVALID_GRANT_HANDLE) {
-+			BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
-+			if (create_lookup_pte_addr(
-+				info->vma->vm_mm,
-+				MMAP_VADDR(info->user_vstart, u_idx, i),
-+				&ptep) !=0) {
-+				WPRINTK("Couldn't get a pte addr!\n");
-+				return;
-+			}
++static struct vm_operations_struct xenfb_vm_ops = {
++	.open	= xenfb_vm_open,
++	.close	= xenfb_vm_close,
++	.nopage	= xenfb_vm_nopage,
++};
 +
-+			gnttab_set_unmap_op(&unmap[invcount], ptep,
-+					    GNTMAP_host_map
-+					    | GNTMAP_application_map
-+					    | GNTMAP_contains_pte,
-+					    khandle->user);
-+			invcount++;
-+		}
++static int xenfb_mmap(struct fb_info *fb_info, struct vm_area_struct *vma)
++{
++	struct xenfb_info *info = fb_info->par;
++	struct xenfb_mapping *map;
++	int map_pages;
 +
-+		BLKTAP_INVALIDATE_HANDLE(khandle);
-+	}
-+	ret = HYPERVISOR_grant_table_op(
-+		GNTTABOP_unmap_grant_ref, unmap, invcount);
-+	BUG_ON(ret);
-+	
-+	if (info->vma != NULL && !xen_feature(XENFEAT_auto_translated_physmap))
-+		zap_page_range(info->vma, 
-+			       MMAP_VADDR(info->user_vstart, u_idx, 0), 
-+			       req->nr_pages << PAGE_SHIFT, NULL);
-+}
++	if (!(vma->vm_flags & VM_WRITE))
++		return -EINVAL;
++	if (!(vma->vm_flags & VM_SHARED))
++		return -EINVAL;
++	if (vma->vm_pgoff != 0)
++		return -EINVAL;
 +
-+/******************************************************************
-+ * SCHEDULER FUNCTIONS
-+ */
++	map_pages = (vma->vm_end - vma->vm_start + PAGE_SIZE-1) >> PAGE_SHIFT;
++	if (map_pages > info->nr_pages)
++		return -EINVAL;
 +
-+static void print_stats(blkif_t *blkif)
-+{
-+	printk(KERN_DEBUG "%s: oo %3d  |  rd %4d  |  wr %4d\n",
-+	       current->comm, blkif->st_oo_req,
-+	       blkif->st_rd_req, blkif->st_wr_req);
-+	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
-+	blkif->st_rd_req = 0;
-+	blkif->st_wr_req = 0;
-+	blkif->st_oo_req = 0;
-+}
++	map = kzalloc(sizeof(*map), GFP_KERNEL);
++	if (map == NULL)
++		return -ENOMEM;
 +
-+int tap_blkif_schedule(void *arg)
-+{
-+	blkif_t *blkif = arg;
++	map->vma = vma;
++	map->faults = 0;
++	map->info = info;
++	atomic_set(&map->map_refs, 1);
 +
-+	blkif_get(blkif);
++	mutex_lock(&info->mm_lock);
++	list_add(&map->link, &info->mappings);
++	mutex_unlock(&info->mm_lock);
 +
-+	if (debug_lvl)
-+		printk(KERN_DEBUG "%s: started\n", current->comm);
++	vma->vm_ops = &xenfb_vm_ops;
++	vma->vm_flags |= (VM_DONTEXPAND | VM_RESERVED);
++	vma->vm_private_data = map;
 +
-+	while (!kthread_should_stop()) {
-+		if (try_to_freeze())
-+			continue;
++	return 0;
++}
 +
-+		wait_event_interruptible(
-+			blkif->wq,
-+			blkif->waiting_reqs || kthread_should_stop());
-+		wait_event_interruptible(
-+			pending_free_wq,
-+			!list_empty(&pending_free) || kthread_should_stop());
++static int
++xenfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
++{
++	struct xenfb_info *xenfb_info;
++	int required_mem_len;
 +
-+		blkif->waiting_reqs = 0;
-+		smp_mb(); /* clear flag *before* checking for work */
++	xenfb_info = info->par;
 +
-+		if (do_block_io_op(blkif))
-+			blkif->waiting_reqs = 1;
++	if (!xenfb_info->feature_resize) {
++		if (var->xres == video[KPARAM_WIDTH] &&
++			var->yres == video[KPARAM_HEIGHT] &&
++			var->bits_per_pixel == xenfb_info->page->depth) {
++			return 0;
++		}
++		return -EINVAL;
++	}
 +
-+		if (log_stats && time_after(jiffies, blkif->st_print))
-+			print_stats(blkif);
++	/* Can't resize past initial width and height */
++	if (var->xres > video[KPARAM_WIDTH] || var->yres > video[KPARAM_HEIGHT])
++		return -EINVAL;
++
++	required_mem_len = var->xres * var->yres * (xenfb_info->page->depth / 8);
++	if (var->bits_per_pixel == xenfb_info->page->depth &&
++		var->xres <= info->fix.line_length / (XENFB_DEPTH / 8) &&
++		required_mem_len <= info->fix.smem_len) {
++		var->xres_virtual = var->xres;
++		var->yres_virtual = var->yres;
++		return 0;
 +	}
++	return -EINVAL;
++}
 +
-+	if (log_stats)
-+		print_stats(blkif);
-+	if (debug_lvl)
-+		printk(KERN_DEBUG "%s: exiting\n", current->comm);
++static int xenfb_set_par(struct fb_info *info)
++{
++	struct xenfb_info *xenfb_info;
++	unsigned long flags;
 +
-+	blkif->xenblkd = NULL;
-+	blkif_put(blkif);
++	xenfb_info = info->par;
 +
++	spin_lock_irqsave(&xenfb_info->resize_lock, flags);
++	xenfb_info->resize.type = XENFB_TYPE_RESIZE;
++	xenfb_info->resize.width = info->var.xres;
++	xenfb_info->resize.height = info->var.yres;
++	xenfb_info->resize.stride = info->fix.line_length;
++	xenfb_info->resize.depth = info->var.bits_per_pixel;
++	xenfb_info->resize.offset = 0;
++	xenfb_info->resize_dpy = 1;
++	spin_unlock_irqrestore(&xenfb_info->resize_lock, flags);
 +	return 0;
 +}
 +
-+/******************************************************************
-+ * COMPLETION CALLBACK -- Called by user level ioctl()
-+ */
++static struct fb_ops xenfb_fb_ops = {
++	.owner		= THIS_MODULE,
++	.fb_setcolreg	= xenfb_setcolreg,
++	.fb_fillrect	= xenfb_fillrect,
++	.fb_copyarea	= xenfb_copyarea,
++	.fb_imageblit	= xenfb_imageblit,
++	.fb_mmap	= xenfb_mmap,
++	.fb_check_var	= xenfb_check_var,
++	.fb_set_par     = xenfb_set_par,
++};
 +
-+static int blktap_read_ufe_ring(tap_blkif_t *info)
++static irqreturn_t xenfb_event_handler(int rq, void *dev_id,
++				       struct pt_regs *regs)
 +{
-+	/* This is called to read responses from the UFE ring. */
-+	RING_IDX i, j, rp;
-+	blkif_response_t *resp;
-+	blkif_t *blkif=NULL;
-+	int pending_idx, usr_idx, mmap_idx;
-+	pending_req_t *pending_req;
-+	
-+	if (!info)
-+		return 0;
++	/*
++	 * No in events recognized, simply ignore them all.
++	 * If you need to recognize some, see xenkbd's input_handler()
++	 * for how to do that.
++	 */
++	struct xenfb_info *info = dev_id;
++	struct xenfb_page *page = info->page;
 +
-+	/* We currently only forward packets in INTERCEPT_FE mode. */
-+	if (!(info->mode & BLKTAP_MODE_INTERCEPT_FE))
-+		return 0;
++	if (page->in_cons != page->in_prod) {
++		info->page->in_cons = info->page->in_prod;
++		notify_remote_via_irq(info->irq);
++	}
++	return IRQ_HANDLED;
++}
 +
-+	/* for each outstanding message on the UFEring  */
-+	rp = info->ufe_ring.sring->rsp_prod;
-+	rmb();
-+        
-+	for (i = info->ufe_ring.rsp_cons; i != rp; i++) {
-+		blkif_response_t res;
-+		resp = RING_GET_RESPONSE(&info->ufe_ring, i);
-+		memcpy(&res, resp, sizeof(res));
-+		mb(); /* rsp_cons read by RING_FULL() in do_block_io_op(). */
-+		++info->ufe_ring.rsp_cons;
++static unsigned long vmalloc_to_mfn(void *address)
++{
++	return pfn_to_mfn(vmalloc_to_pfn(address));
++}
 +
-+		/*retrieve [usr_idx] to [mmap_idx,pending_idx] mapping*/
-+		usr_idx = (int)res.id;
-+		pending_idx = MASK_PEND_IDX(ID_TO_IDX(info->idx_map[usr_idx]));
-+		mmap_idx = ID_TO_MIDX(info->idx_map[usr_idx]);
++static int __devinit xenfb_probe(struct xenbus_device *dev,
++				 const struct xenbus_device_id *id)
++{
++	struct xenfb_info *info;
++	struct fb_info *fb_info;
++	int fb_size;
++	int val;
++	int ret;
 +
-+		if ( (mmap_idx >= mmap_alloc) || 
-+		   (ID_TO_IDX(info->idx_map[usr_idx]) >= MAX_PENDING_REQS) )
-+			WPRINTK("Incorrect req map"
-+			       "[%d], internal map [%d,%d (%d)]\n", 
-+			       usr_idx, mmap_idx, 
-+			       ID_TO_IDX(info->idx_map[usr_idx]),
-+			       MASK_PEND_IDX(
-+				       ID_TO_IDX(info->idx_map[usr_idx])));
++	info = kzalloc(sizeof(*info), GFP_KERNEL);
++	if (info == NULL) {
++		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
++		return -ENOMEM;
++	}
 +
-+		pending_req = &pending_reqs[mmap_idx][pending_idx];
-+		blkif = pending_req->blkif;
++	/* Limit kernel param videoram amount to what is in xenstore */
++	if (xenbus_scanf(XBT_NIL, dev->otherend, "videoram", "%d", &val) == 1) {
++		if (val < video[KPARAM_MEM])
++			video[KPARAM_MEM] = val;
++	}
 +
-+		for (j = 0; j < pending_req->nr_pages; j++) {
++	/* If requested res does not fit in available memory, use default */
++	fb_size = video[KPARAM_MEM] * MB_;
++	if (video[KPARAM_WIDTH] * video[KPARAM_HEIGHT] * XENFB_DEPTH/8 > fb_size) {
++		video[KPARAM_WIDTH] = XENFB_WIDTH;
++		video[KPARAM_HEIGHT] = XENFB_HEIGHT;
++		fb_size = XENFB_DEFAULT_FB_LEN;
++	}
 +
-+			unsigned long kvaddr, uvaddr;
-+			struct page **map = info->vma->vm_private_data;
-+			struct page *pg;
-+			int offset;
++	dev->dev.driver_data = info;
++	info->xbdev = dev;
++	info->irq = -1;
++	info->x1 = info->y1 = INT_MAX;
++	spin_lock_init(&info->dirty_lock);
++	spin_lock_init(&info->resize_lock);
++	mutex_init(&info->mm_lock);
++	init_waitqueue_head(&info->wq);
++	init_timer(&info->refresh);
++	info->refresh.function = xenfb_timer;
++	info->refresh.data = (unsigned long)info;
++	INIT_LIST_HEAD(&info->mappings);
 +
-+			uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, j);
-+			kvaddr = idx_to_kaddr(mmap_idx, pending_idx, j);
++	info->fb = vmalloc(fb_size);
++	if (info->fb == NULL)
++		goto error_nomem;
++	memset(info->fb, 0, fb_size);
 +
-+			pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
-+			ClearPageReserved(pg);
-+			offset = (uvaddr - info->vma->vm_start) 
-+				>> PAGE_SHIFT;
-+			map[offset] = NULL;
-+		}
-+		fast_flush_area(pending_req, pending_idx, usr_idx, info->minor);
-+		info->idx_map[usr_idx] = INVALID_REQ;
-+		make_response(blkif, pending_req->id, res.operation,
-+			      res.status);
-+		blkif_put(pending_req->blkif);
-+		free_req(pending_req);
-+	}
-+		
-+	return 0;
-+}
++	info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 +
++	info->pages = kmalloc(sizeof(struct page *) * info->nr_pages,
++			      GFP_KERNEL);
++	if (info->pages == NULL)
++		goto error_nomem;
 +
-+/******************************************************************************
-+ * NOTIFICATION FROM GUEST OS.
-+ */
++	info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
++	if (!info->mfns)
++		goto error_nomem;
 +
-+static void blkif_notify_work(blkif_t *blkif)
-+{
-+	blkif->waiting_reqs = 1;
-+	wake_up(&blkif->wq);
-+}
++	/* set up shared page */
++	info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
++	if (!info->page)
++		goto error_nomem;
 +
-+irqreturn_t tap_blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
-+{
-+	blkif_notify_work(dev_id);
-+	return IRQ_HANDLED;
-+}
++	fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
++				/* see fishy hackery below */
++	if (fb_info == NULL)
++		goto error_nomem;
 +
++	/* FIXME fishy hackery */
++	fb_info->pseudo_palette = fb_info->par;
++	fb_info->par = info;
++	/* /FIXME */
++	fb_info->screen_base = info->fb;
 +
++	fb_info->fbops = &xenfb_fb_ops;
++	fb_info->var.xres_virtual = fb_info->var.xres = video[KPARAM_WIDTH];
++	fb_info->var.yres_virtual = fb_info->var.yres = video[KPARAM_HEIGHT];
++	fb_info->var.bits_per_pixel = XENFB_DEPTH;
 +
-+/******************************************************************
-+ * DOWNWARD CALLS -- These interface with the block-device layer proper.
-+ */
-+static int print_dbug = 1;
-+static int do_block_io_op(blkif_t *blkif)
-+{
-+	blkif_back_rings_t *blk_rings = &blkif->blk_rings;
-+	blkif_request_t req;
-+	pending_req_t *pending_req;
-+	RING_IDX rc, rp;
-+	int more_to_do = 0;
-+	tap_blkif_t *info;
++	fb_info->var.red = (struct fb_bitfield){16, 8, 0};
++	fb_info->var.green = (struct fb_bitfield){8, 8, 0};
++	fb_info->var.blue = (struct fb_bitfield){0, 8, 0};
 +
-+	rc = blk_rings->common.req_cons;
-+	rp = blk_rings->common.sring->req_prod;
-+	rmb(); /* Ensure we see queued requests up to 'rp'. */
++	fb_info->var.activate = FB_ACTIVATE_NOW;
++	fb_info->var.height = -1;
++	fb_info->var.width = -1;
++	fb_info->var.vmode = FB_VMODE_NONINTERLACED;
 +
-+	/*Check blkif has corresponding UE ring*/
-+	if (blkif->dev_num < 0) {
-+		/*oops*/
-+		if (print_dbug) {
-+			WPRINTK("Corresponding UE " 
-+			       "ring does not exist!\n");
-+			print_dbug = 0; /*We only print this message once*/
-+		}
-+		return 0;
++	fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
++	fb_info->fix.line_length = fb_info->var.xres * (XENFB_DEPTH / 8);
++	fb_info->fix.smem_start = 0;
++	fb_info->fix.smem_len = fb_size;
++	strcpy(fb_info->fix.id, "xen");
++	fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
++	fb_info->fix.accel = FB_ACCEL_NONE;
++
++	fb_info->flags = FBINFO_FLAG_DEFAULT;
++
++	ret = fb_alloc_cmap(&fb_info->cmap, 256, 0);
++	if (ret < 0) {
++		framebuffer_release(fb_info);
++		xenbus_dev_fatal(dev, ret, "fb_alloc_cmap");
++		goto error;
 +	}
 +
-+	info = tapfds[blkif->dev_num];
++	xenfb_init_shared_page(info, fb_info);
 +
-+	if (blkif->dev_num > MAX_TAP_DEV || !info || !info->dev_inuse) {
-+		if (print_dbug) {
-+			WPRINTK("Can't get UE info!\n");
-+			print_dbug = 0;
-+		}
-+		return 0;
++	ret = register_framebuffer(fb_info);
++	if (ret) {
++		fb_dealloc_cmap(&fb_info->cmap);
++		framebuffer_release(fb_info);
++		xenbus_dev_fatal(dev, ret, "register_framebuffer");
++		goto error;
 +	}
++	info->fb_info = fb_info;
 +
-+	while (rc != rp) {
-+		
-+		if (RING_FULL(&info->ufe_ring)) {
-+			WPRINTK("RING_FULL! More to do\n");
-+			more_to_do = 1;
-+			break;
-+		}
++	/* FIXME should this be delayed until backend XenbusStateConnected? */
++	info->kthread = kthread_run(xenfb_thread, info, "xenfb thread");
++	if (IS_ERR(info->kthread)) {
++		ret = PTR_ERR(info->kthread);
++		info->kthread = NULL;
++		xenbus_dev_fatal(dev, ret, "kthread_run");
++		goto error;
++	}
 +
-+		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc)) {
-+			WPRINTK("RING_REQUEST_CONS_OVERFLOW!"
-+			       " More to do\n");
-+			more_to_do = 1;
-+			break;		
-+		}
++	ret = xenfb_connect_backend(dev, info);
++	if (ret < 0)
++		goto error;
 +
-+		pending_req = alloc_req();
-+		if (NULL == pending_req) {
-+			blkif->st_oo_req++;
-+			more_to_do = 1;
-+			break;
-+		}
++	return 0;
 +
-+		switch (blkif->blk_protocol) {
-+		case BLKIF_PROTOCOL_NATIVE:
-+			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc),
-+			       sizeof(req));
-+			break;
-+		case BLKIF_PROTOCOL_X86_32:
-+			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
-+			break;
-+		case BLKIF_PROTOCOL_X86_64:
-+			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
-+			break;
-+		default:
-+			BUG();
-+		}
-+		blk_rings->common.req_cons = ++rc; /* before make_response() */
++ error_nomem:
++	ret = -ENOMEM;
++	xenbus_dev_fatal(dev, ret, "allocating device memory");
++ error:
++	xenfb_remove(dev);
++	return ret;
++}
 +
-+		switch (req.operation) {
-+		case BLKIF_OP_READ:
-+			blkif->st_rd_req++;
-+			dispatch_rw_block_io(blkif, &req, pending_req);
-+			break;
++static int xenfb_resume(struct xenbus_device *dev)
++{
++	struct xenfb_info *info = dev->dev.driver_data;
 +
-+		case BLKIF_OP_WRITE:
-+			blkif->st_wr_req++;
-+			dispatch_rw_block_io(blkif, &req, pending_req);
-+			break;
++	xenfb_disconnect_backend(info);
++	xenfb_init_shared_page(info, info->fb_info);
++	return xenfb_connect_backend(dev, info);
++}
 +
-+		default:
-+			WPRINTK("unknown operation [%d]\n",
-+				req.operation);
-+			make_response(blkif, req.id, req.operation,
-+				      BLKIF_RSP_ERROR);
-+			free_req(pending_req);
-+			break;
-+		}
++static int xenfb_remove(struct xenbus_device *dev)
++{
++	struct xenfb_info *info = dev->dev.driver_data;
++
++	del_timer(&info->refresh);
++	if (info->kthread)
++		kthread_stop(info->kthread);
++	xenfb_disconnect_backend(info);
++	if (info->fb_info) {
++		unregister_framebuffer(info->fb_info);
++		fb_dealloc_cmap(&info->fb_info->cmap);
++		framebuffer_release(info->fb_info);
 +	}
-+		
-+	blktap_kick_user(blkif->dev_num);
++	free_page((unsigned long)info->page);
++	vfree(info->mfns);
++	kfree(info->pages);
++	vfree(info->fb);
++	kfree(info);
 +
-+	return more_to_do;
++	return 0;
 +}
 +
-+static void dispatch_rw_block_io(blkif_t *blkif,
-+				 blkif_request_t *req,
-+				 pending_req_t *pending_req)
++static void xenfb_init_shared_page(struct xenfb_info *info,
++                                   struct fb_info * fb_info)
 +{
-+	extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
-+	int op, operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
-+	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
-+	unsigned int nseg;
-+	int ret, i, nr_sects = 0;
-+	tap_blkif_t *info;
-+	blkif_request_t *target;
-+	int pending_idx = RTN_PEND_IDX(pending_req,pending_req->mem_idx);
-+	int usr_idx;
-+	uint16_t mmap_idx = pending_req->mem_idx;
++	int i;
++	int epd = PAGE_SIZE / sizeof(info->mfns[0]);
 +
-+	if (blkif->dev_num < 0 || blkif->dev_num > MAX_TAP_DEV)
-+		goto fail_response;
++	for (i = 0; i < info->nr_pages; i++)
++		info->pages[i] = vmalloc_to_page(info->fb + i * PAGE_SIZE);
 +
-+	info = tapfds[blkif->dev_num];
-+	if (info == NULL)
-+		goto fail_response;
++	for (i = 0; i < info->nr_pages; i++)
++		info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);
 +
-+	/* Check we have space on user ring - should never fail. */
-+	usr_idx = GET_NEXT_REQ(info->idx_map);
-+	if (usr_idx == INVALID_REQ) {
-+		BUG();
-+		goto fail_response;
++	for (i = 0; i * epd < info->nr_pages; i++)
++		info->page->pd[i] = vmalloc_to_mfn(&info->mfns[i * epd]);
++
++	info->page->width = fb_info->var.xres;
++	info->page->height = fb_info->var.yres;
++	info->page->depth = fb_info->var.bits_per_pixel;
++	info->page->line_length = fb_info->fix.line_length;
++	info->page->mem_length = fb_info->fix.smem_len;
++	info->page->in_cons = info->page->in_prod = 0;
++	info->page->out_cons = info->page->out_prod = 0;
++}
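
The shared page initialised above is a two-level directory: page->pd[]
holds the machine frames of the pages backing the info->mfns[] array, and
each of those pages in turn holds PAGE_SIZE divided by the size of one mfn
entry (assumed unsigned long here) frame numbers of actual framebuffer
pages, which is what `epd` counts. A stand-alone sketch of the sizing
arithmetic (the width/height/depth defaults are illustrative assumptions;
the real values come from the video[] module parameters):

    #include <stdio.h>

    #define PAGE_SZ   4096UL
    #define FB_WIDTH   800UL	/* assumed defaults, cf. KPARAM_WIDTH etc. */
    #define FB_HEIGHT  600UL
    #define FB_DEPTH    32UL

    int main(void)
    {
    	unsigned long fb_size  = FB_WIDTH * FB_HEIGHT * (FB_DEPTH / 8);
    	unsigned long nr_pages = (fb_size + PAGE_SZ - 1) / PAGE_SZ;
    	/* entries per page-directory page, as in xenfb_init_shared_page() */
    	unsigned long epd      = PAGE_SZ / sizeof(unsigned long);
    	unsigned long nr_pd    = (nr_pages + epd - 1) / epd;

    	printf("%lu byte fb -> %lu pages -> %lu pd entries\n",
    	       fb_size, nr_pages, nr_pd);
    	return 0;
    }
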
++
++static int xenfb_connect_backend(struct xenbus_device *dev,
++				 struct xenfb_info *info)
++{
++	int ret;
++	struct xenbus_transaction xbt;
++
++	ret = bind_listening_port_to_irqhandler(
++		dev->otherend_id, xenfb_event_handler, 0, "xenfb", info);
++	if (ret < 0) {
++		xenbus_dev_fatal(dev, ret,
++				 "bind_listening_port_to_irqhandler");
++		return ret;
 +	}
++	info->irq = ret;
 +
-+	/* Check that number of segments is sane. */
-+	nseg = req->nr_segments;
-+	if ( unlikely(nseg == 0) || 
-+	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) ) {
-+		WPRINTK("Bad number of segments in request (%d)\n", nseg);
-+		goto fail_response;
++ again:
++	ret = xenbus_transaction_start(&xbt);
++	if (ret) {
++		xenbus_dev_fatal(dev, ret, "starting transaction");
++		return ret;
 +	}
-+	
-+	/* Make sure userspace is ready. */
-+	if (!info->ring_ok) {
-+		WPRINTK("blktap: ring not ready for requests!\n");
-+		goto fail_response;
++	ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
++			    virt_to_mfn(info->page));
++	if (ret)
++		goto error_xenbus;
++	ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
++			    irq_to_evtchn_port(info->irq));
++	if (ret)
++		goto error_xenbus;
++	ret = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
++			    XEN_IO_PROTO_ABI_NATIVE);
++	if (ret)
++		goto error_xenbus;
++	ret = xenbus_printf(xbt, dev->nodename, "feature-update", "1");
++	if (ret)
++		goto error_xenbus;
++	ret = xenbus_transaction_end(xbt, 0);
++	if (ret) {
++		if (ret == -EAGAIN)
++			goto again;
++		xenbus_dev_fatal(dev, ret, "completing transaction");
++		return ret;
 +	}
 +
-+	if (RING_FULL(&info->ufe_ring)) {
-+		WPRINTK("blktap: fe_ring is full, can't add "
-+			"IO Request will be dropped. %d %d\n",
-+			RING_SIZE(&info->ufe_ring),
-+			RING_SIZE(&blkif->blk_rings.common));
-+		goto fail_response;
-+	}
++	xenbus_switch_state(dev, XenbusStateInitialised);
++	return 0;
 +
-+	pending_req->blkif     = blkif;
-+	pending_req->id        = req->id;
-+	pending_req->operation = operation;
-+	pending_req->status    = BLKIF_RSP_OKAY;
-+	pending_req->nr_pages  = nseg;
-+	op = 0;
-+	for (i = 0; i < nseg; i++) {
-+		unsigned long uvaddr;
-+		unsigned long kvaddr;
-+		uint64_t ptep;
-+		uint32_t flags;
++ error_xenbus:
++	xenbus_transaction_end(xbt, 1);
++	xenbus_dev_fatal(dev, ret, "writing xenstore");
++	return ret;
++}
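
xenfb_connect_backend() above (and xenkbd_connect_backend() below) follow
the standard xenbus transaction idiom: start, write the keys, end, and
jump back to `again:` when ending fails with -EAGAIN because the store
changed under the transaction. Reduced to its control flow (the xbt_*
stubs are illustrative stand-ins for the real xenbus calls and always
succeed here):

    #include <errno.h>
    #include <stdio.h>

    /* Stubs standing in for xenbus_transaction_start/printf/end. */
    static int xbt_start(void)      { return 0; }
    static int xbt_write_keys(void) { return 0; }
    static int xbt_end(int abort)   { (void) abort; return 0; }

    static int connect_backend(void)
    {
    	int ret;
    again:
    	ret = xbt_start();
    	if (ret)
    		return ret;		/* "starting transaction" */
    	ret = xbt_write_keys();
    	if (ret) {
    		xbt_end(1);		/* abort -> "writing xenstore" */
    		return ret;
    	}
    	ret = xbt_end(0);
    	if (ret == -EAGAIN)
    		goto again;		/* store changed; retry from the top */
    	return ret;			/* 0, or "completing transaction" */
    }

    int main(void)
    {
    	printf("connect_backend() = %d\n", connect_backend());
    	return 0;
    }
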
 +
-+		uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i);
-+		kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
++static void xenfb_disconnect_backend(struct xenfb_info *info)
++{
++	if (info->irq >= 0)
++		unbind_from_irqhandler(info->irq, info);
++	info->irq = -1;
++}
 +
-+		flags = GNTMAP_host_map;
-+		if (operation == WRITE)
-+			flags |= GNTMAP_readonly;
-+		gnttab_set_map_op(&map[op], kvaddr, flags,
-+				  req->seg[i].gref, blkif->domid);
-+		op++;
++static void xenfb_backend_changed(struct xenbus_device *dev,
++				  enum xenbus_state backend_state)
++{
++	struct xenfb_info *info = dev->dev.driver_data;
++	int val;
 +
-+		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+			/* Now map it to user. */
-+			ret = create_lookup_pte_addr(info->vma->vm_mm, 
-+						     uvaddr, &ptep);
-+			if (ret) {
-+				WPRINTK("Couldn't get a pte addr!\n");
-+				goto fail_flush;
-+			}
++	switch (backend_state) {
++	case XenbusStateInitialising:
++	case XenbusStateInitialised:
++	case XenbusStateReconfiguring:
++	case XenbusStateReconfigured:
++	case XenbusStateUnknown:
++	case XenbusStateClosed:
++		break;
++
++	case XenbusStateInitWait:
++	InitWait:
++		xenbus_switch_state(dev, XenbusStateConnected);
++		break;
++
++	case XenbusStateConnected:
++		/*
++		 * Work around xenbus race condition: If backend goes
++		 * through InitWait to Connected fast enough, we can
++		 * get Connected twice here.
++		 */
++		if (dev->state != XenbusStateConnected)
++			goto InitWait; /* no InitWait seen yet, fudge it */
 +
-+			flags = GNTMAP_host_map | GNTMAP_application_map
-+				| GNTMAP_contains_pte;
-+			if (operation == WRITE)
-+				flags |= GNTMAP_readonly;
-+			gnttab_set_map_op(&map[op], ptep, flags,
-+					  req->seg[i].gref, blkif->domid);
-+			op++;
-+		}
++		if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
++				 "request-update", "%d", &val) < 0)
++			val = 0;
++		if (val)
++			info->update_wanted = 1;
 +
-+		nr_sects += (req->seg[i].last_sect - 
-+			     req->seg[i].first_sect + 1);
++		if (xenbus_scanf(XBT_NIL, dev->otherend,
++					"feature-resize", "%d", &val) < 0)
++			val = 0;
++		info->feature_resize = val;
++		break;
++
++	case XenbusStateClosing:
++		// FIXME is this safe in any dev->state?
++		xenbus_frontend_closed(dev);
++		break;
 +	}
++}
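
The Connected case above papers over a real ordering problem: the backend
may sprint from InitWait to Connected before this frontend has observed
the intermediate state, so the first event seen here can already be
Connected. A minimal model of that fudge (the states and names are
simplified illustrations, not the real xenbus_state values):

    #include <stdio.h>

    enum state { Initialised, InitWait, Connected };

    static enum state frontend = Initialised;

    static void backend_changed(enum state backend)
    {
    	switch (backend) {
    	case InitWait:
    init_wait:
    		frontend = Connected;	/* xenbus_switch_state() */
    		break;
    	case Connected:
    		if (frontend != Connected)
    			goto init_wait;	/* no InitWait seen yet; fudge it */
    		/* normal Connected handling: read feature flags */
    		break;
    	default:
    		break;
    	}
    }

    int main(void)
    {
    	backend_changed(Connected);	/* backend raced ahead */
    	printf("frontend state = %d\n", (int) frontend);
    	return 0;
    }
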
 +
-+	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, op);
-+	BUG_ON(ret);
++static const struct xenbus_device_id xenfb_ids[] = {
++	{ "vfb" },
++	{ "" }
++};
++MODULE_ALIAS("xen:vfb");
 +
-+	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+		for (i = 0; i < (nseg*2); i+=2) {
-+			unsigned long uvaddr;
-+			unsigned long kvaddr;
-+			unsigned long offset;
-+			struct page *pg;
++static struct xenbus_driver xenfb_driver = {
++	.name = "vfb",
++	.owner = THIS_MODULE,
++	.ids = xenfb_ids,
++	.probe = xenfb_probe,
++	.remove = xenfb_remove,
++	.resume = xenfb_resume,
++	.otherend_changed = xenfb_backend_changed,
++};
 +
-+			uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i/2);
-+			kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i/2);
++static int __init xenfb_init(void)
++{
++	if (!is_running_on_xen())
++		return -ENODEV;
 +
-+			if (unlikely(map[i].status != 0)) {
-+				WPRINTK("invalid kernel buffer -- "
-+					"could not remap it\n");
-+				ret |= 1;
-+				map[i].handle = INVALID_GRANT_HANDLE;
-+			}
++	/* Nothing to do if running in dom0. */
++	if (is_initial_xendomain())
++		return -ENODEV;
 +
-+			if (unlikely(map[i+1].status != 0)) {
-+				WPRINTK("invalid user buffer -- "
-+					"could not remap it\n");
-+				ret |= 1;
-+				map[i+1].handle = INVALID_GRANT_HANDLE;
-+			}
++	return xenbus_register_frontend(&xenfb_driver);
++}
 +
-+			pending_handle(mmap_idx, pending_idx, i/2).kernel 
-+				= map[i].handle;
-+			pending_handle(mmap_idx, pending_idx, i/2).user   
-+				= map[i+1].handle;
++static void __exit xenfb_cleanup(void)
++{
++	return xenbus_unregister_driver(&xenfb_driver);
++}
 +
-+			if (ret)
-+				continue;
++module_init(xenfb_init);
++module_exit(xenfb_cleanup);
 +
-+			set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
-+					    FOREIGN_FRAME(map[i].dev_bus_addr
-+							  >> PAGE_SHIFT));
-+			offset = (uvaddr - info->vma->vm_start) >> PAGE_SHIFT;
-+			pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
-+			((struct page **)info->vma->vm_private_data)[offset] =
-+				pg;
-+		}
-+	} else {
-+		for (i = 0; i < nseg; i++) {
-+			unsigned long uvaddr;
-+			unsigned long kvaddr;
-+			unsigned long offset;
-+			struct page *pg;
++MODULE_LICENSE("GPL");
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/fbfront/xenkbd.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/fbfront/xenkbd.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,354 @@
++/*
++ * linux/drivers/input/keyboard/xenkbd.c -- Xen para-virtual input device
++ *
++ * Copyright (C) 2005 Anthony Liguori <aliguori at us.ibm.com>
++ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru at redhat.com>
++ *
++ *  Based on linux/drivers/input/mouse/sermouse.c
++ *
++ *  This file is subject to the terms and conditions of the GNU General Public
++ *  License. See the file COPYING in the main directory of this archive for
++ *  more details.
++ */
 +
-+			uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i);
-+			kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
++/*
++ * TODO:
++ *
++ * Switch to grant tables together with xenfb.c.
++ */
 +
-+			if (unlikely(map[i].status != 0)) {
-+				WPRINTK("invalid kernel buffer -- "
-+					"could not remap it\n");
-+				ret |= 1;
-+				map[i].handle = INVALID_GRANT_HANDLE;
-+			}
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/module.h>
++#include <linux/input.h>
++#include <asm/hypervisor.h>
++#include <xen/evtchn.h>
++#include <xen/interface/io/fbif.h>
++#include <xen/interface/io/kbdif.h>
++#include <xen/xenbus.h>
 +
-+			pending_handle(mmap_idx, pending_idx, i).kernel 
-+				= map[i].handle;
++struct xenkbd_info
++{
++	struct input_dev *kbd;
++	struct input_dev *ptr;
++	struct xenkbd_page *page;
++	int irq;
++	struct xenbus_device *xbdev;
++	char phys[32];
++};
 +
-+			if (ret)
-+				continue;
++static int xenkbd_remove(struct xenbus_device *);
++static int xenkbd_connect_backend(struct xenbus_device *, struct xenkbd_info *);
++static void xenkbd_disconnect_backend(struct xenkbd_info *);
 +
-+			offset = (uvaddr - info->vma->vm_start) >> PAGE_SHIFT;
-+			pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
-+			((struct page **)info->vma->vm_private_data)[offset] =
-+				pg;
++/*
++ * Note: if you need to send out events, see xenfb_do_update() for how
++ * to do that.
++ */
++
++static irqreturn_t input_handler(int rq, void *dev_id, struct pt_regs *regs)
++{
++	struct xenkbd_info *info = dev_id;
++	struct xenkbd_page *page = info->page;
++	__u32 cons, prod;
++
++	prod = page->in_prod;
++	if (prod == page->in_cons)
++		return IRQ_HANDLED;
++	rmb();			/* ensure we see ring contents up to prod */
++	for (cons = page->in_cons; cons != prod; cons++) {
++		union xenkbd_in_event *event;
++		struct input_dev *dev;
++		event = &XENKBD_IN_RING_REF(page, cons);
++
++		dev = info->ptr;
++		switch (event->type) {
++		case XENKBD_TYPE_MOTION:
++			if (event->motion.rel_z)
++				input_report_rel(dev, REL_WHEEL,
++						 -event->motion.rel_z);
++			input_report_rel(dev, REL_X, event->motion.rel_x);
++			input_report_rel(dev, REL_Y, event->motion.rel_y);
++			break;
++		case XENKBD_TYPE_KEY:
++			dev = NULL;
++			if (test_bit(event->key.keycode, info->kbd->keybit))
++				dev = info->kbd;
++			if (test_bit(event->key.keycode, info->ptr->keybit))
++				dev = info->ptr;
++			if (dev)
++				input_report_key(dev, event->key.keycode,
++						 event->key.pressed);
++			else
++				printk("xenkbd: unhandled keycode 0x%x\n",
++				       event->key.keycode);
++			break;
++		case XENKBD_TYPE_POS:
++			if (event->pos.rel_z)
++				input_report_rel(dev, REL_WHEEL,
++						 -event->pos.rel_z);
++			input_report_abs(dev, ABS_X, event->pos.abs_x);
++			input_report_abs(dev, ABS_Y, event->pos.abs_y);
++			break;
 +		}
++		if (dev)
++			input_sync(dev);
 +	}
++	mb();			/* ensure we got ring contents */
++	page->in_cons = cons;
++	notify_remote_via_irq(info->irq);
 +
-+	if (ret)
-+		goto fail_flush;
++	return IRQ_HANDLED;
++}
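
input_handler() above is the consumer half of a single-producer ring:
read in_prod once, rmb() before looking at the ring contents, consume up
to prod, mb() before publishing the new in_cons, then kick the producer.
The same shape as a stand-alone program (GCC's __sync_synchronize()
stands in for the kernel barriers, and the ring carries plain ints):

    #include <stdio.h>

    #define RING_SIZE 64	/* power of two; indices wrap via masking */

    static int ring[RING_SIZE];
    static volatile unsigned int in_prod, in_cons;

    static void consume_events(void)
    {
    	unsigned int cons, prod = in_prod;

    	if (prod == in_cons)
    		return;
    	__sync_synchronize();	/* rmb(): see contents up to prod */
    	for (cons = in_cons; cons != prod; cons++)
    		printf("event %d\n", ring[cons % RING_SIZE]);
    	__sync_synchronize();	/* mb(): finish reads before freeing slots */
    	in_cons = cons;
    	/* notify_remote_via_irq(info->irq) would go here */
    }

    int main(void)
    {
    	ring[0] = 42;
    	in_prod = 1;		/* pretend the producer queued one event */
    	consume_events();
    	return 0;
    }
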
 +
-+	if (xen_feature(XENFEAT_auto_translated_physmap))
-+		down_write(&info->vma->vm_mm->mmap_sem);
-+	/* Mark mapped pages as reserved: */
-+	for (i = 0; i < req->nr_segments; i++) {
-+		unsigned long kvaddr;
-+		struct page *pg;
++int __devinit xenkbd_probe(struct xenbus_device *dev,
++			   const struct xenbus_device_id *id)
++{
++	int ret, i;
++	struct xenkbd_info *info;
++	struct input_dev *kbd, *ptr;
 +
-+		kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
-+		pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
-+		SetPageReserved(pg);
-+		if (xen_feature(XENFEAT_auto_translated_physmap)) {
-+			ret = vm_insert_page(info->vma,
-+					     MMAP_VADDR(info->user_vstart,
-+							usr_idx, i), pg);
-+			if (ret) {
-+				up_write(&info->vma->vm_mm->mmap_sem);
-+				goto fail_flush;
-+			}
-+		}
++	info = kzalloc(sizeof(*info), GFP_KERNEL);
++	if (!info) {
++		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
++		return -ENOMEM;
 +	}
-+	if (xen_feature(XENFEAT_auto_translated_physmap))
-+		up_write(&info->vma->vm_mm->mmap_sem);
-+	
-+	/*record [mmap_idx,pending_idx] to [usr_idx] mapping*/
-+	info->idx_map[usr_idx] = MAKE_ID(mmap_idx, pending_idx);
++	dev->dev.driver_data = info;
++	info->xbdev = dev;
++	snprintf(info->phys, sizeof(info->phys), "xenbus/%s", dev->nodename);
 +
-+	blkif_get(blkif);
-+	/* Finally, write the request message to the user ring. */
-+	target = RING_GET_REQUEST(&info->ufe_ring,
-+				  info->ufe_ring.req_prod_pvt);
-+	memcpy(target, req, sizeof(*req));
-+	target->id = usr_idx;
-+	wmb(); /* blktap_poll() reads req_prod_pvt asynchronously */
-+	info->ufe_ring.req_prod_pvt++;
++	info->page = (void *)__get_free_page(GFP_KERNEL);
++	if (!info->page)
++		goto error_nomem;
++	info->page->in_cons = info->page->in_prod = 0;
++	info->page->out_cons = info->page->out_prod = 0;
 +
-+	if (operation == READ)
-+		blkif->st_rd_sect += nr_sects;
-+	else if (operation == WRITE)
-+		blkif->st_wr_sect += nr_sects;
++	/* keyboard */
++	kbd = input_allocate_device();
++	if (!kbd)
++		goto error_nomem;
++	kbd->name = "Xen Virtual Keyboard";
++	kbd->phys = info->phys;
++	kbd->id.bustype = BUS_PCI;
++	kbd->id.vendor = 0x5853;
++	kbd->id.product = 0xffff;
++	kbd->evbit[0] = BIT(EV_KEY);
++	for (i = KEY_ESC; i < KEY_UNKNOWN; i++)
++		set_bit(i, kbd->keybit);
++	for (i = KEY_OK; i < KEY_MAX; i++)
++		set_bit(i, kbd->keybit);
 +
-+	return;
++	ret = input_register_device(kbd);
++	if (ret) {
++		input_free_device(kbd);
++		xenbus_dev_fatal(dev, ret, "input_register_device(kbd)");
++		goto error;
++	}
++	info->kbd = kbd;
 +
-+ fail_flush:
-+	WPRINTK("Reached Fail_flush\n");
-+	fast_flush_area(pending_req, pending_idx, usr_idx, blkif->dev_num);
-+ fail_response:
-+	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
-+	free_req(pending_req);
-+} 
++	/* pointing device */
++	ptr = input_allocate_device();
++	if (!ptr)
++		goto error_nomem;
++	ptr->name = "Xen Virtual Pointer";
++	ptr->phys = info->phys;
++	ptr->id.bustype = BUS_PCI;
++	ptr->id.vendor = 0x5853;
++	ptr->id.product = 0xfffe;
++	ptr->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS);
++	for (i = BTN_LEFT; i <= BTN_TASK; i++)
++		set_bit(i, ptr->keybit);
++	ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y) | BIT(REL_WHEEL);
++	input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0);
++	input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0);
 +
++	ret = input_register_device(ptr);
++	if (ret) {
++		input_free_device(ptr);
++		xenbus_dev_fatal(dev, ret, "input_register_device(ptr)");
++		goto error;
++	}
++	info->ptr = ptr;
 +
++	ret = xenkbd_connect_backend(dev, info);
++	if (ret < 0)
++		goto error;
 +
-+/******************************************************************
-+ * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
-+ */
++	return 0;
 +
++ error_nomem:
++	ret = -ENOMEM;
++	xenbus_dev_fatal(dev, ret, "allocating device memory");
++ error:
++	xenkbd_remove(dev);
++	return ret;
++}
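
One vkbd xenbus device thus registers two input devices: a keyboard
claiming EV_KEY over the keyboard key ranges, and a pointer claiming the
button range plus EV_REL/EV_ABS. input_handler() routes XENKBD_TYPE_KEY
events by testing the keycode against each device's keybit mask, so these
bitmaps are what make the routing work. A reduced model (the numeric
ranges are illustrative stand-ins for the input.h constants):

    #include <stdio.h>

    struct dev { const char *name; int lo, hi; };	/* claimed keycodes */

    static const struct dev kbd = { "kbd",   1, 255 };	/* ~KEY_ESC.. */
    static const struct dev ptr = { "ptr", 272, 279 };	/* ~BTN_LEFT..BTN_TASK */

    static const struct dev *route(int keycode)
    {
    	if (keycode >= kbd.lo && keycode <= kbd.hi)
    		return &kbd;
    	if (keycode >= ptr.lo && keycode <= ptr.hi)
    		return &ptr;
    	return NULL;			/* "unhandled keycode" */
    }

    int main(void)
    {
    	const struct dev *d = route(30);	/* KEY_A is 30 */
    	printf("keycode 30 -> %s\n", d ? d->name : "unhandled");
    	return 0;
    }
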
 +
-+static void make_response(blkif_t *blkif, u64 id,
-+                          unsigned short op, int st)
++static int xenkbd_resume(struct xenbus_device *dev)
 +{
-+	blkif_response_t  resp;
-+	unsigned long     flags;
-+	blkif_back_rings_t *blk_rings = &blkif->blk_rings;
-+	int more_to_do = 0;
-+	int notify;
-+
-+	resp.id        = id;
-+	resp.operation = op;
-+	resp.status    = st;
++	struct xenkbd_info *info = dev->dev.driver_data;
 +
-+	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
-+	/* Place on the response ring for the relevant domain. */
-+	switch (blkif->blk_protocol) {
-+	case BLKIF_PROTOCOL_NATIVE:
-+		memcpy(RING_GET_RESPONSE(&blk_rings->native,
-+					 blk_rings->native.rsp_prod_pvt),
-+		       &resp, sizeof(resp));
-+		break;
-+	case BLKIF_PROTOCOL_X86_32:
-+		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32,
-+					 blk_rings->x86_32.rsp_prod_pvt),
-+		       &resp, sizeof(resp));
-+		break;
-+	case BLKIF_PROTOCOL_X86_64:
-+		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64,
-+					 blk_rings->x86_64.rsp_prod_pvt),
-+		       &resp, sizeof(resp));
-+		break;
-+	default:
-+		BUG();
-+	}
-+	blk_rings->common.rsp_prod_pvt++;
-+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
++	xenkbd_disconnect_backend(info);
++	info->page->in_cons = info->page->in_prod = 0;
++	info->page->out_cons = info->page->out_prod = 0;
++	return xenkbd_connect_backend(dev, info);
++}
 +
-+	if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
-+		/*
-+		 * Tail check for pending requests. Allows frontend to avoid
-+		 * notifications if requests are already in flight (lower
-+		 * overheads and promotes batching).
-+		 */
-+		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
-+	} else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
-+		more_to_do = 1;
-+	}
++static int xenkbd_remove(struct xenbus_device *dev)
++{
++	struct xenkbd_info *info = dev->dev.driver_data;
 +
-+	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
-+	if (more_to_do)
-+		blkif_notify_work(blkif);
-+	if (notify)
-+		notify_remote_via_irq(blkif->irq);
++	xenkbd_disconnect_backend(info);
++	input_unregister_device(info->kbd);
++	input_unregister_device(info->ptr);
++	free_page((unsigned long)info->page);
++	kfree(info);
++	return 0;
 +}
 +
-+static int __init blkif_init(void)
++static int xenkbd_connect_backend(struct xenbus_device *dev,
++				  struct xenkbd_info *info)
 +{
-+	int i, ret;
-+	struct class *class;
++	int ret;
++	struct xenbus_transaction xbt;
 +
-+	if (!is_running_on_xen())
-+		return -ENODEV;
++	ret = bind_listening_port_to_irqhandler(
++		dev->otherend_id, input_handler, 0, "xenkbd", info);
++	if (ret < 0) {
++		xenbus_dev_fatal(dev, ret,
++				 "bind_listening_port_to_irqhandler");
++		return ret;
++	}
++	info->irq = ret;
 +
-+	INIT_LIST_HEAD(&pending_free);
-+        for(i = 0; i < 2; i++) {
-+		ret = req_increase();
-+		if (ret)
-+			break;
++ again:
++	ret = xenbus_transaction_start(&xbt);
++	if (ret) {
++		xenbus_dev_fatal(dev, ret, "starting transaction");
++		return ret;
 +	}
-+	if (i == 0)
++	ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
++			    virt_to_mfn(info->page));
++	if (ret)
++		goto error_xenbus;
++	ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
++			    irq_to_evtchn_port(info->irq));
++	if (ret)
++		goto error_xenbus;
++	ret = xenbus_transaction_end(xbt, 0);
++	if (ret) {
++		if (ret == -EAGAIN)
++			goto again;
++		xenbus_dev_fatal(dev, ret, "completing transaction");
 +		return ret;
++	}
 +
-+	tap_blkif_interface_init();
-+
-+	alloc_pending_reqs = 0;
++	xenbus_switch_state(dev, XenbusStateInitialised);
++	return 0;
 +
-+	tap_blkif_xenbus_init();
++ error_xenbus:
++	xenbus_transaction_end(xbt, 1);
++	xenbus_dev_fatal(dev, ret, "writing xenstore");
++	return ret;
++}
 +
-+	/* Dynamically allocate a major for this device */
-+	ret = register_chrdev(0, "blktap", &blktap_fops);
++static void xenkbd_disconnect_backend(struct xenkbd_info *info)
++{
++	if (info->irq >= 0)
++		unbind_from_irqhandler(info->irq, info);
++	info->irq = -1;
++}
 +
-+	if (ret < 0) {
-+		WPRINTK("Couldn't register /dev/xen/blktap\n");
-+		return -ENOMEM;
-+	}	
-+	
-+	blktap_major = ret;
++static void xenkbd_backend_changed(struct xenbus_device *dev,
++				   enum xenbus_state backend_state)
++{
++	struct xenkbd_info *info = dev->dev.driver_data;
++	int ret, val;
 +
-+	/* tapfds[0] is always NULL */
-+	blktap_next_minor++;
++	switch (backend_state) {
++	case XenbusStateInitialising:
++	case XenbusStateInitialised:
++	case XenbusStateReconfiguring:
++	case XenbusStateReconfigured:
++	case XenbusStateUnknown:
++	case XenbusStateClosed:
++		break;
 +
-+	DPRINTK("Created misc_dev [/dev/xen/blktap%d]\n",i);
++	case XenbusStateInitWait:
++	InitWait:
++		ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
++				   "feature-abs-pointer", "%d", &val);
++		if (ret < 0)
++			val = 0;
++		if (val) {
++			ret = xenbus_printf(XBT_NIL, info->xbdev->nodename,
++					    "request-abs-pointer", "1");
++			if (ret)
++				; /* FIXME */
++		}
++		xenbus_switch_state(dev, XenbusStateConnected);
++		break;
 +
-+	/* Make sure the xen class exists */
-+	if ((class = get_xen_class()) != NULL) {
++	case XenbusStateConnected:
 +		/*
-+		 * This will allow udev to create the blktap ctrl device.
-+		 * We only want to create blktap0 first.  We don't want
-+		 * to flood the sysfs system with needless blktap devices.
-+		 * We only create the device when a request of a new device is
-+		 * made.
++		 * Work around xenbus race condition: If backend goes
++		 * through InitWait to Connected fast enough, we can
++		 * get Connected twice here.
 +		 */
-+		class_device_create(class, NULL,
-+				    MKDEV(blktap_major, 0), NULL,
-+				    "blktap0");
-+	} else {
-+		/* this is bad, but not fatal */
-+		WPRINTK("blktap: sysfs xen_class not created\n");
++		if (dev->state != XenbusStateConnected)
++			goto InitWait; /* no InitWait seen yet, fudge it */
++
++		/* Set input abs params to match backend screen res */
++		if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
++				   "width", "%d", &val) > 0 )
++			input_set_abs_params(info->ptr, ABS_X, 0, val, 0, 0);
++
++		if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
++				   "height", "%d", &val) > 0 )
++			input_set_abs_params(info->ptr, ABS_Y, 0, val, 0, 0);
++
++		break;
++
++	case XenbusStateClosing:
++		xenbus_frontend_closed(dev);
++		break;
 +	}
++}
++
++static const struct xenbus_device_id xenkbd_ids[] = {
++	{ "vkbd" },
++	{ "" }
++};
++MODULE_ALIAS("xen:vkbd");
++
++static struct xenbus_driver xenkbd_driver = {
++	.name = "vkbd",
++	.owner = THIS_MODULE,
++	.ids = xenkbd_ids,
++	.probe = xenkbd_probe,
++	.remove = xenkbd_remove,
++	.resume = xenkbd_resume,
++	.otherend_changed = xenkbd_backend_changed,
++};
 +
-+	DPRINTK("Blktap device successfully created\n");
++static int __init xenkbd_init(void)
++{
++	if (!is_running_on_xen())
++		return -ENODEV;
 +
-+	return 0;
++	/* Nothing to do if running in dom0. */
++	if (is_initial_xendomain())
++		return -ENODEV;
++
++	return xenbus_register_frontend(&xenkbd_driver);
 +}
 +
-+module_init(blkif_init);
++static void __exit xenkbd_cleanup(void)
++{
++	return xenbus_unregister_driver(&xenkbd_driver);
++}
 +
-+MODULE_LICENSE("Dual BSD/GPL");
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/blktap/common.h tmp-linux-2.6-xen.patch/drivers/xen/blktap/common.h
---- pristine-linux-2.6.18.2/drivers/xen/blktap/common.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/blktap/common.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,121 @@
-+/* 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
++module_init(xenkbd_init);
++module_exit(xenkbd_cleanup);
++
++MODULE_LICENSE("GPL");
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/gntdev/Makefile
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/gntdev/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1 @@
++obj-$(CONFIG_XEN_GRANT_DEV) := gntdev.o
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/gntdev/gntdev.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/gntdev/gntdev.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1074 @@
++/******************************************************************************
++ * gntdev.c
 + * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
++ * Device for accessing (in user-space) pages that have been granted by other
++ * domains.
++ *
++ * Copyright (c) 2006-2007, D G Murray.
 + * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
 + * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 + */
 +
-+#ifndef __BLKIF__BACKEND__COMMON_H__
-+#define __BLKIF__BACKEND__COMMON_H__
-+
-+#include <linux/version.h>
++#include <asm/atomic.h>
 +#include <linux/module.h>
-+#include <linux/interrupt.h>
-+#include <linux/slab.h>
-+#include <linux/blkdev.h>
-+#include <linux/vmalloc.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/fs.h>
++#include <linux/device.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <asm/uaccess.h>
 +#include <asm/io.h>
-+#include <asm/setup.h>
-+#include <asm/pgalloc.h>
-+#include <xen/evtchn.h>
-+#include <asm/hypervisor.h>
-+#include <xen/blkif.h>
 +#include <xen/gnttab.h>
++#include <asm/hypervisor.h>
++#include <xen/balloon.h>
++#include <xen/evtchn.h>
 +#include <xen/driver_util.h>
 +
-+#define DPRINTK(_f, _a...) pr_debug("(file=%s, line=%d) " _f, \
-+                                    __FILE__ , __LINE__ , ## _a )
++#include <linux/types.h>
++#include <xen/public/gntdev.h>
 +
-+#define WPRINTK(fmt, args...) printk(KERN_WARNING "blk_tap: " fmt, ##args)
 +
-+struct backend_info;
++#define DRIVER_AUTHOR "Derek G. Murray <Derek.Murray at cl.cam.ac.uk>"
++#define DRIVER_DESC   "User-space granted page access driver"
 +
-+typedef struct blkif_st {
-+	/* Unique identifier for this interface. */
-+	domid_t           domid;
-+	unsigned int      handle;
-+	/* Physical parameters of the comms window. */
-+	unsigned int      irq;
-+	/* Comms information. */
-+	enum blkif_protocol blk_protocol;
-+	blkif_back_rings_t blk_rings;
-+	struct vm_struct *blk_ring_area;
-+	/* Back pointer to the backend_info. */
-+	struct backend_info *be;
-+	/* Private fields. */
-+	spinlock_t       blk_ring_lock;
-+	atomic_t         refcnt;
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
 +
-+	wait_queue_head_t   wq;
-+	struct task_struct  *xenblkd;
-+	unsigned int        waiting_reqs;
-+	request_queue_t     *plug;
++#define MAX_GRANTS_LIMIT   1024
++#define DEFAULT_MAX_GRANTS 128
 +
-+	/* statistics */
-+	unsigned long       st_print;
-+	int                 st_rd_req;
-+	int                 st_wr_req;
-+	int                 st_oo_req;
-+	int                 st_rd_sect;
-+	int                 st_wr_sect;
++/* A slot can be in one of three states:
++ *
++ * 0. GNTDEV_SLOT_INVALID:
++ *    This slot is not associated with a grant reference, and is therefore free
++ *    to be overwritten by a new grant reference.
++ *
++ * 1. GNTDEV_SLOT_NOT_YET_MAPPED:
++ *    This slot is associated with a grant reference (via the 
++ *    IOCTL_GNTDEV_MAP_GRANT_REF ioctl), but it has not yet been mmap()-ed.
++ *
++ * 2. GNTDEV_SLOT_MAPPED:
++ *    This slot is associated with a grant reference, and has been mmap()-ed.
++ */
++typedef enum gntdev_slot_state {
++	GNTDEV_SLOT_INVALID = 0,
++	GNTDEV_SLOT_NOT_YET_MAPPED,
++	GNTDEV_SLOT_MAPPED
++} gntdev_slot_state_t;
 +
-+	wait_queue_head_t waiting_to_free;
++#define GNTDEV_INVALID_HANDLE    -1
++#define GNTDEV_FREE_LIST_INVALID -1
++/* Each opened instance of gntdev is associated with a list of grants,
++ * represented by an array of elements of the following type,
++ * gntdev_grant_info_t.
++ */
++typedef struct gntdev_grant_info {
++	gntdev_slot_state_t state;
++	union {
++		uint32_t free_list_index;
++		struct {
++			domid_t domid;
++			grant_ref_t ref;
++			grant_handle_t kernel_handle;
++			grant_handle_t user_handle;
++			uint64_t dev_bus_addr;
++		} valid;
++	} u;
++} gntdev_grant_info_t;
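
A slot is therefore either on the free list (u.free_list_index points
back at its free-list entry) or carrying a grant (u.valid), and state
records which arm of the union is live. A compilable miniature of that
invariant (field names shortened for illustration):

    #include <stdint.h>
    #include <stdio.h>

    enum slot_state { SLOT_INVALID = 0, SLOT_NOT_YET_MAPPED, SLOT_MAPPED };

    struct slot {
    	enum slot_state state;
    	union {
    		uint32_t free_list_index;	/* live while SLOT_INVALID */
    		struct { uint16_t domid; uint32_t ref; } grant;	/* otherwise */
    	} u;
    };

    int main(void)
    {
    	struct slot s = { SLOT_INVALID, { .free_list_index = 7 } };

    	/* taking the slot off the free list flips the live union arm */
    	s.state = SLOT_NOT_YET_MAPPED;
    	s.u.grant.domid = 0;
    	s.u.grant.ref = 42;
    	printf("slot holds gref %u from dom%u\n", s.u.grant.ref,
    	       (unsigned) s.u.grant.domid);
    	return 0;
    }
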
 +
-+	grant_handle_t shmem_handle;
-+	grant_ref_t    shmem_ref;
-+	
-+	int		dev_num;
-+	uint64_t        sectors;
-+} blkif_t;
++/* Private data structure, which is stored in the file pointer for files
++ * associated with this device.
++ */
++typedef struct gntdev_file_private_data {
++  
++	/* Array of grant information. */
++	gntdev_grant_info_t *grants;
++	uint32_t grants_size;
 +
-+blkif_t *tap_alloc_blkif(domid_t domid);
-+void tap_blkif_free(blkif_t *blkif);
-+int tap_blkif_map(blkif_t *blkif, unsigned long shared_page, 
-+		  unsigned int evtchn);
-+void tap_blkif_unmap(blkif_t *blkif);
++	/* Read/write semaphore used to protect the grants array. */
++	struct rw_semaphore grants_sem;
 +
-+#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
-+#define blkif_put(_b)					\
-+	do {						\
-+		if (atomic_dec_and_test(&(_b)->refcnt))	\
-+			wake_up(&(_b)->waiting_to_free);\
-+	} while (0)
++	/* An array of indices of free slots in the grants array.
++	 * N.B. An entry in this list may temporarily have the value
++	 * GNTDEV_FREE_LIST_INVALID if the corresponding slot has been removed
++	 * from the list by the contiguous allocator, but the list has not yet
++	 * been compressed. However, this is not visible across invocations of
++	 * the device.
++	 */
++	int32_t *free_list;
++	
++	/* The number of free slots in the grants array. */
++	uint32_t free_list_size;
 +
++	/* Read/write semaphore used to protect the free list. */
++	struct rw_semaphore free_list_sem;
++	
++	/* Index of the next slot after the most recent contiguous allocation, 
++	 * for use in a next-fit allocator.
++	 */
++	uint32_t next_fit_index;
 +
-+struct phys_req {
-+	unsigned short       dev;
-+	unsigned short       nr_sects;
-+	struct block_device *bdev;
-+	blkif_sector_t       sector_number;
-+};
++	/* Used to map grants into the kernel, before mapping them into user
++	 * space.
++	 */
++	struct page **foreign_pages;
 +
-+void tap_blkif_interface_init(void);
++} gntdev_file_private_data_t;
 +
-+void tap_blkif_xenbus_init(void);
++/* Module lifecycle operations. */
++static int __init gntdev_init(void);
++static void __exit gntdev_exit(void);
 +
-+irqreturn_t tap_blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
-+int tap_blkif_schedule(void *arg);
++module_init(gntdev_init);
++module_exit(gntdev_exit);
 +
-+int dom_to_devid(domid_t domid, int xenbus_id, blkif_t *blkif);
-+void signal_tapdisk(int idx);
++/* File operations. */
++static int gntdev_open(struct inode *inode, struct file *flip);
++static int gntdev_release(struct inode *inode, struct file *flip);
++static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma);
++static long gntdev_ioctl(struct file *flip,
++			 unsigned int cmd, unsigned long arg);
 +
-+#endif /* __BLKIF__BACKEND__COMMON_H__ */
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/blktap/interface.c tmp-linux-2.6-xen.patch/drivers/xen/blktap/interface.c
---- pristine-linux-2.6.18.2/drivers/xen/blktap/interface.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/blktap/interface.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,174 @@
-+/******************************************************************************
-+ * drivers/xen/blktap/interface.c
-+ * 
-+ * Block-device interface management.
-+ * 
-+ * Copyright (c) 2004, Keir Fraser
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
++static const struct file_operations gntdev_fops = {
++	.owner = THIS_MODULE,
++	.open = gntdev_open,
++	.release = gntdev_release,
++	.mmap = gntdev_mmap,
++	.unlocked_ioctl = gntdev_ioctl
++};
 +
-+ */
++/* VM operations. */
++static void gntdev_vma_close(struct vm_area_struct *vma);
++static pte_t gntdev_clear_pte(struct vm_area_struct *vma, unsigned long addr,
++			      pte_t *ptep, int is_fullmm);
 +
-+#include "common.h"
-+#include <xen/evtchn.h>
++static struct vm_operations_struct gntdev_vmops = {
++	.close = gntdev_vma_close,
++	.zap_pte = gntdev_clear_pte
++};
 +
-+static kmem_cache_t *blkif_cachep;
++/* Global variables. */
 +
-+blkif_t *tap_alloc_blkif(domid_t domid)
-+{
-+	blkif_t *blkif;
++/* The driver major number, for use when unregistering the driver. */
++static int gntdev_major;
 +
-+	blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
-+	if (!blkif)
-+		return ERR_PTR(-ENOMEM);
++#define GNTDEV_NAME "gntdev"
 +
-+	memset(blkif, 0, sizeof(*blkif));
-+	blkif->domid = domid;
-+	spin_lock_init(&blkif->blk_ring_lock);
-+	atomic_set(&blkif->refcnt, 1);
-+	init_waitqueue_head(&blkif->wq);
-+	blkif->st_print = jiffies;
-+	init_waitqueue_head(&blkif->waiting_to_free);
++/* Memory mapping functions
++ * ------------------------
++ *
++ * Every granted page is mapped into both kernel and user space, and the two
++ * following functions return the respective virtual addresses of these pages.
++ *
++ * When shadow paging is disabled, the granted page is mapped directly into
++ * user space; when it is enabled, it is mapped into the kernel and remapped
++ * into user space using vm_insert_page() (see gntdev_mmap(), below).
++ */
 +
-+	return blkif;
++/* Returns the virtual address (in user space) of the @page_index'th page
++ * in the given VM area.
++ */
++static inline unsigned long get_user_vaddr (struct vm_area_struct *vma,
++					    int page_index)
++{
++	return (unsigned long) vma->vm_start + (page_index << PAGE_SHIFT);
 +}
 +
-+static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
++/* Returns the virtual address (in kernel space) of the @slot_index'th page
++ * mapped by the gntdev instance that owns the given private data struct.
++ */
++static inline unsigned long get_kernel_vaddr (gntdev_file_private_data_t *priv,
++					      int slot_index)
 +{
-+	struct gnttab_map_grant_ref op;
++	unsigned long pfn;
++	void *kaddr;
++	pfn = page_to_pfn(priv->foreign_pages[slot_index]);
++	kaddr = pfn_to_kaddr(pfn);
++	return (unsigned long) kaddr;
++}
 +
-+	gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
-+			  GNTMAP_host_map, shared_page, blkif->domid);
++/* Helper functions. */
 +
-+	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-+		BUG();
++/* Adds information about a grant reference to the list of grants in the file's
++ * private data structure. Returns non-zero on failure. On success, sets the
++ * value of *offset to the offset that should be mmap()-ed in order to map the
++ * grant reference.
++ */
++static int add_grant_reference(struct file *flip,
++			       struct ioctl_gntdev_grant_ref *op,
++			       uint64_t *offset)
++{
++	gntdev_file_private_data_t *private_data 
++		= (gntdev_file_private_data_t *) flip->private_data;
 +
-+	if (op.status) {
-+		DPRINTK(" Grant table operation failure !\n");
-+		return op.status;
-+	}
++	uint32_t slot_index;
 +
-+	blkif->shmem_ref = shared_page;
-+	blkif->shmem_handle = op.handle;
++	if (unlikely(private_data->free_list_size == 0)) {
++		return -ENOMEM;
++	}
 +
-+	return 0;
-+}
++	slot_index = private_data->free_list[--private_data->free_list_size];
++	private_data->free_list[private_data->free_list_size]
++		= GNTDEV_FREE_LIST_INVALID;
 +
-+static void unmap_frontend_page(blkif_t *blkif)
-+{
-+	struct gnttab_unmap_grant_ref op;
++	/* Copy the grant information into file's private data. */
++	private_data->grants[slot_index].state = GNTDEV_SLOT_NOT_YET_MAPPED;
++	private_data->grants[slot_index].u.valid.domid = op->domid;
++	private_data->grants[slot_index].u.valid.ref = op->ref;
 +
-+	gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
-+			    GNTMAP_host_map, blkif->shmem_handle);
++	/* The offset is calculated as the index of the chosen entry in the
++	 * file's private data's array of grant information. This is then
++	 * shifted to give an offset into the virtual "file address space".
++	 */
++	*offset = slot_index << PAGE_SHIFT;
 +
-+	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
-+		BUG();
++	return 0;
 +}
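
The returned offset is the whole contract with gntdev_mmap(): user space
hands it back verbatim as the mmap() offset, and since mmap offsets are
page-granular the driver recovers the slot index as vma->vm_pgoff. A
hedged sketch of the user-space side, with the map ioctl reduced to a
stub (the real structures live in xen/public/gntdev.h):

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>

    /* Stub for the real map-grant-ref ioctl; assume the driver picked
     * slot 3, i.e. add_grant_reference() stored 3 << PAGE_SHIFT. */
    static uint64_t map_grant_stub(void)
    {
    	return (uint64_t) 3 * 4096;
    }

    void *map_granted_page(int fd)
    {
    	uint64_t offset = map_grant_stub();

    	/* vm_pgoff in the driver becomes offset / PAGE_SIZE == 3 */
    	return mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
    		    fd, (off_t) offset);
    }

    int main(void)
    {
    	printf("driver sees vm_pgoff = %llu\n",
    	       (unsigned long long) (map_grant_stub() / 4096));
    	return 0;
    }
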
 +
-+int tap_blkif_map(blkif_t *blkif, unsigned long shared_page, 
-+		  unsigned int evtchn)
++/* Adds the @count grant references to the contiguous range in the slot array
++ * beginning at @first_slot. It is assumed that @first_slot was returned by a
++ * previous invocation of find_contiguous_free_range(), during the same
++ * invocation of the driver.
++ */
++static int add_grant_references(struct file *flip,
++				int count,
++				struct ioctl_gntdev_grant_ref *ops,
++				uint32_t first_slot)
 +{
-+	int err;
-+
-+	/* Already connected through? */
-+	if (blkif->irq)
-+		return 0;
++	gntdev_file_private_data_t *private_data 
++		= (gntdev_file_private_data_t *) flip->private_data;
++	int i;
++	
++	for (i = 0; i < count; ++i) {
 +
-+	if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
-+		return -ENOMEM;
++		/* First, mark the slot's entry in the free list as invalid. */
++		int free_list_index = 
++			private_data->grants[first_slot+i].u.free_list_index;
++		private_data->free_list[free_list_index] = 
++			GNTDEV_FREE_LIST_INVALID;
 +
-+	err = map_frontend_page(blkif, shared_page);
-+	if (err) {
-+		free_vm_area(blkif->blk_ring_area);
-+		return err;
++		/* Now, update the slot. */
++		private_data->grants[first_slot+i].state = 
++			GNTDEV_SLOT_NOT_YET_MAPPED;
++		private_data->grants[first_slot+i].u.valid.domid =
++			ops[i].domid;
++		private_data->grants[first_slot+i].u.valid.ref = ops[i].ref;
 +	}
 +
-+	switch (blkif->blk_protocol) {
-+	case BLKIF_PROTOCOL_NATIVE:
-+	{
-+		blkif_sring_t *sring;
-+		sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
-+		BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
-+		break;
-+	}
-+	case BLKIF_PROTOCOL_X86_32:
-+	{
-+		blkif_x86_32_sring_t *sring_x86_32;
-+		sring_x86_32 = (blkif_x86_32_sring_t *)blkif->blk_ring_area->addr;
-+		BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
-+		break;
-+	}
-+	case BLKIF_PROTOCOL_X86_64:
-+	{
-+		blkif_x86_64_sring_t *sring_x86_64;
-+		sring_x86_64 = (blkif_x86_64_sring_t *)blkif->blk_ring_area->addr;
-+		BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
-+		break;
-+	}
-+	default:
-+		BUG();
-+	}
++	return 0;	
++}
 +
-+	err = bind_interdomain_evtchn_to_irqhandler(
-+		blkif->domid, evtchn, tap_blkif_be_int,
-+		0, "blkif-backend", blkif);
-+	if (err < 0) {
-+		unmap_frontend_page(blkif);
-+		free_vm_area(blkif->blk_ring_area);
-+		blkif->blk_rings.common.sring = NULL;
-+		return err;
++/* Scans through the free list for @flip, removing entries that are marked as
++ * GNTDEV_SLOT_INVALID. This will reduce the recorded size of the free list to
++ * the number of valid entries.
++ */
++static void compress_free_list(struct file *flip) 
++{
++	gntdev_file_private_data_t *private_data 
++		= (gntdev_file_private_data_t *) flip->private_data;
++	int i, j = 0, old_size, slot_index;
++	
++	old_size = private_data->free_list_size;
++	for (i = 0; i < old_size; ++i) {
++		if (private_data->free_list[i] != GNTDEV_FREE_LIST_INVALID) {
++			if (i > j) {
++				slot_index = private_data->free_list[i];
++				private_data->free_list[j] = slot_index;
++				private_data->grants[slot_index].u
++					.free_list_index = j;
++				private_data->free_list[i] 
++					= GNTDEV_FREE_LIST_INVALID;
++			}
++			++j;
++		} else {
++			--private_data->free_list_size;
++		}
 +	}
-+	blkif->irq = err;
-+
-+	return 0;
 +}
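
compress_free_list() above is a stable in-place compaction: one pass that
slides live entries left over the GNTDEV_FREE_LIST_INVALID holes, fixing
each moved slot's back-pointer as it goes. The same loop on a plain int
array (-1 plays the invalid marker; the back-pointer update is omitted):

    #include <stdio.h>

    /* Compact entries != -1 to the front, keeping order; returns new size. */
    static int compress(int *list, int size)
    {
    	int i, j = 0;

    	for (i = 0; i < size; ++i)
    		if (list[i] != -1)
    			list[j++] = list[i];
    	return j;
    }

    int main(void)
    {
    	int list[] = { 5, -1, 9, -1, -1, 2 };
    	int i, n = compress(list, 6);

    	printf("%d entries:", n);
    	for (i = 0; i < n; i++)
    		printf(" %d", list[i]);
    	printf("\n");
    	return 0;
    }
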
 +
-+void tap_blkif_unmap(blkif_t *blkif)
++/* Searches the grant array in the private data of @flip for a range of
++ * @num_slots contiguous slots in the GNTDEV_SLOT_INVALID state.
++ *
++ * Returns the index of the first slot if a range is found, otherwise -ENOMEM.
++ */
++static int find_contiguous_free_range(struct file *flip,
++				      uint32_t num_slots) 
 +{
-+	if (blkif->irq) {
-+		unbind_from_irqhandler(blkif->irq, blkif);
-+		blkif->irq = 0;
++	gntdev_file_private_data_t *private_data 
++		= (gntdev_file_private_data_t *) flip->private_data;
++	
++	int i;
++	int start_index = private_data->next_fit_index;
++	int range_start = 0, range_length;
++
++	if (private_data->free_list_size < num_slots) {
++		return -ENOMEM;
 +	}
-+	if (blkif->blk_rings.common.sring) {
-+		unmap_frontend_page(blkif);
-+		free_vm_area(blkif->blk_ring_area);
-+		blkif->blk_rings.common.sring = NULL;
++
++	/* First search from the start_index to the end of the array. */
++	range_length = 0;
++	for (i = start_index; i < private_data->grants_size; ++i) {
++		if (private_data->grants[i].state == GNTDEV_SLOT_INVALID) {
++			if (range_length == 0) {
++				range_start = i;
++			}
++			++range_length;
++			if (range_length == num_slots) {
++				return range_start;
++			}
++		}
++	}
++	
++	/* Now search from the start of the array to the start_index. */
++	range_length = 0;
++	for (i = 0; i < start_index; ++i) {
++		if (private_data->grants[i].state == GNTDEV_SLOT_INVALID) {
++			if (range_length == 0) {
++				range_start = i;
++			}
++			++range_length;
++			if (range_length == num_slots) {
++				return range_start;
++			}
++		}
 +	}
++	
++	return -ENOMEM;
 +}
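
find_contiguous_free_range() is a next-fit allocator: it resumes scanning
at next_fit_index, looks for num_slots invalid slots up to the end of the
array, then makes one wrapped pass from the start (like the original, a
run spanning the wrap point cannot be found). Note that the loops above
never reset range_length when they cross an in-use slot, so they can
accept a broken run; the stand-alone version below does the intended
contiguous search, resetting the run on every used slot (a non-zero entry
marks a free slot):

    #include <stdio.h>

    /* Next-fit search for `want` consecutive free slots; -1 if none. */
    static int find_range(const int *slots, int n, int start, int want)
    {
    	int i, pass, run, first = -1;

    	for (pass = 0; pass < 2; pass++) {
    		int lo = pass ? 0 : start;
    		int hi = pass ? start : n;

    		for (run = 0, i = lo; i < hi; i++) {
    			if (!slots[i]) {
    				run = 0;	/* used slot breaks the run */
    				continue;
    			}
    			if (run++ == 0)
    				first = i;
    			if (run == want)
    				return first;
    		}
    	}
    	return -1;		/* -ENOMEM in the driver */
    }

    int main(void)
    {
    	int slots[] = { 0, 1, 1, 0, 1, 1, 1 };

    	printf("range found at %d\n", find_range(slots, 7, 3, 3)); /* 4 */
    	return 0;
    }
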
 +
-+void tap_blkif_free(blkif_t *blkif)
-+{
-+	atomic_dec(&blkif->refcnt);
-+	wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
-+
-+	tap_blkif_unmap(blkif);
-+	kmem_cache_free(blkif_cachep, blkif);
-+}
-+
-+void __init tap_blkif_interface_init(void)
++static int init_private_data(gntdev_file_private_data_t *priv,
++			     uint32_t max_grants)
 +{
-+	blkif_cachep = kmem_cache_create("blktapif_cache", sizeof(blkif_t), 
-+					 0, 0, NULL, NULL);
-+}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/blktap/Makefile tmp-linux-2.6-xen.patch/drivers/xen/blktap/Makefile
---- pristine-linux-2.6.18.2/drivers/xen/blktap/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/blktap/Makefile	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,5 @@
-+LINUXINCLUDE += -I../xen/include/public/io
++	int i;
 +
-+obj-$(CONFIG_XEN_BLKDEV_TAP) := xenblktap.o
++	/* Allocate space for the kernel-mapping of granted pages. */
++	priv->foreign_pages = 
++		alloc_empty_pages_and_pagevec(max_grants);
++	if (!priv->foreign_pages)
++		goto nomem_out;
 +
-+xenblktap-y := xenbus.o interface.o blktap.o 
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/blktap/xenbus.c tmp-linux-2.6-xen.patch/drivers/xen/blktap/xenbus.c
---- pristine-linux-2.6.18.2/drivers/xen/blktap/xenbus.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/blktap/xenbus.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,477 @@
-+/* drivers/xen/blktap/xenbus.c
-+ *
-+ * Xenbus code for blktap
-+ *
-+ * Copyright (c) 2004-2005, Andrew Warfield and Julian Chesterfield
-+ *
-+ * Based on the blkback xenbus code:
-+ *
-+ * Copyright (C) 2005 Rusty Russell <rusty at rustcorp.com.au>
-+ * Copyright (C) 2005 XenSource Ltd
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
++	/* Allocate the grant list and free-list. */
++	priv->grants = kmalloc(max_grants * sizeof(gntdev_grant_info_t),
++			       GFP_KERNEL);
++	if (!priv->grants)
++		goto nomem_out2;
++	priv->free_list = kmalloc(max_grants * sizeof(int32_t), GFP_KERNEL);
++	if (!priv->free_list)
++		goto nomem_out3;
 +
-+#include <stdarg.h>
-+#include <linux/module.h>
-+#include <linux/kthread.h>
-+#include <xen/xenbus.h>
-+#include "common.h"
++	/* Initialise the free-list, which contains all slots at first. */
++	for (i = 0; i < max_grants; ++i) {
++		priv->free_list[max_grants - i - 1] = i;
++		priv->grants[i].state = GNTDEV_SLOT_INVALID;
++		priv->grants[i].u.free_list_index = max_grants - i - 1;
++	}
++	priv->grants_size = max_grants;
++	priv->free_list_size = max_grants;
++	priv->next_fit_index = 0;
 +
++	return 0;
 +
-+struct backend_info
-+{
-+	struct xenbus_device *dev;
-+	blkif_t *blkif;
-+	struct xenbus_watch backend_watch;
-+	int xenbus_id;
-+	int group_added;
-+};
++nomem_out3:
++	kfree(priv->grants);
++nomem_out2:
++	free_empty_pages_and_pagevec(priv->foreign_pages, max_grants);
++nomem_out:
++	return -ENOMEM;
 +
++}
 +
-+static void connect(struct backend_info *);
-+static int connect_ring(struct backend_info *);
-+static int blktap_remove(struct xenbus_device *dev);
-+static int blktap_probe(struct xenbus_device *dev,
-+			 const struct xenbus_device_id *id);
-+static void tap_backend_changed(struct xenbus_watch *, const char **,
-+			    unsigned int);
-+static void tap_frontend_changed(struct xenbus_device *dev,
-+			     enum xenbus_state frontend_state);
++/* Interface functions. */
 +
-+static int strsep_len(const char *str, char c, unsigned int len)
++/* Initialises the driver. Called when the module is loaded. */
++static int __init gntdev_init(void)
 +{
-+        unsigned int i;
++	struct class *class;
++	struct class_device *device;
 +
-+        for (i = 0; str[i]; i++)
-+                if (str[i] == c) {
-+                        if (len == 0)
-+                                return i;
-+                        len--;
-+                }
-+        return (len == 0) ? i : -ERANGE;
-+}
++	if (!is_running_on_xen()) {
++		printk(KERN_ERR "You must be running Xen to use gntdev\n");
++		return -ENODEV;
++	}
 +
-+static long get_id(const char *str)
-+{
-+        int len,end;
-+        const char *ptr;
-+        char *tptr, num[10];
-+	
-+        len = strsep_len(str, '/', 2);
-+        end = strlen(str);
-+        if ( (len < 0) || (end < 0) ) return -1;
-+	
-+        ptr = str + len + 1;
-+        strncpy(num,ptr,end - len);
-+        tptr = num + (end - (len + 1));
-+        *tptr = '\0';
-+	DPRINTK("Get_id called for %s (%s)\n",str,num);
-+	
-+        return simple_strtol(num, NULL, 10);
-+}				
++	gntdev_major = register_chrdev(0, GNTDEV_NAME, &gntdev_fops);
++	if (gntdev_major < 0)
++	{
++		printk(KERN_ERR "Could not register gntdev device\n");
++		return -ENOMEM;
++	}
 +
-+static int blktap_name(blkif_t *blkif, char *buf)
-+{
-+	char *devpath, *devname;
-+	struct xenbus_device *dev = blkif->be->dev;
++	/* Note that if the sysfs code fails, we will still initialise the
++	 * device, and output the major number so that the device can be
++	 * created manually using mknod.
++	 */
++	if ((class = get_xen_class()) == NULL) {
++		printk(KERN_ERR "Error setting up xen_class\n");
++		printk(KERN_ERR "gntdev created with major number = %d\n", 
++		       gntdev_major);
++		return 0;
++	}
 +
-+	devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
-+	if (IS_ERR(devpath)) 
-+		return PTR_ERR(devpath);
-+	
-+	if ((devname = strstr(devpath, "/dev/")) != NULL)
-+		devname += strlen("/dev/");
-+	else
-+		devname  = devpath;
++	device = class_device_create(class, NULL, MKDEV(gntdev_major, 0),
++				     NULL, GNTDEV_NAME);
++	if (IS_ERR(device)) {
++		printk(KERN_ERR "Error creating gntdev device in xen_class\n");
++		printk(KERN_ERR "gntdev created with major number = %d\n",
++		       gntdev_major);
++		return 0;
++	}
 +
-+	snprintf(buf, TASK_COMM_LEN, "blktap.%d.%s", blkif->domid, devname);
-+	kfree(devpath);
-+	
 +	return 0;
 +}
 +
-+/****************************************************************
-+ *  sysfs interface for I/O requests of blktap device
++/* Cleans up and unregisters the driver. Called when the driver is unloaded.
 + */
++static void __exit gntdev_exit(void)
++{
++	struct class *class;
++	if ((class = get_xen_class()) != NULL)
++		class_device_destroy(class, MKDEV(gntdev_major, 0));
++	unregister_chrdev(gntdev_major, GNTDEV_NAME);
++}
 +
-+#define VBD_SHOW(name, format, args...)					\
-+	static ssize_t show_##name(struct device *_dev,			\
-+				   struct device_attribute *attr,	\
-+				   char *buf)				\
-+	{								\
-+		struct xenbus_device *dev = to_xenbus_device(_dev);	\
-+		struct backend_info *be = dev->dev.driver_data;		\
-+									\
-+		return sprintf(buf, format, ##args);			\
-+	}								\
-+	static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
++/* Called when the device is opened. */
++static int gntdev_open(struct inode *inode, struct file *flip)
++{
++	gntdev_file_private_data_t *private_data;
 +
-+VBD_SHOW(oo_req,  "%d\n", be->blkif->st_oo_req);
-+VBD_SHOW(rd_req,  "%d\n", be->blkif->st_rd_req);
-+VBD_SHOW(wr_req,  "%d\n", be->blkif->st_wr_req);
-+VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
-+VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);
++	try_module_get(THIS_MODULE);
 +
-+static struct attribute *tapstat_attrs[] = {
-+	&dev_attr_oo_req.attr,
-+	&dev_attr_rd_req.attr,
-+	&dev_attr_wr_req.attr,
-+	&dev_attr_rd_sect.attr,
-+	&dev_attr_wr_sect.attr,
-+	NULL
-+};
++	/* Allocate space for the per-instance private data. */
++	private_data = kmalloc(sizeof(*private_data), GFP_KERNEL);
++	if (!private_data)
++		goto nomem_out;
 +
-+static struct attribute_group tapstat_group = {
-+	.name = "statistics",
-+	.attrs = tapstat_attrs,
-+};
++	/* These will be lazily initialised by init_private_data. */
++	private_data->grants = NULL;
++	private_data->free_list = NULL;
++	private_data->foreign_pages = NULL;
 +
-+int xentap_sysfs_addif(struct xenbus_device *dev)
-+{
-+	int err;
-+	struct backend_info *be = dev->dev.driver_data;
-+	err = sysfs_create_group(&dev->dev.kobj, &tapstat_group);
-+	if (!err)
-+		be->group_added = 1;
-+	return err;
-+}
++	init_rwsem(&private_data->grants_sem);
++	init_rwsem(&private_data->free_list_sem);
 +
-+void xentap_sysfs_delif(struct xenbus_device *dev)
-+{
-+	struct backend_info *be = dev->dev.driver_data;
-+	sysfs_remove_group(&dev->dev.kobj, &tapstat_group);
-+	be->group_added = 0;
++	flip->private_data = private_data;
++
++	return 0;
++
++nomem_out:
++	return -ENOMEM;
 +}
 +
-+static int blktap_remove(struct xenbus_device *dev)
++/* Called when the device is closed.
++ */
++static int gntdev_release(struct inode *inode, struct file *flip)
 +{
-+	struct backend_info *be = dev->dev.driver_data;
-+
-+	if (be->group_added)
-+		xentap_sysfs_delif(be->dev);
-+	if (be->backend_watch.node) {
-+		unregister_xenbus_watch(&be->backend_watch);
-+		kfree(be->backend_watch.node);
-+		be->backend_watch.node = NULL;
-+	}
-+	if (be->blkif) {
-+		if (be->blkif->xenblkd)
-+			kthread_stop(be->blkif->xenblkd);
-+		signal_tapdisk(be->blkif->dev_num);
-+		tap_blkif_free(be->blkif);
-+		be->blkif = NULL;
++	if (flip->private_data) {
++		gntdev_file_private_data_t *private_data = 
++			(gntdev_file_private_data_t *) flip->private_data;
++		if (private_data->foreign_pages)
++			free_empty_pages_and_pagevec
++				(private_data->foreign_pages,
++				 private_data->grants_size);
++		if (private_data->grants) 
++			kfree(private_data->grants);
++		if (private_data->free_list)
++			kfree(private_data->free_list);
++		kfree(private_data);
 +	}
-+	kfree(be);
-+	dev->dev.driver_data = NULL;
++	module_put(THIS_MODULE);
 +	return 0;
 +}
 +
-+static void tap_update_blkif_status(blkif_t *blkif)
-+{ 
-+	int err;
-+	char name[TASK_COMM_LEN];
-+
-+	/* Not ready to connect? */
-+	if(!blkif->irq || !blkif->sectors) {
-+		return;
-+	} 
++/* Called when an attempt is made to mmap() the device. The private data from
++ * @flip contains the list of grant references that can be mapped. The vm_pgoff
++ * field of @vma contains the index into that list that refers to the grant
++ * reference that will be mapped. Only mappings that are a multiple of
++ * PAGE_SIZE are handled.
++ */
++static int gntdev_mmap (struct file *flip, struct vm_area_struct *vma) 
++{
++	struct gnttab_map_grant_ref op;
++	unsigned long slot_index = vma->vm_pgoff;
++	unsigned long kernel_vaddr, user_vaddr;
++	uint32_t size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++	uint64_t ptep;
++	int ret;
++	int flags;
++	int i;
++	struct page *page;
++	gntdev_file_private_data_t *private_data = flip->private_data;
 +
-+	/* Already connected? */
-+	if (blkif->be->dev->state == XenbusStateConnected)
-+		return;
++	if (unlikely(!private_data)) {
++		printk(KERN_ERR "File's private data is NULL.\n");
++		return -EINVAL;
++	}
 +
-+	/* Attempt to connect: exit if we fail to. */
-+	connect(blkif->be);
-+	if (blkif->be->dev->state != XenbusStateConnected)
-+		return;
++	/* Test to make sure that the grants array has been initialised. */
++	down_read(&private_data->grants_sem);
++	if (unlikely(!private_data->grants)) {
++		up_read(&private_data->grants_sem);
++		printk(KERN_ERR "Attempted to mmap before ioctl.\n");
++		return -EINVAL;
++	}
++	up_read(&private_data->grants_sem);
 +
-+	err = blktap_name(blkif, name);
-+	if (err) {
-+		xenbus_dev_error(blkif->be->dev, err, "get blktap dev name");
-+		return;
++	if (unlikely((size <= 0) || 
++		     (size + slot_index) > private_data->grants_size)) {
++		printk(KERN_ERR "Invalid number of pages or offset"
++		       "(num_pages = %d, first_slot = %ld).\n",
++		       size, slot_index);
++		return -ENXIO;
 +	}
 +
-+	if (!blkif->be->group_added) {
-+		err = xentap_sysfs_addif(blkif->be->dev);
-+		if (err) {
-+			xenbus_dev_fatal(blkif->be->dev, err, 
-+					 "creating sysfs entries");
-+			return;
-+		}
++	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) {
++		printk(KERN_ERR "Writable mappings must be shared.\n");
++		return -EINVAL;
 +	}
 +
-+	blkif->xenblkd = kthread_run(tap_blkif_schedule, blkif, name);
-+	if (IS_ERR(blkif->xenblkd)) {
-+		err = PTR_ERR(blkif->xenblkd);
-+		blkif->xenblkd = NULL;
-+		xenbus_dev_fatal(blkif->be->dev, err, "start xenblkd");
-+		WPRINTK("Error starting thread\n");
++	/* Slots must be in the NOT_YET_MAPPED state. */
++	down_write(&private_data->grants_sem);
++	for (i = 0; i < size; ++i) {
++		if (private_data->grants[slot_index + i].state != 
++		    GNTDEV_SLOT_NOT_YET_MAPPED) {
++			printk(KERN_ERR "Slot (index = %ld) is in the wrong "
++			       "state (%d).\n", slot_index + i, 
++			       private_data->grants[slot_index + i].state);
++			up_write(&private_data->grants_sem);
++			return -EINVAL;
++		}
 +	}
-+}
 +
-+/**
-+ * Entry point to this code when a new device is created.  Allocate
-+ * the basic structures, and watch the store waiting for the
-+ * user-space program to tell us the physical device info.  Switch to
-+ * InitWait.
-+ */
-+static int blktap_probe(struct xenbus_device *dev,
-+			 const struct xenbus_device_id *id)
-+{
-+	int err;
-+	struct backend_info *be = kzalloc(sizeof(struct backend_info),
-+					  GFP_KERNEL);
-+	if (!be) {
-+		xenbus_dev_fatal(dev, -ENOMEM,
-+				 "allocating backend structure");
++	/* Install the hook for unmapping. */
++	vma->vm_ops = &gntdev_vmops;
++    
++	/* The VM area contains pages from another VM. */
++	vma->vm_flags |= VM_FOREIGN;
++	vma->vm_private_data = kzalloc(size * sizeof(struct page *),
++				       GFP_KERNEL);
++	if (vma->vm_private_data == NULL) {
++		printk(KERN_ERR "Couldn't allocate mapping structure for VM "
++		       "area.\n");
 +		return -ENOMEM;
 +	}
 +
-+	be->dev = dev;
-+	dev->dev.driver_data = be;
-+	be->xenbus_id = get_id(dev->nodename);
++	/* This flag prevents Bad PTE errors when the memory is unmapped. */
++	vma->vm_flags |= VM_RESERVED;
 +
-+	be->blkif = tap_alloc_blkif(dev->otherend_id);
-+	if (IS_ERR(be->blkif)) {
-+		err = PTR_ERR(be->blkif);
-+		be->blkif = NULL;
-+		xenbus_dev_fatal(dev, err, "creating block interface");
-+		goto fail;
-+	}
++	/* This flag prevents this VM area being copied on a fork(). A better
++	 * behaviour might be to explicitly carry out the appropriate mappings
++	 * on fork(), but I don't know if there's a hook for this.
++	 */
++	vma->vm_flags |= VM_DONTCOPY;
 +
-+	/* setup back pointer */
-+	be->blkif->be = be;
-+	be->blkif->sectors = 0;
++#ifdef CONFIG_X86
++	/* This flag ensures that the page tables are not unpinned before the
++	 * VM area is unmapped. Therefore Xen still recognises the PTE as
++	 * belonging to an L1 pagetable, and the grant unmap operation will
++	 * succeed, even if the process does not exit cleanly.
++	 */
++	vma->vm_mm->context.has_foreign_mappings = 1;
++#endif
 +
-+	/* set a watch on disk info, waiting for userspace to update details*/
-+	err = xenbus_watch_path2(dev, dev->nodename, "info",
-+				 &be->backend_watch, tap_backend_changed);
-+	if (err)
-+		goto fail;
-+	
-+	err = xenbus_switch_state(dev, XenbusStateInitWait);
-+	if (err)
-+		goto fail;
-+	return 0;
++	for (i = 0; i < size; ++i) {
 +
-+fail:
-+	DPRINTK("blktap probe failed\n");
-+	blktap_remove(dev);
-+	return err;
-+}
++		flags = GNTMAP_host_map;
++		if (!(vma->vm_flags & VM_WRITE))
++			flags |= GNTMAP_readonly;
 +
++		kernel_vaddr = get_kernel_vaddr(private_data, slot_index + i);
++		user_vaddr = get_user_vaddr(vma, i);
++		page = pfn_to_page(__pa(kernel_vaddr) >> PAGE_SHIFT);
 +
-+/**
-+ * Callback received when the user space code has placed the device
-+ * information in xenstore. 
-+ */
-+static void tap_backend_changed(struct xenbus_watch *watch,
-+			    const char **vec, unsigned int len)
-+{
-+	int err;
-+	unsigned long info;
-+	struct backend_info *be
-+		= container_of(watch, struct backend_info, backend_watch);
-+	struct xenbus_device *dev = be->dev;
-+	
-+	/** 
-+	 * Check to see whether userspace code has opened the image 
-+	 * and written sector
-+	 * and disk info to xenstore
-+	 */
-+	err = xenbus_gather(XBT_NIL, dev->nodename, "info", "%lu", &info, 
-+			    NULL);
-+	if (XENBUS_EXIST_ERR(err))
-+		return;
-+	if (err) {
-+		xenbus_dev_error(dev, err, "getting info");
-+		return;
-+	}
++		gnttab_set_map_op(&op, kernel_vaddr, flags,   
++				  private_data->grants[slot_index+i]
++				  .u.valid.ref, 
++				  private_data->grants[slot_index+i]
++				  .u.valid.domid);
 +
-+	DPRINTK("Userspace update on disk info, %lu\n",info);
++		/* Carry out the mapping of the grant reference. */
++		ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, 
++						&op, 1);
++		BUG_ON(ret);
++		if (op.status) {
++			printk(KERN_ERR "Error mapping the grant reference "
++			       "into the kernel (%d). domid = %d; ref = %d\n",
++			       op.status,
++			       private_data->grants[slot_index+i]
++			       .u.valid.domid,
++			       private_data->grants[slot_index+i]
++			       .u.valid.ref);
++			goto undo_map_out;
++		}
 +
-+	err = xenbus_gather(XBT_NIL, dev->nodename, "sectors", "%llu", 
-+			    &be->blkif->sectors, NULL);
++		/* Store a reference to the page that will be mapped into user
++		 * space.
++		 */
++		((struct page **) vma->vm_private_data)[i] = page;
 +
-+	/* Associate tap dev with domid*/
-+	be->blkif->dev_num = dom_to_devid(be->blkif->domid, be->xenbus_id, 
-+					  be->blkif);
-+	DPRINTK("Thread started for domid [%d], connecting disk\n", 
-+		be->blkif->dev_num);
++		/* Mark mapped page as reserved. */
++		SetPageReserved(page);
 +
-+	tap_update_blkif_status(be->blkif);
-+}
++		/* Record the grant handle, for use in the unmap operation. */
++		private_data->grants[slot_index+i].u.valid.kernel_handle = 
++			op.handle;
++		private_data->grants[slot_index+i].u.valid.dev_bus_addr = 
++			op.dev_bus_addr;
++		
++		private_data->grants[slot_index+i].state = GNTDEV_SLOT_MAPPED;
++		private_data->grants[slot_index+i].u.valid.user_handle =
++			GNTDEV_INVALID_HANDLE;
 +
-+/**
-+ * Callback received when the frontend's state changes.
-+ */
-+static void tap_frontend_changed(struct xenbus_device *dev,
-+			     enum xenbus_state frontend_state)
-+{
-+	struct backend_info *be = dev->dev.driver_data;
-+	int err;
++		/* Now perform the mapping to user space. */
++		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
 +
-+	DPRINTK("\n");
++			/* NOT USING SHADOW PAGE TABLES. */
++			/* In this case, we map the grant(s) straight into user
++			 * space.
++			 */
 +
-+	switch (frontend_state) {
-+	case XenbusStateInitialising:
-+		if (dev->state == XenbusStateClosed) {
-+			printk(KERN_INFO "%s: %s: prepare for reconnect\n",
-+			       __FUNCTION__, dev->nodename);
-+			xenbus_switch_state(dev, XenbusStateInitWait);
-+		}
-+		break;
++			/* Get the machine address of the PTE for the user
++			 * page.
++			 */
++			if ((ret = create_lookup_pte_addr(vma->vm_mm, 
++							  vma->vm_start 
++							  + (i << PAGE_SHIFT), 
++							  &ptep)))
++			{
++				printk(KERN_ERR "Error obtaining PTE pointer "
++				       "(%d).\n", ret);
++				goto undo_map_out;
++			}
++			
++			/* Configure the map operation. */
++		
++			/* The reference is to be used by host CPUs. */
++			flags = GNTMAP_host_map;
++			
++			/* Specifies a user space mapping. */
++			flags |= GNTMAP_application_map;
++			
++			/* The map request contains the machine address of the
++			 * PTE to update.
++			 */
++			flags |= GNTMAP_contains_pte;
++			
++			if (!(vma->vm_flags & VM_WRITE))
++				flags |= GNTMAP_readonly;
 +
-+	case XenbusStateInitialised:
-+	case XenbusStateConnected:
-+		/* Ensure we connect even when two watches fire in 
-+		   close succession and we miss the intermediate value 
-+		   of frontend_state. */
-+		if (dev->state == XenbusStateConnected)
-+			break;
++			gnttab_set_map_op(&op, ptep, flags, 
++					  private_data->grants[slot_index+i]
++					  .u.valid.ref, 
++					  private_data->grants[slot_index+i]
++					  .u.valid.domid);
 +
-+		err = connect_ring(be);
-+		if (err)
-+			break;
-+		tap_update_blkif_status(be->blkif);
-+		break;
++			/* Carry out the mapping of the grant reference. */
++			ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
++							&op, 1);
++			BUG_ON(ret);
++			if (op.status) {
++				printk(KERN_ERR "Error mapping the grant "
++				       "reference into user space (%d). domid "
++				       "= %d; ref = %d\n", op.status,
++				       private_data->grants[slot_index+i].u
++				       .valid.domid,
++				       private_data->grants[slot_index+i].u
++				       .valid.ref);
++				goto undo_map_out;
++			}
++			
++			/* Record the grant handle, for use in the unmap 
++			 * operation. 
++			 */
++			private_data->grants[slot_index+i].u.
++				valid.user_handle = op.handle;
 +
-+	case XenbusStateClosing:
-+		if (be->blkif->xenblkd) {
-+			kthread_stop(be->blkif->xenblkd);
-+			be->blkif->xenblkd = NULL;
++			/* Update p2m structure with the new mapping. */
++			set_phys_to_machine(__pa(kernel_vaddr) >> PAGE_SHIFT,
++					    FOREIGN_FRAME(private_data->
++							  grants[slot_index+i]
++							  .u.valid.dev_bus_addr
++							  >> PAGE_SHIFT));
++		} else {
++			/* USING SHADOW PAGE TABLES. */
++			/* In this case, we simply insert the page into the VM
++			 * area. */
++			ret = vm_insert_page(vma, user_vaddr, page);
 +		}
-+		xenbus_switch_state(dev, XenbusStateClosing);
-+		break;
-+
-+	case XenbusStateClosed:
-+		xenbus_switch_state(dev, XenbusStateClosed);
-+		if (xenbus_dev_is_online(dev))
-+			break;
-+		/* fall through if not online */
-+	case XenbusStateUnknown:
-+		device_unregister(&dev->dev);
-+		break;
 +
-+	default:
-+		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
-+				 frontend_state);
-+		break;
 +	}
-+}
 +
++	up_write(&private_data->grants_sem);
++	return 0;
 +
-+/**
-+ * Switch to Connected state.
-+ */
-+static void connect(struct backend_info *be)
-+{
-+	int err;
-+
-+	struct xenbus_device *dev = be->dev;
++undo_map_out:
++	/* If we have a mapping failure, the unmapping will be taken care of
++	 * by do_mmap_pgoff(), which will eventually call gntdev_clear_pte().
++	 * All we need to do here is free vma->vm_private_data.
++	 */
++	kfree(vma->vm_private_data);
 +
-+	err = xenbus_switch_state(dev, XenbusStateConnected);
-+	if (err)
-+		xenbus_dev_fatal(dev, err, "switching to Connected state",
-+				 dev->nodename);
++	/* THIS IS VERY UNPLEASANT: do_mmap_pgoff() will set the vma->vm_file
++	 * to NULL on failure. However, we need this in gntdev_clear_pte() to
++	 * unmap the grants. Therefore, we smuggle a reference to the file's
++	 * private data in the VM area's private data pointer.
++	 */
++	vma->vm_private_data = private_data;
++	
++	up_write(&private_data->grants_sem);
 +
-+	return;
++	return -ENOMEM;
 +}
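To make the offset convention concrete, here is a minimal user-space sketch of driving this mmap() path. The index returned by the map ioctl later in this file is slot_index << PAGE_SHIFT and is passed verbatim as the mmap() offset; the device node name, the header providing the IOCTL_GNTDEV_* definitions, and the error handling are assumptions for illustration, not part of this patch.

/* Hypothetical user-space sketch (assumes the public gntdev header with
 * IOCTL_GNTDEV_MAP_GRANT_REF and its structures is in scope, and that
 * fd is an open descriptor on the device node, e.g. "/dev/gntdev").
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

static void *map_one_grant(int fd, uint32_t domid, uint32_t ref)
{
	struct ioctl_gntdev_map_grant_ref op;
	long page_size = sysconf(_SC_PAGESIZE);

	op.count = 1;
	op.refs[0].domid = domid;	/* granting domain */
	op.refs[0].ref = ref;		/* its grant reference */
	if (ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op) < 0)
		return MAP_FAILED;

	/* op.index is slot_index << PAGE_SHIFT.  A writable mapping must
	 * be MAP_SHARED, since gntdev_mmap() above rejects private ones.
	 */
	return mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, (off_t)op.index);
}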
 +
-+
-+static int connect_ring(struct backend_info *be)
++static pte_t gntdev_clear_pte(struct vm_area_struct *vma, unsigned long addr,
++			      pte_t *ptep, int is_fullmm)
 +{
-+	struct xenbus_device *dev = be->dev;
-+	unsigned long ring_ref;
-+	unsigned int evtchn;
-+	char protocol[64];
-+	int err;
-+
-+	DPRINTK("%s\n", dev->otherend);
-+
-+	err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu", 
-+			    &ring_ref, "event-channel", "%u", &evtchn, NULL);
-+	if (err) {
-+		xenbus_dev_fatal(dev, err,
-+				 "reading %s/ring-ref and event-channel",
-+				 dev->otherend);
-+		return err;
-+	}
++	int slot_index, ret;
++	pte_t copy;
++	struct gnttab_unmap_grant_ref op;
++	gntdev_file_private_data_t *private_data;
 +
-+	be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
-+	err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
-+			    "%63s", protocol, NULL);
-+	if (err)
-+		strcpy(protocol, "unspecified, assuming native");
-+	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
-+		be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
-+	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
-+		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
-+	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
-+		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
-+	else {
-+		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
-+		return -1;
++	/* THIS IS VERY UNPLEASANT: do_mmap_pgoff() will set the vma->vm_file
++	 * to NULL on failure. However, we need this in gntdev_clear_pte() to
++	 * unmap the grants. Therefore, we smuggle a reference to the file's
++	 * private data in the VM area's private data pointer.
++	 */
++	if (vma->vm_file) {
++		private_data = (gntdev_file_private_data_t *)
++			vma->vm_file->private_data;
++	} else if (vma->vm_private_data) {
++		private_data = (gntdev_file_private_data_t *)
++			vma->vm_private_data;
++	} else {
++		private_data = NULL; /* silence gcc uninitialised-use warning */
++		BUG();
 +	}
-+	printk(KERN_INFO
-+	       "blktap: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
-+	       ring_ref, evtchn, be->blkif->blk_protocol, protocol);
 +
-+	/* Map the shared frame, irq etc. */
-+	err = tap_blkif_map(be->blkif, ring_ref, evtchn);
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
-+				 ring_ref, evtchn);
-+		return err;
-+	} 
++	/* Copy the existing value of the PTE for returning. */
++	copy = *ptep;
 +
-+	return 0;
-+}
++	/* Calculate the grant relating to this PTE. */
++	slot_index = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
 +
++	/* Only unmap grants if the slot has been mapped; this function may
++	 * be called from a failing mmap().
++	 */
++	if (private_data->grants[slot_index].state == GNTDEV_SLOT_MAPPED) {
 +
-+/* ** Driver Registration ** */
++		/* First, we clear the user space mapping, if it has been made.
++		 */
++		if (private_data->grants[slot_index].u.valid.user_handle !=
++		    GNTDEV_INVALID_HANDLE && 
++		    !xen_feature(XENFEAT_auto_translated_physmap)) {
++			/* NOT USING SHADOW PAGE TABLES. */
++			gnttab_set_unmap_op(&op, virt_to_machine(ptep), 
++					    GNTMAP_contains_pte,
++					    private_data->grants[slot_index]
++					    .u.valid.user_handle);
++			ret = HYPERVISOR_grant_table_op(
++				GNTTABOP_unmap_grant_ref, &op, 1);
++			BUG_ON(ret);
++			if (op.status)
++				printk("User unmap grant status = %d\n", 
++				       op.status);
++		} else {
++			/* USING SHADOW PAGE TABLES. */
++			pte_clear_full(vma->vm_mm, addr, ptep, is_fullmm);
++		}
 +
++		/* Finally, we unmap the grant from kernel space. */
++		gnttab_set_unmap_op(&op, 
++				    get_kernel_vaddr(private_data, slot_index),
++				    GNTMAP_host_map, 
++				    private_data->grants[slot_index].u.valid
++				    .kernel_handle);
++		ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, 
++						&op, 1);
++		BUG_ON(ret);
++		if (op.status)
++			printk("Kernel unmap grant status = %d\n", op.status);
 +
-+static struct xenbus_device_id blktap_ids[] = {
-+	{ "tap" },
-+	{ "" }
-+};
 +
++		/* Return slot to the not-yet-mapped state, so that it may be
++		 * mapped again, or removed by a subsequent ioctl.
++		 */
++		private_data->grants[slot_index].state = 
++			GNTDEV_SLOT_NOT_YET_MAPPED;
 +
-+static struct xenbus_driver blktap = {
-+	.name = "tap",
-+	.owner = THIS_MODULE,
-+	.ids = blktap_ids,
-+	.probe = blktap_probe,
-+	.remove = blktap_remove,
-+	.otherend_changed = tap_frontend_changed
-+};
++		/* Invalidate the physical to machine mapping for this page. */
++		set_phys_to_machine(__pa(get_kernel_vaddr(private_data, 
++							  slot_index)) 
++				    >> PAGE_SHIFT, INVALID_P2M_ENTRY);
 +
++	} else {
++		pte_clear_full(vma->vm_mm, addr, ptep, is_fullmm);
++	}
 +
-+void tap_blkif_xenbus_init(void)
-+{
-+	xenbus_register_backend(&blktap);
++	return copy;
 +}
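As a concrete check of the slot arithmetic above: for a VMA created with vm_pgoff = 3, clearing the PTE of its second page (addr = vma->vm_start + PAGE_SIZE) gives slot_index = 3 + 1 = 4, i.e. the fifth entry in the grants array.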
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/char/Makefile tmp-linux-2.6-xen.patch/drivers/xen/char/Makefile
---- pristine-linux-2.6.18.2/drivers/xen/char/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/char/Makefile	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,2 @@
 +
-+obj-y	:= mem.o
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/char/mem.c tmp-linux-2.6-xen.patch/drivers/xen/char/mem.c
---- pristine-linux-2.6.18.2/drivers/xen/char/mem.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/char/mem.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,203 @@
-+/*
-+ *  Originally from linux/drivers/char/mem.c
-+ *
-+ *  Copyright (C) 1991, 1992  Linus Torvalds
-+ *
-+ *  Added devfs support. 
-+ *    Jan-11-1998, C. Scott Ananian <cananian at alumni.princeton.edu>
-+ *  Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj at sgi.com>
++/* "Destructor" for a VM area.
 + */
-+
-+#include <linux/mm.h>
-+#include <linux/miscdevice.h>
-+#include <linux/slab.h>
-+#include <linux/vmalloc.h>
-+#include <linux/mman.h>
-+#include <linux/random.h>
-+#include <linux/init.h>
-+#include <linux/raw.h>
-+#include <linux/tty.h>
-+#include <linux/capability.h>
-+#include <linux/smp_lock.h>
-+#include <linux/ptrace.h>
-+#include <linux/device.h>
-+#include <asm/pgalloc.h>
-+#include <asm/uaccess.h>
-+#include <asm/io.h>
-+#include <asm/hypervisor.h>
-+
-+#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
-+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
-+{
-+	return 1;
++static void gntdev_vma_close(struct vm_area_struct *vma) {
++	if (vma->vm_private_data) {
++		kfree(vma->vm_private_data);
++	}
 +}
-+#endif
 +
-+/*
-+ * This function reads the *physical* memory. The f_pos points directly to the 
-+ * memory location. 
++/* Called when an ioctl is made on the device.
 + */
-+static ssize_t read_mem(struct file * file, char __user * buf,
-+			size_t count, loff_t *ppos)
++static long gntdev_ioctl(struct file *flip,
++			 unsigned int cmd, unsigned long arg)
 +{
-+	unsigned long p = *ppos, ignored;
-+	ssize_t read = 0, sz;
-+	void __iomem *v;
-+
-+	if (!valid_phys_addr_range(p, count))
-+		return -EFAULT;
++	int rc = 0;
++	gntdev_file_private_data_t *private_data = 
++		(gntdev_file_private_data_t *) flip->private_data;
 +
-+	while (count > 0) {
-+		/*
-+		 * Handle first page in case it's not aligned
++	/* On the first invocation, we will lazily initialise the grant array
++	 * and free-list.
++	 */
++	if (unlikely(!private_data->grants) 
++	    && likely(cmd != IOCTL_GNTDEV_SET_MAX_GRANTS)) {
++		down_write(&private_data->grants_sem);
++		
++		if (unlikely(private_data->grants)) {
++			up_write(&private_data->grants_sem);
++			goto private_data_initialised;
++		}
++		
++		/* Just use the default. Setting to a non-default is handled
++		 * in the ioctl switch.
 +		 */
-+		if (-p & (PAGE_SIZE - 1))
-+			sz = -p & (PAGE_SIZE - 1);
-+		else
-+			sz = PAGE_SIZE;
-+
-+		sz = min_t(unsigned long, sz, count);
++		rc = init_private_data(private_data, DEFAULT_MAX_GRANTS);
++		
++		up_write(&private_data->grants_sem);
 +
-+		v = xlate_dev_mem_ptr(p, sz);
-+		if (IS_ERR(v) || v == NULL) {
-+			/*
-+			 * Some programs (e.g., dmidecode) groove off into
-+			 * weird RAM areas where no tables can possibly exist
-+			 * (because Xen will have stomped on them!). These
-+			 * programs get rather upset if we let them know that
-+			 * Xen failed their access, so we fake out a read of
-+			 * all zeroes.
-+			 */
-+			if (clear_user(buf, count))
-+				return -EFAULT;
-+			read += count;
-+			break;
++		if (rc) {
++			printk (KERN_ERR "Initialising gntdev private data "
++				"failed.\n");
++			return rc;
 +		}
-+
-+		ignored = copy_to_user(buf, v, sz);
-+		xlate_dev_mem_ptr_unmap(v);
-+		if (ignored)
-+			return -EFAULT;
-+		buf += sz;
-+		p += sz;
-+		count -= sz;
-+		read += sz;
 +	}
++	    
++private_data_initialised:
++	switch (cmd) {
++	case IOCTL_GNTDEV_MAP_GRANT_REF:
++	{
++		struct ioctl_gntdev_map_grant_ref op;
++		down_write(&private_data->grants_sem);
++		down_write(&private_data->free_list_sem);
 +
-+	*ppos += read;
-+	return read;
-+}
-+
-+static ssize_t write_mem(struct file * file, const char __user * buf, 
-+			 size_t count, loff_t *ppos)
-+{
-+	unsigned long p = *ppos, ignored;
-+	ssize_t written = 0, sz;
-+	void __iomem *v;
-+
-+	if (!valid_phys_addr_range(p, count))
-+		return -EFAULT;
-+
-+	while (count > 0) {
-+		/*
-+		 * Handle first page in case it's not aligned
-+		 */
-+		if (-p & (PAGE_SIZE - 1))
-+			sz = -p & (PAGE_SIZE - 1);
-+		else
-+			sz = PAGE_SIZE;
-+
-+		sz = min_t(unsigned long, sz, count);
-+
-+		v = xlate_dev_mem_ptr(p, sz);
-+		if (v == NULL)
-+			break;
-+		if (IS_ERR(v)) {
-+			if (written == 0)
-+				return PTR_ERR(v);
-+			break;
++		if ((rc = copy_from_user(&op, (void __user *) arg, 
++					 sizeof(op)))) {
++			rc = -EFAULT;
++			goto map_out;
++		}
++		if (unlikely(op.count <= 0)) {
++			rc = -EINVAL;
++			goto map_out;
 +		}
 +
-+		ignored = copy_from_user(v, buf, sz);
-+		xlate_dev_mem_ptr_unmap(v);
-+		if (ignored) {
-+			written += sz - ignored;
-+			if (written)
-+				break;
-+			return -EFAULT;
++		if (op.count == 1) {
++			if ((rc = add_grant_reference(flip, &op.refs[0],
++						      &op.index)) < 0) {
++				printk(KERN_ERR "Adding grant reference "
++				       "failed (%d).\n", rc);
++				goto map_out;
++			}
++		} else {
++			struct ioctl_gntdev_grant_ref *refs, *u;
++			refs = kmalloc(op.count * sizeof(*refs), GFP_KERNEL);
++			if (!refs) {
++				rc = -ENOMEM;
++				goto map_out;
++			}
++			u = ((struct ioctl_gntdev_map_grant_ref *)arg)->refs;
++			if ((rc = copy_from_user(refs,
++						 (void __user *)u,
++						 sizeof(*refs) * op.count))) {
++				printk(KERN_ERR "Copying refs from user failed"
++				       " (%d).\n", rc);
++				rc = -EINVAL;
++				goto map_out;
++			}
++			if ((rc = find_contiguous_free_range(flip, op.count))
++			    < 0) {
++				printk(KERN_ERR "Finding contiguous range "
++				       "failed (%d).\n", rc);
++				kfree(refs);
++				goto map_out;
++			}
++			op.index = rc << PAGE_SHIFT;
++			if ((rc = add_grant_references(flip, op.count,
++						       refs, rc))) {
++				printk(KERN_ERR "Adding grant references "
++				       "failed (%d).\n", rc);
++				kfree(refs);
++				goto map_out;
++			}
++			compress_free_list(flip);
++			kfree(refs);
 +		}
-+		buf += sz;
-+		p += sz;
-+		count -= sz;
-+		written += sz;
++		if ((rc = copy_to_user((void __user *) arg, 
++				       &op, 
++				       sizeof(op)))) {
++			printk(KERN_ERR "Copying result back to user failed "
++			       "(%d)\n", rc);
++			rc = -EFAULT;
++			goto map_out;
++		}
++	map_out:
++		up_write(&private_data->grants_sem);
++		up_write(&private_data->free_list_sem);
++		return rc;
 +	}
++	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
++	{
++		struct ioctl_gntdev_unmap_grant_ref op;
++		int i, start_index;
 +
-+	*ppos += written;
-+	return written;
-+}
++		down_write(&private_data->grants_sem);
++		down_write(&private_data->free_list_sem);
 +
-+#ifndef ARCH_HAS_DEV_MEM_MMAP_MEM
-+static inline int uncached_access(struct file *file)
-+{
-+	if (file->f_flags & O_SYNC)
-+		return 1;
-+	/* Xen sets correct MTRR type on non-RAM for us. */
-+	return 0;
-+}
++		if ((rc = copy_from_user(&op, 
++					 (void __user *) arg, 
++					 sizeof(op)))) {
++			rc = -EFAULT;
++			goto unmap_out;
++		}
 +
-+static int xen_mmap_mem(struct file * file, struct vm_area_struct * vma)
-+{
-+	size_t size = vma->vm_end - vma->vm_start;
++		start_index = op.index >> PAGE_SHIFT;
 +
-+	if (uncached_access(file))
-+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++		/* First, check that all pages are in the NOT_YET_MAPPED
++		 * state.
++		 */
++		for (i = 0; i < op.count; ++i) {
++			if (unlikely
++			    (private_data->grants[start_index + i].state
++			     != GNTDEV_SLOT_NOT_YET_MAPPED)) {
++				if (private_data->grants[start_index + i].state
++				    == GNTDEV_SLOT_INVALID) {
++					printk(KERN_ERR
++					       "Tried to remove an invalid "
++					       "grant at offset 0x%x.",
++					       (start_index + i) 
++					       << PAGE_SHIFT);
++					rc = -EINVAL;
++				} else {
++					printk(KERN_ERR
++					       "Tried to remove a grant which "
++					       "is currently mmap()-ed at "
++					       "offset 0x%x.",
++					       (start_index + i) 
++					       << PAGE_SHIFT);
++					rc = -EBUSY;
++				}
++				goto unmap_out;
++			}
++		}
++
++		/* Unmap pages and add them to the free list.
++		 */
++		for (i = 0; i < op.count; ++i) {
++			private_data->grants[start_index+i].state = 
++				GNTDEV_SLOT_INVALID;
++			private_data->grants[start_index+i].u.free_list_index =
++				private_data->free_list_size;
++			private_data->free_list[private_data->free_list_size] =
++				start_index + i;
++			++private_data->free_list_size;
++		}
 +
-+	/* We want to return the real error code, not EAGAIN. */
-+	return direct_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-+				      size, vma->vm_page_prot, DOMID_IO);
-+}
-+#endif
++	unmap_out:
++		up_write(&private_data->grants_sem);
++		up_write(&private_data->free_list_sem);
++		return rc;
++	}
++	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
++	{
++		struct ioctl_gntdev_get_offset_for_vaddr op;
++		struct vm_area_struct *vma;
++		unsigned long vaddr;
 +
-+/*
-+ * The memory devices use the full 32/64 bits of the offset, and so we cannot
-+ * check against negative addresses: they are ok. The return value is weird,
-+ * though, in that case (0).
-+ *
-+ * also note that seeking relative to the "end of file" isn't supported:
-+ * it has no meaning, so it returns -EINVAL.
-+ */
-+static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
-+{
-+	loff_t ret;
++		if ((rc = copy_from_user(&op, 
++					 (void __user *) arg, 
++					 sizeof(op)))) {
++			rc = -EFAULT;
++			goto get_offset_out;
++		}
++		vaddr = (unsigned long)op.vaddr;
 +
-+	mutex_lock(&file->f_dentry->d_inode->i_mutex);
-+	switch (orig) {
-+		case 0:
-+			file->f_pos = offset;
-+			ret = file->f_pos;
-+			force_successful_syscall_return();
-+			break;
-+		case 1:
-+			file->f_pos += offset;
-+			ret = file->f_pos;
-+			force_successful_syscall_return();
-+			break;
-+		default:
-+			ret = -EINVAL;
++		down_read(&current->mm->mmap_sem);		
++		vma = find_vma(current->mm, vaddr);
++		if (vma == NULL) {
++			rc = -EFAULT;
++			goto get_offset_unlock_out;
++		}
++		if ((!vma->vm_ops) || (vma->vm_ops != &gntdev_vmops)) {
++			printk(KERN_ERR "The vaddr specified does not belong "
++			       "to a gntdev instance: %#lx\n", vaddr);
++			rc = -EFAULT;
++			goto get_offset_unlock_out;
++		}
++		if (vma->vm_start != vaddr) {
++			printk(KERN_ERR "The vaddr specified in an "
++			       "IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR must be at "
++			       "the start of the VM area. vma->vm_start = "
++			       "%#lx; vaddr = %#lx\n",
++			       vma->vm_start, vaddr);
++			rc = -EFAULT;
++			goto get_offset_unlock_out;
++		}
++		op.offset = vma->vm_pgoff << PAGE_SHIFT;
++		op.count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++		up_read(&current->mm->mmap_sem);
++		if ((rc = copy_to_user((void __user *) arg, 
++				       &op, 
++				       sizeof(op)))) {
++			rc = -EFAULT;
++			goto get_offset_out;
++		}
++		goto get_offset_out;
++	get_offset_unlock_out:
++		up_read(&current->mm->mmap_sem);
++	get_offset_out:
++		return rc;
++	}
++	case IOCTL_GNTDEV_SET_MAX_GRANTS:
++	{
++		struct ioctl_gntdev_set_max_grants op;
++		if ((rc = copy_from_user(&op, 
++					 (void __user *) arg, 
++					 sizeof(op)))) {
++			rc = -EFAULT;
++			goto set_max_out;
++		}
++		down_write(&private_data->grants_sem);
++		if (private_data->grants) {
++			rc = -EBUSY;
++			goto set_max_unlock_out;
++		}
++		if (op.count > MAX_GRANTS_LIMIT) {
++			rc = -EINVAL;
++			goto set_max_unlock_out;
++		}						 
++		rc = init_private_data(private_data, op.count);
++	set_max_unlock_out:
++		up_write(&private_data->grants_sem);
++	set_max_out:
++		return rc;
++	}
++	default:
++		return -ENOIOCTLCMD;
 +	}
-+	mutex_unlock(&file->f_dentry->d_inode->i_mutex);
-+	return ret;
-+}
 +
-+static int open_mem(struct inode * inode, struct file * filp)
-+{
-+	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
++	return 0;
 +}
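Continuing the hypothetical user-space sketch from earlier, teardown runs in the reverse order: munmap() first, which drives gntdev_clear_pte() and returns the slots to GNTDEV_SLOT_NOT_YET_MAPPED, then the unmap ioctl; removing a slot that is still mmap()-ed fails with -EBUSY, as the handler above shows. If the original index has been lost, IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR can recover it from the start address of the VM area.

/* Hypothetical continuation of the earlier user-space sketch. */
static int unmap_one_grant(int fd, void *addr, uint64_t index)
{
	struct ioctl_gntdev_unmap_grant_ref op;
	long page_size = sysconf(_SC_PAGESIZE);

	if (munmap(addr, page_size) < 0)  /* slot -> NOT_YET_MAPPED */
		return -1;

	op.index = index;	/* the index the map ioctl returned */
	op.count = 1;
	return ioctl(fd, IOCTL_GNTDEV_UNMAP_GRANT_REF, &op);
}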
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/netback/Makefile
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/netback/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,5 @@
++obj-$(CONFIG_XEN_NETDEV_BACKEND) := netbk.o
++obj-$(CONFIG_XEN_NETDEV_LOOPBACK) += netloop.o
 +
-+const struct file_operations mem_fops = {
-+	.llseek		= memory_lseek,
-+	.read		= read_mem,
-+	.write		= write_mem,
-+	.mmap		= xen_mmap_mem,
-+	.open		= open_mem,
-+};
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/console/console.c tmp-linux-2.6-xen.patch/drivers/xen/console/console.c
---- pristine-linux-2.6.18.2/drivers/xen/console/console.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/console/console.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,721 @@
++netbk-y   := netback.o xenbus.o interface.o accel.o
++netloop-y := loopback.o
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/netback/accel.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/netback/accel.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,269 @@
 +/******************************************************************************
-+ * console.c
-+ * 
-+ * Virtual console driver.
++ * drivers/xen/netback/accel.c
++ *
++ * Interface between backend virtual network device and accelerated plugin. 
 + * 
-+ * Copyright (c) 2002-2004, K A Fraser.
++ * Copyright (C) 2007 Solarflare Communications, Inc
 + * 
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License version 2
@@ -56979,706 +93018,830 @@
 + * IN THE SOFTWARE.
 + */
 +
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/errno.h>
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/interrupt.h>
-+#include <linux/tty.h>
-+#include <linux/tty_flip.h>
-+#include <linux/serial.h>
-+#include <linux/major.h>
-+#include <linux/ptrace.h>
-+#include <linux/ioport.h>
-+#include <linux/mm.h>
-+#include <linux/slab.h>
-+#include <linux/init.h>
-+#include <linux/console.h>
-+#include <linux/bootmem.h>
-+#include <linux/sysrq.h>
-+#include <linux/screen_info.h>
-+#include <linux/vt.h>
-+#include <asm/io.h>
-+#include <asm/irq.h>
-+#include <asm/uaccess.h>
-+#include <xen/interface/xen.h>
-+#include <xen/interface/event_channel.h>
-+#include <asm/hypervisor.h>
-+#include <xen/evtchn.h>
++#include <linux/list.h>
++#include <asm/atomic.h>
 +#include <xen/xenbus.h>
-+#include <xen/xencons.h>
-+
-+/*
-+ * Modes:
-+ *  'xencons=off'  [XC_OFF]:     Console is disabled.
-+ *  'xencons=tty'  [XC_TTY]:     Console attached to '/dev/tty[0-9]+'.
-+ *  'xencons=ttyS' [XC_SERIAL]:  Console attached to '/dev/ttyS[0-9]+'.
-+ *  'xencons=xvc'  [XC_XVC]:     Console attached to '/dev/xvc0'.
-+ *  default:                     DOM0 -> XC_SERIAL ; all others -> XC_TTY.
-+ * 
-+ * NB. In mode XC_TTY, we create dummy consoles for tty2-63. This suppresses
-+ * warnings from standard distro startup scripts.
-+ */
-+static enum {
-+	XC_OFF, XC_TTY, XC_SERIAL, XC_XVC
-+} xc_mode;
-+static int xc_num = -1;
++#include <linux/mutex.h>
 +
-+/* /dev/xvc0 device number allocated by lanana.org. */
-+#define XEN_XVC_MAJOR 204
-+#define XEN_XVC_MINOR 191
++#include "common.h"
 +
-+#ifdef CONFIG_MAGIC_SYSRQ
-+static unsigned long sysrq_requested;
-+extern int sysrq_enabled;
++#if 0
++#undef DPRINTK
++#define DPRINTK(fmt, args...)						\
++	printk("netback/accel (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
 +#endif
 +
-+void xencons_early_setup(void)
-+{
-+	extern int console_use_vt;
++/* 
++ * A list of available netback accelerator plugin modules (each list
++ * entry is of type struct netback_accelerator) 
++ */ 
++static struct list_head accelerators_list;
++/* Lock used to protect access to accelerators_list */
++DEFINE_MUTEX(accelerators_mutex);
 +
-+	if (is_initial_xendomain()) {
-+		xc_mode = XC_SERIAL;
++/* 
++ * Compare a backend to an accelerator, and decide if they are
++ * compatible (i.e. if the accelerator should be used by the
++ * backend) 
++ */
++static int match_accelerator(struct xenbus_device *xendev,
++			     struct backend_info *be, 
++			     struct netback_accelerator *accelerator)
++{
++	int rc = 0;
++	char *eth_name = xenbus_read(XBT_NIL, xendev->nodename, "accel", NULL);
++	
++	if (IS_ERR(eth_name)) {
++		/* Probably means not present */
++		DPRINTK("%s: no match due to xenbus_read accel error %d\n", 
++			__FUNCTION__, PTR_ERR(eth_name));
++		return 0;
 +	} else {
-+		xc_mode = XC_TTY;
-+		console_use_vt = 0;
++		if (!strcmp(eth_name, accelerator->eth_name))
++			rc = 1;
++		kfree(eth_name);
++		return rc;
 +	}
 +}
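The key consulted here is a per-vif "accel" entry in the backend's own xenstore directory (xendev->nodename): if the toolstack has written, for example, accel = "eth2" there, the backend pairs with whichever plugin registered the name eth2. The value eth2 is illustrative, not mandated by the patch.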
 +
-+static int __init xencons_setup(char *str)
-+{
-+	char *q;
-+	int n;
-+	extern int console_use_vt;
 +
-+	console_use_vt = 1;
-+	if (!strncmp(str, "ttyS", 4)) {
-+		xc_mode = XC_SERIAL;
-+		str += 4;
-+	} else if (!strncmp(str, "tty", 3)) {
-+		xc_mode = XC_TTY;
-+		str += 3;
-+		console_use_vt = 0;
-+	} else if (!strncmp(str, "xvc", 3)) {
-+		xc_mode = XC_XVC;
-+		str += 3;
-+	} else if (!strncmp(str, "off", 3)) {
-+		xc_mode = XC_OFF;
-+		str += 3;
++static void do_probe(struct backend_info *be, 
++		     struct netback_accelerator *accelerator,
++		     struct xenbus_device *xendev) 
++{
++	be->accelerator = accelerator;
++	atomic_inc(&be->accelerator->use_count);
++	if (be->accelerator->hooks->probe(xendev) != 0) {
++		atomic_dec(&be->accelerator->use_count);
++		module_put(be->accelerator->hooks->owner);
++		be->accelerator = NULL;
 +	}
-+
-+	n = simple_strtol(str, &q, 10);
-+	if (q != str)
-+		xc_num = n;
-+
-+	return 1;
 +}
-+__setup("xencons=", xencons_setup);
 +
-+/* The kernel and user-land drivers share a common transmit buffer. */
-+static unsigned int wbuf_size = 4096;
-+#define WBUF_MASK(_i) ((_i)&(wbuf_size-1))
-+static char *wbuf;
-+static unsigned int wc, wp; /* write_cons, write_prod */
 +
-+static int __init xencons_bufsz_setup(char *str)
++/*
++ * Notify suitable backends that a new accelerator is available and
++ * connected.  This will also notify the accelerator plugin module
++ * that it is being used for a device through the probe hook.
++ */
++static int netback_accelerator_probe_backend(struct device *dev, void *arg)
 +{
-+	unsigned int goal;
-+	goal = simple_strtoul(str, NULL, 0);
-+	if (goal) {
-+		goal = roundup_pow_of_two(goal);
-+		if (wbuf_size < goal)
-+			wbuf_size = goal;
++	struct netback_accelerator *accelerator = 
++		(struct netback_accelerator *)arg;
++	struct xenbus_device *xendev = to_xenbus_device(dev);
++
++	if (!strcmp("vif", xendev->devicetype)) {
++		struct backend_info *be = xendev->dev.driver_data;
++
++		if (match_accelerator(xendev, be, accelerator) &&
++		    try_module_get(accelerator->hooks->owner)) {
++			do_probe(be, accelerator, xendev);
++		}
 +	}
-+	return 1;
++	return 0;
 +}
-+__setup("xencons_bufsz=", xencons_bufsz_setup);
 +
-+/* This lock protects accesses to the common transmit buffer. */
-+static DEFINE_SPINLOCK(xencons_lock);
 +
-+/* Common transmit-kick routine. */
-+static void __xencons_tx_flush(void);
++/*
++ * Notify suitable backends that an accelerator is unavailable.
++ */
++static int netback_accelerator_remove_backend(struct device *dev, void *arg)
++{
++	struct xenbus_device *xendev = to_xenbus_device(dev);
++	struct netback_accelerator *accelerator = 
++		(struct netback_accelerator *)arg;
++	
++	if (!strcmp("vif", xendev->devicetype)) {
++		struct backend_info *be = xendev->dev.driver_data;
 +
-+static struct tty_driver *xencons_driver;
++		if (be->accelerator == accelerator) {
++			be->accelerator->hooks->remove(xendev);
++			atomic_dec(&be->accelerator->use_count);
++			module_put(be->accelerator->hooks->owner);
++			be->accelerator = NULL;
++		}
++	}
++	return 0;
++}
 +
-+/******************** Kernel console driver ********************************/
 +
-+static void kcons_write(struct console *c, const char *s, unsigned int count)
-+{
-+	int           i = 0;
-+	unsigned long flags;
 +
-+	spin_lock_irqsave(&xencons_lock, flags);
++/*
++ * Entry point for a netback accelerator plugin module.  Called to
++ * advertise its presence, and connect to any suitable backends.
++ */
++int netback_connect_accelerator(unsigned version, int id, const char *eth_name, 
++				struct netback_accel_hooks *hooks)
++{
++	struct netback_accelerator *new_accelerator;
++	unsigned eth_name_len;
 +
-+	while (i < count) {
-+		for (; i < count; i++) {
-+			if ((wp - wc) >= (wbuf_size - 1))
-+				break;
-+			if ((wbuf[WBUF_MASK(wp++)] = s[i]) == '\n')
-+				wbuf[WBUF_MASK(wp++)] = '\r';
++	if (version != NETBACK_ACCEL_VERSION) {
++		if (version > NETBACK_ACCEL_VERSION) {
++			/* Caller has a higher version number; leave it
++			   up to them to decide whether to continue.
++			   They can call again with a lower number if
++			   they're happy to be compatible with us. */
++			return NETBACK_ACCEL_VERSION;
++		} else {
++			/* We have a more recent version than the caller.
++			   Reject for now; future versions may be able to
++			   remain backward compatible. */
++			return -EPROTO;
 +		}
++	}
 +
-+		__xencons_tx_flush();
++	new_accelerator = 
++		kmalloc(sizeof(struct netback_accelerator), GFP_KERNEL);
++	if (!new_accelerator) {
++		DPRINTK("%s: failed to allocate memory for accelerator\n",
++			__FUNCTION__);
++		return -ENOMEM;
 +	}
 +
-+	spin_unlock_irqrestore(&xencons_lock, flags);
++	new_accelerator->id = id;
++	
++	eth_name_len = strlen(eth_name)+1;
++	new_accelerator->eth_name = kmalloc(eth_name_len, GFP_KERNEL);
++	if (!new_accelerator->eth_name) {
++		DPRINTK("%s: failed to allocate memory for eth_name string\n",
++			__FUNCTION__);
++		kfree(new_accelerator);
++		return -ENOMEM;
++	}
++	strlcpy(new_accelerator->eth_name, eth_name, eth_name_len);
++	
++	new_accelerator->hooks = hooks;
++
++	atomic_set(&new_accelerator->use_count, 0);
++	
++	mutex_lock(&accelerators_mutex);
++	list_add(&new_accelerator->link, &accelerators_list);
++	
++	/* tell existing backends about new plugin */
++	xenbus_for_each_backend(new_accelerator, 
++				netback_accelerator_probe_backend);
++
++	mutex_unlock(&accelerators_mutex);
++
++	return 0;
++
 +}
++EXPORT_SYMBOL_GPL(netback_connect_accelerator);
 +
-+static void kcons_write_dom0(struct console *c, const char *s, unsigned int count)
++
++/* 
++ * Disconnect an accelerator plugin module that has previously been
++ * connected.
++ */
++void netback_disconnect_accelerator(int id, const char *eth_name)
 +{
++	struct netback_accelerator *accelerator, *next;
 +
-+	while (count > 0) {
-+		int rc;
-+		rc = HYPERVISOR_console_io( CONSOLEIO_write, count, (char *)s);
-+		if (rc <= 0)
++	mutex_lock(&accelerators_mutex);
++	list_for_each_entry_safe(accelerator, next, &accelerators_list, link) {
++		if (!strcmp(eth_name, accelerator->eth_name)) {
++			xenbus_for_each_backend
++				(accelerator, netback_accelerator_remove_backend);
++			BUG_ON(atomic_read(&accelerator->use_count) != 0);
++			list_del(&accelerator->link);				
++			kfree(accelerator->eth_name);
++			kfree(accelerator);
 +			break;
-+		count -= rc;
-+		s += rc;
++		}
 +	}
++	mutex_unlock(&accelerators_mutex);
 +}
++EXPORT_SYMBOL_GPL(netback_disconnect_accelerator);
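A skeletal plugin module illustrates how the two entry points exported above fit together. The hooks structure and NETBACK_ACCEL_VERSION come from common.h later in this patch; the id, interface name, empty hook bodies, and module boilerplate below are hypothetical placeholders.

/* Hypothetical accelerator plugin skeleton; not part of this patch.
 * Assumes the declarations from netback's common.h are visible to the
 * plugin, and omits MODULE_LICENSE and similar boilerplate. */
#include <linux/module.h>

static int my_accel_probe(struct xenbus_device *dev)
{
	return 0;	/* set up per-device acceleration state here */
}

static int my_accel_remove(struct xenbus_device *dev)
{
	return 0;	/* and tear it down again here */
}

static struct netback_accel_hooks my_hooks = {
	.owner	= THIS_MODULE,
	.probe	= my_accel_probe,
	.remove	= my_accel_remove,
};

static int __init my_accel_init(void)
{
	int rc = netback_connect_accelerator(NETBACK_ACCEL_VERSION, 0,
					     "eth2", &my_hooks);
	/* A return > 0 is the highest version netback supports; a plugin
	 * willing to speak that version could retry with it. */
	return (rc > 0) ? -EPROTO : rc;
}

static void __exit my_accel_exit(void)
{
	netback_disconnect_accelerator(0, "eth2");
}

module_init(my_accel_init);
module_exit(my_accel_exit);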
 +
-+static struct tty_driver *kcons_device(struct console *c, int *index)
++
++void netback_probe_accelerators(struct backend_info *be,
++				struct xenbus_device *dev)
 +{
-+	*index = 0;
-+	return xencons_driver;
++	struct netback_accelerator *accelerator;
++
++	/* 
++	 * Check list of accelerators to see if any is suitable, and
++	 * use it if it is.
++	 */
++	mutex_lock(&accelerators_mutex);
++	list_for_each_entry(accelerator, &accelerators_list, link) { 
++		if (match_accelerator(dev, be, accelerator) &&
++		    try_module_get(accelerator->hooks->owner)) {
++			do_probe(be, accelerator, dev);
++			break;
++		}
++	}
++	mutex_unlock(&accelerators_mutex);
 +}
 +
-+static struct console kcons_info = {
-+	.device	= kcons_device,
-+	.flags	= CON_PRINTBUFFER | CON_ENABLED,
-+	.index	= -1,
-+};
 +
-+static int __init xen_console_init(void)
++void netback_remove_accelerators(struct backend_info *be,
++				 struct xenbus_device *dev)
 +{
-+	if (!is_running_on_xen())
-+		goto out;
-+
-+	if (is_initial_xendomain()) {
-+		kcons_info.write = kcons_write_dom0;
-+	} else {
-+		if (!xen_start_info->console.domU.evtchn)
-+			goto out;
-+		kcons_info.write = kcons_write;
++	mutex_lock(&accelerators_mutex);
++	/* Notify the accelerator (if any) of this device's removal */
++	if (be->accelerator != NULL) {
++		be->accelerator->hooks->remove(dev);
++		atomic_dec(&be->accelerator->use_count);
++		module_put(be->accelerator->hooks->owner);
++		be->accelerator = NULL;
 +	}
++	mutex_unlock(&accelerators_mutex);
++}
 +
-+	switch (xc_mode) {
-+	case XC_XVC:
-+		strcpy(kcons_info.name, "xvc");
-+		if (xc_num == -1)
-+			xc_num = 0;
-+		break;
 +
-+	case XC_SERIAL:
-+		strcpy(kcons_info.name, "ttyS");
-+		if (xc_num == -1)
-+			xc_num = 0;
-+		break;
++void netif_accel_init(void)
++{
++	INIT_LIST_HEAD(&accelerators_list);
++}
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/netback/common.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/netback/common.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,217 @@
++/******************************************************************************
++ * arch/xen/drivers/netif/backend/common.h
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
 +
-+	case XC_TTY:
-+		strcpy(kcons_info.name, "tty");
-+		if (xc_num == -1)
-+			xc_num = 1;
-+		break;
++#ifndef __NETIF__BACKEND__COMMON_H__
++#define __NETIF__BACKEND__COMMON_H__
 +
-+	default:
-+		goto out;
-+	}
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/ip.h>
++#include <linux/in.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/wait.h>
++#include <xen/evtchn.h>
++#include <xen/interface/io/netif.h>
++#include <asm/io.h>
++#include <asm/pgalloc.h>
++#include <xen/interface/grant_table.h>
++#include <xen/gnttab.h>
++#include <xen/driver_util.h>
++#include <xen/xenbus.h>
 +
-+	wbuf = alloc_bootmem(wbuf_size);
++#define DPRINTK(_f, _a...)			\
++	pr_debug("(file=%s, line=%d) " _f,	\
++		 __FILE__ , __LINE__ , ## _a )
++#define IPRINTK(fmt, args...)				\
++	printk(KERN_INFO "xen_net: " fmt, ##args)
++#define WPRINTK(fmt, args...)				\
++	printk(KERN_WARNING "xen_net: " fmt, ##args)
 +
-+	register_console(&kcons_info);
++typedef struct netif_st {
++	/* Unique identifier for this interface. */
++	domid_t          domid;
++	unsigned int     handle;
 +
-+ out:
-+	return 0;
-+}
-+console_initcall(xen_console_init);
++	u8               fe_dev_addr[6];
 +
-+/*** Useful function for console debugging -- goes straight to Xen. ***/
-+asmlinkage int xprintk(const char *fmt, ...)
-+{
-+	va_list args;
-+	int printk_len;
-+	static char printk_buf[1024];
++	/* Physical parameters of the comms window. */
++	grant_handle_t   tx_shmem_handle;
++	grant_ref_t      tx_shmem_ref;
++	grant_handle_t   rx_shmem_handle;
++	grant_ref_t      rx_shmem_ref;
++	unsigned int     irq;
 +
-+	/* Emit the output into the temporary buffer */
-+	va_start(args, fmt);
-+	printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
-+	va_end(args);
++	/* The shared rings and indexes. */
++	netif_tx_back_ring_t tx;
++	netif_rx_back_ring_t rx;
++	struct vm_struct *tx_comms_area;
++	struct vm_struct *rx_comms_area;
 +
-+	/* Send the processed output directly to Xen. */
-+	kcons_write_dom0(NULL, printk_buf, printk_len);
++	/* Set of features that can be turned on in dev->features. */
++	int features;
 +
-+	return 0;
-+}
++	/* Internal feature information. */
++	u8 can_queue:1;	/* can queue packets for receiver? */
++	u8 copying_receiver:1;	/* copy packets to receiver?       */
 +
-+/*** Forcibly flush console data before dying. ***/
-+void xencons_force_flush(void)
-+{
-+	int sz;
++	/* Allow netif_be_start_xmit() to peek ahead in the rx request ring. */
++	RING_IDX rx_req_cons_peek;
 +
-+	/* Emergency console is synchronous, so there's nothing to flush. */
-+	if (!is_running_on_xen() ||
-+	    is_initial_xendomain() ||
-+	    !xen_start_info->console.domU.evtchn)
-+		return;
++	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
++	unsigned long   credit_bytes;
++	unsigned long   credit_usec;
++	unsigned long   remaining_credit;
++	struct timer_list credit_timeout;
 +
-+	/* Spin until console data is flushed through to the daemon. */
-+	while (wc != wp) {
-+		int sent = 0;
-+		if ((sz = wp - wc) == 0)
-+			continue;
-+		sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
-+		if (sent > 0)
-+			wc += sent;
-+	}
-+}
++	/* Enforce draining of the transmit queue. */
++	struct timer_list tx_queue_timeout;
 +
++	/* Miscellaneous private stuff. */
++	struct list_head list;  /* scheduling list */
++	atomic_t         refcnt;
++	struct net_device *dev;
++	struct net_device_stats stats;
 +
-+void dom0_init_screen_info(const struct dom0_vga_console_info *info)
-+{
-+	switch (info->video_type) {
-+	case XEN_VGATYPE_TEXT_MODE_3:
-+		screen_info.orig_video_mode = 3;
-+		screen_info.orig_video_ega_bx = 3;
-+		screen_info.orig_video_isVGA = 1;
-+		screen_info.orig_video_lines = info->u.text_mode_3.rows;
-+		screen_info.orig_video_cols = info->u.text_mode_3.columns;
-+		screen_info.orig_x = info->u.text_mode_3.cursor_x;
-+		screen_info.orig_y = info->u.text_mode_3.cursor_y;
-+		screen_info.orig_video_points =
-+			info->u.text_mode_3.font_height;
-+		break;
-+	case XEN_VGATYPE_VESA_LFB:
-+		screen_info.orig_video_isVGA = VIDEO_TYPE_VLFB;
-+		screen_info.lfb_width = info->u.vesa_lfb.width;
-+		screen_info.lfb_height = info->u.vesa_lfb.height;
-+		screen_info.lfb_depth = info->u.vesa_lfb.bits_per_pixel;
-+		screen_info.lfb_base = info->u.vesa_lfb.lfb_base;
-+		screen_info.lfb_size = info->u.vesa_lfb.lfb_size;
-+		screen_info.lfb_linelength = info->u.vesa_lfb.bytes_per_line;
-+		screen_info.red_size = info->u.vesa_lfb.red_size;
-+		screen_info.red_pos = info->u.vesa_lfb.red_pos;
-+		screen_info.green_size = info->u.vesa_lfb.green_size;
-+		screen_info.green_pos = info->u.vesa_lfb.green_pos;
-+		screen_info.blue_size = info->u.vesa_lfb.blue_size;
-+		screen_info.blue_pos = info->u.vesa_lfb.blue_pos;
-+		screen_info.rsvd_size = info->u.vesa_lfb.rsvd_size;
-+		screen_info.rsvd_pos = info->u.vesa_lfb.rsvd_pos;
-+		break;
-+	}
-+}
++	unsigned int carrier;
++
++	wait_queue_head_t waiting_to_free;
++} netif_t;
++
++/*
++ * Implement our own carrier flag: the network stack's version causes delays
++ * when the carrier is re-enabled (in particular, dev_activate() may not
++ * immediately be called, which can cause packet loss; also the etherbridge
++ * can be rather lazy in activating its port).
++ */
++#define netback_carrier_on(netif)	((netif)->carrier = 1)
++#define netback_carrier_off(netif)	((netif)->carrier = 0)
++#define netback_carrier_ok(netif)	((netif)->carrier)
 +
++enum {
++	NETBK_DONT_COPY_SKB,
++	NETBK_DELAYED_COPY_SKB,
++	NETBK_ALWAYS_COPY_SKB,
++};
 +
-+/******************** User-space console driver (/dev/console) ************/
++extern int netbk_copy_skb_mode;
 +
-+#define DRV(_d)         (_d)
-+#define DUMMY_TTY(_tty) ((xc_mode == XC_TTY) &&		\
-+			 ((_tty)->index != (xc_num - 1)))
++/* Function pointers into netback accelerator plugin modules */
++struct netback_accel_hooks {
++	struct module *owner;
++	int  (*probe)(struct xenbus_device *dev);
++	int (*remove)(struct xenbus_device *dev);
++};
 +
-+static struct termios *xencons_termios[MAX_NR_CONSOLES];
-+static struct termios *xencons_termios_locked[MAX_NR_CONSOLES];
-+static struct tty_struct *xencons_tty;
-+static int xencons_priv_irq;
-+static char x_char;
++/* Structure to track the state of a netback accelerator plugin */
++struct netback_accelerator {
++	struct list_head link;
++	int id;
++	char *eth_name;
++	atomic_t use_count;
++	struct netback_accel_hooks *hooks;
++};
 +
-+void xencons_rx(char *buf, unsigned len, struct pt_regs *regs)
-+{
-+	int           i;
-+	unsigned long flags;
++struct backend_info {
++	struct xenbus_device *dev;
++	netif_t *netif;
++	enum xenbus_state frontend_state;
 +
-+	spin_lock_irqsave(&xencons_lock, flags);
-+	if (xencons_tty == NULL)
-+		goto out;
++	/* State relating to the netback accelerator */
++	void *netback_accel_priv;
++	/* The accelerator that this backend is currently using */
++	struct netback_accelerator *accelerator;
++};
 +
-+	for (i = 0; i < len; i++) {
-+#ifdef CONFIG_MAGIC_SYSRQ
-+		if (sysrq_enabled) {
-+			if (buf[i] == '\x0f') { /* ^O */
-+				if (!sysrq_requested) {
-+					sysrq_requested = jiffies;
-+					continue; /* don't print sysrq key */
-+				}
-+				sysrq_requested = 0;
-+			} else if (sysrq_requested) {
-+				unsigned long sysrq_timeout =
-+					sysrq_requested + HZ*2;
-+				sysrq_requested = 0;
-+				if (time_before(jiffies, sysrq_timeout)) {
-+					spin_unlock_irqrestore(
-+						&xencons_lock, flags);
-+					handle_sysrq(
-+						buf[i], regs, xencons_tty);
-+					spin_lock_irqsave(
-+						&xencons_lock, flags);
-+					continue;
-+				}
-+			}
-+		}
-+#endif
-+		tty_insert_flip_char(xencons_tty, buf[i], 0);
-+	}
-+	tty_flip_buffer_push(xencons_tty);
++#define NETBACK_ACCEL_VERSION 0x00010001
 +
-+ out:
-+	spin_unlock_irqrestore(&xencons_lock, flags);
-+}
++/* 
++ * Connect an accelerator plugin module to netback.  Returns zero on
++ * success, < 0 on error, > 0 (with highest version number supported)
++ * if version mismatch.
++ */
++extern int netback_connect_accelerator(unsigned version,
++				       int id, const char *eth_name, 
++				       struct netback_accel_hooks *hooks);
++/* Disconnect a previously connected accelerator plugin module */
++extern void netback_disconnect_accelerator(int id, const char *eth_name);
++
++
++extern
++void netback_probe_accelerators(struct backend_info *be,
++				struct xenbus_device *dev);
++extern
++void netback_remove_accelerators(struct backend_info *be,
++				 struct xenbus_device *dev);
++extern
++void netif_accel_init(void);
 +
-+static void __xencons_tx_flush(void)
-+{
-+	int sent, sz, work_done = 0;
 +
-+	if (x_char) {
-+		if (is_initial_xendomain())
-+			kcons_write_dom0(NULL, &x_char, 1);
-+		else
-+			while (x_char)
-+				if (xencons_ring_send(&x_char, 1) == 1)
-+					break;
-+		x_char = 0;
-+		work_done = 1;
-+	}
++#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
++#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
 +
-+	while (wc != wp) {
-+		sz = wp - wc;
-+		if (sz > (wbuf_size - WBUF_MASK(wc)))
-+			sz = wbuf_size - WBUF_MASK(wc);
-+		if (is_initial_xendomain()) {
-+			kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz);
-+			wc += sz;
-+		} else {
-+			sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
-+			if (sent == 0)
-+				break;
-+			wc += sent;
-+		}
-+		work_done = 1;
-+	}
++void netif_disconnect(netif_t *netif);
 +
-+	if (work_done && (xencons_tty != NULL)) {
-+		wake_up_interruptible(&xencons_tty->write_wait);
-+		if ((xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
-+		    (xencons_tty->ldisc.write_wakeup != NULL))
-+			(xencons_tty->ldisc.write_wakeup)(xencons_tty);
-+	}
-+}
++netif_t *netif_alloc(domid_t domid, unsigned int handle);
++int netif_map(netif_t *netif, unsigned long tx_ring_ref,
++	      unsigned long rx_ring_ref, unsigned int evtchn);
 +
-+void xencons_tx(void)
-+{
-+	unsigned long flags;
++#define netif_get(_b) (atomic_inc(&(_b)->refcnt))
++#define netif_put(_b)						\
++	do {							\
++		if ( atomic_dec_and_test(&(_b)->refcnt) )	\
++			wake_up(&(_b)->waiting_to_free);	\
++	} while (0)
 +
-+	spin_lock_irqsave(&xencons_lock, flags);
-+	__xencons_tx_flush();
-+	spin_unlock_irqrestore(&xencons_lock, flags);
-+}
++void netif_xenbus_init(void);
 +
-+/* Privileged receive callback and transmit kicker. */
-+static irqreturn_t xencons_priv_interrupt(int irq, void *dev_id,
-+					  struct pt_regs *regs)
-+{
-+	static char rbuf[16];
-+	int         l;
++#define netif_schedulable(netif)				\
++	(netif_running((netif)->dev) && netback_carrier_ok(netif))
 +
-+	while ((l = HYPERVISOR_console_io(CONSOLEIO_read, 16, rbuf)) > 0)
-+		xencons_rx(rbuf, l, regs);
++void netif_schedule_work(netif_t *netif);
++void netif_deschedule_work(netif_t *netif);
 +
-+	xencons_tx();
++int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
++struct net_device_stats *netif_be_get_stats(struct net_device *dev);
++irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
 +
-+	return IRQ_HANDLED;
++static inline int netbk_can_queue(struct net_device *dev)
++{
++	netif_t *netif = netdev_priv(dev);
++	return netif->can_queue;
 +}
 +
-+static int xencons_write_room(struct tty_struct *tty)
++static inline int netbk_can_sg(struct net_device *dev)
 +{
-+	return wbuf_size - (wp - wc);
++	netif_t *netif = netdev_priv(dev);
++	return netif->features & NETIF_F_SG;
 +}
 +
-+static int xencons_chars_in_buffer(struct tty_struct *tty)
++#endif /* __NETIF__BACKEND__COMMON_H__ */
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/netback/interface.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/netback/interface.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,336 @@
++/******************************************************************************
++ * arch/xen/drivers/netif/backend/interface.c
++ * 
++ * Network-device interface management.
++ * 
++ * Copyright (c) 2004-2005, Keir Fraser
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include "common.h"
++#include <linux/ethtool.h>
++#include <linux/rtnetlink.h>
++
++/*
++ * Module parameter 'queue_length':
++ * 
++ * Enables queuing in the network stack when a client has run out of receive
++ * descriptors. Although this feature can improve receive bandwidth by avoiding
++ * packet loss, it can also result in packets sitting in the 'tx_queue' for
++ * unbounded time. This is bad if those packets hold onto foreign resources.
++ * For example, consider a packet that holds onto resources belonging to the
++ * guest for which it is queued (e.g., packet received on vif1.0, destined for
++ * vif1.1 which is not activated in the guest): in this situation the guest
++ * will never be destroyed, unless vif1.1 is taken down. To avoid this, we
++ * run a timer (tx_queue_timeout) to drain the queue when the interface is
++ * blocked.
++ */
++static unsigned long netbk_queue_length = 32;
++module_param_named(queue_length, netbk_queue_length, ulong, 0);
++
++static void __netif_up(netif_t *netif)
 +{
-+	return wp - wc;
++	enable_irq(netif->irq);
++	netif_schedule_work(netif);
 +}
 +
-+static void xencons_send_xchar(struct tty_struct *tty, char ch)
++static void __netif_down(netif_t *netif)
 +{
-+	unsigned long flags;
++	disable_irq(netif->irq);
++	netif_deschedule_work(netif);
++}
 +
-+	if (DUMMY_TTY(tty))
-+		return;
++static int net_open(struct net_device *dev)
++{
++	netif_t *netif = netdev_priv(dev);
++	if (netback_carrier_ok(netif)) {
++		__netif_up(netif);
++		netif_start_queue(dev);
++	}
++	return 0;
++}
 +
-+	spin_lock_irqsave(&xencons_lock, flags);
-+	x_char = ch;
-+	__xencons_tx_flush();
-+	spin_unlock_irqrestore(&xencons_lock, flags);
++static int net_close(struct net_device *dev)
++{
++	netif_t *netif = netdev_priv(dev);
++	if (netback_carrier_ok(netif))
++		__netif_down(netif);
++	netif_stop_queue(dev);
++	return 0;
 +}
 +
-+static void xencons_throttle(struct tty_struct *tty)
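++/* With scatter-gather the MTU may go up to the 64KiB IP datagram limit
++ * (less the Ethernet header); otherwise it is capped at ETH_DATA_LEN. */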
++static int netbk_change_mtu(struct net_device *dev, int mtu)
 +{
-+	if (DUMMY_TTY(tty))
-+		return;
++	int max = netbk_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
 +
-+	if (I_IXOFF(tty))
-+		xencons_send_xchar(tty, STOP_CHAR(tty));
++	if (mtu > max)
++		return -EINVAL;
++	dev->mtu = mtu;
++	return 0;
 +}
 +
-+static void xencons_unthrottle(struct tty_struct *tty)
++static int netbk_set_sg(struct net_device *dev, u32 data)
 +{
-+	if (DUMMY_TTY(tty))
-+		return;
++	if (data) {
++		netif_t *netif = netdev_priv(dev);
 +
-+	if (I_IXOFF(tty)) {
-+		if (x_char != 0)
-+			x_char = 0;
-+		else
-+			xencons_send_xchar(tty, START_CHAR(tty));
++		if (!(netif->features & NETIF_F_SG))
++			return -ENOSYS;
 +	}
++
++	return ethtool_op_set_sg(dev, data);
 +}
 +
-+static void xencons_flush_buffer(struct tty_struct *tty)
++static int netbk_set_tso(struct net_device *dev, u32 data)
 +{
-+	unsigned long flags;
++	if (data) {
++		netif_t *netif = netdev_priv(dev);
 +
-+	if (DUMMY_TTY(tty))
-+		return;
++		if (!(netif->features & NETIF_F_TSO))
++			return -ENOSYS;
++	}
 +
-+	spin_lock_irqsave(&xencons_lock, flags);
-+	wc = wp = 0;
-+	spin_unlock_irqrestore(&xencons_lock, flags);
++	return ethtool_op_set_tso(dev, data);
 +}
 +
-+static inline int __xencons_put_char(int ch)
++static struct ethtool_ops network_ethtool_ops =
 +{
-+	char _ch = (char)ch;
-+	if ((wp - wc) == wbuf_size)
-+		return 0;
-+	wbuf[WBUF_MASK(wp++)] = _ch;
-+	return 1;
-+}
++	.get_tx_csum = ethtool_op_get_tx_csum,
++	.set_tx_csum = ethtool_op_set_tx_csum,
++	.get_sg = ethtool_op_get_sg,
++	.set_sg = netbk_set_sg,
++	.get_tso = ethtool_op_get_tso,
++	.set_tso = netbk_set_tso,
++	.get_link = ethtool_op_get_link,
++};
 +
-+static int xencons_write(
-+	struct tty_struct *tty,
-+	const unsigned char *buf,
-+	int count)
++netif_t *netif_alloc(domid_t domid, unsigned int handle)
 +{
-+	int i;
-+	unsigned long flags;
-+
-+	if (DUMMY_TTY(tty))
-+		return count;
++	int err = 0;
++	struct net_device *dev;
++	netif_t *netif;
++	char name[IFNAMSIZ] = {};
 +
-+	spin_lock_irqsave(&xencons_lock, flags);
++	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
++	dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
++	if (dev == NULL) {
++		DPRINTK("Could not create netif: out of memory\n");
++		return ERR_PTR(-ENOMEM);
++	}
 +
-+	for (i = 0; i < count; i++)
-+		if (!__xencons_put_char(buf[i]))
-+			break;
++	netif = netdev_priv(dev);
++	memset(netif, 0, sizeof(*netif));
++	netif->domid  = domid;
++	netif->handle = handle;
++	atomic_set(&netif->refcnt, 1);
++	init_waitqueue_head(&netif->waiting_to_free);
++	netif->dev = dev;
 +
-+	if (i != 0)
-+		__xencons_tx_flush();
++	netback_carrier_off(netif);
 +
-+	spin_unlock_irqrestore(&xencons_lock, flags);
++	netif->credit_bytes = netif->remaining_credit = ~0UL;
++	netif->credit_usec  = 0UL;
++	init_timer(&netif->credit_timeout);
++	/* Initialize 'expires' now: it's used to track the credit window. */
++	netif->credit_timeout.expires = jiffies;
 +
-+	return i;
-+}
++	init_timer(&netif->tx_queue_timeout);
 +
-+static void xencons_put_char(struct tty_struct *tty, u_char ch)
-+{
-+	unsigned long flags;
++	dev->hard_start_xmit = netif_be_start_xmit;
++	dev->get_stats       = netif_be_get_stats;
++	dev->open            = net_open;
++	dev->stop            = net_close;
++	dev->change_mtu	     = netbk_change_mtu;
++	dev->features        = NETIF_F_IP_CSUM;
 +
-+	if (DUMMY_TTY(tty))
-+		return;
++	SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
 +
-+	spin_lock_irqsave(&xencons_lock, flags);
-+	(void)__xencons_put_char(ch);
-+	spin_unlock_irqrestore(&xencons_lock, flags);
-+}
++	dev->tx_queue_len = netbk_queue_length;
 +
-+static void xencons_flush_chars(struct tty_struct *tty)
-+{
-+	unsigned long flags;
++	/*
++	 * Initialise a dummy MAC address. We choose the numerically
++	 * largest non-broadcast address to prevent the address getting
++	 * stolen by an Ethernet bridge for STP purposes.
++	 * (FE:FF:FF:FF:FF:FF)
++	 */ 
++	memset(dev->dev_addr, 0xFF, ETH_ALEN);
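++	/* Clear the group/multicast bit to keep the address unicast. */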
++	dev->dev_addr[0] &= ~0x01;
 +
-+	if (DUMMY_TTY(tty))
-+		return;
++	rtnl_lock();
++	err = register_netdevice(dev);
++	rtnl_unlock();
++	if (err) {
++		DPRINTK("Could not register new net device %s: err=%d\n",
++			dev->name, err);
++		free_netdev(dev);
++		return ERR_PTR(err);
++	}
 +
-+	spin_lock_irqsave(&xencons_lock, flags);
-+	__xencons_tx_flush();
-+	spin_unlock_irqrestore(&xencons_lock, flags);
++	DPRINTK("Successfully created netif\n");
++	return netif;
 +}
 +
-+static void xencons_wait_until_sent(struct tty_struct *tty, int timeout)
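++/* Map the frontend's tx/rx shared-ring grants into this domain's vm areas. */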
++static int map_frontend_pages(
++	netif_t *netif, grant_ref_t tx_ring_ref, grant_ref_t rx_ring_ref)
 +{
-+	unsigned long orig_jiffies = jiffies;
++	struct gnttab_map_grant_ref op;
 +
-+	if (DUMMY_TTY(tty))
-+		return;
++	gnttab_set_map_op(&op, (unsigned long)netif->tx_comms_area->addr,
++			  GNTMAP_host_map, tx_ring_ref, netif->domid);
++    
++	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++		BUG();
 +
-+	while (DRV(tty->driver)->chars_in_buffer(tty)) {
-+		set_current_state(TASK_INTERRUPTIBLE);
-+		schedule_timeout(1);
-+		if (signal_pending(current))
-+			break;
-+		if (timeout && time_after(jiffies, orig_jiffies + timeout))
-+			break;
++	if (op.status) { 
++		DPRINTK(" Gnttab failure mapping tx_ring_ref!\n");
++		return op.status;
 +	}
 +
-+	set_current_state(TASK_RUNNING);
-+}
++	netif->tx_shmem_ref    = tx_ring_ref;
++	netif->tx_shmem_handle = op.handle;
 +
-+static int xencons_open(struct tty_struct *tty, struct file *filp)
-+{
-+	unsigned long flags;
++	gnttab_set_map_op(&op, (unsigned long)netif->rx_comms_area->addr,
++			  GNTMAP_host_map, rx_ring_ref, netif->domid);
 +
-+	if (DUMMY_TTY(tty))
-+		return 0;
++	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++		BUG();
 +
-+	spin_lock_irqsave(&xencons_lock, flags);
-+	tty->driver_data = NULL;
-+	if (xencons_tty == NULL)
-+		xencons_tty = tty;
-+	__xencons_tx_flush();
-+	spin_unlock_irqrestore(&xencons_lock, flags);
++	if (op.status) {
++		DPRINTK(" Gnttab failure mapping rx_ring_ref!\n");
++		return op.status;
++	}
++
++	netif->rx_shmem_ref    = rx_ring_ref;
++	netif->rx_shmem_handle = op.handle;
 +
 +	return 0;
 +}
 +
-+static void xencons_close(struct tty_struct *tty, struct file *filp)
++static void unmap_frontend_pages(netif_t *netif)
 +{
-+	unsigned long flags;
-+
-+	if (DUMMY_TTY(tty))
-+		return;
++	struct gnttab_unmap_grant_ref op;
 +
-+	mutex_lock(&tty_mutex);
++	gnttab_set_unmap_op(&op, (unsigned long)netif->tx_comms_area->addr,
++			    GNTMAP_host_map, netif->tx_shmem_handle);
 +
-+	if (tty->count != 1) {
-+		mutex_unlock(&tty_mutex);
-+		return;
-+	}
++	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++		BUG();
 +
-+	/* Prevent other threads from re-opening this tty. */
-+	set_bit(TTY_CLOSING, &tty->flags);
-+	mutex_unlock(&tty_mutex);
++	gnttab_set_unmap_op(&op, (unsigned long)netif->rx_comms_area->addr,
++			    GNTMAP_host_map, netif->rx_shmem_handle);
 +
-+	tty->closing = 1;
-+	tty_wait_until_sent(tty, 0);
-+	if (DRV(tty->driver)->flush_buffer != NULL)
-+		DRV(tty->driver)->flush_buffer(tty);
-+	if (tty->ldisc.flush_buffer != NULL)
-+		tty->ldisc.flush_buffer(tty);
-+	tty->closing = 0;
-+	spin_lock_irqsave(&xencons_lock, flags);
-+	xencons_tty = NULL;
-+	spin_unlock_irqrestore(&xencons_lock, flags);
++	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++		BUG();
 +}
 +
-+static struct tty_operations xencons_ops = {
-+	.open = xencons_open,
-+	.close = xencons_close,
-+	.write = xencons_write,
-+	.write_room = xencons_write_room,
-+	.put_char = xencons_put_char,
-+	.flush_chars = xencons_flush_chars,
-+	.chars_in_buffer = xencons_chars_in_buffer,
-+	.send_xchar = xencons_send_xchar,
-+	.flush_buffer = xencons_flush_buffer,
-+	.throttle = xencons_throttle,
-+	.unthrottle = xencons_unthrottle,
-+	.wait_until_sent = xencons_wait_until_sent,
-+};
-+
-+static int __init xencons_init(void)
++int netif_map(netif_t *netif, unsigned long tx_ring_ref,
++	      unsigned long rx_ring_ref, unsigned int evtchn)
 +{
-+	int rc;
-+
-+	if (!is_running_on_xen())
-+		return -ENODEV;
++	int err = -ENOMEM;
++	netif_tx_sring_t *txs;
++	netif_rx_sring_t *rxs;
 +
-+	if (xc_mode == XC_OFF)
++	/* Already connected? */
++	if (netif->irq)
 +		return 0;
 +
-+	if (!is_initial_xendomain()) {
-+		rc = xencons_ring_init();
-+		if (rc)
-+			return rc;
-+	}
-+
-+	xencons_driver = alloc_tty_driver((xc_mode == XC_TTY) ?
-+					  MAX_NR_CONSOLES : 1);
-+	if (xencons_driver == NULL)
++	netif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
++	if (netif->tx_comms_area == NULL)
 +		return -ENOMEM;
++	netif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
++	if (netif->rx_comms_area == NULL)
++		goto err_rx;
 +
-+	DRV(xencons_driver)->name            = "xencons";
-+	DRV(xencons_driver)->major           = TTY_MAJOR;
-+	DRV(xencons_driver)->type            = TTY_DRIVER_TYPE_SERIAL;
-+	DRV(xencons_driver)->subtype         = SERIAL_TYPE_NORMAL;
-+	DRV(xencons_driver)->init_termios    = tty_std_termios;
-+	DRV(xencons_driver)->flags           =
-+		TTY_DRIVER_REAL_RAW |
-+		TTY_DRIVER_RESET_TERMIOS;
-+	DRV(xencons_driver)->termios         = xencons_termios;
-+	DRV(xencons_driver)->termios_locked  = xencons_termios_locked;
++	err = map_frontend_pages(netif, tx_ring_ref, rx_ring_ref);
++	if (err)
++		goto err_map;
 +
-+	switch (xc_mode) {
-+	case XC_XVC:
-+		DRV(xencons_driver)->name        = "xvc";
-+		DRV(xencons_driver)->major       = XEN_XVC_MAJOR;
-+		DRV(xencons_driver)->minor_start = XEN_XVC_MINOR;
-+		DRV(xencons_driver)->name_base   = xc_num;
-+		break;
-+	case XC_SERIAL:
-+		DRV(xencons_driver)->name        = "ttyS";
-+		DRV(xencons_driver)->minor_start = 64 + xc_num;
-+		DRV(xencons_driver)->name_base   = xc_num;
-+		break;
-+	default:
-+		DRV(xencons_driver)->name        = "tty";
-+		DRV(xencons_driver)->minor_start = 1;
-+		DRV(xencons_driver)->name_base   = 1;
-+		break;
-+	}
++	err = bind_interdomain_evtchn_to_irqhandler(
++		netif->domid, evtchn, netif_be_int, 0,
++		netif->dev->name, netif);
++	if (err < 0)
++		goto err_hypervisor;
++	netif->irq = err;
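++	/* Keep the interrupt masked until the interface is brought up. */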
++	disable_irq(netif->irq);
 +
-+	tty_set_operations(xencons_driver, &xencons_ops);
++	txs = (netif_tx_sring_t *)netif->tx_comms_area->addr;
++	BACK_RING_INIT(&netif->tx, txs, PAGE_SIZE);
 +
-+	if ((rc = tty_register_driver(DRV(xencons_driver))) != 0) {
-+		printk("WARNING: Failed to register Xen virtual "
-+		       "console driver as '%s%d'\n",
-+		       DRV(xencons_driver)->name,
-+		       DRV(xencons_driver)->name_base);
-+		put_tty_driver(xencons_driver);
-+		xencons_driver = NULL;
-+		return rc;
-+	}
++	rxs = (netif_rx_sring_t *)
++		((char *)netif->rx_comms_area->addr);
++	BACK_RING_INIT(&netif->rx, rxs, PAGE_SIZE);
 +
-+	if (is_initial_xendomain()) {
-+		xencons_priv_irq = bind_virq_to_irqhandler(
-+			VIRQ_CONSOLE,
-+			0,
-+			xencons_priv_interrupt,
-+			0,
-+			"console",
-+			NULL);
-+		BUG_ON(xencons_priv_irq < 0);
-+	}
++	netif->rx_req_cons_peek = 0;
 +
-+	printk("Xen virtual console successfully installed as %s%d\n",
-+	       DRV(xencons_driver)->name, xc_num);
++	netif_get(netif);
++
++	rtnl_lock();
++	netback_carrier_on(netif);
++	if (netif_running(netif->dev))
++		__netif_up(netif);
++	rtnl_unlock();
 +
 +	return 0;
++err_hypervisor:
++	unmap_frontend_pages(netif);
++err_map:
++	free_vm_area(netif->rx_comms_area);
++err_rx:
++	free_vm_area(netif->tx_comms_area);
++	return err;
 +}
 +
-+module_init(xencons_init);
++void netif_disconnect(netif_t *netif)
++{
++	if (netback_carrier_ok(netif)) {
++		rtnl_lock();
++		netback_carrier_off(netif);
++		netif_carrier_off(netif->dev); /* discard queued packets */
++		if (netif_running(netif->dev))
++			__netif_down(netif);
++		rtnl_unlock();
++		netif_put(netif);
++	}
 +
-+MODULE_LICENSE("Dual BSD/GPL");
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/console/Makefile tmp-linux-2.6-xen.patch/drivers/xen/console/Makefile
---- pristine-linux-2.6.18.2/drivers/xen/console/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/console/Makefile	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,2 @@
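++	/* Drop the initial reference from netif_alloc() and wait for any
++	 * remaining users to release theirs. */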
++	atomic_dec(&netif->refcnt);
++	wait_event(netif->waiting_to_free, atomic_read(&netif->refcnt) == 0);
 +
-+obj-y	:= console.o xencons_ring.o
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/console/xencons_ring.c tmp-linux-2.6-xen.patch/drivers/xen/console/xencons_ring.c
---- pristine-linux-2.6.18.2/drivers/xen/console/xencons_ring.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/console/xencons_ring.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,143 @@
-+/* 
++	del_timer_sync(&netif->credit_timeout);
++	del_timer_sync(&netif->tx_queue_timeout);
++
++	if (netif->irq)
++		unbind_from_irqhandler(netif->irq, netif);
++	
++	unregister_netdev(netif->dev);
++
++	if (netif->tx.sring) {
++		unmap_frontend_pages(netif);
++		free_vm_area(netif->tx_comms_area);
++		free_vm_area(netif->rx_comms_area);
++	}
++
++	free_netdev(netif->dev);
++}
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/netback/loopback.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/netback/loopback.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,324 @@
++/******************************************************************************
++ * netback/loopback.c
++ * 
++ * A two-interface loopback device to emulate a local netfront-netback
++ * connection. This ensures that local packet delivery looks identical
++ * to inter-domain delivery. Most importantly, packets delivered locally
++ * originating from other domains will get *copied* when they traverse this
++ * driver. This prevents unbounded delays in socket-buffer queues from
++ * causing the netback driver to "seize up".
++ * 
++ * This driver creates a symmetric pair of loopback interfaces with names
++ * vif0.0 and veth0. The intention is that 'vif0.0' is bound to an Ethernet
++ * bridge, just like a proper netback interface, while a local IP interface
++ * is configured on 'veth0'.
++ * 
++ * As with a real netback interface, vif0.0 is configured with a suitable
++ * dummy MAC address. No default is provided for veth0: a reasonable strategy
++ * is to transfer eth0's MAC address to veth0, and give eth0 a dummy address
++ * (to avoid confusing the Ethernet bridge).
++ * 
++ * Copyright (c) 2005 K A Fraser
++ * 
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License version 2
 + * as published by the Free Software Foundation; or, when distributed
@@ -57704,307 +93867,295 @@
 + * IN THE SOFTWARE.
 + */
 +
-+#include <linux/version.h>
 +#include <linux/module.h>
-+#include <linux/errno.h>
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/interrupt.h>
-+#include <linux/tty.h>
-+#include <linux/tty_flip.h>
-+#include <linux/serial.h>
-+#include <linux/major.h>
-+#include <linux/ptrace.h>
-+#include <linux/ioport.h>
-+#include <linux/mm.h>
-+#include <linux/slab.h>
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <linux/ethtool.h>
++#include <net/dst.h>
++#include <net/xfrm.h>		/* secpath_reset() */
++#include <asm/hypervisor.h>	/* is_initial_xendomain() */
 +
-+#include <asm/hypervisor.h>
-+#include <xen/evtchn.h>
-+#include <xen/xencons.h>
-+#include <linux/wait.h>
-+#include <linux/interrupt.h>
-+#include <linux/sched.h>
-+#include <linux/err.h>
-+#include <xen/interface/io/console.h>
++static int nloopbacks = -1;
++module_param(nloopbacks, int, 0);
++MODULE_PARM_DESC(nloopbacks, "Number of netback-loopback devices to create");
 +
-+static int xencons_irq;
++struct net_private {
++	struct net_device *loopback_dev;
++	struct net_device_stats stats;
++};
 +
-+static inline struct xencons_interface *xencons_interface(void)
++static int loopback_open(struct net_device *dev)
 +{
-+	return mfn_to_virt(xen_start_info->console.domU.mfn);
++	struct net_private *np = netdev_priv(dev);
++	memset(&np->stats, 0, sizeof(np->stats));
++	netif_start_queue(dev);
++	return 0;
 +}
 +
-+static inline void notify_daemon(void)
++static int loopback_close(struct net_device *dev)
 +{
-+	/* Use evtchn: this is called early, before irq is set up. */
-+	notify_remote_via_evtchn(xen_start_info->console.domU.evtchn);
++	netif_stop_queue(dev);
++	return 0;
 +}
 +
-+int xencons_ring_send(const char *data, unsigned len)
++#ifdef CONFIG_X86
++static int is_foreign(unsigned long pfn)
 +{
-+	int sent = 0;
-+	struct xencons_interface *intf = xencons_interface();
-+	XENCONS_RING_IDX cons, prod;
++	/* NB. Play it safe for auto-translation mode. */
++	return (xen_feature(XENFEAT_auto_translated_physmap) ||
++		(phys_to_machine_mapping[pfn] & FOREIGN_FRAME_BIT));
++}
++#else
++/* How to detect a foreign mapping? Play it safe. */
++#define is_foreign(pfn)	(1)
++#endif
 +
-+	cons = intf->out_cons;
-+	prod = intf->out_prod;
-+	mb();
-+	BUG_ON((prod - cons) > sizeof(intf->out));
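++/*
++ * Replace any skb fragments that live in foreign pages with locally
++ * allocated copies, so this path never retains another domain's memory.
++ * Returns 0 on allocation failure, 1 on success.
++ */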
++static int skb_remove_foreign_references(struct sk_buff *skb)
++{
++	struct page *page;
++	unsigned long pfn;
++	int i, off;
++	char *vaddr;
 +
-+	while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
-+		intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
++	BUG_ON(skb_shinfo(skb)->frag_list);
 +
-+	wmb();
-+	intf->out_prod = prod;
++	if (skb_cloned(skb) &&
++	    unlikely(pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
++		return 0;
 +
-+	notify_daemon();
++	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
++		pfn = page_to_pfn(skb_shinfo(skb)->frags[i].page);
++		if (!is_foreign(pfn))
++			continue;
++		
++		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
++		if (unlikely(!page))
++			return 0;
 +
-+	return sent;
++		vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
++		off = skb_shinfo(skb)->frags[i].page_offset;
++		memcpy(page_address(page) + off,
++		       vaddr + off,
++		       skb_shinfo(skb)->frags[i].size);
++		kunmap_skb_frag(vaddr);
++
++		put_page(skb_shinfo(skb)->frags[i].page);
++		skb_shinfo(skb)->frags[i].page = page;
++	}
++
++	return 1;
 +}
 +
-+static irqreturn_t handle_input(int irq, void *unused, struct pt_regs *regs)
++static int loopback_start_xmit(struct sk_buff *skb, struct net_device *dev)
 +{
-+	struct xencons_interface *intf = xencons_interface();
-+	XENCONS_RING_IDX cons, prod;
-+
-+	cons = intf->in_cons;
-+	prod = intf->in_prod;
-+	mb();
-+	BUG_ON((prod - cons) > sizeof(intf->in));
++	struct net_private *np = netdev_priv(dev);
 +
-+	while (cons != prod) {
-+		xencons_rx(intf->in+MASK_XENCONS_IDX(cons,intf->in), 1, regs);
-+		cons++;
++	if (!skb_remove_foreign_references(skb)) {
++		np->stats.tx_dropped++;
++		dev_kfree_skb(skb);
++		return 0;
 +	}
 +
-+	mb();
-+	intf->in_cons = cons;
++	dst_release(skb->dst);
++	skb->dst = NULL;
 +
-+	notify_daemon();
++	skb_orphan(skb);
 +
-+	xencons_tx();
++	np->stats.tx_bytes += skb->len;
++	np->stats.tx_packets++;
 +
-+	return IRQ_HANDLED;
-+}
++	/* Switch to loopback context. */
++	dev = np->loopback_dev;
++	np  = netdev_priv(dev);
 +
-+int xencons_ring_init(void)
-+{
-+	int irq;
++	np->stats.rx_bytes += skb->len;
++	np->stats.rx_packets++;
 +
-+	if (xencons_irq)
-+		unbind_from_irqhandler(xencons_irq, NULL);
-+	xencons_irq = 0;
++	if (skb->ip_summed == CHECKSUM_HW) {
++		/* Defer checksum calculation. */
++		skb->proto_csum_blank = 1;
++		/* Must be a local packet: assert its integrity. */
++		skb->proto_data_valid = 1;
++	}
 +
-+	if (!is_running_on_xen() ||
-+	    is_initial_xendomain() ||
-+	    !xen_start_info->console.domU.evtchn)
-+		return -ENODEV;
++	skb->ip_summed = skb->proto_data_valid ?
++		CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
 +
-+	irq = bind_caller_port_to_irqhandler(
-+		xen_start_info->console.domU.evtchn,
-+		handle_input, 0, "xencons", NULL);
-+	if (irq < 0) {
-+		printk(KERN_ERR "XEN console request irq failed %i\n", irq);
-+		return irq;
-+	}
++	skb->pkt_type = PACKET_HOST; /* overridden by eth_type_trans() */
++	skb->protocol = eth_type_trans(skb, dev);
++	skb->dev      = dev;
++	dev->last_rx  = jiffies;
 +
-+	xencons_irq = irq;
++	/* Flush netfilter context: rx'ed skbuffs not expected to have any. */
++	nf_reset(skb);
++	secpath_reset(skb);
 +
-+	/* In case we have in-flight data after save/restore... */
-+	notify_daemon();
++	netif_rx(skb);
 +
 +	return 0;
 +}
 +
-+void xencons_resume(void)
++static struct net_device_stats *loopback_get_stats(struct net_device *dev)
 +{
-+	(void)xencons_ring_init();
++	struct net_private *np = netdev_priv(dev);
++	return &np->stats;
 +}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/core/cpu_hotplug.c tmp-linux-2.6-xen.patch/drivers/xen/core/cpu_hotplug.c
---- pristine-linux-2.6.18.2/drivers/xen/core/cpu_hotplug.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/core/cpu_hotplug.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,172 @@
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/notifier.h>
-+#include <linux/cpu.h>
-+#include <xen/cpu_hotplug.h>
-+#include <xen/xenbus.h>
++
++static struct ethtool_ops network_ethtool_ops =
++{
++	.get_tx_csum = ethtool_op_get_tx_csum,
++	.set_tx_csum = ethtool_op_set_tx_csum,
++	.get_sg = ethtool_op_get_sg,
++	.set_sg = ethtool_op_set_sg,
++	.get_tso = ethtool_op_get_tso,
++	.set_tso = ethtool_op_set_tso,
++	.get_link = ethtool_op_get_link,
++};
 +
 +/*
-+ * Set of CPUs that remote admin software will allow us to bring online.
-+ * Notified to us via xenbus.
++ * Nothing to do here: the virtual interface is point-to-point and the
++ * physical interface is probably promiscuous anyway.
 + */
-+static cpumask_t xenbus_allowed_cpumask;
-+
-+/* Set of CPUs that local admin will allow us to bring online. */
-+static cpumask_t local_allowed_cpumask = CPU_MASK_ALL;
-+
-+static int local_cpu_hotplug_request(void)
++static void loopback_set_multicast_list(struct net_device *dev)
 +{
-+	/*
-+	 * We assume a CPU hotplug request comes from local admin if it is made
-+	 * via a userspace process (i.e., one with a real mm_struct).
-+	 */
-+	return (current->mm != NULL);
 +}
 +
-+static void vcpu_hotplug(unsigned int cpu)
++static void loopback_construct(struct net_device *dev, struct net_device *lo)
 +{
-+	int err;
-+	char dir[32], state[32];
-+
-+	if ((cpu >= NR_CPUS) || !cpu_possible(cpu))
-+		return;
++	struct net_private *np = netdev_priv(dev);
 +
-+	sprintf(dir, "cpu/%d", cpu);
-+	err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state);
-+	if (err != 1) {
-+		printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
-+		return;
-+	}
++	np->loopback_dev     = lo;
 +
-+	if (strcmp(state, "online") == 0) {
-+		cpu_set(cpu, xenbus_allowed_cpumask);
-+		(void)cpu_up(cpu);
-+	} else if (strcmp(state, "offline") == 0) {
-+		cpu_clear(cpu, xenbus_allowed_cpumask);
-+		(void)cpu_down(cpu);
-+	} else {
-+		printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n",
-+		       state, cpu);
-+	}
-+}
++	dev->open            = loopback_open;
++	dev->stop            = loopback_close;
++	dev->hard_start_xmit = loopback_start_xmit;
++	dev->get_stats       = loopback_get_stats;
++	dev->set_multicast_list = loopback_set_multicast_list;
++	dev->change_mtu	     = NULL; /* allow arbitrary mtu */
 +
-+static void handle_vcpu_hotplug_event(
-+	struct xenbus_watch *watch, const char **vec, unsigned int len)
-+{
-+	int cpu;
-+	char *cpustr;
-+	const char *node = vec[XS_WATCH_PATH];
++	dev->tx_queue_len    = 0;
 +
-+	if ((cpustr = strstr(node, "cpu/")) != NULL) {
-+		sscanf(cpustr, "cpu/%d", &cpu);
-+		vcpu_hotplug(cpu);
-+	}
-+}
++	dev->features        = (NETIF_F_HIGHDMA |
++				NETIF_F_LLTX |
++				NETIF_F_TSO |
++				NETIF_F_SG |
++				NETIF_F_IP_CSUM);
 +
-+static int smpboot_cpu_notify(struct notifier_block *notifier,
-+			      unsigned long action, void *hcpu)
-+{
-+	int cpu = (long)hcpu;
++	SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
 +
 +	/*
-+	 * We do this in a callback notifier rather than __cpu_disable()
-+	 * because local_cpu_hotplug_request() does not work in the latter
-+	 * as it's always executed from within a stopmachine kthread.
++	 * We do not set a jumbo MTU on the interface. Otherwise the network
++	 * stack will try to send large packets that will get dropped by the
++	 * Ethernet bridge (unless the physical Ethernet interface is
++	 * configured to transfer jumbo packets). If a larger MTU is desired
++	 * then the system administrator can specify it using the 'ifconfig'
++	 * command.
 +	 */
-+	if ((action == CPU_DOWN_PREPARE) && local_cpu_hotplug_request())
-+		cpu_clear(cpu, local_allowed_cpumask);
-+
-+	return NOTIFY_OK;
-+}
-+
-+static int setup_cpu_watcher(struct notifier_block *notifier,
-+			      unsigned long event, void *data)
-+{
-+	int i;
-+
-+	static struct xenbus_watch cpu_watch = {
-+		.node = "cpu",
-+		.callback = handle_vcpu_hotplug_event,
-+		.flags = XBWF_new_thread };
-+	(void)register_xenbus_watch(&cpu_watch);
-+
-+	if (!is_initial_xendomain()) {
-+		for_each_possible_cpu(i)
-+			vcpu_hotplug(i);
-+		printk(KERN_INFO "Brought up %ld CPUs\n",
-+		       (long)num_online_cpus());
-+	}
-+
-+	return NOTIFY_DONE;
++	/*dev->mtu             = 16*1024;*/
 +}
 +
-+static int __init setup_vcpu_hotplug_event(void)
++static int __init make_loopback(int i)
 +{
-+	static struct notifier_block hotplug_cpu = {
-+		.notifier_call = smpboot_cpu_notify };
-+	static struct notifier_block xsn_cpu = {
-+		.notifier_call = setup_cpu_watcher };
++	struct net_device *dev1, *dev2;
++	char dev_name[IFNAMSIZ];
++	int err = -ENOMEM;
 +
-+	if (!is_running_on_xen())
-+		return -ENODEV;
++	sprintf(dev_name, "vif0.%d", i);
++	dev1 = alloc_netdev(sizeof(struct net_private), dev_name, ether_setup);
++	if (!dev1)
++		return err;
 +
-+	register_cpu_notifier(&hotplug_cpu);
-+	register_xenstore_notifier(&xsn_cpu);
++	sprintf(dev_name, "veth%d", i);
++	dev2 = alloc_netdev(sizeof(struct net_private), dev_name, ether_setup);
++	if (!dev2)
++		goto fail_netdev2;
 +
-+	return 0;
-+}
++	loopback_construct(dev1, dev2);
++	loopback_construct(dev2, dev1);
 +
-+arch_initcall(setup_vcpu_hotplug_event);
++	/*
++	 * Initialise a dummy MAC address for the 'dummy backend' interface. We
++	 * choose the numerically largest non-broadcast address to prevent the
++	 * address getting stolen by an Ethernet bridge for STP purposes.
++	 */
++	memset(dev1->dev_addr, 0xFF, ETH_ALEN);
++	dev1->dev_addr[0] &= ~0x01;
 +
-+int smp_suspend(void)
-+{
-+	int cpu, err;
++	if ((err = register_netdev(dev1)) != 0)
++		goto fail;
 +
-+	for_each_online_cpu(cpu) {
-+		if (cpu == 0)
-+			continue;
-+		err = cpu_down(cpu);
-+		if (err) {
-+			printk(KERN_CRIT "Failed to take all CPUs "
-+			       "down: %d.\n", err);
-+			for_each_possible_cpu(cpu)
-+				vcpu_hotplug(cpu);
-+			return err;
-+		}
++	if ((err = register_netdev(dev2)) != 0) {
++		unregister_netdev(dev1);
++		goto fail;
 +	}
 +
 +	return 0;
-+}
-+
-+void smp_resume(void)
-+{
-+	int cpu;
 +
-+	for_each_possible_cpu(cpu)
-+		vcpu_hotplug(cpu);
++ fail:
++	free_netdev(dev2);
++ fail_netdev2:
++	free_netdev(dev1);
++	return err;
 +}
 +
-+int cpu_up_check(unsigned int cpu)
++static void __exit clean_loopback(int i)
 +{
-+	int rc = 0;
++	struct net_device *dev1, *dev2;
++	char dev_name[IFNAMSIZ];
 +
-+	if (local_cpu_hotplug_request()) {
-+		cpu_set(cpu, local_allowed_cpumask);
-+		if (!cpu_isset(cpu, xenbus_allowed_cpumask)) {
-+			printk("%s: attempt to bring up CPU %u disallowed by "
-+			       "remote admin.\n", __FUNCTION__, cpu);
-+			rc = -EBUSY;
-+		}
-+	} else if (!cpu_isset(cpu, local_allowed_cpumask) ||
-+		   !cpu_isset(cpu, xenbus_allowed_cpumask)) {
-+		rc = -EBUSY;
++	sprintf(dev_name, "vif0.%d", i);
++	dev1 = dev_get_by_name(dev_name);
++	sprintf(dev_name, "veth%d", i);
++	dev2 = dev_get_by_name(dev_name);
++	if (dev1 && dev2) {
++		unregister_netdev(dev2);
++		unregister_netdev(dev1);
++		free_netdev(dev2);
++		free_netdev(dev1);
 +	}
++}
 +
-+	return rc;
++static int __init loopback_init(void)
++{
++	int i, err = 0;
++
++	if (nloopbacks == -1)
++		nloopbacks = is_initial_xendomain() ? 4 : 0;
++
++	for (i = 0; i < nloopbacks; i++)
++		if ((err = make_loopback(i)) != 0)
++			break;
++
++	return err;
 +}
 +
-+void init_xenbus_allowed_cpumask(void)
++module_init(loopback_init);
++
++static void __exit loopback_exit(void)
 +{
-+	xenbus_allowed_cpumask = cpu_present_map;
++	int i;
++
++	for (i = nloopbacks; i-- > 0; )
++		clean_loopback(i);
 +}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/core/evtchn.c tmp-linux-2.6-xen.patch/drivers/xen/core/evtchn.c
---- pristine-linux-2.6.18.2/drivers/xen/core/evtchn.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/core/evtchn.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,1015 @@
++
++module_exit(loopback_exit);
++
++MODULE_LICENSE("Dual BSD/GPL");
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/netback/netback.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/netback/netback.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1614 @@
 +/******************************************************************************
-+ * evtchn.c
++ * drivers/xen/netback/netback.c
 + * 
-+ * Communication via Xen event channels.
++ * Back-end of the driver for virtual network devices. This portion of the
++ * driver exports a 'unified' network-device interface that can be accessed
++ * by any operating system that implements a compatible front end. A 
++ * reference front-end implementation can be found in:
++ *  drivers/xen/netfront/netfront.c
 + * 
 + * Copyright (c) 2002-2005, K A Fraser
 + * 
@@ -58033,28282 +94184,31418 @@
 + * IN THE SOFTWARE.
 + */
 +
-+#include <linux/module.h>
-+#include <linux/irq.h>
-+#include <linux/interrupt.h>
-+#include <linux/sched.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/version.h>
-+#include <asm/atomic.h>
-+#include <asm/system.h>
-+#include <asm/ptrace.h>
-+#include <asm/synch_bitops.h>
-+#include <xen/evtchn.h>
-+#include <xen/interface/event_channel.h>
-+#include <xen/interface/physdev.h>
-+#include <asm/hypervisor.h>
-+#include <linux/mc146818rtc.h> /* RTC_IRQ */
++#include "common.h"
++#include <xen/balloon.h>
++#include <xen/interface/memory.h>
 +
-+/*
-+ * This lock protects updates to the following mapping and reference-count
-+ * arrays. The lock does not need to be acquired to read the mapping tables.
-+ */
-+static DEFINE_SPINLOCK(irq_mapping_update_lock);
++/*define NETBE_DEBUG_INTERRUPT*/
 +
-+/* IRQ <-> event-channel mappings. */
-+static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
-+	[0 ...  NR_EVENT_CHANNELS-1] = -1 };
++/* extra field used in struct page */
++#define netif_page_index(pg) (*(long *)&(pg)->mapping)
 +
-+/* Packed IRQ information: binding type, sub-type index, and event channel. */
-+static u32 irq_info[NR_IRQS];
++struct netbk_rx_meta {
++	skb_frag_t frag;
++	int id;
++	u8 copy:1;
++};
 +
-+/* Binding types. */
-+enum {
-+	IRQT_UNBOUND,
-+	IRQT_PIRQ,
-+	IRQT_VIRQ,
-+	IRQT_IPI,
-+	IRQT_LOCAL_PORT,
-+	IRQT_CALLER_PORT
++struct netbk_tx_pending_inuse {
++	struct list_head list;
++	unsigned long alloc_time;
 +};
 +
-+/* Constructor for packed IRQ information. */
-+static inline u32 mk_irq_info(u32 type, u32 index, u32 evtchn)
-+{
-+	return ((type << 24) | (index << 16) | evtchn);
-+}
++static void netif_idx_release(u16 pending_idx);
++static void netif_page_release(struct page *page);
++static void make_tx_response(netif_t *netif, 
++			     netif_tx_request_t *txp,
++			     s8       st);
++static netif_rx_response_t *make_rx_response(netif_t *netif, 
++					     u16      id, 
++					     s8       st,
++					     u16      offset,
++					     u16      size,
++					     u16      flags);
 +
-+/* Convenient shorthand for packed representation of an unbound IRQ. */
-+#define IRQ_UNBOUND	mk_irq_info(IRQT_UNBOUND, 0, 0)
++static void net_tx_action(unsigned long unused);
++static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
 +
-+/*
-+ * Accessors for packed IRQ information.
-+ */
++static void net_rx_action(unsigned long unused);
++static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);
 +
-+static inline unsigned int evtchn_from_irq(int irq)
-+{
-+	return (u16)(irq_info[irq]);
-+}
++static struct timer_list net_timer;
++static struct timer_list netbk_tx_pending_timer;
 +
-+static inline unsigned int index_from_irq(int irq)
++#define MAX_PENDING_REQS 256
++
++static struct sk_buff_head rx_queue;
++
++static struct page **mmap_pages;
++static inline unsigned long idx_to_pfn(unsigned int idx)
 +{
-+	return (u8)(irq_info[irq] >> 16);
++	return page_to_pfn(mmap_pages[idx]);
 +}
 +
-+static inline unsigned int type_from_irq(int irq)
++static inline unsigned long idx_to_kaddr(unsigned int idx)
 +{
-+	return (u8)(irq_info[irq] >> 24);
++	return (unsigned long)pfn_to_kaddr(idx_to_pfn(idx));
 +}
 +
-+/* IRQ <-> VIRQ mapping. */
-+DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
++#define PKT_PROT_LEN 64
 +
-+/* IRQ <-> IPI mapping. */
-+#ifndef NR_IPIS
-+#define NR_IPIS 1
-+#endif
-+DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]) = {[0 ... NR_IPIS-1] = -1};
++static struct pending_tx_info {
++	netif_tx_request_t req;
++	netif_t *netif;
++} pending_tx_info[MAX_PENDING_REQS];
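++/*
++ * pending_ring[] holds the indexes of currently free pending_tx_info[]
++ * slots: free entries are consumed at pending_cons and replenished at
++ * pending_prod, so NR_PENDING_REQS is the number of slots in use.
++ */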
++static u16 pending_ring[MAX_PENDING_REQS];
++typedef unsigned int PEND_RING_IDX;
++#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
++static PEND_RING_IDX pending_prod, pending_cons;
++#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
 +
-+/* Reference counts for bindings to IRQs. */
-+static int irq_bindcount[NR_IRQS];
++/* Freed TX SKBs get batched on this ring before return to pending_ring. */
++static u16 dealloc_ring[MAX_PENDING_REQS];
++static PEND_RING_IDX dealloc_prod, dealloc_cons;
 +
-+/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
-+static DECLARE_BITMAP(pirq_needs_eoi, NR_PIRQS);
++/* Doubly-linked list of in-use pending entries. */
++static struct netbk_tx_pending_inuse pending_inuse[MAX_PENDING_REQS];
++static LIST_HEAD(pending_inuse_head);
 +
-+#ifdef CONFIG_SMP
++static struct sk_buff_head tx_queue;
 +
-+static u8 cpu_evtchn[NR_EVENT_CHANNELS];
-+static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
++static grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
++static gnttab_unmap_grant_ref_t tx_unmap_ops[MAX_PENDING_REQS];
++static gnttab_map_grant_ref_t tx_map_ops[MAX_PENDING_REQS];
 +
-+static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
-+					   unsigned int idx)
-+{
-+	return (sh->evtchn_pending[idx] &
-+		cpu_evtchn_mask[cpu][idx] &
-+		~sh->evtchn_mask[idx]);
-+}
++static struct list_head net_schedule_list;
++static spinlock_t net_schedule_list_lock;
 +
-+static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
-+{
-+	int irq = evtchn_to_irq[chn];
++#define MAX_MFN_ALLOC 64
++static unsigned long mfn_list[MAX_MFN_ALLOC];
++static unsigned int alloc_index = 0;
 +
-+	BUG_ON(irq == -1);
-+	set_native_irq_info(irq, cpumask_of_cpu(cpu));
++/* Setting this allows the safe use of this driver without netloop. */
++static int MODPARM_copy_skb = 1;
++module_param_named(copy_skb, MODPARM_copy_skb, bool, 0);
++MODULE_PARM_DESC(copy_skb, "Copy data received from netfront without netloop");
 +
-+	clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
-+	set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
-+	cpu_evtchn[chn] = cpu;
-+}
++int netbk_copy_skb_mode;
 +
-+static void init_evtchn_cpu_bindings(void)
++static inline unsigned long alloc_mfn(void)
 +{
-+	int i;
-+
-+	/* By default all event channels notify CPU#0. */
-+	for (i = 0; i < NR_IRQS; i++)
-+		set_native_irq_info(i, cpumask_of_cpu(0));
-+
-+	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
-+	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
++	BUG_ON(alloc_index == 0);
++	return mfn_list[--alloc_index];
 +}
 +
-+static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
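++/* Top up the pool of machine frames used when flipping pages to the
++ * frontend; returns 0 once at least 'nr' frames are available. */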
++static int check_mfn(int nr)
 +{
-+	return cpu_evtchn[evtchn];
-+}
++	struct xen_memory_reservation reservation = {
++		.extent_order = 0,
++		.domid        = DOMID_SELF
++	};
++	int rc;
 +
-+#else
++	if (likely(alloc_index >= nr))
++		return 0;
 +
-+static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
-+					   unsigned int idx)
-+{
-+	return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]);
++	set_xen_guest_handle(reservation.extent_start, mfn_list + alloc_index);
++	reservation.nr_extents = MAX_MFN_ALLOC - alloc_index;
++	rc = HYPERVISOR_memory_op(XENMEM_increase_reservation, &reservation);
++	if (likely(rc > 0))
++		alloc_index += rc;
++
++	return alloc_index >= nr ? 0 : -ENOMEM;
 +}
 +
-+static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
++static inline void maybe_schedule_tx_action(void)
 +{
++	smp_mb();
++	if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
++	    !list_empty(&net_schedule_list))
++		tasklet_schedule(&net_tx_tasklet);
 +}
 +
-+static void init_evtchn_cpu_bindings(void)
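++/*
++ * Copy an skb into freshly allocated memory: the head is linearised into
++ * a new skb and the remaining data is copied page-by-page into newly
++ * allocated pages, so the result references no foreign pages.
++ */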
++static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
 +{
++	struct skb_shared_info *ninfo;
++	struct sk_buff *nskb;
++	unsigned long offset;
++	int ret;
++	int len;
++	int headlen;
++
++	BUG_ON(skb_shinfo(skb)->frag_list != NULL);
++
++	nskb = alloc_skb(SKB_MAX_HEAD(0), GFP_ATOMIC | __GFP_NOWARN);
++	if (unlikely(!nskb))
++		goto err;
++
++	skb_reserve(nskb, 16 + NET_IP_ALIGN);
++	headlen = nskb->end - nskb->data;
++	if (headlen > skb_headlen(skb))
++		headlen = skb_headlen(skb);
++	ret = skb_copy_bits(skb, 0, __skb_put(nskb, headlen), headlen);
++	BUG_ON(ret);
++
++	ninfo = skb_shinfo(nskb);
++	ninfo->gso_size = skb_shinfo(skb)->gso_size;
++	ninfo->gso_type = skb_shinfo(skb)->gso_type;
++
++	offset = headlen;
++	len = skb->len - headlen;
++
++	nskb->len = skb->len;
++	nskb->data_len = len;
++	nskb->truesize += len;
++
++	while (len) {
++		struct page *page;
++		int copy;
++		int zero;
++
++		if (unlikely(ninfo->nr_frags >= MAX_SKB_FRAGS)) {
++			dump_stack();
++			goto err_free;
++		}
++
++		copy = len >= PAGE_SIZE ? PAGE_SIZE : len;
++		zero = len >= PAGE_SIZE ? 0 : __GFP_ZERO;
++
++		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN | zero);
++		if (unlikely(!page))
++			goto err_free;
++
++		ret = skb_copy_bits(skb, offset, page_address(page), copy);
++		BUG_ON(ret);
++
++		ninfo->frags[ninfo->nr_frags].page = page;
++		ninfo->frags[ninfo->nr_frags].page_offset = 0;
++		ninfo->frags[ninfo->nr_frags].size = copy;
++		ninfo->nr_frags++;
++
++		offset += copy;
++		len -= copy;
++	}
++
++	offset = nskb->data - skb->data;
++
++	nskb->h.raw = skb->h.raw + offset;
++	nskb->nh.raw = skb->nh.raw + offset;
++	nskb->mac.raw = skb->mac.raw + offset;
++
++	return nskb;
++
++ err_free:
++	kfree_skb(nskb);
++ err:
++	return NULL;
 +}
 +
-+static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
++static inline int netbk_max_required_rx_slots(netif_t *netif)
 +{
-+	return 0;
++	if (netif->features & (NETIF_F_SG|NETIF_F_TSO))
++		return MAX_SKB_FRAGS + 2; /* header + extra_info + frags */
++	return 1; /* all in one */
 +}
 +
-+#endif
-+
-+/* Upcall to generic IRQ layer. */
-+#ifdef CONFIG_X86
-+extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
-+void __init xen_init_IRQ(void);
-+void __init init_IRQ(void)
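++/*
++ * The shared rx ring is full when the frontend has not posted enough free
++ * request slots, or when we would run out of response slots, for a
++ * worst-case packet.
++ */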
++static inline int netbk_queue_full(netif_t *netif)
 +{
-+	irq_ctx_init(0);
-+	xen_init_IRQ();
-+}
-+#if defined (__i386__)
-+static inline void exit_idle(void) {}
-+#define IRQ_REG orig_eax
-+#elif defined (__x86_64__)
-+#include <asm/idle.h>
-+#define IRQ_REG orig_rax
-+#endif
-+#define do_IRQ(irq, regs) do {		\
-+	(regs)->IRQ_REG = ~(irq);	\
-+	do_IRQ((regs));			\
-+} while (0)
-+#endif
++	RING_IDX peek   = netif->rx_req_cons_peek;
++	RING_IDX needed = netbk_max_required_rx_slots(netif);
 +
-+/* Xen will never allocate port zero for any purpose. */
-+#define VALID_EVTCHN(chn)	((chn) != 0)
++	return ((netif->rx.sring->req_prod - peek) < needed) ||
++	       ((netif->rx.rsp_prod_pvt + NET_RX_RING_SIZE - peek) < needed);
++}
 +
-+/*
-+ * Force a proper event-channel callback from Xen after clearing the
-+ * callback mask. We do this in a very simple manner, by making a call
-+ * down into Xen. The pending flag will be checked by Xen on return.
-+ */
-+void force_evtchn_callback(void)
++static void tx_queue_callback(unsigned long data)
 +{
-+	(void)HYPERVISOR_xen_version(0, NULL);
++	netif_t *netif = (netif_t *)data;
++	if (netif_schedulable(netif))
++		netif_wake_queue(netif->dev);
 +}
-+/* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */
-+EXPORT_SYMBOL(force_evtchn_callback);
-+
-+static DEFINE_PER_CPU(unsigned int, upcall_count) = { 0 };
 +
-+/* NB. Interrupts are disabled on entry. */
-+asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
++int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
 +{
-+	unsigned long  l1, l2;
-+	unsigned int   l1i, l2i, port, count;
-+	int            irq, cpu = smp_processor_id();
-+	shared_info_t *s = HYPERVISOR_shared_info;
-+	vcpu_info_t   *vcpu_info = &s->vcpu_info[cpu];
++	netif_t *netif = netdev_priv(dev);
 +
-+	do {
-+		/* Avoid a callback storm when we reenable delivery. */
-+		vcpu_info->evtchn_upcall_pending = 0;
++	BUG_ON(skb->dev != dev);
 +
-+		/* Nested invocations bail immediately. */
-+		if (unlikely(per_cpu(upcall_count, cpu)++))
-+			return;
++	/* Drop the packet if the target domain has no receive buffers. */
++	if (unlikely(!netif_schedulable(netif) || netbk_queue_full(netif)))
++		goto drop;
 +
-+#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
-+		/* Clear master flag /before/ clearing selector flag. */
-+		rmb();
-+#endif
-+		l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
-+		while (l1 != 0) {
-+			l1i = __ffs(l1);
-+			l1 &= ~(1UL << l1i);
++	/*
++	 * Copy the packet here if it's destined for a flipping interface
++	 * but isn't flippable (e.g. extra references to data).
++	 * XXX For now we also copy skbuffs whose head crosses a page
++	 * boundary, because netbk_gop_skb can't handle them.
++	 */
++	if (!netif->copying_receiver ||
++	    ((skb_headlen(skb) + offset_in_page(skb->data)) >= PAGE_SIZE)) {
++		struct sk_buff *nskb = netbk_copy_skb(skb);
++		if ( unlikely(nskb == NULL) )
++			goto drop;
++		/* Copy only the header fields we use in this driver. */
++		nskb->dev = skb->dev;
++		nskb->ip_summed = skb->ip_summed;
++		nskb->proto_data_valid = skb->proto_data_valid;
++		dev_kfree_skb(skb);
++		skb = nskb;
++	}
 +
-+			while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
-+				l2i = __ffs(l2);
++	netif->rx_req_cons_peek += skb_shinfo(skb)->nr_frags + 1 +
++				   !!skb_shinfo(skb)->gso_size;
++	netif_get(netif);
 +
-+				port = (l1i * BITS_PER_LONG) + l2i;
-+				if ((irq = evtchn_to_irq[port]) != -1)
-+					do_IRQ(irq, regs);
-+				else {
-+					exit_idle();
-+					evtchn_device_upcall(port);
-+				}
-+			}
++	if (netbk_can_queue(dev) && netbk_queue_full(netif)) {
++		netif->rx.sring->req_event = netif->rx_req_cons_peek +
++			netbk_max_required_rx_slots(netif);
++		mb(); /* request notification /then/ check & stop the queue */
++		if (netbk_queue_full(netif)) {
++			netif_stop_queue(dev);
++			/*
++			 * Schedule 500ms timeout to restart the queue, thus
++			 * ensuring that an inactive queue will be drained.
++			 * Packets will immediately be dropped until more
++			 * receive buffers become available (see
++			 * netbk_queue_full() check above).
++			 */
++			netif->tx_queue_timeout.data = (unsigned long)netif;
++			netif->tx_queue_timeout.function = tx_queue_callback;
++			__mod_timer(&netif->tx_queue_timeout, jiffies + HZ/2);
 +		}
++	}
 +
-+		/* If there were nested callbacks then we have more to do. */
-+		count = per_cpu(upcall_count, cpu);
-+		per_cpu(upcall_count, cpu) = 0;
-+	} while (unlikely(count != 1));
-+}
-+
-+static int find_unbound_irq(void)
-+{
-+	static int warned;
-+	int dynirq, irq;
++	skb_queue_tail(&rx_queue, skb);
++	tasklet_schedule(&net_rx_tasklet);
 +
-+	for (dynirq = 0; dynirq < NR_DYNIRQS; dynirq++) {
-+		irq = dynirq_to_irq(dynirq);
-+		if (irq_bindcount[irq] == 0)
-+			return irq;
-+	}
++	return 0;
 +
-+	if (!warned) {
-+		warned = 1;
-+		printk(KERN_WARNING "No available IRQ to bind to: "
-+		       "increase NR_DYNIRQS.\n");
-+	}
++ drop:
++	netif->stats.tx_dropped++;
++	dev_kfree_skb(skb);
++	return 0;
++}
 +
-+	return -ENOSPC;
++#if 0
++static void xen_network_done_notify(void)
++{
++	static struct net_device *eth0_dev = NULL;
++	if (unlikely(eth0_dev == NULL))
++		eth0_dev = __dev_get_by_name("eth0");
++	netif_rx_schedule(eth0_dev);
++}
++/* 
++ * Add following to poll() function in NAPI driver (Tigon3 is example):
++ *  if ( xen_network_done() )
++ *      tg3_enable_ints(tp);
++ */
++int xen_network_done(void)
++{
++	return skb_queue_empty(&rx_queue);
 +}
++#endif
 +
-+static int bind_caller_port_to_irq(unsigned int caller_port)
++struct netrx_pending_operations {
++	unsigned trans_prod, trans_cons;
++	unsigned mmu_prod, mmu_mcl;
++	unsigned mcl_prod, mcl_cons;
++	unsigned copy_prod, copy_cons;
++	unsigned meta_prod, meta_cons;
++	mmu_update_t *mmu;
++	gnttab_transfer_t *trans;
++	gnttab_copy_t *copy;
++	multicall_entry_t *mcl;
++	struct netbk_rx_meta *meta;
++};
++
++/* Set up the grant operations for this fragment.  If it's a flipping
++   interface, we also set up the page transfer from here. */
++static u16 netbk_gop_frag(netif_t *netif, struct netbk_rx_meta *meta,
++			  int i, struct netrx_pending_operations *npo,
++			  struct page *page, unsigned long size,
++			  unsigned long offset)
 +{
-+	int irq;
++	mmu_update_t *mmu;
++	gnttab_transfer_t *gop;
++	gnttab_copy_t *copy_gop;
++	multicall_entry_t *mcl;
++	netif_rx_request_t *req;
++	unsigned long old_mfn, new_mfn;
 +
-+	spin_lock(&irq_mapping_update_lock);
++	old_mfn = virt_to_mfn(page_address(page));
 +
-+	if ((irq = evtchn_to_irq[caller_port]) == -1) {
-+		if ((irq = find_unbound_irq()) < 0)
-+			goto out;
++	req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons + i);
++	if (netif->copying_receiver) {
++		/* The fragment needs to be copied rather than
++		   flipped. */
++		meta->copy = 1;
++		copy_gop = npo->copy + npo->copy_prod++;
++		copy_gop->flags = GNTCOPY_dest_gref;
++		if (PageForeign(page)) {
++			struct pending_tx_info *src_pend =
++				&pending_tx_info[netif_page_index(page)];
++			copy_gop->source.domid = src_pend->netif->domid;
++			copy_gop->source.u.ref = src_pend->req.gref;
++			copy_gop->flags |= GNTCOPY_source_gref;
++		} else {
++			copy_gop->source.domid = DOMID_SELF;
++			copy_gop->source.u.gmfn = old_mfn;
++		}
++		copy_gop->source.offset = offset;
++		copy_gop->dest.domid = netif->domid;
++		copy_gop->dest.offset = 0;
++		copy_gop->dest.u.ref = req->gref;
++		copy_gop->len = size;
++	} else {
++		meta->copy = 0;
++		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++			new_mfn = alloc_mfn();
 +
-+		evtchn_to_irq[caller_port] = irq;
-+		irq_info[irq] = mk_irq_info(IRQT_CALLER_PORT, 0, caller_port);
-+	}
++			/*
++			 * Set the new P2M table entry before
++			 * reassigning the old data page. Heed the
++			 * comment in pgtable-2level.h:pte_page(). :-)
++			 */
++			set_phys_to_machine(page_to_pfn(page), new_mfn);
++
++			mcl = npo->mcl + npo->mcl_prod++;
++			MULTI_update_va_mapping(mcl,
++					     (unsigned long)page_address(page),
++					     pfn_pte_ma(new_mfn, PAGE_KERNEL),
++					     0);
 +
-+	irq_bindcount[irq]++;
++			mmu = npo->mmu + npo->mmu_prod++;
++			mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
++				MMU_MACHPHYS_UPDATE;
++			mmu->val = page_to_pfn(page);
++		}
 +
-+ out:
-+	spin_unlock(&irq_mapping_update_lock);
-+	return irq;
++		gop = npo->trans + npo->trans_prod++;
++		gop->mfn = old_mfn;
++		gop->domid = netif->domid;
++		gop->ref = req->gref;
++	}
++	return req->id;
 +}
 +
-+static int bind_local_port_to_irq(unsigned int local_port)
++static void netbk_gop_skb(struct sk_buff *skb,
++			  struct netrx_pending_operations *npo)
 +{
-+	int irq;
-+
-+	spin_lock(&irq_mapping_update_lock);
++	netif_t *netif = netdev_priv(skb->dev);
++	int nr_frags = skb_shinfo(skb)->nr_frags;
++	int i;
++	int extra;
++	struct netbk_rx_meta *head_meta, *meta;
 +
-+	BUG_ON(evtchn_to_irq[local_port] != -1);
++	head_meta = npo->meta + npo->meta_prod++;
++	head_meta->frag.page_offset = skb_shinfo(skb)->gso_type;
++	head_meta->frag.size = skb_shinfo(skb)->gso_size;
++	extra = !!head_meta->frag.size + 1;
 +
-+	if ((irq = find_unbound_irq()) < 0) {
-+		struct evtchn_close close = { .port = local_port };
-+		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
-+			BUG();
-+		goto out;
++	for (i = 0; i < nr_frags; i++) {
++		meta = npo->meta + npo->meta_prod++;
++		meta->frag = skb_shinfo(skb)->frags[i];
++		meta->id = netbk_gop_frag(netif, meta, i + extra, npo,
++					  meta->frag.page,
++					  meta->frag.size,
++					  meta->frag.page_offset);
 +	}
 +
-+	evtchn_to_irq[local_port] = irq;
-+	irq_info[irq] = mk_irq_info(IRQT_LOCAL_PORT, 0, local_port);
-+	irq_bindcount[irq]++;
++	/*
++	 * This must occur at the end to ensure that we don't trash skb_shinfo
++	 * until we're done. We know that the head doesn't cross a page
++	 * boundary because such packets get copied in netif_be_start_xmit.
++	 */
++	head_meta->id = netbk_gop_frag(netif, head_meta, 0, npo,
++				       virt_to_page(skb->data),
++				       skb_headlen(skb),
++				       offset_in_page(skb->data));
 +
-+ out:
-+	spin_unlock(&irq_mapping_update_lock);
-+	return irq;
++	netif->rx.req_cons += nr_frags + extra;
 +}
 +
-+static int bind_listening_port_to_irq(unsigned int remote_domain)
++static inline void netbk_free_pages(int nr_frags, struct netbk_rx_meta *meta)
 +{
-+	struct evtchn_alloc_unbound alloc_unbound;
-+	int err;
-+
-+	alloc_unbound.dom        = DOMID_SELF;
-+	alloc_unbound.remote_dom = remote_domain;
-+
-+	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
-+					  &alloc_unbound);
++	int i;
 +
-+	return err ? : bind_local_port_to_irq(alloc_unbound.port);
++	for (i = 0; i < nr_frags; i++)
++		put_page(meta[i].frag.page);
 +}
 +
-+static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
-+					  unsigned int remote_port)
++/* This is a twin to netbk_gop_skb.  Assume that netbk_gop_skb was
++   used to set up the operations at the top of
++   netrx_pending_operations, which have since been executed.  Check
++   that they didn't report any errors and advance past them. */
++static int netbk_check_gop(int nr_frags, domid_t domid,
++			   struct netrx_pending_operations *npo)
 +{
-+	struct evtchn_bind_interdomain bind_interdomain;
-+	int err;
++	multicall_entry_t *mcl;
++	gnttab_transfer_t *gop;
++	gnttab_copy_t     *copy_op;
++	int status = NETIF_RSP_OKAY;
++	int i;
 +
-+	bind_interdomain.remote_dom  = remote_domain;
-+	bind_interdomain.remote_port = remote_port;
++	for (i = 0; i <= nr_frags; i++) {
++		if (npo->meta[npo->meta_cons + i].copy) {
++			copy_op = npo->copy + npo->copy_cons++;
++			if (copy_op->status != GNTST_okay) {
++				DPRINTK("Bad status %d from copy to DOM%d.\n",
++					copy_op->status, domid);
++				status = NETIF_RSP_ERROR;
++			}
++		} else {
++			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++				mcl = npo->mcl + npo->mcl_cons++;
++				/* The update_va_mapping() must not fail. */
++				BUG_ON(mcl->result != 0);
++			}
 +
-+	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
-+					  &bind_interdomain);
++			gop = npo->trans + npo->trans_cons++;
++			/* Check the reassignment error code. */
++			if (gop->status != 0) {
++				DPRINTK("Bad status %d from grant transfer to DOM%u\n",
++					gop->status, domid);
++				/*
++				 * Page no longer belongs to us unless
++				 * GNTST_bad_page, but that should be
++				 * a fatal error anyway.
++				 */
++				BUG_ON(gop->status == GNTST_bad_page);
++				status = NETIF_RSP_ERROR;
++			}
++		}
++	}
 +
-+	return err ? : bind_local_port_to_irq(bind_interdomain.local_port);
++	return status;
 +}
 +
-+static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
++static void netbk_add_frag_responses(netif_t *netif, int status,
++				     struct netbk_rx_meta *meta, int nr_frags)
 +{
-+	struct evtchn_bind_virq bind_virq;
-+	int evtchn, irq;
-+
-+	spin_lock(&irq_mapping_update_lock);
-+
-+	if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
-+		if ((irq = find_unbound_irq()) < 0)
-+			goto out;
-+
-+		bind_virq.virq = virq;
-+		bind_virq.vcpu = cpu;
-+		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
-+						&bind_virq) != 0)
-+			BUG();
-+		evtchn = bind_virq.port;
-+
-+		evtchn_to_irq[evtchn] = irq;
-+		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
++	int i;
++	unsigned long offset;
 +
-+		per_cpu(virq_to_irq, cpu)[virq] = irq;
++	for (i = 0; i < nr_frags; i++) {
++		int id = meta[i].id;
++		int flags = (i == nr_frags - 1) ? 0 : NETRXF_more_data;
 +
-+		bind_evtchn_to_cpu(evtchn, cpu);
++		if (meta[i].copy)
++			offset = 0;
++		else
++			offset = meta[i].frag.page_offset;
++		make_rx_response(netif, id, status, offset,
++				 meta[i].frag.size, flags);
 +	}
-+
-+	irq_bindcount[irq]++;
-+
-+ out:
-+	spin_unlock(&irq_mapping_update_lock);
-+	return irq;
 +}
 +
-+static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
++static void net_rx_action(unsigned long unused)
 +{
-+	struct evtchn_bind_ipi bind_ipi;
-+	int evtchn, irq;
-+
-+	spin_lock(&irq_mapping_update_lock);
-+
-+	if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
-+		if ((irq = find_unbound_irq()) < 0)
-+			goto out;
-+
-+		bind_ipi.vcpu = cpu;
-+		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
-+						&bind_ipi) != 0)
-+			BUG();
-+		evtchn = bind_ipi.port;
-+
-+		evtchn_to_irq[evtchn] = irq;
-+		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
-+
-+		per_cpu(ipi_to_irq, cpu)[ipi] = irq;
-+
-+		bind_evtchn_to_cpu(evtchn, cpu);
-+	}
++	netif_t *netif = NULL;
++	s8 status;
++	u16 id, irq, flags;
++	netif_rx_response_t *resp;
++	multicall_entry_t *mcl;
++	struct sk_buff_head rxq;
++	struct sk_buff *skb;
++	int notify_nr = 0;
++	int ret;
++	int nr_frags;
++	int count;
++	unsigned long offset;
 +
-+	irq_bindcount[irq]++;
++	/*
++	 * Putting hundreds of bytes on the stack is considered rude.
++	 * Static works because a tasklet can only be on one CPU at any time.
++	 */
++	static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+3];
++	static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
++	static gnttab_transfer_t grant_trans_op[NET_RX_RING_SIZE];
++	static gnttab_copy_t grant_copy_op[NET_RX_RING_SIZE];
++	static unsigned char rx_notify[NR_IRQS];
++	static u16 notify_list[NET_RX_RING_SIZE];
++	static struct netbk_rx_meta meta[NET_RX_RING_SIZE];
 +
-+ out:
-+	spin_unlock(&irq_mapping_update_lock);
-+	return irq;
-+}
++	struct netrx_pending_operations npo = {
++		.mmu   = rx_mmu,
++		.trans = grant_trans_op,
++		.copy  = grant_copy_op,
++		.mcl   = rx_mcl,
++		.meta  = meta,
++	};
 +
-+static void unbind_from_irq(unsigned int irq)
-+{
-+	struct evtchn_close close;
-+	int cpu, evtchn = evtchn_from_irq(irq);
++	skb_queue_head_init(&rxq);
 +
-+	spin_lock(&irq_mapping_update_lock);
++	count = 0;
 +
-+	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
-+		close.port = evtchn;
-+		if ((type_from_irq(irq) != IRQT_CALLER_PORT) &&
-+		    HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
-+			BUG();
++	while ((skb = skb_dequeue(&rx_queue)) != NULL) {
++		nr_frags = skb_shinfo(skb)->nr_frags;
++		*(int *)skb->cb = nr_frags;
 +
-+		switch (type_from_irq(irq)) {
-+		case IRQT_VIRQ:
-+			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
-+				[index_from_irq(irq)] = -1;
-+			break;
-+		case IRQT_IPI:
-+			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
-+				[index_from_irq(irq)] = -1;
-+			break;
-+		default:
++		if (!xen_feature(XENFEAT_auto_translated_physmap) &&
++		    !((netif_t *)netdev_priv(skb->dev))->copying_receiver &&
++		    check_mfn(nr_frags + 1)) {
++			/* Memory squeeze? Back off for an arbitrary while. */
++			if (net_ratelimit())
++				WPRINTK("Memory squeeze in netback driver.\n");
++			mod_timer(&net_timer, jiffies + HZ);
++			skb_queue_head(&rx_queue, skb);
 +			break;
 +		}
 +
-+		/* Closed ports are implicitly re-bound to VCPU0. */
-+		bind_evtchn_to_cpu(evtchn, 0);
-+
-+		evtchn_to_irq[evtchn] = -1;
-+		irq_info[irq] = IRQ_UNBOUND;
-+
-+		/* Zap stats across IRQ changes of use. */
-+		for_each_possible_cpu(cpu)
-+			kstat_cpu(cpu).irqs[irq] = 0;
-+	}
-+
-+	spin_unlock(&irq_mapping_update_lock);
-+}
++		netbk_gop_skb(skb, &npo);
 +
-+int bind_caller_port_to_irqhandler(
-+	unsigned int caller_port,
-+	irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+	unsigned long irqflags,
-+	const char *devname,
-+	void *dev_id)
-+{
-+	int irq, retval;
++		count += nr_frags + 1;
 +
-+	irq = bind_caller_port_to_irq(caller_port);
-+	if (irq < 0)
-+		return irq;
++		__skb_queue_tail(&rxq, skb);
 +
-+	retval = request_irq(irq, handler, irqflags, devname, dev_id);
-+	if (retval != 0) {
-+		unbind_from_irq(irq);
-+		return retval;
++		/* Filled the batch queue? */
++		if (count + MAX_SKB_FRAGS >= NET_RX_RING_SIZE)
++			break;
 +	}
 +
-+	return irq;
-+}
-+EXPORT_SYMBOL_GPL(bind_caller_port_to_irqhandler);
++	BUG_ON(npo.meta_prod > ARRAY_SIZE(meta));
 +
-+int bind_listening_port_to_irqhandler(
-+	unsigned int remote_domain,
-+	irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+	unsigned long irqflags,
-+	const char *devname,
-+	void *dev_id)
-+{
-+	int irq, retval;
++	npo.mmu_mcl = npo.mcl_prod;
++	if (npo.mcl_prod) {
++		BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
++		BUG_ON(npo.mmu_prod > ARRAY_SIZE(rx_mmu));
++		mcl = npo.mcl + npo.mcl_prod++;
 +
-+	irq = bind_listening_port_to_irq(remote_domain);
-+	if (irq < 0)
-+		return irq;
++		BUG_ON(mcl[-1].op != __HYPERVISOR_update_va_mapping);
++		mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
 +
-+	retval = request_irq(irq, handler, irqflags, devname, dev_id);
-+	if (retval != 0) {
-+		unbind_from_irq(irq);
-+		return retval;
++		mcl->op = __HYPERVISOR_mmu_update;
++		mcl->args[0] = (unsigned long)rx_mmu;
++		mcl->args[1] = npo.mmu_prod;
++		mcl->args[2] = 0;
++		mcl->args[3] = DOMID_SELF;
 +	}
 +
-+	return irq;
-+}
-+EXPORT_SYMBOL_GPL(bind_listening_port_to_irqhandler);
-+
-+int bind_interdomain_evtchn_to_irqhandler(
-+	unsigned int remote_domain,
-+	unsigned int remote_port,
-+	irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+	unsigned long irqflags,
-+	const char *devname,
-+	void *dev_id)
-+{
-+	int irq, retval;
-+
-+	irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
-+	if (irq < 0)
-+		return irq;
++	if (npo.trans_prod) {
++		BUG_ON(npo.trans_prod > ARRAY_SIZE(grant_trans_op));
++		mcl = npo.mcl + npo.mcl_prod++;
++		mcl->op = __HYPERVISOR_grant_table_op;
++		mcl->args[0] = GNTTABOP_transfer;
++		mcl->args[1] = (unsigned long)grant_trans_op;
++		mcl->args[2] = npo.trans_prod;
++	}
 +
-+	retval = request_irq(irq, handler, irqflags, devname, dev_id);
-+	if (retval != 0) {
-+		unbind_from_irq(irq);
-+		return retval;
++	if (npo.copy_prod) {
++		BUG_ON(npo.copy_prod > ARRAY_SIZE(grant_copy_op));
++		mcl = npo.mcl + npo.mcl_prod++;
++		mcl->op = __HYPERVISOR_grant_table_op;
++		mcl->args[0] = GNTTABOP_copy;
++		mcl->args[1] = (unsigned long)grant_copy_op;
++		mcl->args[2] = npo.copy_prod;
 +	}
 +
-+	return irq;
-+}
-+EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
++	/* Nothing to do? */
++	if (!npo.mcl_prod)
++		return;
 +
-+int bind_virq_to_irqhandler(
-+	unsigned int virq,
-+	unsigned int cpu,
-+	irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+	unsigned long irqflags,
-+	const char *devname,
-+	void *dev_id)
-+{
-+	int irq, retval;
++	BUG_ON(npo.mcl_prod > ARRAY_SIZE(rx_mcl));
 +
-+	irq = bind_virq_to_irq(virq, cpu);
-+	if (irq < 0)
-+		return irq;
++	ret = HYPERVISOR_multicall(npo.mcl, npo.mcl_prod);
++	BUG_ON(ret != 0);
++	/* The mmu_machphys_update() must not fail. */
++	BUG_ON(npo.mmu_mcl && npo.mcl[npo.mmu_mcl].result != 0);
 +
-+	retval = request_irq(irq, handler, irqflags, devname, dev_id);
-+	if (retval != 0) {
-+		unbind_from_irq(irq);
-+		return retval;
-+	}
++	while ((skb = __skb_dequeue(&rxq)) != NULL) {
++		nr_frags = *(int *)skb->cb;
 +
-+	return irq;
-+}
-+EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
++		netif = netdev_priv(skb->dev);
++		/*
++		 * We can't rely on skb_release_data to release the pages
++		 * used by fragments for us, since it tries to touch the
++		 * pages in the fraglist.  That doesn't work in flipping
++		 * mode; in copying mode we still have access to all of
++		 * the pages, so it's safe to let release_data deal with
++		 * it.  (Freeing the fragments directly is safe since we
++		 * copy non-linear skbs destined for flipping interfaces.)
++		 */
++		if (!netif->copying_receiver) {
++			atomic_set(&(skb_shinfo(skb)->dataref), 1);
++			skb_shinfo(skb)->frag_list = NULL;
++			skb_shinfo(skb)->nr_frags = 0;
++			netbk_free_pages(nr_frags, meta + npo.meta_cons + 1);
++		}
 +
-+int bind_ipi_to_irqhandler(
-+	unsigned int ipi,
-+	unsigned int cpu,
-+	irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+	unsigned long irqflags,
-+	const char *devname,
-+	void *dev_id)
-+{
-+	int irq, retval;
++		netif->stats.tx_bytes += skb->len;
++		netif->stats.tx_packets++;
 +
-+	irq = bind_ipi_to_irq(ipi, cpu);
-+	if (irq < 0)
-+		return irq;
++		status = netbk_check_gop(nr_frags, netif->domid, &npo);
 +
-+	retval = request_irq(irq, handler, irqflags, devname, dev_id);
-+	if (retval != 0) {
-+		unbind_from_irq(irq);
-+		return retval;
-+	}
++		id = meta[npo.meta_cons].id;
++		flags = nr_frags ? NETRXF_more_data : 0;
 +
-+	return irq;
-+}
-+EXPORT_SYMBOL_GPL(bind_ipi_to_irqhandler);
++		if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
++			flags |= NETRXF_csum_blank | NETRXF_data_validated;
++		else if (skb->proto_data_valid) /* remote but checksummed? */
++			flags |= NETRXF_data_validated;
 +
-+void unbind_from_irqhandler(unsigned int irq, void *dev_id)
-+{
-+	free_irq(irq, dev_id);
-+	unbind_from_irq(irq);
-+}
-+EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
++		if (meta[npo.meta_cons].copy)
++			offset = 0;
++		else
++			offset = offset_in_page(skb->data);
++		resp = make_rx_response(netif, id, status, offset,
++					skb_headlen(skb), flags);
 +
-+#ifdef CONFIG_SMP
-+/* Rebind an evtchn so that it gets delivered to a specific cpu */
-+static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
-+{
-+	struct evtchn_bind_vcpu bind_vcpu;
-+	int evtchn = evtchn_from_irq(irq);
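++		/*
++		 * netbk_gop_skb records the skb's gso_size in the head
++		 * meta slot; a non-zero value means an extra-info
++		 * response describing the GSO packet must follow.
++		 */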
++		if (meta[npo.meta_cons].frag.size) {
++			struct netif_extra_info *gso =
++				(struct netif_extra_info *)
++				RING_GET_RESPONSE(&netif->rx,
++						  netif->rx.rsp_prod_pvt++);
 +
-+	if (!VALID_EVTCHN(evtchn))
-+		return;
++			resp->flags |= NETRXF_extra_info;
++
++			gso->u.gso.size = meta[npo.meta_cons].frag.size;
++			gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
++			gso->u.gso.pad = 0;
++			gso->u.gso.features = 0;
 +
-+	/* Send future instances of this interrupt to other vcpu. */
-+	bind_vcpu.port = evtchn;
-+	bind_vcpu.vcpu = tcpu;
++			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
++			gso->flags = 0;
++		}
 +
-+	/*
-+	 * If this fails, it usually just indicates that we're dealing with a 
-+	 * virq or IPI channel, which don't actually need to be rebound. Ignore
-+	 * it, but don't do the xenlinux-level rebind in that case.
-+	 */
-+	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
-+		bind_evtchn_to_cpu(evtchn, tcpu);
-+}
++		netbk_add_frag_responses(netif, status,
++					 meta + npo.meta_cons + 1,
++					 nr_frags);
 +
-+static void set_affinity_irq(unsigned irq, cpumask_t dest)
-+{
-+	unsigned tcpu = first_cpu(dest);
-+	rebind_irq_to_cpu(irq, tcpu);
-+}
-+#endif
++		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, ret);
++		irq = netif->irq;
++		if (ret && !rx_notify[irq]) {
++			rx_notify[irq] = 1;
++			notify_list[notify_nr++] = irq;
++		}
 +
-+int resend_irq_on_evtchn(unsigned int irq)
-+{
-+	int masked, evtchn = evtchn_from_irq(irq);
-+	shared_info_t *s = HYPERVISOR_shared_info;
++		if (netif_queue_stopped(netif->dev) &&
++		    netif_schedulable(netif) &&
++		    !netbk_queue_full(netif))
++			netif_wake_queue(netif->dev);
 +
-+	if (!VALID_EVTCHN(evtchn))
-+		return 1;
++		netif_put(netif);
++		dev_kfree_skb(skb);
++		npo.meta_cons += nr_frags + 1;
++	}
 +
-+	masked = synch_test_and_set_bit(evtchn, s->evtchn_mask);
-+	synch_set_bit(evtchn, s->evtchn_pending);
-+	if (!masked)
-+		unmask_evtchn(evtchn);
++	while (notify_nr != 0) {
++		irq = notify_list[--notify_nr];
++		rx_notify[irq] = 0;
++		notify_remote_via_irq(irq);
++	}
 +
-+	return 1;
++	/* More work to do? */
++	if (!skb_queue_empty(&rx_queue) && !timer_pending(&net_timer))
++		tasklet_schedule(&net_rx_tasklet);
++#if 0
++	else
++		xen_network_done_notify();
++#endif
 +}
 +
-+/*
-+ * Interface to generic handling in irq.c
-+ */
-+
-+static unsigned int startup_dynirq(unsigned int irq)
++static void net_alarm(unsigned long unused)
 +{
-+	int evtchn = evtchn_from_irq(irq);
-+
-+	if (VALID_EVTCHN(evtchn))
-+		unmask_evtchn(evtchn);
-+	return 0;
++	tasklet_schedule(&net_rx_tasklet);
 +}
 +
-+static void shutdown_dynirq(unsigned int irq)
++static void netbk_tx_pending_timeout(unsigned long unused)
 +{
-+	int evtchn = evtchn_from_irq(irq);
++	tasklet_schedule(&net_tx_tasklet);
++}
 +
-+	if (VALID_EVTCHN(evtchn))
-+		mask_evtchn(evtchn);
++struct net_device_stats *netif_be_get_stats(struct net_device *dev)
++{
++	netif_t *netif = netdev_priv(dev);
++	return &netif->stats;
 +}
 +
-+static void enable_dynirq(unsigned int irq)
++static int __on_net_schedule_list(netif_t *netif)
 +{
-+	int evtchn = evtchn_from_irq(irq);
++	return netif->list.next != NULL;
++}
 +
-+	if (VALID_EVTCHN(evtchn))
-+		unmask_evtchn(evtchn);
++static void remove_from_net_schedule_list(netif_t *netif)
++{
++	spin_lock_irq(&net_schedule_list_lock);
++	if (likely(__on_net_schedule_list(netif))) {
++		list_del(&netif->list);
++		netif->list.next = NULL;
++		netif_put(netif);
++	}
++	spin_unlock_irq(&net_schedule_list_lock);
 +}
 +
-+static void disable_dynirq(unsigned int irq)
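++/*
++ * Unlocked fast-path test first; the state is re-checked under the
++ * lock so a netif is only ever linked into net_schedule_list once.
++ */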
++static void add_to_net_schedule_list_tail(netif_t *netif)
 +{
-+	int evtchn = evtchn_from_irq(irq);
++	if (__on_net_schedule_list(netif))
++		return;
 +
-+	if (VALID_EVTCHN(evtchn))
-+		mask_evtchn(evtchn);
++	spin_lock_irq(&net_schedule_list_lock);
++	if (!__on_net_schedule_list(netif) &&
++	    likely(netif_schedulable(netif))) {
++		list_add_tail(&netif->list, &net_schedule_list);
++		netif_get(netif);
++	}
++	spin_unlock_irq(&net_schedule_list_lock);
 +}
 +
-+static void ack_dynirq(unsigned int irq)
++/*
++ * Note on CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER:
++ * If this driver is pipelining transmit requests then we can be very
++ * aggressive in avoiding new-packet notifications -- the frontend only
++ * needs to send a notification if there are no outstanding unreceived
++ * responses.  If we may be buffering transmit requests for any reason
++ * then we must be rather more conservative and treat this as the final
++ * check for pending work.
++ */
++void netif_schedule_work(netif_t *netif)
 +{
-+	int evtchn = evtchn_from_irq(irq);
++	int more_to_do;
 +
-+	move_native_irq(irq);
++#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
++	more_to_do = RING_HAS_UNCONSUMED_REQUESTS(&netif->tx);
++#else
++	RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
++#endif
 +
-+	if (VALID_EVTCHN(evtchn)) {
-+		mask_evtchn(evtchn);
-+		clear_evtchn(evtchn);
++	if (more_to_do) {
++		add_to_net_schedule_list_tail(netif);
++		maybe_schedule_tx_action();
 +	}
 +}
 +
-+static void end_dynirq(unsigned int irq)
++void netif_deschedule_work(netif_t *netif)
 +{
-+	int evtchn = evtchn_from_irq(irq);
-+
-+	if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED))
-+		unmask_evtchn(evtchn);
++	remove_from_net_schedule_list(netif);
 +}
 +
-+static struct hw_interrupt_type dynirq_type = {
-+	.typename = "Dynamic-irq",
-+	.startup  = startup_dynirq,
-+	.shutdown = shutdown_dynirq,
-+	.enable   = enable_dynirq,
-+	.disable  = disable_dynirq,
-+	.ack      = ack_dynirq,
-+	.end      = end_dynirq,
-+#ifdef CONFIG_SMP
-+	.set_affinity = set_affinity_irq,
-+#endif
-+	.retrigger = resend_irq_on_evtchn,
-+};
 +
-+static inline void pirq_unmask_notify(int pirq)
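++/*
++ * Credit-based TX scheduling: an interface may send credit_bytes per
++ * credit_usec window.  Replenish remaining_credit, capped so that one
++ * oversized request (up to 128kB) can still get through.
++ */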
++static void tx_add_credit(netif_t *netif)
 +{
-+	struct physdev_eoi eoi = { .irq = pirq };
-+	if (unlikely(test_bit(pirq, pirq_needs_eoi)))
-+		(void)HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
++	unsigned long max_burst, max_credit;
++
++	/*
++	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
++	 * Otherwise the interface can seize up due to insufficient credit.
++	 */
++	max_burst = RING_GET_REQUEST(&netif->tx, netif->tx.req_cons)->size;
++	max_burst = min(max_burst, 131072UL);
++	max_burst = max(max_burst, netif->credit_bytes);
++
++	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
++	max_credit = netif->remaining_credit + netif->credit_bytes;
++	if (max_credit < netif->remaining_credit)
++		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
++
++	netif->remaining_credit = min(max_credit, max_burst);
 +}
 +
-+static inline void pirq_query_unmask(int pirq)
++static void tx_credit_callback(unsigned long data)
 +{
-+	struct physdev_irq_status_query irq_status;
-+	irq_status.irq = pirq;
-+	(void)HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status);
-+	clear_bit(pirq, pirq_needs_eoi);
-+	if (irq_status.flags & XENIRQSTAT_needs_eoi)
-+		set_bit(pirq, pirq_needs_eoi);
++	netif_t *netif = (netif_t *)data;
++	tx_add_credit(netif);
++	netif_schedule_work(netif);
 +}
 +
-+/*
-+ * On startup, if there is no action associated with the IRQ then we are
-+ * probing. In this case we should not share with others as it will confuse us.
-+ */
-+#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)
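++/*
++ * Swap the mapping of a page still granted by the frontend for a
++ * private copy, so the grant can be returned while the data stays
++ * usable.  The caller, net_tx_action_dealloc(), handles failures.
++ */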
++static inline int copy_pending_req(PEND_RING_IDX pending_idx)
++{
++	return gnttab_copy_grant_page(grant_tx_handle[pending_idx],
++				      &mmap_pages[pending_idx]);
++}
 +
-+static unsigned int startup_pirq(unsigned int irq)
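++/*
++ * Unmap grants for TX pages queued on dealloc_ring by
++ * netif_idx_release(), send their responses, and recycle the
++ * pending-ring slots.  In delayed-copy mode, pages still held after
++ * half a second are copied out so their grants can be returned.
++ */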
++static inline void net_tx_action_dealloc(void)
 +{
-+	struct evtchn_bind_pirq bind_pirq;
-+	int evtchn = evtchn_from_irq(irq);
++	struct netbk_tx_pending_inuse *inuse, *n;
++	gnttab_unmap_grant_ref_t *gop;
++	u16 pending_idx;
++	PEND_RING_IDX dc, dp;
++	netif_t *netif;
++	int ret;
++	LIST_HEAD(list);
 +
-+	if (VALID_EVTCHN(evtchn))
-+		goto out;
++	dc = dealloc_cons;
++	gop = tx_unmap_ops;
 +
-+	bind_pirq.pirq  = irq;
-+	/* NB. We are happy to share unless we are probing. */
-+	bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
-+	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
-+		if (!probing_irq(irq))
-+			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
-+			       irq);
-+		return 0;
-+	}
-+	evtchn = bind_pirq.port;
++	/*
++	 * Free up any grants we have finished using
++	 */
++	do {
++		dp = dealloc_prod;
 +
-+	pirq_query_unmask(irq_to_pirq(irq));
++		/* Ensure we see all indices enqueued by netif_idx_release(). */
++		smp_rmb();
 +
-+	evtchn_to_irq[evtchn] = irq;
-+	bind_evtchn_to_cpu(evtchn, 0);
-+	irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);
++		while (dc != dp) {
++			unsigned long pfn;
 +
-+ out:
-+	unmask_evtchn(evtchn);
-+	pirq_unmask_notify(irq_to_pirq(irq));
++			pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
++			list_move_tail(&pending_inuse[pending_idx].list, &list);
 +
-+	return 0;
-+}
++			pfn = idx_to_pfn(pending_idx);
++			/* Already unmapped? */
++			if (!phys_to_machine_mapping_valid(pfn))
++				continue;
 +
-+static void shutdown_pirq(unsigned int irq)
-+{
-+	struct evtchn_close close;
-+	int evtchn = evtchn_from_irq(irq);
++			gnttab_set_unmap_op(gop, idx_to_kaddr(pending_idx),
++					    GNTMAP_host_map,
++					    grant_tx_handle[pending_idx]);
++			gop++;
++		}
 +
-+	if (!VALID_EVTCHN(evtchn))
-+		return;
++		if (netbk_copy_skb_mode != NETBK_DELAYED_COPY_SKB ||
++		    list_empty(&pending_inuse_head))
++			break;
 +
-+	mask_evtchn(evtchn);
++		/* Copy any entries that have been pending for too long. */
++		list_for_each_entry_safe(inuse, n, &pending_inuse_head, list) {
++			if (time_after(inuse->alloc_time + HZ / 2, jiffies))
++				break;
 +
-+	close.port = evtchn;
-+	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
-+		BUG();
++			switch (copy_pending_req(inuse - pending_inuse)) {
++			case 0:
++				list_move_tail(&inuse->list, &list);
++				continue;
++			case -EBUSY:
++				list_del_init(&inuse->list);
++				continue;
++			case -ENOENT:
++				continue;
++			}
 +
-+	bind_evtchn_to_cpu(evtchn, 0);
-+	evtchn_to_irq[evtchn] = -1;
-+	irq_info[irq] = IRQ_UNBOUND;
-+}
++			break;
++		}
++	} while (dp != dealloc_prod);
 +
-+static void enable_pirq(unsigned int irq)
-+{
-+	int evtchn = evtchn_from_irq(irq);
++	dealloc_cons = dc;
 +
-+	if (VALID_EVTCHN(evtchn)) {
-+		unmask_evtchn(evtchn);
-+		pirq_unmask_notify(irq_to_pirq(irq));
-+	}
-+}
++	ret = HYPERVISOR_grant_table_op(
++		GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops);
++	BUG_ON(ret);
 +
-+static void disable_pirq(unsigned int irq)
-+{
-+	int evtchn = evtchn_from_irq(irq);
++	list_for_each_entry_safe(inuse, n, &list, list) {
++		pending_idx = inuse - pending_inuse;
 +
-+	if (VALID_EVTCHN(evtchn))
-+		mask_evtchn(evtchn);
-+}
++		netif = pending_tx_info[pending_idx].netif;
 +
-+static void ack_pirq(unsigned int irq)
-+{
-+	int evtchn = evtchn_from_irq(irq);
++		make_tx_response(netif, &pending_tx_info[pending_idx].req, 
++				 NETIF_RSP_OKAY);
 +
-+	move_native_irq(irq);
++		/* Ready for next use. */
++		gnttab_reset_grant_page(mmap_pages[pending_idx]);
 +
-+	if (VALID_EVTCHN(evtchn)) {
-+		mask_evtchn(evtchn);
-+		clear_evtchn(evtchn);
++		pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
++
++		netif_put(netif);
++
++		list_del_init(&inuse->list);
 +	}
 +}
 +
-+static void end_pirq(unsigned int irq)
++static void netbk_tx_err(netif_t *netif, netif_tx_request_t *txp, RING_IDX end)
 +{
-+	int evtchn = evtchn_from_irq(irq);
++	RING_IDX cons = netif->tx.req_cons;
 +
-+	if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED)) {
-+		unmask_evtchn(evtchn);
-+		pirq_unmask_notify(irq_to_pirq(irq));
-+	}
++	do {
++		make_tx_response(netif, txp, NETIF_RSP_ERROR);
++		if (cons >= end)
++			break;
++		txp = RING_GET_REQUEST(&netif->tx, cons++);
++	} while (1);
++	netif->tx.req_cons = cons;
++	netif_schedule_work(netif);
++	netif_put(netif);
 +}
 +
-+static struct hw_interrupt_type pirq_type = {
-+	.typename = "Phys-irq",
-+	.startup  = startup_pirq,
-+	.shutdown = shutdown_pirq,
-+	.enable   = enable_pirq,
-+	.disable  = disable_pirq,
-+	.ack      = ack_pirq,
-+	.end      = end_pirq,
-+#ifdef CONFIG_SMP
-+	.set_affinity = set_affinity_irq,
-+#endif
-+	.retrigger = resend_irq_on_evtchn,
-+};
-+
-+int irq_ignore_unhandled(unsigned int irq)
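++/*
++ * Count the fragment requests chained after 'first' via
++ * NETTXF_more_data, copying each into txp[].  Returns the number of
++ * frags, or -frags if the chain is malformed or too long.
++ */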
++static int netbk_count_requests(netif_t *netif, netif_tx_request_t *first,
++				netif_tx_request_t *txp, int work_to_do)
 +{
-+	struct physdev_irq_status_query irq_status = { .irq = irq };
++	RING_IDX cons = netif->tx.req_cons;
++	int frags = 0;
 +
-+	if (!is_running_on_xen())
++	if (!(first->flags & NETTXF_more_data))
 +		return 0;
 +
-+	(void)HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status);
-+	return !!(irq_status.flags & XENIRQSTAT_shared);
-+}
++	do {
++		if (frags >= work_to_do) {
++			DPRINTK("Need more frags\n");
++			return -frags;
++		}
 +
-+void notify_remote_via_irq(int irq)
-+{
-+	int evtchn = evtchn_from_irq(irq);
++		if (unlikely(frags >= MAX_SKB_FRAGS)) {
++			DPRINTK("Too many frags\n");
++			return -frags;
++		}
 +
-+	if (VALID_EVTCHN(evtchn))
-+		notify_remote_via_evtchn(evtchn);
-+}
-+EXPORT_SYMBOL_GPL(notify_remote_via_irq);
++		memcpy(txp, RING_GET_REQUEST(&netif->tx, cons + frags),
++		       sizeof(*txp));
++		if (txp->size > first->size) {
++			DPRINTK("Frags galore\n");
++			return -frags;
++		}
 +
-+int irq_to_evtchn_port(int irq)
-+{
-+	return evtchn_from_irq(irq);
-+}
-+EXPORT_SYMBOL_GPL(irq_to_evtchn_port);
++		first->size -= txp->size;
++		frags++;
 +
-+void mask_evtchn(int port)
-+{
-+	shared_info_t *s = HYPERVISOR_shared_info;
-+	synch_set_bit(port, s->evtchn_mask);
++		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
++			DPRINTK("txp->offset: %x, size: %u\n",
++				txp->offset, txp->size);
++			return -frags;
++		}
++	} while ((txp++)->flags & NETTXF_more_data);
++
++	return frags;
 +}
-+EXPORT_SYMBOL_GPL(mask_evtchn);
 +
-+void unmask_evtchn(int port)
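++/*
++ * Queue one grant-map operation per remaining fragment.  Each
++ * frags[i].page temporarily holds a pending-ring index; the real page
++ * pointer is restored later by netbk_fill_frags().
++ */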
++static gnttab_map_grant_ref_t *netbk_get_requests(netif_t *netif,
++						  struct sk_buff *skb,
++						  netif_tx_request_t *txp,
++						  gnttab_map_grant_ref_t *mop)
 +{
-+	shared_info_t *s = HYPERVISOR_shared_info;
-+	unsigned int cpu = smp_processor_id();
-+	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
++	struct skb_shared_info *shinfo = skb_shinfo(skb);
++	skb_frag_t *frags = shinfo->frags;
++	unsigned long pending_idx = *((u16 *)skb->data);
++	int i, start;
 +
-+	BUG_ON(!irqs_disabled());
++	/* Skip first skb fragment if it is on same page as header fragment. */
++	start = ((unsigned long)shinfo->frags[0].page == pending_idx);
 +
-+	/* Slow path (hypercall) if this is a non-local port. */
-+	if (unlikely(cpu != cpu_from_evtchn(port))) {
-+		struct evtchn_unmask unmask = { .port = port };
-+		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
-+		return;
-+	}
++	for (i = start; i < shinfo->nr_frags; i++, txp++) {
++		pending_idx = pending_ring[MASK_PEND_IDX(pending_cons++)];
 +
-+	synch_clear_bit(port, s->evtchn_mask);
++		gnttab_set_map_op(mop++, idx_to_kaddr(pending_idx),
++				  GNTMAP_host_map | GNTMAP_readonly,
++				  txp->gref, netif->domid);
 +
-+	/* Did we miss an interrupt 'edge'? Re-fire if so. */
-+	if (synch_test_bit(port, s->evtchn_pending) &&
-+	    !synch_test_and_set_bit(port / BITS_PER_LONG,
-+				    &vcpu_info->evtchn_pending_sel))
-+		vcpu_info->evtchn_upcall_pending = 1;
++		memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
++		netif_get(netif);
++		pending_tx_info[pending_idx].netif = netif;
++		frags[i].page = (void *)pending_idx;
++	}
++
++	return mop;
 +}
-+EXPORT_SYMBOL_GPL(unmask_evtchn);
 +
-+static void restore_cpu_virqs(int cpu)
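++/*
++ * Walk the results of the grant-map batch for one skb.  On success,
++ * record machine frames and grant handles; on any failure, respond
++ * with NETIF_RSP_ERROR and invalidate the skb's other fragments.
++ */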
++static int netbk_tx_check_mop(struct sk_buff *skb,
++			       gnttab_map_grant_ref_t **mopp)
 +{
-+	struct evtchn_bind_virq bind_virq;
-+	int virq, irq, evtchn;
-+
-+	for (virq = 0; virq < NR_VIRQS; virq++) {
-+		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
-+			continue;
-+
-+		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));
++	gnttab_map_grant_ref_t *mop = *mopp;
++	int pending_idx = *((u16 *)skb->data);
++	netif_t *netif = pending_tx_info[pending_idx].netif;
++	netif_tx_request_t *txp;
++	struct skb_shared_info *shinfo = skb_shinfo(skb);
++	int nr_frags = shinfo->nr_frags;
++	int i, err, start;
 +
-+		/* Get a new binding from Xen. */
-+		bind_virq.virq = virq;
-+		bind_virq.vcpu = cpu;
-+		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
-+						&bind_virq) != 0)
-+			BUG();
-+		evtchn = bind_virq.port;
++	/* Check status of header. */
++	err = mop->status;
++	if (unlikely(err)) {
++		txp = &pending_tx_info[pending_idx].req;
++		make_tx_response(netif, txp, NETIF_RSP_ERROR);
++		pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
++		netif_put(netif);
++	} else {
++		set_phys_to_machine(
++			__pa(idx_to_kaddr(pending_idx)) >> PAGE_SHIFT,
++			FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT));
++		grant_tx_handle[pending_idx] = mop->handle;
++	}
 +
-+		/* Record the new mapping. */
-+		evtchn_to_irq[evtchn] = irq;
-+		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
-+		bind_evtchn_to_cpu(evtchn, cpu);
++	/* Skip first skb fragment if it is on same page as header fragment. */
++	start = ((unsigned long)shinfo->frags[0].page == pending_idx);
 +
-+		/* Ready for use. */
-+		unmask_evtchn(evtchn);
-+	}
-+}
++	for (i = start; i < nr_frags; i++) {
++		int j, newerr;
 +
-+static void restore_cpu_ipis(int cpu)
-+{
-+	struct evtchn_bind_ipi bind_ipi;
-+	int ipi, irq, evtchn;
++		pending_idx = (unsigned long)shinfo->frags[i].page;
 +
-+	for (ipi = 0; ipi < NR_IPIS; ipi++) {
-+		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
++		/* Check error status: if okay then remember grant handle. */
++		newerr = (++mop)->status;
++		if (likely(!newerr)) {
++			set_phys_to_machine(
++				__pa(idx_to_kaddr(pending_idx))>>PAGE_SHIFT,
++				FOREIGN_FRAME(mop->dev_bus_addr>>PAGE_SHIFT));
++			grant_tx_handle[pending_idx] = mop->handle;
++			/* Had a previous error? Invalidate this fragment. */
++			if (unlikely(err))
++				netif_idx_release(pending_idx);
 +			continue;
++		}
 +
-+		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));
-+
-+		/* Get a new binding from Xen. */
-+		bind_ipi.vcpu = cpu;
-+		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
-+						&bind_ipi) != 0)
-+			BUG();
-+		evtchn = bind_ipi.port;
++		/* Error on this fragment: respond to client with an error. */
++		txp = &pending_tx_info[pending_idx].req;
++		make_tx_response(netif, txp, NETIF_RSP_ERROR);
++		pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
++		netif_put(netif);
 +
-+		/* Record the new mapping. */
-+		evtchn_to_irq[evtchn] = irq;
-+		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
-+		bind_evtchn_to_cpu(evtchn, cpu);
++		/* Not the first error? Preceding frags already invalidated. */
++		if (err)
++			continue;
 +
-+		/* Ready for use. */
-+		unmask_evtchn(evtchn);
++		/* First error: invalidate header and preceding fragments. */
++		pending_idx = *((u16 *)skb->data);
++		netif_idx_release(pending_idx);
++		for (j = start; j < i; j++) {
++			pending_idx = (unsigned long)shinfo->frags[j].page;
++			netif_idx_release(pending_idx);
++		}
 +
++		/* Remember the error: invalidate all subsequent fragments. */
++		err = newerr;
 +	}
++
++	*mopp = mop + 1;
++	return err;
 +}
 +
-+void irq_resume(void)
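++/*
++ * Replace the pending-ring indices stashed in frags[].page with the
++ * now-mapped pages, account the fragment sizes to the skb, and mark
++ * each page in use for the delayed-copy timeout.
++ */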
++static void netbk_fill_frags(struct sk_buff *skb)
 +{
-+	int cpu, pirq, irq, evtchn;
++	struct skb_shared_info *shinfo = skb_shinfo(skb);
++	int nr_frags = shinfo->nr_frags;
++	int i;
 +
-+	init_evtchn_cpu_bindings();
++	for (i = 0; i < nr_frags; i++) {
++		skb_frag_t *frag = shinfo->frags + i;
++		netif_tx_request_t *txp;
++		unsigned long pending_idx;
 +
-+	/* New event-channel space is not 'live' yet. */
-+	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
-+		mask_evtchn(evtchn);
++		pending_idx = (unsigned long)frag->page;
 +
-+	/* Check that no PIRQs are still bound. */
-+	for (pirq = 0; pirq < NR_PIRQS; pirq++)
-+		BUG_ON(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);
++		pending_inuse[pending_idx].alloc_time = jiffies;
++		list_add_tail(&pending_inuse[pending_idx].list,
++			      &pending_inuse_head);
 +
-+	/* No IRQ <-> event-channel mappings. */
-+	for (irq = 0; irq < NR_IRQS; irq++)
-+		irq_info[irq] &= ~0xFFFF; /* zap event-channel binding */
-+	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
-+		evtchn_to_irq[evtchn] = -1;
++		txp = &pending_tx_info[pending_idx].req;
++		frag->page = virt_to_page(idx_to_kaddr(pending_idx));
++		frag->size = txp->size;
++		frag->page_offset = txp->offset;
 +
-+	for_each_possible_cpu(cpu) {
-+		restore_cpu_virqs(cpu);
-+		restore_cpu_ipis(cpu);
++		skb->len += txp->size;
++		skb->data_len += txp->size;
++		skb->truesize += txp->size;
 +	}
-+
 +}
 +
-+void __init xen_init_IRQ(void)
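++/*
++ * Consume the chain of extra-info slots following a request, storing
++ * each by type.  Returns the remaining work_to_do, or a negative errno
++ * if the chain is truncated or carries an invalid type.
++ */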
++int netbk_get_extras(netif_t *netif, struct netif_extra_info *extras,
++		     int work_to_do)
 +{
-+	int i;
++	struct netif_extra_info extra;
++	RING_IDX cons = netif->tx.req_cons;
 +
-+	init_evtchn_cpu_bindings();
++	do {
++		if (unlikely(work_to_do-- <= 0)) {
++			DPRINTK("Missing extra info\n");
++			return -EBADR;
++		}
 +
-+	/* No event channels are 'live' right now. */
-+	for (i = 0; i < NR_EVENT_CHANNELS; i++)
-+		mask_evtchn(i);
++		memcpy(&extra, RING_GET_REQUEST(&netif->tx, cons),
++		       sizeof(extra));
++		if (unlikely(!extra.type ||
++			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
++			netif->tx.req_cons = ++cons;
++			DPRINTK("Invalid extra type: %d\n", extra.type);
++			return -EINVAL;
++		}
 +
-+	/* No IRQ -> event-channel mappings. */
-+	for (i = 0; i < NR_IRQS; i++)
-+		irq_info[i] = IRQ_UNBOUND;
++		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
++		netif->tx.req_cons = ++cons;
++	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
 +
-+	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
-+	for (i = 0; i < NR_DYNIRQS; i++) {
-+		irq_bindcount[dynirq_to_irq(i)] = 0;
++	return work_to_do;
++}
 +
-+		irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
-+		irq_desc[dynirq_to_irq(i)].action = NULL;
-+		irq_desc[dynirq_to_irq(i)].depth = 1;
-+		irq_desc[dynirq_to_irq(i)].chip = &dynirq_type;
++static int netbk_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso)
++{
++	if (!gso->u.gso.size) {
++		DPRINTK("GSO size must not be zero.\n");
++		return -EINVAL;
++	}
++
++	/* Currently only TCPv4 S.O. is supported. */
++	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
++		DPRINTK("Bad GSO type %d.\n", gso->u.gso.type);
++		return -EINVAL;
 +	}
 +
-+	/* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
-+	for (i = 0; i < NR_PIRQS; i++) {
-+		irq_bindcount[pirq_to_irq(i)] = 1;
++	skb_shinfo(skb)->gso_size = gso->u.gso.size;
++	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 +
-+#ifdef RTC_IRQ
-+		/* If not domain 0, force our RTC driver to fail its probe. */
-+		if ((i == RTC_IRQ) && !is_initial_xendomain())
-+			continue;
-+#endif
++	/* Header must be checked, and gso_segs computed. */
++	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
++	skb_shinfo(skb)->gso_segs = 0;
 +
-+		irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
-+		irq_desc[pirq_to_irq(i)].action = NULL;
-+		irq_desc[pirq_to_irq(i)].depth = 1;
-+		irq_desc[pirq_to_irq(i)].chip = &pirq_type;
-+	}
++	return 0;
 +}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/core/features.c tmp-linux-2.6-xen.patch/drivers/xen/core/features.c
---- pristine-linux-2.6.18.2/drivers/xen/core/features.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/core/features.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,34 @@
-+/******************************************************************************
-+ * features.c
-+ *
-+ * Xen feature flags.
-+ *
-+ * Copyright (c) 2006, Ian Campbell, XenSource Inc.
-+ */
-+#include <linux/types.h>
-+#include <linux/cache.h>
-+#include <linux/module.h>
-+#include <asm/hypervisor.h>
-+#include <xen/features.h>
 +
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
++/*
++ * Called after netfront has transmitted: release finished grants, pull
++ * requests from each scheduled interface subject to its credit limit,
++ * grant-map all fragment pages in one batch hypercall, then build the
++ * skbs and hand them to the network stack via netif_rx().
++ */
++static void net_tx_action(unsigned long unused)
++{
++	struct list_head *ent;
++	struct sk_buff *skb;
++	netif_t *netif;
++	netif_tx_request_t txreq;
++	netif_tx_request_t txfrags[MAX_SKB_FRAGS];
++	struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
++	u16 pending_idx;
++	RING_IDX i;
++	gnttab_map_grant_ref_t *mop;
++	unsigned int data_len;
++	int ret, work_to_do;
 +
-+u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly;
-+/* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */
-+EXPORT_SYMBOL(xen_features);
++	if (dealloc_cons != dealloc_prod)
++		net_tx_action_dealloc();
 +
-+void setup_xen_features(void)
-+{
-+	xen_feature_info_t fi;
-+	int i, j;
++	mop = tx_map_ops;
++	while (((NR_PENDING_REQS + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
++		!list_empty(&net_schedule_list)) {
++		/* Get a netif from the list with work to do. */
++		ent = net_schedule_list.next;
++		netif = list_entry(ent, netif_t, list);
++		netif_get(netif);
++		remove_from_net_schedule_list(netif);
 +
-+	for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) {
-+		fi.submap_idx = i;
-+		if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
-+			break;
-+		for (j=0; j<32; j++)
-+			xen_features[i*32+j] = !!(fi.submap & 1<<j);
-+	}
-+}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/core/gnttab.c tmp-linux-2.6-xen.patch/drivers/xen/core/gnttab.c
---- pristine-linux-2.6.18.2/drivers/xen/core/gnttab.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/core/gnttab.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,631 @@
-+/******************************************************************************
-+ * gnttab.c
-+ *
-+ * Granting foreign access to our memory reservation.
-+ *
-+ * Copyright (c) 2005-2006, Christopher Clark
-+ * Copyright (c) 2004-2005, K A Fraser
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
++		RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do);
++		if (!work_to_do) {
++			netif_put(netif);
++			continue;
++		}
 +
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <xen/interface/xen.h>
-+#include <xen/gnttab.h>
-+#include <asm/pgtable.h>
-+#include <asm/uaccess.h>
-+#include <asm/synch_bitops.h>
-+#include <asm/io.h>
-+#include <xen/interface/memory.h>
-+#include <xen/driver_util.h>
++		i = netif->tx.req_cons;
++		rmb(); /* Ensure that we see the request before we copy it. */
++		memcpy(&txreq, RING_GET_REQUEST(&netif->tx, i), sizeof(txreq));
 +
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
++		/* Credit-based scheduling. */
++		if (txreq.size > netif->remaining_credit) {
++			unsigned long now = jiffies;
++			unsigned long next_credit = 
++				netif->credit_timeout.expires +
++				msecs_to_jiffies(netif->credit_usec / 1000);
 +
-+/* External tools reserve first few grant table entries. */
-+#define NR_RESERVED_ENTRIES 8
-+#define GNTTAB_LIST_END 0xffffffff
-+#define GREFS_PER_GRANT_FRAME (PAGE_SIZE / sizeof(grant_entry_t))
++			/* Timer could already be pending in rare cases. */
++			if (timer_pending(&netif->credit_timeout)) {
++				netif_put(netif);
++				continue;
++			}
 +
-+static grant_ref_t **gnttab_list;
-+static unsigned int nr_grant_frames;
-+static unsigned int boot_max_nr_grant_frames;
-+static int gnttab_free_count;
-+static grant_ref_t gnttab_free_head;
-+static DEFINE_SPINLOCK(gnttab_list_lock);
++			/* Passed the point where we can replenish credit? */
++			if (time_after_eq(now, next_credit)) {
++				netif->credit_timeout.expires = now;
++				tx_add_credit(netif);
++			}
 +
-+static struct grant_entry *shared;
++			/* Still too big to send right now? Set a callback. */
++			if (txreq.size > netif->remaining_credit) {
++				netif->credit_timeout.data     =
++					(unsigned long)netif;
++				netif->credit_timeout.function =
++					tx_credit_callback;
++				__mod_timer(&netif->credit_timeout,
++					    next_credit);
++				netif_put(netif);
++				continue;
++			}
++		}
++		netif->remaining_credit -= txreq.size;
 +
-+static struct gnttab_free_callback *gnttab_free_callback_list;
++		work_to_do--;
++		netif->tx.req_cons = ++i;
 +
-+static int gnttab_expand(unsigned int req_entries);
++		memset(extras, 0, sizeof(extras));
++		if (txreq.flags & NETTXF_extra_info) {
++			work_to_do = netbk_get_extras(netif, extras,
++						      work_to_do);
++			i = netif->tx.req_cons;
++			if (unlikely(work_to_do < 0)) {
++				netbk_tx_err(netif, &txreq, i);
++				continue;
++			}
++		}
 +
-+#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
-+#define gnttab_entry(entry) (gnttab_list[(entry) / RPP][(entry) % RPP])
++		ret = netbk_count_requests(netif, &txreq, txfrags, work_to_do);
++		if (unlikely(ret < 0)) {
++			netbk_tx_err(netif, &txreq, i - ret);
++			continue;
++		}
++		i += ret;
 +
-+static int get_free_entries(int count)
-+{
-+	unsigned long flags;
-+	int ref, rc;
-+	grant_ref_t head;
++		if (unlikely(txreq.size < ETH_HLEN)) {
++			DPRINTK("Bad packet size: %d\n", txreq.size);
++			netbk_tx_err(netif, &txreq, i);
++			continue;
++		}
 +
-+	spin_lock_irqsave(&gnttab_list_lock, flags);
++		/* No crossing a page as the payload mustn't fragment. */
++		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
++			DPRINTK("txreq.offset: %x, size: %u, end: %lu\n", 
++				txreq.offset, txreq.size, 
++				(txreq.offset &~PAGE_MASK) + txreq.size);
++			netbk_tx_err(netif, &txreq, i);
++			continue;
++		}
 +
-+	if ((gnttab_free_count < count) &&
-+	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
-+		spin_unlock_irqrestore(&gnttab_list_lock, flags);
-+		return rc;
-+	}
++		pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
 +
-+	ref = head = gnttab_free_head;
-+	gnttab_free_count -= count;
-+	while (count-- > 1)
-+		head = gnttab_entry(head);
-+ 	gnttab_free_head = gnttab_entry(head);
-+	gnttab_entry(head) = GNTTAB_LIST_END;
++		data_len = (txreq.size > PKT_PROT_LEN &&
++			    ret < MAX_SKB_FRAGS) ?
++			PKT_PROT_LEN : txreq.size;
 +
-+	spin_unlock_irqrestore(&gnttab_list_lock, flags);
++		skb = alloc_skb(data_len + 16 + NET_IP_ALIGN,
++				GFP_ATOMIC | __GFP_NOWARN);
++		if (unlikely(skb == NULL)) {
++			DPRINTK("Can't allocate a skb in start_xmit.\n");
++			netbk_tx_err(netif, &txreq, i);
++			break;
++		}
 +
-+	return ref;
-+}
++		/* Packets passed to netif_rx() must have some headroom. */
++		skb_reserve(skb, 16 + NET_IP_ALIGN);
 +
-+#define get_free_entry() get_free_entries(1)
++		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
++			struct netif_extra_info *gso;
++			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
 +
-+static void do_free_callbacks(void)
-+{
-+	struct gnttab_free_callback *callback, *next;
++			if (netbk_set_skb_gso(skb, gso)) {
++				kfree_skb(skb);
++				netbk_tx_err(netif, &txreq, i);
++				continue;
++			}
++		}
 +
-+	callback = gnttab_free_callback_list;
-+	gnttab_free_callback_list = NULL;
++		gnttab_set_map_op(mop, idx_to_kaddr(pending_idx),
++				  GNTMAP_host_map | GNTMAP_readonly,
++				  txreq.gref, netif->domid);
++		mop++;
 +
-+	while (callback != NULL) {
-+		next = callback->next;
-+		if (gnttab_free_count >= callback->count) {
-+			callback->next = NULL;
-+			callback->fn(callback->arg);
++		memcpy(&pending_tx_info[pending_idx].req,
++		       &txreq, sizeof(txreq));
++		pending_tx_info[pending_idx].netif = netif;
++		*((u16 *)skb->data) = pending_idx;
++
++		__skb_put(skb, data_len);
++
++		skb_shinfo(skb)->nr_frags = ret;
++		if (data_len < txreq.size) {
++			skb_shinfo(skb)->nr_frags++;
++			skb_shinfo(skb)->frags[0].page =
++				(void *)(unsigned long)pending_idx;
 +		} else {
-+			callback->next = gnttab_free_callback_list;
-+			gnttab_free_callback_list = callback;
++			/* Discriminate from any valid pending_idx value. */
++			skb_shinfo(skb)->frags[0].page = (void *)~0UL;
 +		}
-+		callback = next;
-+	}
-+}
 +
-+static inline void check_free_callbacks(void)
-+{
-+	if (unlikely(gnttab_free_callback_list))
-+		do_free_callbacks();
-+}
++		__skb_queue_tail(&tx_queue, skb);
 +
-+static void put_free_entry(grant_ref_t ref)
-+{
-+	unsigned long flags;
-+	spin_lock_irqsave(&gnttab_list_lock, flags);
-+	gnttab_entry(ref) = gnttab_free_head;
-+	gnttab_free_head = ref;
-+	gnttab_free_count++;
-+	check_free_callbacks();
-+	spin_unlock_irqrestore(&gnttab_list_lock, flags);
-+}
++		pending_cons++;
 +
-+/*
-+ * Public grant-issuing interface functions
-+ */
++		mop = netbk_get_requests(netif, skb, txfrags, mop);
 +
-+int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
-+				int readonly)
-+{
-+	int ref;
++		netif->tx.req_cons = i;
++		netif_schedule_work(netif);
 +
-+	if (unlikely((ref = get_free_entry()) < 0))
-+		return -ENOSPC;
++		if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
++			break;
++	}
 +
-+	shared[ref].frame = frame;
-+	shared[ref].domid = domid;
-+	wmb();
-+	shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
++	if (mop == tx_map_ops)
++		return;
 +
-+	return ref;
-+}
-+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
++	ret = HYPERVISOR_grant_table_op(
++		GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops);
++	BUG_ON(ret);
 +
-+void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
-+				     unsigned long frame, int readonly)
-+{
-+	shared[ref].frame = frame;
-+	shared[ref].domid = domid;
-+	wmb();
-+	shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
-+}
-+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
++	mop = tx_map_ops;
++	while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
++		netif_tx_request_t *txp;
 +
++		pending_idx = *((u16 *)skb->data);
++		netif       = pending_tx_info[pending_idx].netif;
++		txp         = &pending_tx_info[pending_idx].req;
 +
-+int gnttab_query_foreign_access(grant_ref_t ref)
-+{
-+	u16 nflags;
++		/* Check the remap error code. */
++		if (unlikely(netbk_tx_check_mop(skb, &mop))) {
++			DPRINTK("netback grant failed.\n");
++			skb_shinfo(skb)->nr_frags = 0;
++			kfree_skb(skb);
++			continue;
++		}
 +
-+	nflags = shared[ref].flags;
++		data_len = skb->len;
++		memcpy(skb->data,
++		       (void *)(idx_to_kaddr(pending_idx)|txp->offset),
++		       data_len);
++		if (data_len < txp->size) {
++			/* Append the packet payload as a fragment. */
++			txp->offset += data_len;
++			txp->size -= data_len;
++		} else {
++			/* Schedule a response immediately. */
++			netif_idx_release(pending_idx);
++		}
 +
-+	return (nflags & (GTF_reading|GTF_writing));
-+}
-+EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
++		/*
++		 * Old frontends do not assert data_validated but we
++		 * can infer it from csum_blank so test both flags.
++		 */
++		if (txp->flags & (NETTXF_data_validated|NETTXF_csum_blank)) {
++			skb->ip_summed = CHECKSUM_UNNECESSARY;
++			skb->proto_data_valid = 1;
++		} else {
++			skb->ip_summed = CHECKSUM_NONE;
++			skb->proto_data_valid = 0;
++		}
++		skb->proto_csum_blank = !!(txp->flags & NETTXF_csum_blank);
 +
-+int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
-+{
-+	u16 flags, nflags;
++		netbk_fill_frags(skb);
 +
-+	nflags = shared[ref].flags;
-+	do {
-+		if ((flags = nflags) & (GTF_reading|GTF_writing)) {
-+			printk(KERN_DEBUG "WARNING: g.e. still in use!\n");
-+			return 0;
++		skb->dev      = netif->dev;
++		skb->protocol = eth_type_trans(skb, skb->dev);
++
++		netif->stats.rx_bytes += skb->len;
++		netif->stats.rx_packets++;
++
++		if (unlikely(netbk_copy_skb_mode == NETBK_ALWAYS_COPY_SKB) &&
++		    unlikely(skb_linearize(skb))) {
++			DPRINTK("Can't linearize skb in net_tx_action.\n");
++			kfree_skb(skb);
++			continue;
 +		}
-+	} while ((nflags = synch_cmpxchg_subword(&shared[ref].flags, flags, 0)) !=
-+		 flags);
 +
-+	return 1;
-+}
-+EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
++		netif_rx(skb);
++		netif->dev->last_rx = jiffies;
++	}
 +
-+void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
-+			       unsigned long page)
-+{
-+	if (gnttab_end_foreign_access_ref(ref, readonly)) {
-+		put_free_entry(ref);
-+		if (page != 0)
-+			free_page(page);
-+	} else {
-+		/* XXX This needs to be fixed so that the ref and page are
-+		   placed on a list to be freed up later. */
-+		printk(KERN_DEBUG
-+		       "WARNING: leaking g.e. and page still in use!\n");
++	if (netbk_copy_skb_mode == NETBK_DELAYED_COPY_SKB &&
++	    !list_empty(&pending_inuse_head)) {
++		struct netbk_tx_pending_inuse *oldest;
++
++		oldest = list_entry(pending_inuse_head.next,
++				    struct netbk_tx_pending_inuse, list);
++		mod_timer(&netbk_tx_pending_timer, oldest->alloc_time + HZ);
 +	}
 +}
-+EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
 +
-+int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
++static void netif_idx_release(u16 pending_idx)
 +{
-+	int ref;
++	static DEFINE_SPINLOCK(_lock);
++	unsigned long flags;
 +
-+	if (unlikely((ref = get_free_entry()) < 0))
-+		return -ENOSPC;
-+	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
++	spin_lock_irqsave(&_lock, flags);
++	dealloc_ring[MASK_PEND_IDX(dealloc_prod)] = pending_idx;
++	/* Sync with net_tx_action_dealloc: insert idx /then/ incr producer. */
++	smp_wmb();
++	dealloc_prod++;
++	spin_unlock_irqrestore(&_lock, flags);
 +
-+	return ref;
++	tasklet_schedule(&net_tx_tasklet);
 +}
-+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
 +
-+void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
-+				       unsigned long pfn)
++static void netif_page_release(struct page *page)
 +{
-+	shared[ref].frame = pfn;
-+	shared[ref].domid = domid;
-+	wmb();
-+	shared[ref].flags = GTF_accept_transfer;
++	netif_idx_release(netif_page_index(page));
 +}
-+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
 +
-+unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
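++/*
++ * Event-channel interrupt from the frontend: queue the interface for
++ * transmit work and wake its queue if the receive ring has room again.
++ */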
++irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
 +{
-+	unsigned long frame;
-+	u16           flags;
-+
-+	/*
-+	 * If a transfer is not even yet started, try to reclaim the grant
-+	 * reference and return failure (== 0).
-+	 */
-+	while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
-+		if (synch_cmpxchg_subword(&shared[ref].flags, flags, 0) == flags)
-+			return 0;
-+		cpu_relax();
-+	}
++	netif_t *netif = dev_id;
 +
-+	/* If a transfer is in progress then wait until it is completed. */
-+	while (!(flags & GTF_transfer_completed)) {
-+		flags = shared[ref].flags;
-+		cpu_relax();
-+	}
++	add_to_net_schedule_list_tail(netif);
++	maybe_schedule_tx_action();
 +
-+	/* Read the frame number /after/ reading completion status. */
-+	rmb();
-+	frame = shared[ref].frame;
-+	BUG_ON(frame == 0);
++	if (netif_schedulable(netif) && !netbk_queue_full(netif))
++		netif_wake_queue(netif->dev);
 +
-+	return frame;
++	return IRQ_HANDLED;
 +}
-+EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
 +
-+unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
++static void make_tx_response(netif_t *netif, 
++			     netif_tx_request_t *txp,
++			     s8       st)
 +{
-+	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
-+	put_free_entry(ref);
-+	return frame;
-+}
-+EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
++	RING_IDX i = netif->tx.rsp_prod_pvt;
++	netif_tx_response_t *resp;
++	int notify;
 +
-+void gnttab_free_grant_reference(grant_ref_t ref)
-+{
-+	put_free_entry(ref);
-+}
-+EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
++	resp = RING_GET_RESPONSE(&netif->tx, i);
++	resp->id     = txp->id;
++	resp->status = st;
 +
-+void gnttab_free_grant_references(grant_ref_t head)
-+{
-+	grant_ref_t ref;
-+	unsigned long flags;
-+	int count = 1;
-+	if (head == GNTTAB_LIST_END)
-+		return;
-+	spin_lock_irqsave(&gnttab_list_lock, flags);
-+	ref = head;
-+	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
-+		ref = gnttab_entry(ref);
-+		count++;
-+	}
-+	gnttab_entry(ref) = gnttab_free_head;
-+	gnttab_free_head = head;
-+	gnttab_free_count += count;
-+	check_free_callbacks();
-+	spin_unlock_irqrestore(&gnttab_list_lock, flags);
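++	/* A request that carried extra info consumed two ring slots;
++	 * pad the second with a null response. */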
++	if (txp->flags & NETTXF_extra_info)
++		RING_GET_RESPONSE(&netif->tx, ++i)->status = NETIF_RSP_NULL;
++
++	netif->tx.rsp_prod_pvt = ++i;
++	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->tx, notify);
++	if (notify)
++		notify_remote_via_irq(netif->irq);
++
++#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
++	if (i == netif->tx.req_cons) {
++		int more_to_do;
++		RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
++		if (more_to_do)
++			add_to_net_schedule_list_tail(netif);
++	}
++#endif
 +}
-+EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
 +
-+int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
++static netif_rx_response_t *make_rx_response(netif_t *netif, 
++					     u16      id, 
++					     s8       st,
++					     u16      offset,
++					     u16      size,
++					     u16      flags)
 +{
-+	int h = get_free_entries(count);
++	RING_IDX i = netif->rx.rsp_prod_pvt;
++	netif_rx_response_t *resp;
 +
-+	if (h < 0)
-+		return -ENOSPC;
++	resp = RING_GET_RESPONSE(&netif->rx, i);
++	resp->offset     = offset;
++	resp->flags      = flags;
++	resp->id         = id;
++	resp->status     = (s16)size;
++	if (st < 0)
++		resp->status = (s16)st;
 +
-+	*head = h;
++	netif->rx.rsp_prod_pvt = ++i;
 +
-+	return 0;
++	return resp;
 +}
-+EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
 +
-+int gnttab_empty_grant_references(const grant_ref_t *private_head)
++#ifdef NETBE_DEBUG_INTERRUPT
++static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
 +{
-+	return (*private_head == GNTTAB_LIST_END);
-+}
-+EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
++	struct list_head *ent;
++	netif_t *netif;
++	int i = 0;
 +
-+int gnttab_claim_grant_reference(grant_ref_t *private_head)
-+{
-+	grant_ref_t g = *private_head;
-+	if (unlikely(g == GNTTAB_LIST_END))
-+		return -ENOSPC;
-+	*private_head = gnttab_entry(g);
-+	return g;
-+}
-+EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
++	printk(KERN_ALERT "netif_schedule_list:\n");
++	spin_lock_irq(&net_schedule_list_lock);
 +
-+void gnttab_release_grant_reference(grant_ref_t *private_head,
-+				    grant_ref_t release)
-+{
-+	gnttab_entry(release) = *private_head;
-+	*private_head = release;
-+}
-+EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
++	list_for_each (ent, &net_schedule_list) {
++		netif = list_entry(ent, netif_t, list);
++		printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
++		       "rx_resp_prod=%08x\n",
++		       i, netif->rx.req_cons, netif->rx.rsp_prod_pvt);
++		printk(KERN_ALERT "   tx_req_cons=%08x tx_resp_prod=%08x)\n",
++		       netif->tx.req_cons, netif->tx.rsp_prod_pvt);
++		printk(KERN_ALERT "   shared(rx_req_prod=%08x "
++		       "rx_resp_prod=%08x\n",
++		       netif->rx.sring->req_prod, netif->rx.sring->rsp_prod);
++		printk(KERN_ALERT "   rx_event=%08x tx_req_prod=%08x\n",
++		       netif->rx.sring->rsp_event, netif->tx.sring->req_prod);
++		printk(KERN_ALERT "   tx_resp_prod=%08x, tx_event=%08x)\n",
++		       netif->tx.sring->rsp_prod, netif->tx.sring->rsp_event);
++		i++;
++	}
 +
-+void gnttab_request_free_callback(struct gnttab_free_callback *callback,
-+				  void (*fn)(void *), void *arg, u16 count)
-+{
-+	unsigned long flags;
-+	spin_lock_irqsave(&gnttab_list_lock, flags);
-+	if (callback->next)
-+		goto out;
-+	callback->fn = fn;
-+	callback->arg = arg;
-+	callback->count = count;
-+	callback->next = gnttab_free_callback_list;
-+	gnttab_free_callback_list = callback;
-+	check_free_callbacks();
-+out:
-+	spin_unlock_irqrestore(&gnttab_list_lock, flags);
++	spin_unlock_irq(&net_schedule_list_lock);
++	printk(KERN_ALERT " ** End of netif_schedule_list **\n");
++
++	return IRQ_HANDLED;
 +}
-+EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
++#endif
 +
-+void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
++static int __init netback_init(void)
 +{
-+	struct gnttab_free_callback **pcb;
-+	unsigned long flags;
++	int i;
++	struct page *page;
 +
-+	spin_lock_irqsave(&gnttab_list_lock, flags);
-+	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
-+		if (*pcb == callback) {
-+			*pcb = callback->next;
-+			break;
-+		}
-+	}
-+	spin_unlock_irqrestore(&gnttab_list_lock, flags);
-+}
-+EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
++	if (!is_running_on_xen())
++		return -ENODEV;
 +
-+static int grow_gnttab_list(unsigned int more_frames)
-+{
-+	unsigned int new_nr_grant_frames, extra_entries, i;
++	/* We can increase reservation by this much in net_rx_action(). */
++	balloon_update_driver_allowance(NET_RX_RING_SIZE);
 +
-+	new_nr_grant_frames = nr_grant_frames + more_frames;
-+	extra_entries       = more_frames * GREFS_PER_GRANT_FRAME;
++	skb_queue_head_init(&rx_queue);
++	skb_queue_head_init(&tx_queue);
 +
-+	for (i = nr_grant_frames; i < new_nr_grant_frames; i++)
-+	{
-+		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
-+		if (!gnttab_list[i])
-+			goto grow_nomem;
++	init_timer(&net_timer);
++	net_timer.data = 0;
++	net_timer.function = net_alarm;
++
++	init_timer(&netbk_tx_pending_timer);
++	netbk_tx_pending_timer.data = 0;
++	netbk_tx_pending_timer.function = netbk_tx_pending_timeout;
++
++	mmap_pages = alloc_empty_pages_and_pagevec(MAX_PENDING_REQS);
++	if (mmap_pages == NULL) {
++		printk("%s: out of memory\n", __FUNCTION__);
++		return -ENOMEM;
 +	}
 +
++	for (i = 0; i < MAX_PENDING_REQS; i++) {
++		page = mmap_pages[i];
++		SetPageForeign(page, netif_page_release);
++		netif_page_index(page) = i;
++		INIT_LIST_HEAD(&pending_inuse[i].list);
++	}
 +
-+	for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames;
-+	     i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++)
-+		gnttab_entry(i) = i + 1;
++	pending_cons = 0;
++	pending_prod = MAX_PENDING_REQS;
++	for (i = 0; i < MAX_PENDING_REQS; i++)
++		pending_ring[i] = i;
 +
-+	gnttab_entry(i) = gnttab_free_head;
-+	gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames;
-+	gnttab_free_count += extra_entries;
++	spin_lock_init(&net_schedule_list_lock);
++	INIT_LIST_HEAD(&net_schedule_list);
 +
-+	nr_grant_frames = new_nr_grant_frames;
++	netbk_copy_skb_mode = NETBK_DONT_COPY_SKB;
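++	/*
++	 * If copying was requested, probe for GNTTABOP_unmap_and_replace:
++	 * a zero-count call fails only when the hypervisor lacks the op.
++	 * Without it every skb must be copied up front; with it the copy
++	 * can be deferred until a page has been held too long.
++	 */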
++	if (MODPARM_copy_skb) {
++		if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace,
++					      NULL, 0))
++			netbk_copy_skb_mode = NETBK_ALWAYS_COPY_SKB;
++		else
++			netbk_copy_skb_mode = NETBK_DELAYED_COPY_SKB;
++	}
 +
-+	check_free_callbacks();
++	netif_accel_init();
++
++	netif_xenbus_init();
++
++#ifdef NETBE_DEBUG_INTERRUPT
++	(void)bind_virq_to_irqhandler(VIRQ_DEBUG,
++				      0,
++				      netif_be_dbg,
++				      SA_SHIRQ, 
++				      "net-be-dbg",
++				      &netif_be_dbg);
++#endif
 +
 +	return 0;
-+	
-+grow_nomem:
-+	for ( ; i >= nr_grant_frames; i--)
-+		free_page((unsigned long) gnttab_list[i]);
-+	return -ENOMEM;
 +}
 +
-+static unsigned int __max_nr_grant_frames(void)
-+{
-+	struct gnttab_query_size query;
-+	int rc;
++module_init(netback_init);
 +
-+	query.dom = DOMID_SELF;
++MODULE_LICENSE("Dual BSD/GPL");
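The pending_cons/pending_prod/pending_ring setup in netback_init() above is a
free-list of request slots implemented as a power-of-two ring with free-running
counters: a free slot index is taken from the consumer end and returned at the
producer end. A minimal userspace sketch of the same bookkeeping follows;
MAX_PENDING and the masking macro are illustrative stand-ins, not the driver's
definitions.

/*
 * Sketch of the pending-ring slot accounting: the ring initially
 * holds every index, so prod - cons equals the number of free slots.
 */
#include <assert.h>
#include <stdio.h>

#define MAX_PENDING 8				/* power of two, like MAX_PENDING_REQS */
#define MASK_PEND_IDX(i) ((i) & (MAX_PENDING - 1))

static unsigned short pending_ring[MAX_PENDING];
static unsigned int pending_prod, pending_cons;

static unsigned int nr_pending_reqs(void)
{
	return MAX_PENDING - (pending_prod - pending_cons);
}

int main(void)
{
	unsigned int i, idx;

	/* Initially every index is free, exactly as in netback_init(). */
	pending_cons = 0;
	pending_prod = MAX_PENDING;
	for (i = 0; i < MAX_PENDING; i++)
		pending_ring[i] = i;

	/* Take two slots from the consumer end... */
	idx = pending_ring[MASK_PEND_IDX(pending_cons++)];
	printf("got slot %u, in flight: %u\n", idx, nr_pending_reqs());
	idx = pending_ring[MASK_PEND_IDX(pending_cons++)];
	printf("got slot %u, in flight: %u\n", idx, nr_pending_reqs());

	/* ...and return one at the producer end. */
	pending_ring[MASK_PEND_IDX(pending_prod++)] = idx;
	printf("released slot %u, in flight: %u\n", idx, nr_pending_reqs());

	assert(nr_pending_reqs() == 1);
	return 0;
}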
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/netback/xenbus.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/netback/xenbus.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,454 @@
++/*  Xenbus code for netif backend
++    Copyright (C) 2005 Rusty Russell <rusty at rustcorp.com.au>
++    Copyright (C) 2005 XenSource Ltd
 +
-+	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
-+	if ((rc < 0) || (query.status != GNTST_okay))
-+		return 4; /* Legacy max supported number of frames */
++    This program is free software; you can redistribute it and/or modify
++    it under the terms of the GNU General Public License as published by
++    the Free Software Foundation; either version 2 of the License, or
++    (at your option) any later version.
 +
-+	return query.max_nr_frames;
-+}
++    This program is distributed in the hope that it will be useful,
++    but WITHOUT ANY WARRANTY; without even the implied warranty of
++    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++    GNU General Public License for more details.
 +
-+static inline unsigned int max_nr_grant_frames(void)
-+{
-+	unsigned int xen_max = __max_nr_grant_frames();
++    You should have received a copy of the GNU General Public License
++    along with this program; if not, write to the Free Software
++    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++*/
 +
-+	if (xen_max > boot_max_nr_grant_frames)
-+		return boot_max_nr_grant_frames;
-+	return xen_max;
-+}
++#include <stdarg.h>
++#include <linux/module.h>
++#include <xen/xenbus.h>
++#include "common.h"
 +
-+#ifdef CONFIG_XEN
++#if 0
++#undef DPRINTK
++#define DPRINTK(fmt, args...) \
++    printk("netback/xenbus (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
++#endif
 +
-+#ifndef __ia64__
-+static int map_pte_fn(pte_t *pte, struct page *pmd_page,
-+		      unsigned long addr, void *data)
-+{
-+	unsigned long **frames = (unsigned long **)data;
 +
-+	set_pte_at(&init_mm, addr, pte, pfn_pte_ma((*frames)[0], PAGE_KERNEL));
-+	(*frames)++;
-+	return 0;
-+}
++static int connect_rings(struct backend_info *);
++static void connect(struct backend_info *);
++static void backend_create_netif(struct backend_info *be);
 +
-+static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
-+			unsigned long addr, void *data)
++static int netback_remove(struct xenbus_device *dev)
 +{
++	struct backend_info *be = dev->dev.driver_data;
 +
-+	set_pte_at(&init_mm, addr, pte, __pte(0));
++	netback_remove_accelerators(be, dev);
++
++	if (be->netif) {
++		kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
++		netif_disconnect(be->netif);
++		be->netif = NULL;
++	}
++	kfree(be);
++	dev->dev.driver_data = NULL;
 +	return 0;
 +}
-+#endif
 +
-+static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
-+{
-+	struct gnttab_setup_table setup;
-+	unsigned long *frames;
-+	unsigned int nr_gframes = end_idx + 1;
-+	int rc;
 +
-+	frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
-+	if (!frames)
++/**
++ * Entry point to this code when a new device is created.  Allocate the basic
++ * structures and switch to InitWait.
++ */
++static int netback_probe(struct xenbus_device *dev,
++			 const struct xenbus_device_id *id)
++{
++	const char *message;
++	struct xenbus_transaction xbt;
++	int err;
++	int sg;
++	struct backend_info *be = kzalloc(sizeof(struct backend_info),
++					  GFP_KERNEL);
++	if (!be) {
++		xenbus_dev_fatal(dev, -ENOMEM,
++				 "allocating backend structure");
 +		return -ENOMEM;
-+
-+	setup.dom        = DOMID_SELF;
-+	setup.nr_frames  = nr_gframes;
-+	set_xen_guest_handle(setup.frame_list, frames);
-+
-+	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
-+	if (rc == -ENOSYS) {
-+		kfree(frames);
-+		return -ENOSYS;
 +	}
 +
-+	BUG_ON(rc || setup.status);
++	be->dev = dev;
++	dev->dev.driver_data = be;
 +
-+#ifndef __ia64__
-+	if (shared == NULL) {
-+		struct vm_struct *area;
-+		area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames());
-+		BUG_ON(area == NULL);
-+		shared = area->addr;
-+	}
-+	rc = apply_to_page_range(&init_mm, (unsigned long)shared,
-+				 PAGE_SIZE * nr_gframes,
-+				 map_pte_fn, &frames);
-+	BUG_ON(rc);
-+        frames -= nr_gframes; /* adjust after map_pte_fn() */
-+#else
-+	shared = __va(frames[0] << PAGE_SHIFT);
-+#endif
++	sg = 1;
++	if (netbk_copy_skb_mode == NETBK_ALWAYS_COPY_SKB)
++		sg = 0;
 +
-+	kfree(frames);
++	do {
++		err = xenbus_transaction_start(&xbt);
++		if (err) {
++			xenbus_dev_fatal(dev, err, "starting transaction");
++			goto fail;
++		}
 +
-+	return 0;
-+}
++		err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg);
++		if (err) {
++			message = "writing feature-sg";
++			goto abort_transaction;
++		}
 +
-+int gnttab_resume(void)
-+{
-+	if (max_nr_grant_frames() < nr_grant_frames)
-+		return -ENOSYS;
-+	return gnttab_map(0, nr_grant_frames - 1);
-+}
++		err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
++				    "%d", sg);
++		if (err) {
++			message = "writing feature-gso-tcpv4";
++			goto abort_transaction;
++		}
 +
-+int gnttab_suspend(void)
-+{
-+#ifndef __ia64__
-+	apply_to_page_range(&init_mm, (unsigned long)shared,
-+			    PAGE_SIZE * nr_grant_frames,
-+			    unmap_pte_fn, NULL);
-+#endif
-+	return 0;
-+}
++		/* We support rx-copy path. */
++		err = xenbus_printf(xbt, dev->nodename,
++				    "feature-rx-copy", "%d", 1);
++		if (err) {
++			message = "writing feature-rx-copy";
++			goto abort_transaction;
++		}
 +
-+#else /* !CONFIG_XEN */
++		/*
++		 * We don't support rx-flip path (except old guests who don't
++		 * grok this feature flag).
++		 */
++		err = xenbus_printf(xbt, dev->nodename,
++				    "feature-rx-flip", "%d", 0);
++		if (err) {
++			message = "writing feature-rx-flip";
++			goto abort_transaction;
++		}
 +
-+#include <platform-pci.h>
++		err = xenbus_transaction_end(xbt, 0);
++	} while (err == -EAGAIN);
 +
-+static unsigned long resume_frames;
++	if (err) {
++		xenbus_dev_fatal(dev, err, "completing transaction");
++		goto fail;
++	}
 +
-+static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
-+{
-+	struct xen_add_to_physmap xatp;
-+	unsigned int i = end_idx;
++	netback_probe_accelerators(be, dev);
 +
-+	/* Loop backwards, so that the first hypercall has the largest index,
-+	 * ensuring that the table will grow only once.
-+	 */
-+	do {
-+		xatp.domid = DOMID_SELF;
-+		xatp.idx = i;
-+		xatp.space = XENMAPSPACE_grant_table;
-+		xatp.gpfn = (resume_frames >> PAGE_SHIFT) + i;
-+		if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
-+			BUG();
-+	} while (i-- > start_idx);
++	err = xenbus_switch_state(dev, XenbusStateInitWait);
++	if (err)
++		goto fail;
++
++	/* This kicks hotplug scripts, so do it immediately. */
++	backend_create_netif(be);
 +
 +	return 0;
++
++abort_transaction:
++	xenbus_transaction_end(xbt, 1);
++	xenbus_dev_fatal(dev, err, "%s", message);
++fail:
++	DPRINTK("failed");
++	netback_remove(dev);
++	return err;
 +}
 +
-+int gnttab_resume(void)
++
++/**
++ * Handle the creation of the hotplug script environment.  We add the script
++ * and vif variables to the environment, for the benefit of the vif-* hotplug
++ * scripts.
++ */
++static int netback_uevent(struct xenbus_device *xdev, char **envp,
++			  int num_envp, char *buffer, int buffer_size)
 +{
-+	unsigned int max_nr_gframes, nr_gframes;
++	struct backend_info *be = xdev->dev.driver_data;
++	netif_t *netif = be->netif;
++	int i = 0, length = 0;
++	char *val;
 +
-+	nr_gframes = nr_grant_frames;
-+	max_nr_gframes = max_nr_grant_frames();
-+	if (max_nr_gframes < nr_gframes)
-+		return -ENOSYS;
++	DPRINTK("netback_uevent");
 +
-+	if (!resume_frames) {
-+		resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
-+		shared = ioremap(resume_frames, PAGE_SIZE * max_nr_gframes);
-+		if (shared == NULL) {
-+			printk("error to ioremap gnttab share frames\n");
-+			return -1;
-+		}
++	val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
++	if (IS_ERR(val)) {
++		int err = PTR_ERR(val);
++		xenbus_dev_fatal(xdev, err, "reading script");
++		return err;
++	}
++	else {
++		add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
++			       &length, "script=%s", val);
++		kfree(val);
 +	}
 +
-+	gnttab_map(0, nr_gframes - 1);
++	add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++		       "vif=%s", netif->dev->name);
++
++	envp[i] = NULL;
 +
 +	return 0;
 +}
 +
-+#endif /* !CONFIG_XEN */
 +
-+static int gnttab_expand(unsigned int req_entries)
++static void backend_create_netif(struct backend_info *be)
 +{
-+	int rc;
-+	unsigned int cur, extra;
++	int err;
++	long handle;
++	struct xenbus_device *dev = be->dev;
 +
-+	cur = nr_grant_frames;
-+	extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) /
-+		 GREFS_PER_GRANT_FRAME);
-+	if (cur + extra > max_nr_grant_frames())
-+		return -ENOSPC;
++	if (be->netif != NULL)
++		return;
 +
-+	if ((rc = gnttab_map(cur, cur + extra - 1)) == 0)
-+		rc = grow_gnttab_list(extra);
++	err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
++	if (err != 1) {
++		xenbus_dev_fatal(dev, err, "reading handle");
++		return;
++	}
 +
-+	return rc;
++	be->netif = netif_alloc(dev->otherend_id, handle);
++	if (IS_ERR(be->netif)) {
++		err = PTR_ERR(be->netif);
++		be->netif = NULL;
++		xenbus_dev_fatal(dev, err, "creating interface");
++		return;
++	}
++
++	kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
 +}
 +
-+int __devinit gnttab_init(void)
++
++/**
++ * Callback received when the frontend's state changes.
++ */
++static void frontend_changed(struct xenbus_device *dev,
++			     enum xenbus_state frontend_state)
 +{
-+	int i;
-+	unsigned int max_nr_glist_frames;
-+	unsigned int nr_init_grefs;
++	struct backend_info *be = dev->dev.driver_data;
 +
-+	if (!is_running_on_xen())
-+		return -ENODEV;
++	DPRINTK("%s", xenbus_strstate(frontend_state));
 +
-+	nr_grant_frames = 1;
-+	boot_max_nr_grant_frames = __max_nr_grant_frames();
++	be->frontend_state = frontend_state;
 +
-+	/* Determine the maximum number of frames required for the
-+	 * grant reference free list on the current hypervisor.
-+	 */
-+	max_nr_glist_frames = (boot_max_nr_grant_frames *
-+			       GREFS_PER_GRANT_FRAME /
-+			       (PAGE_SIZE / sizeof(grant_ref_t)));
++	switch (frontend_state) {
++	case XenbusStateInitialising:
++		if (dev->state == XenbusStateClosed) {
++			printk(KERN_INFO "%s: %s: prepare for reconnect\n",
++			       __FUNCTION__, dev->nodename);
++			xenbus_switch_state(dev, XenbusStateInitWait);
++		}
++		break;
 +
-+	gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
-+			      GFP_KERNEL);
-+	if (gnttab_list == NULL)
-+		return -ENOMEM;
++	case XenbusStateInitialised:
++		break;
 +
-+	for (i = 0; i < nr_grant_frames; i++) {
-+		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
-+		if (gnttab_list[i] == NULL)
-+			goto ini_nomem;
++	case XenbusStateConnected:
++		if (dev->state == XenbusStateConnected)
++			break;
++		backend_create_netif(be);
++		if (be->netif)
++			connect(be);
++		break;
++
++	case XenbusStateClosing:
++		if (be->netif) {
++			kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
++			netif_disconnect(be->netif);
++			be->netif = NULL;
++		}
++		xenbus_switch_state(dev, XenbusStateClosing);
++		break;
++
++	case XenbusStateClosed:
++		xenbus_switch_state(dev, XenbusStateClosed);
++		if (xenbus_dev_is_online(dev))
++			break;
++		/* fall through if not online */
++	case XenbusStateUnknown:
++		device_unregister(&dev->dev);
++		break;
++
++	default:
++		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
++				 frontend_state);
++		break;
 +	}
++}
++
++
++static void xen_net_read_rate(struct xenbus_device *dev,
++			      unsigned long *bytes, unsigned long *usec)
++{
++	char *s, *e;
++	unsigned long b, u;
++	char *ratestr;
++
++	/* Default to unlimited bandwidth. */
++	*bytes = ~0UL;
++	*usec = 0;
 +
-+	if (gnttab_resume() < 0)
-+		return -ENODEV;
++	ratestr = xenbus_read(XBT_NIL, dev->nodename, "rate", NULL);
++	if (IS_ERR(ratestr))
++		return;
 +
-+	nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME;
++	s = ratestr;
++	b = simple_strtoul(s, &e, 10);
++	if ((s == e) || (*e != ','))
++		goto fail;
 +
-+	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
-+		gnttab_entry(i) = i + 1;
++	s = e + 1;
++	u = simple_strtoul(s, &e, 10);
++	if ((s == e) || (*e != '\0'))
++		goto fail;
 +
-+	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
-+	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
-+	gnttab_free_head  = NR_RESERVED_ENTRIES;
++	*bytes = b;
++	*usec = u;
 +
-+	return 0;
++	kfree(ratestr);
++	return;
 +
-+ ini_nomem:
-+	for (i--; i >= 0; i--)
-+		free_page((unsigned long)gnttab_list[i]);
-+	kfree(gnttab_list);
-+	return -ENOMEM;
++ fail:
++	WPRINTK("Failed to parse network rate limit. Traffic unlimited.\n");
++	kfree(ratestr);
 +}
 +
-+#ifdef CONFIG_XEN
-+core_initcall(gnttab_init);
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/core/hypervisor_sysfs.c tmp-linux-2.6-xen.patch/drivers/xen/core/hypervisor_sysfs.c
---- pristine-linux-2.6.18.2/drivers/xen/core/hypervisor_sysfs.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/core/hypervisor_sysfs.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,56 @@
-+/*
-+ *  copyright (c) 2006 IBM Corporation
-+ *  Authored by: Mike D. Day <ncmike at us.ibm.com>
-+ *
-+ *  This program is free software; you can redistribute it and/or modify
-+ *  it under the terms of the GNU General Public License version 2 as
-+ *  published by the Free Software Foundation.
-+ */
++static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
++{
++	char *s, *e, *macstr;
++	int i;
 +
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/kobject.h>
-+#include <xen/hypervisor_sysfs.h>
++	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
++	if (IS_ERR(macstr))
++		return PTR_ERR(macstr);
 +
-+static ssize_t hyp_sysfs_show(struct kobject *kobj,
-+			      struct attribute *attr,
-+			      char *buffer)
-+{
-+	struct hyp_sysfs_attr *hyp_attr;
-+	hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr);
-+	if (hyp_attr->show)
-+		return hyp_attr->show(hyp_attr, buffer);
++	for (i = 0; i < ETH_ALEN; i++) {
++		mac[i] = simple_strtoul(s, &e, 16);
++		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
++			kfree(macstr);
++			return -ENOENT;
++		}
++		s = e+1;
++	}
++
++	kfree(macstr);
 +	return 0;
 +}
 +
-+static ssize_t hyp_sysfs_store(struct kobject *kobj,
-+			       struct attribute *attr,
-+			       const char *buffer,
-+			       size_t len)
++static void connect(struct backend_info *be)
 +{
-+	struct hyp_sysfs_attr *hyp_attr;
-+	hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr);
-+	if (hyp_attr->store)
-+		return hyp_attr->store(hyp_attr, buffer, len);
-+	return 0;
-+}
++	int err;
++	struct xenbus_device *dev = be->dev;
 +
-+static struct sysfs_ops hyp_sysfs_ops = {
-+	.show = hyp_sysfs_show,
-+	.store = hyp_sysfs_store,
-+};
++	err = connect_rings(be);
++	if (err)
++		return;
 +
-+static struct kobj_type hyp_sysfs_kobj_type = {
-+	.sysfs_ops = &hyp_sysfs_ops,
-+};
++	err = xen_net_read_mac(dev, be->netif->fe_dev_addr);
++	if (err) {
++		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
++		return;
++	}
 +
-+static int __init hypervisor_subsys_init(void)
-+{
-+	if (!is_running_on_xen())
-+		return -ENODEV;
++	xen_net_read_rate(dev, &be->netif->credit_bytes,
++			  &be->netif->credit_usec);
++	be->netif->remaining_credit = be->netif->credit_bytes;
 +
-+	hypervisor_subsys.kset.kobj.ktype = &hyp_sysfs_kobj_type;
-+	return 0;
++	xenbus_switch_state(dev, XenbusStateConnected);
++
++	netif_wake_queue(be->netif->dev);
 +}
 +
-+device_initcall(hypervisor_subsys_init);
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/core/machine_kexec.c tmp-linux-2.6-xen.patch/drivers/xen/core/machine_kexec.c
---- pristine-linux-2.6.18.2/drivers/xen/core/machine_kexec.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/core/machine_kexec.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,189 @@
-+/*
-+ * drivers/xen/core/machine_kexec.c 
-+ * handle transition of Linux booting another kernel
-+ */
 +
-+#include <linux/kexec.h>
-+#include <xen/interface/kexec.h>
-+#include <linux/mm.h>
-+#include <linux/bootmem.h>
++static int connect_rings(struct backend_info *be)
++{
++	struct xenbus_device *dev = be->dev;
++	unsigned long tx_ring_ref, rx_ring_ref;
++	unsigned int evtchn, rx_copy;
++	int err;
++	int val;
 +
-+extern void machine_kexec_setup_load_arg(xen_kexec_image_t *xki, 
-+					 struct kimage *image);
++	DPRINTK("");
 +
-+static int __initdata xen_max_nr_phys_cpus;
-+static struct resource xen_hypervisor_res;
-+static struct resource *xen_phys_cpus;
++	err = xenbus_gather(XBT_NIL, dev->otherend,
++			    "tx-ring-ref", "%lu", &tx_ring_ref,
++			    "rx-ring-ref", "%lu", &rx_ring_ref,
++			    "event-channel", "%u", &evtchn, NULL);
++	if (err) {
++		xenbus_dev_fatal(dev, err,
++				 "reading %s/ring-ref and event-channel",
++				 dev->otherend);
++		return err;
++	}
 +
-+void __init xen_machine_kexec_setup_resources(void)
-+{
-+	xen_kexec_range_t range;
-+	struct resource *res;
-+	int k = 0;
++	err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
++			   &rx_copy);
++	if (err == -ENOENT) {
++		err = 0;
++		rx_copy = 0;
++	}
++	if (err < 0) {
++		xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
++				 dev->otherend);
++		return err;
++	}
++	be->netif->copying_receiver = !!rx_copy;
 +
-+	if (!is_initial_xendomain())
-+		return;
++	if (be->netif->dev->tx_queue_len != 0) {
++		if (xenbus_scanf(XBT_NIL, dev->otherend,
++				 "feature-rx-notify", "%d", &val) < 0)
++			val = 0;
++		if (val)
++			be->netif->can_queue = 1;
++		else
++			/* Must be non-zero for pfifo_fast to work. */
++			be->netif->dev->tx_queue_len = 1;
++	}
 +
-+	/* determine maximum number of physical cpus */
++	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", "%d", &val) < 0)
++		val = 0;
++	if (val) {
++		be->netif->features |= NETIF_F_SG;
++		be->netif->dev->features |= NETIF_F_SG;
++	}
 +
-+	while (1) {
-+		memset(&range, 0, sizeof(range));
-+		range.range = KEXEC_RANGE_MA_CPU;
-+		range.nr = k;
++	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", "%d",
++			 &val) < 0)
++		val = 0;
++	if (val) {
++		be->netif->features |= NETIF_F_TSO;
++		be->netif->dev->features |= NETIF_F_TSO;
++	}
 +
-+		if(HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
-+			break;
++	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
++			 "%d", &val) < 0)
++		val = 0;
++	if (val) {
++		be->netif->features &= ~NETIF_F_IP_CSUM;
++		be->netif->dev->features &= ~NETIF_F_IP_CSUM;
++	}
 +
-+		k++;
++	/* Map the shared frame, irq etc. */
++	err = netif_map(be->netif, tx_ring_ref, rx_ring_ref, evtchn);
++	if (err) {
++		xenbus_dev_fatal(dev, err,
++				 "mapping shared-frames %lu/%lu port %u",
++				 tx_ring_ref, rx_ring_ref, evtchn);
++		return err;
 +	}
++	return 0;
++}
 +
-+	if (k == 0)
-+		return;
 +
-+	xen_max_nr_phys_cpus = k;
++/* ** Driver Registration ** */
 +
-+	/* allocate xen_phys_cpus */
 +
-+	xen_phys_cpus = alloc_bootmem_low(k * sizeof(struct resource));
-+	BUG_ON(xen_phys_cpus == NULL);
++static const struct xenbus_device_id netback_ids[] = {
++	{ "vif" },
++	{ "" }
++};
 +
-+	/* fill in xen_phys_cpus with per-cpu crash note information */
 +
-+	for (k = 0; k < xen_max_nr_phys_cpus; k++) {
-+		memset(&range, 0, sizeof(range));
-+		range.range = KEXEC_RANGE_MA_CPU;
-+		range.nr = k;
++static struct xenbus_driver netback = {
++	.name = "vif",
++	.owner = THIS_MODULE,
++	.ids = netback_ids,
++	.probe = netback_probe,
++	.remove = netback_remove,
++	.uevent = netback_uevent,
++	.otherend_changed = frontend_changed,
++};
 +
-+		if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
-+			goto err;
 +
-+		res = xen_phys_cpus + k;
++void netif_xenbus_init(void)
++{
++	xenbus_register_backend(&netback);
++}
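xen_net_read_rate() and xen_net_read_mac() above parse the xenstore-supplied
"rate" ("<bytes>,<usec>") and "mac" ("xx:xx:xx:xx:xx:xx") strings with
simple_strtoul(). A standalone userspace sketch of the same parsing, with
strtoul() standing in for simple_strtoul() and hard-coded strings in place of
xenbus_read():

#include <stdio.h>
#include <stdlib.h>

#define ETH_ALEN 6

/* Parse "<bytes>,<usec>", as xen_net_read_rate() does. */
static int parse_rate(const char *ratestr, unsigned long *bytes,
		      unsigned long *usec)
{
	const char *s;
	char *e;
	unsigned long b, u;

	b = strtoul(ratestr, &e, 10);
	if (e == ratestr || *e != ',')
		return -1;
	s = e + 1;
	u = strtoul(s, &e, 10);
	if (e == s || *e != '\0')
		return -1;
	*bytes = b;
	*usec = u;
	return 0;
}

/* Parse a colon-separated MAC, as xen_net_read_mac() does. */
static int parse_mac(const char *s, unsigned char mac[ETH_ALEN])
{
	char *e;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = strtoul(s, &e, 16);
		if (s == e || *e != ((i == ETH_ALEN - 1) ? '\0' : ':'))
			return -1;
		s = e + 1;
	}
	return 0;
}

int main(void)
{
	unsigned long bytes, usec;
	unsigned char mac[ETH_ALEN];

	if (parse_rate("10000,50", &bytes, &usec) == 0)
		printf("credit: %lu bytes per %lu usec\n", bytes, usec);
	if (parse_mac("00:16:3e:01:02:03", mac) == 0)
		printf("mac ok: %02x..%02x\n", mac[0], mac[ETH_ALEN - 1]);
	return 0;
}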
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/netfront/Makefile
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/netfront/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,4 @@
 +
-+		memset(res, 0, sizeof(*res));
-+		res->name = "Crash note";
-+		res->start = range.start;
-+		res->end = range.start + range.size - 1;
-+		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
-+	}
++obj-$(CONFIG_XEN_NETDEV_FRONTEND)	:= xennet.o
 +
-+	/* fill in xen_hypervisor_res with hypervisor machine address range */
++xennet-objs := netfront.o accel.o
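The accel.c file added below implements a plugin registry: accelerators live on
a list keyed by a frontend name string read from xenstore, and a vif either
finds a loaded plugin's hooks immediately or leaves a placeholder entry that is
filled in once request_module() brings the plugin up. A minimal userspace
sketch of that pattern; all names here are illustrative, and the real code adds
locking, xenbus watches, workqueues and per-vif state.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct accel_hooks {
	int (*new_device)(const char *vif);
};

struct accelerator {
	struct accelerator *next;
	char *frontend;
	struct accel_hooks *hooks;	/* NULL until the plugin loads */
};

static struct accelerator *accelerators;

static struct accelerator *find_accelerator(const char *frontend)
{
	struct accelerator *a;

	for (a = accelerators; a; a = a->next)
		if (strcmp(a->frontend, frontend) == 0)
			return a;
	return NULL;
}

static struct accelerator *add_accelerator(const char *frontend,
					   struct accel_hooks *hooks)
{
	struct accelerator *a = malloc(sizeof(*a));

	a->frontend = strdup(frontend);
	a->hooks = hooks;
	a->next = accelerators;
	accelerators = a;
	return a;
}

/* A vif asks for a plugin by name (cf. netfront_load_accelerator). */
static void load_accelerator(const char *vif, const char *frontend)
{
	struct accelerator *a = find_accelerator(frontend);

	if (!a)
		a = add_accelerator(frontend, NULL);
	if (a->hooks)
		a->hooks->new_device(vif);
	else
		printf("would request_module(\"%s\")\n", frontend);
}

/* The plugin announces itself (cf. netfront_accelerator_loaded). */
static int demo_new_device(const char *vif)
{
	printf("accelerating %s\n", vif);
	return 0;
}

static struct accel_hooks demo_hooks = { .new_device = demo_new_device };

int main(void)
{
	load_accelerator("vif0", "demo_accel");	/* placeholder created */
	find_accelerator("demo_accel")->hooks = &demo_hooks;
	load_accelerator("vif1", "demo_accel");	/* hooks now present */
	return 0;
}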
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/netfront/accel.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/netfront/accel.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,824 @@
++/******************************************************************************
++ * Virtual network driver for conversing with remote driver backends.
++ *
++ * Copyright (C) 2007 Solarflare Communications, Inc.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
 +
-+	memset(&range, 0, sizeof(range));
-+	range.range = KEXEC_RANGE_MA_XEN;
++#include <linux/netdevice.h>
++#include <linux/skbuff.h>
++#include <linux/list.h>
++#include <linux/mutex.h>
++#include <asm/hypervisor.h>
++#include <xen/xenbus.h>
 +
-+	if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
-+		goto err;
++#include "netfront.h"
 +
-+	xen_hypervisor_res.name = "Hypervisor code and data";
-+	xen_hypervisor_res.start = range.start;
-+	xen_hypervisor_res.end = range.start + range.size - 1;
-+	xen_hypervisor_res.flags = IORESOURCE_BUSY | IORESOURCE_MEM;
++#define DPRINTK(fmt, args...)				\
++	pr_debug("netfront/accel (%s:%d) " fmt,		\
++	       __FUNCTION__, __LINE__, ##args)
++#define IPRINTK(fmt, args...)				\
++	printk(KERN_INFO "netfront/accel: " fmt, ##args)
++#define WPRINTK(fmt, args...)				\
++	printk(KERN_WARNING "netfront/accel: " fmt, ##args)
 +
-+	/* fill in crashk_res if range is reserved by hypervisor */
++static int netfront_remove_accelerator(struct netfront_info *np,
++				       struct xenbus_device *dev);
++static int netfront_load_accelerator(struct netfront_info *np, 
++				     struct xenbus_device *dev, 
++				     const char *frontend);
 +
-+	memset(&range, 0, sizeof(range));
-+	range.range = KEXEC_RANGE_MA_CRASH;
++/*
++ * List of all netfront accelerator plugin modules available.  Each
++ * list entry is of type struct netfront_accelerator.
++ */ 
++static struct list_head accelerators_list;
 +
-+	if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
-+		return;
++/* Lock to protect access to accelerators_list */
++static spinlock_t accelerators_lock;
 +
-+	if (range.size) {
-+		crashk_res.start = range.start;
-+		crashk_res.end = range.start + range.size - 1;
-+	}
++/* Workqueue to process acceleration configuration changes */
++struct workqueue_struct *accel_watch_workqueue;
 +
-+	return;
++/* Mutex to prevent concurrent loads and suspends, etc. */
++DEFINE_MUTEX(accelerator_mutex);
 +
-+ err:
-+	/*
-+	 * It isn't possible to free xen_phys_cpus this early in the
-+	 * boot. Failure at this stage is unexpected and the amount of
-+	 * memory is small therefore we tolerate the potential leak.
-+         */
-+	xen_max_nr_phys_cpus = 0;
-+	return;
++void netif_init_accel(void)
++{
++	INIT_LIST_HEAD(&accelerators_list);
++	spin_lock_init(&accelerators_lock);
++
++	accel_watch_workqueue = create_workqueue("net_accel");
 +}
 +
-+void __init xen_machine_kexec_register_resources(struct resource *res)
++void netif_exit_accel(void)
 +{
-+	int k;
++	struct netfront_accelerator *accelerator, *tmp;
++	unsigned long flags;
 +
-+	request_resource(res, &xen_hypervisor_res);
++	flush_workqueue(accel_watch_workqueue);
++	destroy_workqueue(accel_watch_workqueue);
 +
-+	for (k = 0; k < xen_max_nr_phys_cpus; k++)
-+		request_resource(&xen_hypervisor_res, xen_phys_cpus + k);
++	spin_lock_irqsave(&accelerators_lock, flags);
 +
-+}
++	list_for_each_entry_safe(accelerator, tmp, &accelerators_list, link) {
++		BUG_ON(!list_empty(&accelerator->vif_states));
 +
-+static void setup_load_arg(xen_kexec_image_t *xki, struct kimage *image)
-+{
-+	machine_kexec_setup_load_arg(xki, image);
++		list_del(&accelerator->link);
++		kfree(accelerator->frontend);
++		kfree(accelerator);
++	}
 +
-+	xki->indirection_page = image->head;
-+	xki->start_address = image->start;
++	spin_unlock_irqrestore(&accelerators_lock, flags);
 +}
 +
-+/*
-+ * Load the image into xen so xen can kdump itself
-+ * This might have been done in prepare, but prepare
-+ * is currently called too early. It might make sense
-+ * to move prepare, but for now, just add an extra hook.
-+ */
-+int xen_machine_kexec_load(struct kimage *image)
-+{
-+	xen_kexec_load_t xkl;
-+
-+	memset(&xkl, 0, sizeof(xkl));
-+	xkl.type = image->type;
-+	setup_load_arg(&xkl.image, image);
-+	return HYPERVISOR_kexec_op(KEXEC_CMD_kexec_load, &xkl);
-+}
 +
-+/*
-+ * Unload the image that was stored by machine_kexec_load()
-+ * This might have been done in machine_kexec_cleanup() but it
-+ * is called too late, and its possible xen could try and kdump
-+ * using resources that have been freed.
++/* 
++ * Watch the configured accelerator and change plugin if it's modified 
 + */
-+void xen_machine_kexec_unload(struct kimage *image)
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++static void accel_watch_work(struct work_struct *context)
++#else
++static void accel_watch_work(void *context)
++#endif
 +{
-+	xen_kexec_load_t xkl;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++	struct netfront_accel_vif_state *vif_state = 
++		container_of(context, struct netfront_accel_vif_state, 
++			     accel_work);
++#else
++	struct netfront_accel_vif_state *vif_state =
++		(struct netfront_accel_vif_state *)context;
++#endif
++	struct netfront_info *np = vif_state->np;
++	char *accel_frontend;
++	int accel_len, rc = -1;
 +
-+	memset(&xkl, 0, sizeof(xkl));
-+	xkl.type = image->type;
-+	HYPERVISOR_kexec_op(KEXEC_CMD_kexec_unload, &xkl);
-+}
++	mutex_lock(&accelerator_mutex);
 +
-+/*
-+ * Do not allocate memory (or fail in any way) in machine_kexec().
-+ * We are past the point of no return, committed to rebooting now.
-+ *
-+ * This has the hypervisor move to the prefered reboot CPU, 
-+ * stop all CPUs and kexec. That is it combines machine_shutdown()
-+ * and machine_kexec() in Linux kexec terms.
-+ */
-+NORET_TYPE void machine_kexec(struct kimage *image)
-+{
-+	xen_kexec_exec_t xke;
++	accel_frontend = xenbus_read(XBT_NIL, np->xbdev->otherend, 
++				     "accel-frontend", &accel_len);
++	if (IS_ERR(accel_frontend)) {
++		accel_frontend = NULL;
++		netfront_remove_accelerator(np, np->xbdev);
++	} else {
++		/* If this is the first time, request the accelerator,
++		   otherwise only request one if it has changed */
++		if (vif_state->accel_frontend == NULL) {
++			rc = netfront_load_accelerator(np, np->xbdev, 
++						       accel_frontend);
++		} else {
++			if (strncmp(vif_state->accel_frontend, accel_frontend,
++				    accel_len)) {
++				netfront_remove_accelerator(np, np->xbdev);
++				rc = netfront_load_accelerator(np, np->xbdev, 
++							       accel_frontend);
++			}
++		}
++	}
 +
-+	memset(&xke, 0, sizeof(xke));
-+	xke.type = image->type;
-+	HYPERVISOR_kexec_op(KEXEC_CMD_kexec, &xke);
-+	panic("KEXEC_CMD_kexec hypercall should not return\n");
++	/* Get rid of previous state and replace with the new name */
++	if (vif_state->accel_frontend != NULL)
++		kfree(vif_state->accel_frontend);
++	vif_state->accel_frontend = accel_frontend;
++
++	mutex_unlock(&accelerator_mutex);
++
++	if (rc == 0) {
++		DPRINTK("requesting module %s\n", accel_frontend);
++		request_module("%s", accel_frontend);
++		/*
++		 * Module should now call netfront_accelerator_loaded() once
++		 * it's up and running, and we can continue from there 
++		 */
++	}
 +}
 +
-+void machine_shutdown(void)
++
++static void accel_watch_changed(struct xenbus_watch *watch,
++				const char **vec, unsigned int len)
 +{
-+	/* do nothing */
++	struct netfront_accel_vif_state *vif_state = 
++		container_of(watch, struct netfront_accel_vif_state,
++			     accel_watch);
++	queue_work(accel_watch_workqueue, &vif_state->accel_work);
 +}
 +
 +
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/core/machine_reboot.c tmp-linux-2.6-xen.patch/drivers/xen/core/machine_reboot.c
---- pristine-linux-2.6.18.2/drivers/xen/core/machine_reboot.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/core/machine_reboot.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,241 @@
-+#include <linux/version.h>
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/unistd.h>
-+#include <linux/module.h>
-+#include <linux/reboot.h>
-+#include <linux/sysrq.h>
-+#include <linux/stringify.h>
-+#include <linux/stop_machine.h>
-+#include <asm/irq.h>
-+#include <asm/mmu_context.h>
-+#include <xen/evtchn.h>
-+#include <asm/hypervisor.h>
-+#include <xen/xenbus.h>
-+#include <linux/cpu.h>
-+#include <linux/kthread.h>
-+#include <xen/gnttab.h>
-+#include <xen/xencons.h>
-+#include <xen/cpu_hotplug.h>
-+#include <xen/interface/vcpu.h>
++void netfront_accelerator_add_watch(struct netfront_info *np)
++{
++	int err;
++	
++	/* Check we're not trying to overwrite an existing watch */
++	BUG_ON(np->accel_vif_state.accel_watch.node != NULL);
 +
-+#if defined(__i386__) || defined(__x86_64__)
++	/* Get a watch on the accelerator plugin */
++	err = xenbus_watch_path2(np->xbdev, np->xbdev->otherend, 
++				 "accel-frontend", 
++				 &np->accel_vif_state.accel_watch,
++				 accel_watch_changed);
++	if (err) {
++		DPRINTK("%s: Failed to register accel watch: %d\n",
++                        __FUNCTION__, err);
++		np->accel_vif_state.accel_watch.node = NULL;
++        }
++}
 +
-+/*
-+ * Power off function, if any
-+ */
-+void (*pm_power_off)(void);
-+EXPORT_SYMBOL(pm_power_off);
 +
-+void machine_emergency_restart(void)
++static
++void netfront_accelerator_remove_watch(struct netfront_info *np)
 +{
-+	/* We really want to get pending console data out before we die. */
-+	xencons_force_flush();
-+	HYPERVISOR_shutdown(SHUTDOWN_reboot);
-+}
++	struct netfront_accel_vif_state *vif_state = &np->accel_vif_state;
 +
-+void machine_restart(char * __unused)
-+{
-+	machine_emergency_restart();
++	/* Get rid of watch on accelerator plugin */
++	if (vif_state->accel_watch.node != NULL) {
++		unregister_xenbus_watch(&vif_state->accel_watch);
++		kfree(vif_state->accel_watch.node);
++		vif_state->accel_watch.node = NULL;
++
++		flush_workqueue(accel_watch_workqueue);
++
++		/* Clean up any state left from watch */
++		if (vif_state->accel_frontend != NULL) {
++			kfree(vif_state->accel_frontend);
++			vif_state->accel_frontend = NULL;
++		}
++	}	
 +}
 +
-+void machine_halt(void)
-+{
-+	machine_power_off();
-+}
 +
-+void machine_power_off(void)
++/* 
++ * Initialise the accel_vif_state field in the netfront state
++ */ 
++void init_accelerator_vif(struct netfront_info *np,
++			  struct xenbus_device *dev)
 +{
-+	/* We really want to get pending console data out before we die. */
-+	xencons_force_flush();
-+	if (pm_power_off)
-+		pm_power_off();
-+	HYPERVISOR_shutdown(SHUTDOWN_poweroff);
++	np->accelerator = NULL;
++
++	/* It's assumed that these things don't change */
++	np->accel_vif_state.np = np;
++	np->accel_vif_state.dev = dev;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++	INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work);
++#else
++	INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work, 
++		  &np->accel_vif_state);
++#endif
 +}
 +
-+int reboot_thru_bios = 0;	/* for dmi_scan.c */
-+EXPORT_SYMBOL(machine_restart);
-+EXPORT_SYMBOL(machine_halt);
-+EXPORT_SYMBOL(machine_power_off);
 +
-+static void pre_suspend(void)
++/*
++ * Compare a frontend description string against an accelerator to see
++ * if they match.  Would ultimately be nice to replace the string with
++ * a unique numeric identifier for each accelerator.
++ */
++static int match_accelerator(const char *frontend, 
++			     struct netfront_accelerator *accelerator)
 +{
-+	HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
-+	HYPERVISOR_update_va_mapping(fix_to_virt(FIX_SHARED_INFO),
-+				     __pte_ma(0), 0);
-+
-+	xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
-+	xen_start_info->console.domU.mfn =
-+		mfn_to_pfn(xen_start_info->console.domU.mfn);
++	return strcmp(frontend, accelerator->frontend) == 0;
 +}
 +
-+static void post_suspend(int suspend_cancelled)
++
++/* 
++ * Add a frontend vif to the list of vifs that is using a netfront
++ * accelerator plugin module.
++ */
++static void add_accelerator_vif(struct netfront_accelerator *accelerator,
++				struct netfront_info *np)
 +{
-+	int i, j, k, fpp;
-+	unsigned long shinfo_mfn;
-+	extern unsigned long max_pfn;
-+	extern unsigned long *pfn_to_mfn_frame_list_list;
-+	extern unsigned long *pfn_to_mfn_frame_list[];
++	unsigned long flags;
 +
-+	if (suspend_cancelled) {
-+		xen_start_info->store_mfn =
-+			pfn_to_mfn(xen_start_info->store_mfn);
-+		xen_start_info->console.domU.mfn =
-+			pfn_to_mfn(xen_start_info->console.domU.mfn);
++	/* Need lock to write list */
++	spin_lock_irqsave(&accelerator->vif_states_lock, flags);
++
++	if (np->accelerator == NULL) {
++		np->accelerator = accelerator;
++		
++		list_add(&np->accel_vif_state.link, &accelerator->vif_states);
 +	} else {
-+#ifdef CONFIG_SMP
-+		cpu_initialized_map = cpu_online_map;
-+#endif
++		/* 
++		 * May get here legitimately if suspend_cancel is
++		 * called, but in that case configuration should not
++		 * have changed
++		 */
++		BUG_ON(np->accelerator != accelerator);
 +	}
 +
-+	shinfo_mfn = xen_start_info->shared_info >> PAGE_SHIFT;
-+	HYPERVISOR_update_va_mapping(fix_to_virt(FIX_SHARED_INFO),
-+				     pfn_pte_ma(shinfo_mfn, PAGE_KERNEL), 0);
-+	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
++	spin_unlock_irqrestore(&accelerator->vif_states_lock, flags);
++}
 +
-+	memset(empty_zero_page, 0, PAGE_SIZE);
 +
-+	fpp = PAGE_SIZE/sizeof(unsigned long);
-+	for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
-+		if ((j % fpp) == 0) {
-+			k++;
-+			pfn_to_mfn_frame_list_list[k] =
-+				virt_to_mfn(pfn_to_mfn_frame_list[k]);
-+			j = 0;
-+		}
-+		pfn_to_mfn_frame_list[k][j] =
-+			virt_to_mfn(&phys_to_machine_mapping[i]);
++/*
++ * Initialise the state to track an accelerator plugin module.
++ */ 
++static int init_accelerator(const char *frontend, 
++			    struct netfront_accelerator **result,
++			    struct netfront_accel_hooks *hooks)
++{
++	struct netfront_accelerator *accelerator = 
++		kmalloc(sizeof(struct netfront_accelerator), GFP_KERNEL);
++	unsigned long flags;
++	int frontend_len;
++
++	if (!accelerator) {
++		DPRINTK("no memory for accelerator\n");
++		return -ENOMEM;
 +	}
-+	HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
-+	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
-+		virt_to_mfn(pfn_to_mfn_frame_list_list);
-+}
 +
-+#else /* !(defined(__i386__) || defined(__x86_64__)) */
++	frontend_len = strlen(frontend) + 1;
++	accelerator->frontend = kmalloc(frontend_len, GFP_KERNEL);
++	if (!accelerator->frontend) {
++		DPRINTK("no memory for accelerator\n");
++		kfree(accelerator);
++		return -ENOMEM;
++	}
++	strlcpy(accelerator->frontend, frontend, frontend_len);
++	
++	INIT_LIST_HEAD(&accelerator->vif_states);
++	spin_lock_init(&accelerator->vif_states_lock);
++
++	accelerator->hooks = hooks;
 +
-+#ifndef HAVE_XEN_PRE_SUSPEND
-+#define xen_pre_suspend()	((void)0)
-+#endif
++	spin_lock_irqsave(&accelerators_lock, flags);
++	list_add(&accelerator->link, &accelerators_list);
++	spin_unlock_irqrestore(&accelerators_lock, flags);
 +
-+#ifndef HAVE_XEN_POST_SUSPEND
-+#define xen_post_suspend(x)	((void)0)
-+#endif
++	*result = accelerator;
 +
-+#define switch_idle_mm()	((void)0)
-+#define mm_pin_all()		((void)0)
-+#define pre_suspend()		xen_pre_suspend()
-+#define post_suspend(x)		xen_post_suspend(x)
++	return 0;
++}					
 +
-+#endif
 +
-+static int take_machine_down(void *p_fast_suspend)
++/* 
++ * Modify the hooks stored in the per-vif state to match that in the
++ * netfront accelerator's state.
++ */
++static void 
++accelerator_set_vif_state_hooks(struct netfront_accel_vif_state *vif_state)
 +{
-+	int fast_suspend = *(int *)p_fast_suspend;
-+	int suspend_cancelled, err;
-+	extern void time_resume(void);
++	/* This function must be called with the vif_states_lock held */
 +
-+	if (fast_suspend) {
-+		BUG_ON(!irqs_disabled());
-+	} else {
-+		BUG_ON(irqs_disabled());
++	DPRINTK("%p\n",vif_state);
 +
-+		for (;;) {
-+			err = smp_suspend();
-+			if (err)
-+				return err;
++	/* Make sure there are no data path operations going on */
++	netif_poll_disable(vif_state->np->netdev);
++	netif_tx_lock_bh(vif_state->np->netdev);
 +
-+			xenbus_suspend();
-+			preempt_disable();
++	vif_state->hooks = vif_state->np->accelerator->hooks;
 +
-+			if (num_online_cpus() == 1)
-+				break;
++	netif_tx_unlock_bh(vif_state->np->netdev);
++	netif_poll_enable(vif_state->np->netdev);
++}
 +
-+			preempt_enable();
-+			xenbus_suspend_cancel();
-+		}
 +
-+		local_irq_disable();
++static void accelerator_probe_new_vif(struct netfront_info *np,
++				      struct xenbus_device *dev, 
++				      struct netfront_accelerator *accelerator)
++{
++	struct netfront_accel_hooks *hooks;
++	unsigned long flags;
++
++	DPRINTK("\n");
++
++	/* Include this frontend device on the accelerator's list */
++	add_accelerator_vif(accelerator, np);
++	
++	hooks = accelerator->hooks;
++	
++	if (hooks) {
++		if (hooks->new_device(np->netdev, dev) == 0) {
++			spin_lock_irqsave
++				(&accelerator->vif_states_lock, flags);
++
++			accelerator_set_vif_state_hooks(&np->accel_vif_state);
++
++			spin_unlock_irqrestore
++				(&accelerator->vif_states_lock, flags);
++		}
 +	}
 +
-+	mm_pin_all();
-+	gnttab_suspend();
-+	pre_suspend();
++	return;
++}
 +
-+	/*
-+	 * This hypercall returns 1 if suspend was cancelled or the domain was
-+	 * merely checkpointed, and 0 if it is resuming in a new domain.
-+	 */
-+	suspend_cancelled = HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
 +
-+	post_suspend(suspend_cancelled);
-+	gnttab_resume();
-+	if (!suspend_cancelled) {
-+		irq_resume();
-+#ifdef __x86_64__
-+		/*
-+		 * Older versions of Xen do not save/restore the user %cr3.
-+		 * We do it here just in case, but there's no need if we are
-+		 * in fast-suspend mode as that implies a new enough Xen.
-+		 */
-+		if (!fast_suspend) {
-+			struct mmuext_op op;
-+			op.cmd = MMUEXT_NEW_USER_BASEPTR;
-+			op.arg1.mfn = pfn_to_mfn(__pa(__user_pgd(
-+				current->active_mm->pgd)) >> PAGE_SHIFT);
-+			if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
-+				BUG();
++/*  
++ * Request that a particular netfront accelerator plugin is loaded.
++ * Usually called as a result of the vif configuration specifying
++ * which one to use. Must be called with accelerator_mutex held 
++ */
++static int netfront_load_accelerator(struct netfront_info *np, 
++				     struct xenbus_device *dev, 
++				     const char *frontend)
++{
++	struct netfront_accelerator *accelerator;
++	int rc = 0;
++
++	DPRINTK(" %s\n", frontend);
++
++	/* 
++	 * Look at list of loaded accelerators to see if the requested
++	 * one is already there 
++	 */
++	list_for_each_entry(accelerator, &accelerators_list, link) {
++		if (match_accelerator(frontend, accelerator)) {
++			accelerator_probe_new_vif(np, dev, accelerator);
++			return 0;
 +		}
-+#endif
 +	}
-+	time_resume();
 +
-+	if (!fast_suspend)
-+		local_irq_enable();
++	/* Couldn't find it, so create a new one and load the module */
++	if ((rc = init_accelerator(frontend, &accelerator, NULL)) < 0) {
++		return rc;
++	}
 +
-+	return suspend_cancelled;
++	/* Include this frontend device on the accelerator's list */
++	add_accelerator_vif(accelerator, np);
++
++	return rc;
 +}
 +
-+int __xen_suspend(int fast_suspend)
++
++/*
++ * Go through all the netfront vifs and see if they have requested
++ * this accelerator.  Notify the accelerator plugin of the relevant
++ * device if so.  Called when an accelerator plugin module is first
++ * loaded and connects to netfront.
++ */
++static void 
++accelerator_probe_vifs(struct netfront_accelerator *accelerator,
++		       struct netfront_accel_hooks *hooks)
 +{
-+	int err, suspend_cancelled;
++	struct netfront_accel_vif_state *vif_state, *tmp;
++	unsigned long flags;
 +
-+	BUG_ON(smp_processor_id() != 0);
-+	BUG_ON(in_interrupt());
++	DPRINTK("%p\n", accelerator);
 +
-+#if defined(__i386__) || defined(__x86_64__)
-+	if (xen_feature(XENFEAT_auto_translated_physmap)) {
-+		printk(KERN_WARNING "Cannot suspend in "
-+		       "auto_translated_physmap mode.\n");
-+		return -EOPNOTSUPP;
++	/* 
++	 * Store the hooks for future calls to probe a new device, and
++	 * to wire into the vif_state once the accelerator plugin is
++	 * ready to accelerate each vif
++	 */
++	BUG_ON(hooks == NULL);
++	accelerator->hooks = hooks;
++
++	/* 
++	 * We currently hold accelerator_mutex, so we don't need
++	 * vif_states_lock to read the list
++	 */
++	list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states,
++				 link) {
++		struct netfront_info *np = vif_state->np;
++		
++		if (hooks->new_device(np->netdev, vif_state->dev) == 0) {
++			spin_lock_irqsave
++				(&accelerator->vif_states_lock, flags);
++
++			accelerator_set_vif_state_hooks(vif_state);
++
++			spin_unlock_irqrestore
++				(&accelerator->vif_states_lock, flags);
++		}
 +	}
-+#endif
++}
 +
-+	/* If we are definitely UP then 'slow mode' is actually faster. */
-+	if (num_possible_cpus() == 1)
-+		fast_suspend = 0;
 +
-+	if (fast_suspend) {
-+		xenbus_suspend();
-+		err = stop_machine_run(take_machine_down, &fast_suspend, 0);
-+		if (err < 0)
-+			xenbus_suspend_cancel();
-+	} else {
-+		err = take_machine_down(&fast_suspend);
++/* 
++ * Called by the netfront accelerator plugin module when it has loaded 
++ */
++int netfront_accelerator_loaded(int version, const char *frontend, 
++				struct netfront_accel_hooks *hooks)
++{
++	struct netfront_accelerator *accelerator;
++
++	if (is_initial_xendomain())
++		return -EINVAL;
++
++	if (version != NETFRONT_ACCEL_VERSION) {
++		if (version > NETFRONT_ACCEL_VERSION) {
++			/* Caller has higher version number, leave it
++			   up to them to decide whether to continue.
++			   They can re-call with a lower number if
++			   they're happy to be compatible with us */
++			return NETFRONT_ACCEL_VERSION;
++		} else {
++			/* We have a more recent version than caller.
++			   Currently reject, but may in future be able
++			   to be backwardly compatible */
++			return -EPROTO;
++		}
 +	}
 +
-+	if (err < 0)
-+		return err;
++	mutex_lock(&accelerator_mutex);
 +
-+	suspend_cancelled = err;
-+	if (!suspend_cancelled) {
-+		xencons_resume();
-+		xenbus_resume();
-+	} else {
-+		xenbus_suspend_cancel();
++	/* 
++	 * Look through list of accelerators to see if it has already
++	 * been requested
++	 */
++	list_for_each_entry(accelerator, &accelerators_list, link) {
++		if (match_accelerator(frontend, accelerator)) {
++			accelerator_probe_vifs(accelerator, hooks);
++			goto out;
++		}
 +	}
 +
-+	if (!fast_suspend)
-+		smp_resume();
++	/*
++	 * If it wasn't in the list, add it now so that when it is
++	 * requested the caller will find it
++	 */
++	DPRINTK("Couldn't find matching accelerator (%s)\n",
++		frontend);
 +
++	init_accelerator(frontend, &accelerator, hooks);
++
++ out:
++	mutex_unlock(&accelerator_mutex);
 +	return 0;
 +}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/core/Makefile tmp-linux-2.6-xen.patch/drivers/xen/core/Makefile
---- pristine-linux-2.6.18.2/drivers/xen/core/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/core/Makefile	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,12 @@
-+#
-+# Makefile for the linux kernel.
-+#
++EXPORT_SYMBOL_GPL(netfront_accelerator_loaded);
 +
-+obj-y := evtchn.o gnttab.o features.o reboot.o machine_reboot.o
 +
-+obj-$(CONFIG_PROC_FS)		+= xen_proc.o
-+obj-$(CONFIG_SYS_HYPERVISOR)	+= hypervisor_sysfs.o
-+obj-$(CONFIG_HOTPLUG_CPU)	+= cpu_hotplug.o
-+obj-$(CONFIG_XEN_SYSFS)		+= xen_sysfs.o
-+obj-$(CONFIG_XEN_SMPBOOT)	+= smpboot.o
-+obj-$(CONFIG_KEXEC)		+= machine_kexec.o
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/core/reboot.c tmp-linux-2.6-xen.patch/drivers/xen/core/reboot.c
---- pristine-linux-2.6.18.2/drivers/xen/core/reboot.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/core/reboot.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,243 @@
-+#define __KERNEL_SYSCALLS__
-+#include <linux/version.h>
-+#include <linux/kernel.h>
-+#include <linux/unistd.h>
-+#include <linux/module.h>
-+#include <linux/reboot.h>
-+#include <linux/sysrq.h>
-+#include <asm/hypervisor.h>
-+#include <xen/xenbus.h>
++/* 
++ * Remove the hooks from a single vif state.
++ */
++static void 
++accelerator_remove_single_hook(struct netfront_accelerator *accelerator,
++			       struct netfront_accel_vif_state *vif_state)
++{
++	/* Make sure there are no data path operations going on */
++	netif_poll_disable(vif_state->np->netdev);
++	netif_tx_lock_bh(vif_state->np->netdev);
 +
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
++	/* 
++	 * Remove the hooks, but leave the vif_state on the
++	 * accelerator's list as that signifies this vif is
++	 * interested in using that accelerator if it becomes
++	 * available again
++	 */
++	vif_state->hooks = NULL;
++	
++	netif_tx_unlock_bh(vif_state->np->netdev);
++	netif_poll_enable(vif_state->np->netdev);		       
++}
 +
-+MODULE_LICENSE("Dual BSD/GPL");
 +
-+#define SHUTDOWN_INVALID  -1
-+#define SHUTDOWN_POWEROFF  0
-+#define SHUTDOWN_SUSPEND   2
-+/* Code 3 is SHUTDOWN_CRASH, which we don't use because the domain can only
-+ * report a crash, not be instructed to crash!
-+ * HALT is the same as POWEROFF, as far as we're concerned.  The tools use
-+ * the distinction when we return the reason code to them.
++/* 
++ * Safely remove the accelerator function hooks from a netfront state.
 + */
-+#define SHUTDOWN_HALT      4
-+
-+/* Ignore multiple shutdown requests. */
-+static int shutting_down = SHUTDOWN_INVALID;
-+
-+/* Can we leave APs online when we suspend? */
-+static int fast_suspend;
++static void accelerator_remove_hooks(struct netfront_accelerator *accelerator)
++{
++	struct netfront_accel_hooks *hooks;
++	struct netfront_accel_vif_state *vif_state, *tmp;
++	unsigned long flags;
 +
-+static void __shutdown_handler(void *unused);
-+static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
++	/* Mutex is held so don't need vif_states_lock to iterate list */
++	list_for_each_entry_safe(vif_state, tmp,
++				 &accelerator->vif_states,
++				 link) {
++		spin_lock_irqsave(&accelerator->vif_states_lock, flags);
 +
-+int __xen_suspend(int fast_suspend);
++		if(vif_state->hooks) {
++			hooks = vif_state->hooks;
++			
++			/* Last chance to get statistics from the accelerator */
++			hooks->get_stats(vif_state->np->netdev,
++					 &vif_state->np->stats);
 +
-+static int shutdown_process(void *__unused)
-+{
-+	static char *envp[] = { "HOME=/", "TERM=linux",
-+				"PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
-+	static char *poweroff_argv[] = { "/sbin/poweroff", NULL };
++			spin_unlock_irqrestore(&accelerator->vif_states_lock,
++					       flags);
 +
-+	extern asmlinkage long sys_reboot(int magic1, int magic2,
-+					  unsigned int cmd, void *arg);
++			accelerator_remove_single_hook(accelerator, vif_state);
 +
-+	if ((shutting_down == SHUTDOWN_POWEROFF) ||
-+	    (shutting_down == SHUTDOWN_HALT)) {
-+		if (call_usermodehelper("/sbin/poweroff", poweroff_argv,
-+					envp, 0) < 0) {
-+#ifdef CONFIG_XEN
-+			sys_reboot(LINUX_REBOOT_MAGIC1,
-+				   LINUX_REBOOT_MAGIC2,
-+				   LINUX_REBOOT_CMD_POWER_OFF,
-+				   NULL);
-+#endif /* CONFIG_XEN */
++			accelerator->hooks->remove(vif_state->dev);
++		} else {
++			spin_unlock_irqrestore(&accelerator->vif_states_lock,
++					       flags);
 +		}
 +	}
-+
-+	shutting_down = SHUTDOWN_INVALID; /* could try again */
-+
-+	return 0;
++	
++	accelerator->hooks = NULL;
 +}
 +
-+static int xen_suspend(void *__unused)
++
++/* 
++ * Called by a netfront accelerator when it is unloaded.  This safely
++ * removes the hooks into the plugin and blocks until all devices have
++ * finished using it, so on return it is safe to unload.
++ */
++void netfront_accelerator_stop(const char *frontend)
 +{
-+	int err;
++	struct netfront_accelerator *accelerator;
++	unsigned long flags;
 +
-+	daemonize("suspend");
-+	err = set_cpus_allowed(current, cpumask_of_cpu(0));
-+	if (err) {
-+		printk(KERN_ERR "Xen suspend can't run on CPU0 (%d)\n", err);
-+		goto out;
-+	}
++	mutex_lock(&accelerator_mutex);
++	spin_lock_irqsave(&accelerators_lock, flags);
 +
-+	err = __xen_suspend(fast_suspend);
-+	if (err)
-+		printk(KERN_ERR "Xen suspend failed (%d)\n", err);
++	list_for_each_entry(accelerator, &accelerators_list, link) {
++		if (match_accelerator(frontend, accelerator)) {
++			spin_unlock_irqrestore(&accelerators_lock, flags);
++
++			accelerator_remove_hooks(accelerator);
 +
++			goto out;
++		}
++	}
++	spin_unlock_irqrestore(&accelerators_lock, flags);
 + out:
-+	shutting_down = SHUTDOWN_INVALID;
-+	return 0;
++	mutex_unlock(&accelerator_mutex);
 +}
++EXPORT_SYMBOL_GPL(netfront_accelerator_stop);
 +
-+static void __shutdown_handler(void *unused)
++
++/* Helper for call_remove and do_suspend */
++static int do_remove(struct netfront_info *np, struct xenbus_device *dev,
++		     unsigned long *lock_flags)
 +{
-+	int err;
++	struct netfront_accelerator *accelerator = np->accelerator;
++ 	struct netfront_accel_hooks *hooks;
++ 	int rc = 0;
++ 
++	if (np->accel_vif_state.hooks) {
++		hooks = np->accel_vif_state.hooks;
 +
-+	err = kernel_thread((shutting_down == SHUTDOWN_SUSPEND) ?
-+			    xen_suspend : shutdown_process,
-+			    NULL, CLONE_FS | CLONE_FILES);
++		/* Last chance to get statistics from the accelerator */
++		hooks->get_stats(np->netdev, &np->stats);
 +
-+	if (err < 0) {
-+		printk(KERN_WARNING "Error creating shutdown process (%d): "
-+		       "retrying...\n", -err);
-+		schedule_delayed_work(&shutdown_work, HZ/2);
++		spin_unlock_irqrestore(&accelerator->vif_states_lock, 
++				       *lock_flags);
++
++ 		/* 
++ 		 * Try and do the opposite of accelerator_probe_new_vif
++ 		 * to ensure there's no state pointing back at the 
++ 		 * netdev 
++ 		 */
++		accelerator_remove_single_hook(accelerator, 
++ 					       &np->accel_vif_state);
++
++		rc = accelerator->hooks->remove(dev);
++
++		spin_lock_irqsave(&accelerator->vif_states_lock, *lock_flags);
 +	}
++ 
++ 	return rc;
 +}
 +
-+static void shutdown_handler(struct xenbus_watch *watch,
-+			     const char **vec, unsigned int len)
++
++static int netfront_remove_accelerator(struct netfront_info *np,
++				       struct xenbus_device *dev)
 +{
-+	extern void ctrl_alt_del(void);
-+	char *str;
-+	struct xenbus_transaction xbt;
-+	int err;
++	struct netfront_accelerator *accelerator;
++ 	struct netfront_accel_vif_state *tmp_vif_state;
++  	unsigned long flags;
++	int rc = 0; 
 +
-+	if (shutting_down != SHUTDOWN_INVALID)
-+		return;
++ 	/* Check that we've got a device that was accelerated */
++ 	if (np->accelerator == NULL)
++		return rc;
 +
-+ again:
-+	err = xenbus_transaction_start(&xbt);
-+	if (err)
-+		return;
++	accelerator = np->accelerator;
 +
-+	str = (char *)xenbus_read(xbt, "control", "shutdown", NULL);
-+	/* Ignore read errors and empty reads. */
-+	if (XENBUS_IS_ERR_READ(str)) {
-+		xenbus_transaction_end(xbt, 1);
-+		return;
++	spin_lock_irqsave(&accelerator->vif_states_lock, flags); 
++
++	list_for_each_entry(tmp_vif_state, &accelerator->vif_states,
++			    link) {
++		if (tmp_vif_state == &np->accel_vif_state) {
++			list_del(&np->accel_vif_state.link);
++			break;
++		}
 +	}
 +
-+	xenbus_write(xbt, "control", "shutdown", "");
++	rc = do_remove(np, dev, &flags);
 +
-+	err = xenbus_transaction_end(xbt, 0);
-+	if (err == -EAGAIN) {
-+		kfree(str);
-+		goto again;
-+	}
++	np->accelerator = NULL;
 +
-+	if (strcmp(str, "poweroff") == 0)
-+		shutting_down = SHUTDOWN_POWEROFF;
-+	else if (strcmp(str, "reboot") == 0)
-+		ctrl_alt_del();
-+	else if (strcmp(str, "suspend") == 0)
-+		shutting_down = SHUTDOWN_SUSPEND;
-+	else if (strcmp(str, "halt") == 0)
-+		shutting_down = SHUTDOWN_HALT;
-+	else {
-+		printk("Ignoring shutdown request: %s\n", str);
-+		shutting_down = SHUTDOWN_INVALID;
-+	}
++	spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); 
 +
-+	if (shutting_down != SHUTDOWN_INVALID)
-+		schedule_work(&shutdown_work);
++	return rc;
++}
 +
-+	kfree(str);
++
++int netfront_accelerator_call_remove(struct netfront_info *np,
++				     struct xenbus_device *dev)
++{
++	int rc;
++	netfront_accelerator_remove_watch(np);
++	mutex_lock(&accelerator_mutex);
++	rc = netfront_remove_accelerator(np, dev);
++	mutex_unlock(&accelerator_mutex);
++	return rc;
 +}
 +
-+static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
-+			  unsigned int len)
++  
++int netfront_accelerator_suspend(struct netfront_info *np,
++ 				 struct xenbus_device *dev)
 +{
-+	char sysrq_key = '\0';
-+	struct xenbus_transaction xbt;
-+	int err;
++	unsigned long flags;
++	int rc = 0;
++	
++	netfront_accelerator_remove_watch(np);
 +
-+ again:
-+	err = xenbus_transaction_start(&xbt);
-+	if (err)
-+		return;
-+	if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) {
-+		printk(KERN_ERR "Unable to read sysrq code in "
-+		       "control/sysrq\n");
-+		xenbus_transaction_end(xbt, 1);
-+		return;
-+	}
++	mutex_lock(&accelerator_mutex);
 +
-+	if (sysrq_key != '\0')
-+		xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
++ 	/* Check that we've got a device that was accelerated */
++ 	if (np->accelerator == NULL)
++		goto out;
 +
-+	err = xenbus_transaction_end(xbt, 0);
-+	if (err == -EAGAIN)
-+		goto again;
++	/* 
++	 * Call the remove accelerator hook, but leave the vif_state
++	 * on the accelerator's list in case there is a suspend_cancel.
++	 */
++	spin_lock_irqsave(&np->accelerator->vif_states_lock, flags); 
++	
++	rc = do_remove(np, dev, &flags);
 +
-+#ifdef CONFIG_MAGIC_SYSRQ
-+	if (sysrq_key != '\0')
-+		handle_sysrq(sysrq_key, NULL, NULL);
-+#endif
++	spin_unlock_irqrestore(&np->accelerator->vif_states_lock, flags); 
++ out:
++	mutex_unlock(&accelerator_mutex);
++	return rc;
++}
++  
++  
++int netfront_accelerator_suspend_cancel(struct netfront_info *np,
++ 					struct xenbus_device *dev)
++{
++	/* 
++	 * Setting the watch will cause it to fire and probe the
++	 * accelerator, so no need to call accelerator_probe_new_vif()
++	 * directly here
++	 */
++	if (dev->state == XenbusStateConnected)
++		netfront_accelerator_add_watch(np);
++	return 0;
 +}
++ 
++ 
++void netfront_accelerator_resume(struct netfront_info *np,
++ 				 struct xenbus_device *dev)
++{
++ 	struct netfront_accel_vif_state *accel_vif_state = NULL;
++ 	spinlock_t *vif_states_lock;
++ 	unsigned long flags;
++ 
++ 	mutex_lock(&accelerator_mutex);
 +
-+static struct xenbus_watch shutdown_watch = {
-+	.node = "control/shutdown",
-+	.callback = shutdown_handler
-+};
++	/* Check that we've got a device that was accelerated */
++ 	if (np->accelerator == NULL)
++		goto out;
 +
-+static struct xenbus_watch sysrq_watch = {
-+	.node = "control/sysrq",
-+	.callback = sysrq_handler
-+};
++ 	/* Find the vif_state from the accelerator's list */
++ 	list_for_each_entry(accel_vif_state, &np->accelerator->vif_states, 
++ 			    link) {
++ 		if (accel_vif_state->dev == dev) {
++ 			BUG_ON(accel_vif_state != &np->accel_vif_state);
++ 
++ 			vif_states_lock = &np->accelerator->vif_states_lock;
++			spin_lock_irqsave(vif_states_lock, flags); 
++ 
++ 			/* 
++ 			 * Remove it from the accelerator's list so
++ 			 * state is consistent for probing new vifs
++ 			 * when they get connected
++ 			 */
++ 			list_del(&accel_vif_state->link);
++ 			np->accelerator = NULL;
++ 
++ 			spin_unlock_irqrestore(vif_states_lock, flags); 
++ 			
++			break;
++ 		}
++ 	}
 +
-+static int setup_shutdown_watcher(void)
-+{
-+	int err;
++ out:
++	mutex_unlock(&accelerator_mutex);
++	return;
++}
 +
-+	xenbus_scanf(XBT_NIL, "control",
-+		     "platform-feature-multiprocessor-suspend",
-+		     "%d", &fast_suspend);
 +
-+	err = register_xenbus_watch(&shutdown_watch);
-+	if (err) {
-+		printk(KERN_ERR "Failed to set shutdown watcher\n");
-+		return err;
-+	}
++int netfront_check_accelerator_queue_ready(struct net_device *dev,
++					   struct netfront_info *np)
++{
++	struct netfront_accelerator *accelerator;
++	struct netfront_accel_hooks *hooks;
++	int rc = 1;
++	unsigned long flags;
 +
-+	err = register_xenbus_watch(&sysrq_watch);
-+	if (err) {
-+		printk(KERN_ERR "Failed to set sysrq watcher\n");
-+		return err;
++	accelerator = np->accelerator;
++
++	/* Call the check_ready accelerator hook. */ 
++	if (np->accel_vif_state.hooks && accelerator) {
++		spin_lock_irqsave(&accelerator->vif_states_lock, flags); 
++		hooks = np->accel_vif_state.hooks;
++		if (hooks && np->accelerator == accelerator)
++			rc = np->accel_vif_state.hooks->check_ready(dev);
++		spin_unlock_irqrestore(&accelerator->vif_states_lock, flags);
 +	}
 +
-+	return 0;
++	return rc;
 +}
 +
-+#ifdef CONFIG_XEN
 +
-+static int shutdown_event(struct notifier_block *notifier,
-+			  unsigned long event,
-+			  void *data)
++void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np,
++					     struct net_device *dev)
 +{
-+	setup_shutdown_watcher();
-+	return NOTIFY_DONE;
-+}
++	struct netfront_accelerator *accelerator;
++	struct netfront_accel_hooks *hooks;
++	unsigned long flags;
 +
-+static int __init setup_shutdown_event(void)
-+{
-+	static struct notifier_block xenstore_notifier = {
-+		.notifier_call = shutdown_event
-+	};
-+	register_xenstore_notifier(&xenstore_notifier);
++	accelerator = np->accelerator;
 +
-+	return 0;
++	/* Call the stop_napi_interrupts accelerator hook. */
++	if (np->accel_vif_state.hooks && accelerator != NULL) {
++		spin_lock_irqsave(&accelerator->vif_states_lock, flags); 
++		hooks = np->accel_vif_state.hooks;
++		if (hooks && np->accelerator == accelerator)
++ 			np->accel_vif_state.hooks->stop_napi_irq(dev);
++		spin_unlock_irqrestore(&accelerator->vif_states_lock, flags);
++	}
 +}
 +
-+subsys_initcall(setup_shutdown_event);
-+
-+#else /* !defined(CONFIG_XEN) */
 +
-+int xen_reboot_init(void)
++int netfront_accelerator_call_get_stats(struct netfront_info *np,
++					struct net_device *dev)
 +{
-+	return setup_shutdown_watcher();
++	struct netfront_accelerator *accelerator;
++	struct netfront_accel_hooks *hooks;
++	unsigned long flags;
++	int rc = 0;
++
++	accelerator = np->accelerator;
++
++	/* Call the get_stats accelerator hook. */
++	if (np->accel_vif_state.hooks && accelerator != NULL) {
++		spin_lock_irqsave(&accelerator->vif_states_lock, flags); 
++		hooks = np->accel_vif_state.hooks;
++		if (hooks && np->accelerator == accelerator)
++ 			rc = np->accel_vif_state.hooks->get_stats(dev,
++								  &np->stats);
++		spin_unlock_irqrestore(&accelerator->vif_states_lock, flags);
++	}
++	return rc;
 +}
 +
-+#endif /* !defined(CONFIG_XEN) */
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/core/smpboot.c tmp-linux-2.6-xen.patch/drivers/xen/core/smpboot.c
---- pristine-linux-2.6.18.2/drivers/xen/core/smpboot.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/core/smpboot.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,452 @@
-+/*
-+ *	Xen SMP booting functions
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/netfront/netfront.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/netfront/netfront.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,2240 @@
++/******************************************************************************
++ * Virtual network driver for conversing with remote driver backends.
 + *
-+ *	See arch/i386/kernel/smpboot.c for copyright and credits for derived
-+ *	portions of this file.
++ * Copyright (c) 2002-2005, K A Fraser
++ * Copyright (c) 2005, XenSource Ltd
++ * Copyright (C) 2007 Solarflare Communications, Inc.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
 + */
 +
 +#include <linux/module.h>
-+#include <linux/init.h>
++#include <linux/version.h>
 +#include <linux/kernel.h>
-+#include <linux/mm.h>
 +#include <linux/sched.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/smp_lock.h>
-+#include <linux/irq.h>
-+#include <linux/bootmem.h>
-+#include <linux/notifier.h>
-+#include <linux/cpu.h>
-+#include <linux/percpu.h>
-+#include <asm/desc.h>
-+#include <asm/arch_hooks.h>
-+#include <asm/pgalloc.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <linux/init.h>
++#include <linux/bitops.h>
++#include <linux/ethtool.h>
++#include <linux/in.h>
++#include <linux/if_ether.h>
++#include <linux/io.h>
++#include <linux/moduleparam.h>
++#include <net/sock.h>
++#include <net/pkt_sched.h>
++#include <net/arp.h>
++#include <net/route.h>
++#include <asm/uaccess.h>
 +#include <xen/evtchn.h>
-+#include <xen/interface/vcpu.h>
-+#include <xen/cpu_hotplug.h>
 +#include <xen/xenbus.h>
++#include <xen/interface/io/netif.h>
++#include <xen/interface/memory.h>
++#include <xen/balloon.h>
++#include <asm/page.h>
++#include <asm/maddr.h>
++#include <asm/uaccess.h>
++#include <xen/interface/grant_table.h>
++#include <xen/gnttab.h>
 +
-+extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
-+extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);
++struct netfront_cb {
++	struct page *page;
++	unsigned offset;
++};
 +
-+extern int local_setup_timer(unsigned int cpu);
-+extern void local_teardown_timer(unsigned int cpu);
++#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))
 +
-+extern void hypervisor_callback(void);
-+extern void failsafe_callback(void);
-+extern void system_call(void);
-+extern void smp_trap_init(trap_info_t *);
++#include "netfront.h"
 +
-+/* Number of siblings per CPU package */
-+int smp_num_siblings = 1;
-+int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
-+EXPORT_SYMBOL(phys_proc_id);
-+int cpu_core_id[NR_CPUS]; /* Core ID of each logical CPU */
-+EXPORT_SYMBOL(cpu_core_id);
++/*
++ * Mutually-exclusive module options to select receive data path:
++ *  rx_copy : Packets are copied by network backend into local memory
++ *  rx_flip : Page containing packet data is transferred to our ownership
++ * For fully-virtualised guests there is no option - copying must be used.
++ * For paravirtualised guests, flipping is the default.
++ */
++#ifdef CONFIG_XEN
++static int MODPARM_rx_copy = 0;
++module_param_named(rx_copy, MODPARM_rx_copy, bool, 0);
++MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)");
++static int MODPARM_rx_flip = 0;
++module_param_named(rx_flip, MODPARM_rx_flip, bool, 0);
++MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)");
++#else
++static const int MODPARM_rx_copy = 1;
++static const int MODPARM_rx_flip = 0;
++#endif
++
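
The rx_copy/rx_flip parameters above are mutually exclusive by design; the actual copy-versus-flip decision is negotiated with the backend later (see the request-rx-copy write in talk_to_backend() below). A load-time sanity check along the following lines would reject contradictory options early; this is only a sketch, the helper name is hypothetical and it is not part of this patch:

	/* Hypothetical: fail module load if both rx modes are forced. */
	static int __init netfront_check_rx_mode(void)
	{
		if (MODPARM_rx_copy && MODPARM_rx_flip) {
			printk(KERN_WARNING "netfront: rx_copy and rx_flip "
			       "are mutually exclusive\n");
			return -EINVAL;
		}
		return 0;
	}
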
++#define RX_COPY_THRESHOLD 256
++
++/* If we don't have GSO, fake things up so that we never try to use it. */
++#if defined(NETIF_F_GSO)
++#define HAVE_GSO			1
++#define HAVE_TSO			1 /* TSO is a subset of GSO */
++#define HAVE_CSUM_OFFLOAD		1
++static inline void dev_disable_gso_features(struct net_device *dev)
++{
++	/* Turn off all GSO bits except ROBUST. */
++	dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1;
++	dev->features |= NETIF_F_GSO_ROBUST;
++}
++#elif defined(NETIF_F_TSO)
++#define HAVE_GSO		       0
++#define HAVE_TSO                       1
 +
-+cpumask_t cpu_online_map;
-+EXPORT_SYMBOL(cpu_online_map);
-+cpumask_t cpu_possible_map;
-+EXPORT_SYMBOL(cpu_possible_map);
-+cpumask_t cpu_initialized_map;
++/* Some older kernels cannot cope with incorrect checksums,
++ * particularly in netfilter. I'm not sure there is 100% correlation
++ * with the presence of NETIF_F_TSO but it appears to be a good first
++ * approximation.
++ */
++#define HAVE_CSUM_OFFLOAD              0
 +
-+struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
-+EXPORT_SYMBOL(cpu_data);
++#define gso_size tso_size
++#define gso_segs tso_segs
++static inline void dev_disable_gso_features(struct net_device *dev)
++{
++       /* Turn off all TSO bits. */
++       dev->features &= ~NETIF_F_TSO;
++}
++static inline int skb_is_gso(const struct sk_buff *skb)
++{
++        return skb_shinfo(skb)->tso_size;
++}
++static inline int skb_gso_ok(struct sk_buff *skb, int features)
++{
++        return (features & NETIF_F_TSO);
++}
 +
-+#ifdef CONFIG_HOTPLUG_CPU
-+DEFINE_PER_CPU(int, cpu_state) = { 0 };
++static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
++{
++        return skb_is_gso(skb) &&
++               (!skb_gso_ok(skb, dev->features) ||
++                unlikely(skb->ip_summed != CHECKSUM_HW));
++}
++#else
++#define HAVE_GSO			0
++#define HAVE_TSO			0
++#define HAVE_CSUM_OFFLOAD		0
++#define netif_needs_gso(dev, skb)	0
++#define dev_disable_gso_features(dev)	((void)0)
++#define ethtool_op_set_tso(dev, data)	(-ENOSYS)
 +#endif
 +
-+static DEFINE_PER_CPU(int, resched_irq);
-+static DEFINE_PER_CPU(int, callfunc_irq);
-+static char resched_name[NR_CPUS][15];
-+static char callfunc_name[NR_CPUS][15];
-+
-+u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++#define GRANT_INVALID_REF	0
 +
-+void *xquad_portio;
++struct netfront_rx_info {
++	struct netif_rx_response rx;
++	struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
++};
 +
-+cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
-+cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
-+EXPORT_SYMBOL(cpu_core_map);
++/*
++ * Implement our own carrier flag: the network stack's version causes delays
++ * when the carrier is re-enabled (in particular, dev_activate() may not
++ * immediately be called, which can cause packet loss).
++ */
++#define netfront_carrier_on(netif)	((netif)->carrier = 1)
++#define netfront_carrier_off(netif)	((netif)->carrier = 0)
++#define netfront_carrier_ok(netif)	((netif)->carrier)
 +
-+#if defined(__i386__)
-+u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = 0xff };
-+EXPORT_SYMBOL(x86_cpu_to_apicid);
-+#elif !defined(CONFIG_X86_IO_APIC)
-+unsigned int maxcpus = NR_CPUS;
-+#endif
++/*
++ * Access macros for acquiring/freeing slots in tx_skbs[].
++ */
 +
-+void __init prefill_possible_map(void)
++static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id)
 +{
-+	int i, rc;
++	list[id] = list[0];
++	list[0]  = (void *)(unsigned long)id;
++}
 +
-+	for_each_possible_cpu(i)
-+	    if (i != smp_processor_id())
-+		return;
++static inline unsigned short get_id_from_freelist(struct sk_buff **list)
++{
++	unsigned int id = (unsigned int)(unsigned long)list[0];
++	list[0] = list[id];
++	return id;
++}
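
The two helpers above overload tx_skbs[]: entry 0 is never a real skb but the freelist head, and each free slot stores the index of the next free slot, smuggled through the pointer type. A standalone userspace rendering of the same push/pop logic, runnable as-is (illustrative only, not part of the patch):

	#include <assert.h>

	/* list[0] is the head; free entries hold the next free index. */
	static void push_id(void **list, unsigned long id)
	{
		list[id] = list[0];
		list[0]  = (void *)id;
	}

	static unsigned long pop_id(void **list)
	{
		unsigned long id = (unsigned long)list[0];
		list[0] = list[id];
		return id;
	}

	int main(void)
	{
		/* Slots 1..3 free, linked 0 -> 1 -> 2 -> 3. */
		void *list[4] = { (void *)1, (void *)2, (void *)3, 0 };

		assert(pop_id(list) == 1);	/* head advances to slot 2 */
		push_id(list, 1);		/* slot 1 becomes head again */
		assert(pop_id(list) == 1);
		assert(pop_id(list) == 2);
		return 0;
	}
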
 +
-+	for (i = 0; i < NR_CPUS; i++) {
-+		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
-+		if (rc >= 0)
-+			cpu_set(i, cpu_possible_map);
-+	}
++static inline int xennet_rxidx(RING_IDX idx)
++{
++	return idx & (NET_RX_RING_SIZE - 1);
 +}
 +
-+void __init smp_alloc_memory(void)
++static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
++						RING_IDX ri)
 +{
++	int i = xennet_rxidx(ri);
++	struct sk_buff *skb = np->rx_skbs[i];
++	np->rx_skbs[i] = NULL;
++	return skb;
 +}
 +
-+static inline void
-+set_cpu_sibling_map(int cpu)
++static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
++					    RING_IDX ri)
 +{
-+	phys_proc_id[cpu] = cpu;
-+	cpu_core_id[cpu]  = 0;
++	int i = xennet_rxidx(ri);
++	grant_ref_t ref = np->grant_rx_ref[i];
++	np->grant_rx_ref[i] = GRANT_INVALID_REF;
++	return ref;
++}
 +
-+	cpu_sibling_map[cpu] = cpumask_of_cpu(cpu);
-+	cpu_core_map[cpu]    = cpumask_of_cpu(cpu);
++#define DPRINTK(fmt, args...)				\
++	pr_debug("netfront (%s:%d) " fmt,		\
++		 __FUNCTION__, __LINE__, ##args)
++#define IPRINTK(fmt, args...)				\
++	printk(KERN_INFO "netfront: " fmt, ##args)
++#define WPRINTK(fmt, args...)				\
++	printk(KERN_WARNING "netfront: " fmt, ##args)
 +
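
For reference, a call such as the one below (arguments illustrative) expands with the enclosing function name and line number filled in; IPRINTK and WPRINTK only prepend the "netfront: " prefix at KERN_INFO and KERN_WARNING severity:

	DPRINTK("rx->status=%d\n", rx->status);
	/* -> pr_debug("netfront (%s:%d) rx->status=%d\n",
	 *             __FUNCTION__, __LINE__, rx->status); */
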
-+	cpu_data[cpu].booted_cores = 1;
-+}
++static int setup_device(struct xenbus_device *, struct netfront_info *);
++static struct net_device *create_netdev(struct xenbus_device *);
 +
-+static void
-+remove_siblinginfo(int cpu)
-+{
-+	phys_proc_id[cpu] = BAD_APICID;
-+	cpu_core_id[cpu]  = BAD_APICID;
++static void end_access(int, void *);
++static void netif_disconnect_backend(struct netfront_info *);
 +
-+	cpus_clear(cpu_sibling_map[cpu]);
-+	cpus_clear(cpu_core_map[cpu]);
++static int network_connect(struct net_device *);
++static void network_tx_buf_gc(struct net_device *);
++static void network_alloc_rx_buffers(struct net_device *);
++static void send_fake_arp(struct net_device *);
 +
-+	cpu_data[cpu].booted_cores = 0;
++static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs);
++
++#ifdef CONFIG_SYSFS
++static int xennet_sysfs_addif(struct net_device *netdev);
++static void xennet_sysfs_delif(struct net_device *netdev);
++#else /* !CONFIG_SYSFS */
++#define xennet_sysfs_addif(dev) (0)
++#define xennet_sysfs_delif(dev) do { } while(0)
++#endif
++
++static inline int xennet_can_sg(struct net_device *dev)
++{
++	return dev->features & NETIF_F_SG;
 +}
 +
-+static int xen_smp_intr_init(unsigned int cpu)
++/**
++ * Entry point to this code when a new device is created.  Allocate the basic
++ * structures and the ring buffers for communication with the backend, and
++ * inform the backend of the appropriate details for those.
++ */
++static int __devinit netfront_probe(struct xenbus_device *dev,
++				    const struct xenbus_device_id *id)
 +{
-+	int rc;
++	int err;
++	struct net_device *netdev;
++	struct netfront_info *info;
 +
-+	per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) = -1;
++	netdev = create_netdev(dev);
++	if (IS_ERR(netdev)) {
++		err = PTR_ERR(netdev);
++		xenbus_dev_fatal(dev, err, "creating netdev");
++		return err;
++	}
 +
-+	sprintf(resched_name[cpu], "resched%d", cpu);
-+	rc = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR,
-+				    cpu,
-+				    smp_reschedule_interrupt,
-+				    SA_INTERRUPT,
-+				    resched_name[cpu],
-+				    NULL);
-+	if (rc < 0)
-+		goto fail;
-+	per_cpu(resched_irq, cpu) = rc;
++	info = netdev_priv(netdev);
++	dev->dev.driver_data = info;
 +
-+	sprintf(callfunc_name[cpu], "callfunc%d", cpu);
-+	rc = bind_ipi_to_irqhandler(CALL_FUNCTION_VECTOR,
-+				    cpu,
-+				    smp_call_function_interrupt,
-+				    SA_INTERRUPT,
-+				    callfunc_name[cpu],
-+				    NULL);
-+	if (rc < 0)
++	err = register_netdev(info->netdev);
++	if (err) {
++		printk(KERN_WARNING "%s: register_netdev err=%d\n",
++		       __FUNCTION__, err);
 +		goto fail;
-+	per_cpu(callfunc_irq, cpu) = rc;
++	}
 +
-+	if ((cpu != 0) && ((rc = local_setup_timer(cpu)) != 0))
++	err = xennet_sysfs_addif(info->netdev);
++	if (err) {
++		unregister_netdev(info->netdev);
++		printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
++		       __FUNCTION__, err);
 +		goto fail;
++	}
 +
 +	return 0;
 +
 + fail:
-+	if (per_cpu(resched_irq, cpu) >= 0)
-+		unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
-+	if (per_cpu(callfunc_irq, cpu) >= 0)
-+		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
-+	return rc;
++	free_netdev(netdev);
++	dev->dev.driver_data = NULL;
++	return err;
 +}
 +
-+#ifdef CONFIG_HOTPLUG_CPU
-+static void xen_smp_intr_exit(unsigned int cpu)
++static int __devexit netfront_remove(struct xenbus_device *dev)
 +{
-+	if (cpu != 0)
-+		local_teardown_timer(cpu);
++	struct netfront_info *info = dev->dev.driver_data;
 +
-+	unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
-+	unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
-+}
-+#endif
++	DPRINTK("%s\n", dev->nodename);
 +
-+void cpu_bringup(void)
-+{
-+	cpu_init();
-+	touch_softlockup_watchdog();
-+	preempt_disable();
-+	local_irq_enable();
-+}
++	netfront_accelerator_call_remove(info, dev);
 +
-+static void cpu_bringup_and_idle(void)
-+{
-+	cpu_bringup();
-+	cpu_idle();
-+}
++	netif_disconnect_backend(info);
 +
-+static void cpu_initialize_context(unsigned int cpu)
-+{
-+	vcpu_guest_context_t ctxt;
-+	struct task_struct *idle = idle_task(cpu);
-+#ifdef __x86_64__
-+	struct desc_ptr *gdt_descr = &cpu_gdt_descr[cpu];
-+#else
-+	struct Xgt_desc_struct *gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
-+#endif
++	del_timer_sync(&info->rx_refill_timer);
 +
-+	if (cpu_test_and_set(cpu, cpu_initialized_map))
-+		return;
++	xennet_sysfs_delif(info->netdev);
 +
-+	memset(&ctxt, 0, sizeof(ctxt));
++	unregister_netdev(info->netdev);
 +
-+	ctxt.flags = VGCF_IN_KERNEL;
-+	ctxt.user_regs.ds = __USER_DS;
-+	ctxt.user_regs.es = __USER_DS;
-+	ctxt.user_regs.fs = 0;
-+	ctxt.user_regs.gs = 0;
-+	ctxt.user_regs.ss = __KERNEL_DS;
-+	ctxt.user_regs.eip = (unsigned long)cpu_bringup_and_idle;
-+	ctxt.user_regs.eflags = X86_EFLAGS_IF | 0x1000; /* IOPL_RING1 */
++	free_netdev(info->netdev);
 +
-+	memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
++	return 0;
++}
 +
-+	smp_trap_init(ctxt.trap_ctxt);
 +
-+	ctxt.ldt_ents = 0;
++static int netfront_suspend(struct xenbus_device *dev)
++{
++	struct netfront_info *info = dev->dev.driver_data;
++	return netfront_accelerator_suspend(info, dev);
++}
 +
-+	ctxt.gdt_frames[0] = virt_to_mfn(gdt_descr->address);
-+	ctxt.gdt_ents      = gdt_descr->size / 8;
 +
-+#ifdef __i386__
-+	ctxt.user_regs.cs = __KERNEL_CS;
-+	ctxt.user_regs.esp = idle->thread.esp0 - sizeof(struct pt_regs);
++static int netfront_suspend_cancel(struct xenbus_device *dev)
++{
++	struct netfront_info *info = dev->dev.driver_data;
++	return netfront_accelerator_suspend_cancel(info, dev);
++}
 +
-+	ctxt.kernel_ss = __KERNEL_DS;
-+	ctxt.kernel_sp = idle->thread.esp0;
 +
-+	ctxt.event_callback_cs     = __KERNEL_CS;
-+	ctxt.event_callback_eip    = (unsigned long)hypervisor_callback;
-+	ctxt.failsafe_callback_cs  = __KERNEL_CS;
-+	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
++/**
++ * We are reconnecting to the backend, due to a suspend/resume, or a backend
++ * driver restart.  We tear down our netif structure and recreate it, but
++ * leave the device-layer structures intact so that this is transparent to the
++ * rest of the kernel.
++ */
++static int netfront_resume(struct xenbus_device *dev)
++{
++	struct netfront_info *info = dev->dev.driver_data;
 +
-+	ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
-+#else /* __x86_64__ */
-+	ctxt.user_regs.cs = __KERNEL_CS;
-+	ctxt.user_regs.esp = idle->thread.rsp0 - sizeof(struct pt_regs);
++	DPRINTK("%s\n", dev->nodename);
 +
-+	ctxt.kernel_ss = __KERNEL_DS;
-+	ctxt.kernel_sp = idle->thread.rsp0;
++	netfront_accelerator_resume(info, dev);
 +
-+	ctxt.event_callback_eip    = (unsigned long)hypervisor_callback;
-+	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
-+	ctxt.syscall_callback_eip  = (unsigned long)system_call;
++	netif_disconnect_backend(info);
++	return 0;
++}
 +
-+	ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(init_level4_pgt));
++static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
++{
++	char *s, *e, *macstr;
++	int i;
 +
-+	ctxt.gs_base_kernel = (unsigned long)(cpu_pda(cpu));
-+#endif
++	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
++	if (IS_ERR(macstr))
++		return PTR_ERR(macstr);
++
++	for (i = 0; i < ETH_ALEN; i++) {
++		mac[i] = simple_strtoul(s, &e, 16);
++		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
++			kfree(macstr);
++			return -ENOENT;
++		}
++		s = e+1;
++	}
 +
-+	BUG_ON(HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt));
++	kfree(macstr);
++	return 0;
 +}
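
xen_net_read_mac() expects the xenstore "mac" node to hold a string such as "00:16:3e:12:34:56" (00:16:3e being the OUI conventionally used by the Xen tools); each simple_strtoul() call must consume hex digits and stop exactly on ':' (or the final NUL), otherwise the parse fails with -ENOENT. A userspace analogue of the loop, runnable as-is, with strtoul standing in for simple_strtoul:

	#include <stdio.h>
	#include <stdlib.h>

	static int parse_mac(const char *s, unsigned char mac[6])
	{
		char *e;
		int i;

		for (i = 0; i < 6; i++) {
			mac[i] = (unsigned char)strtoul(s, &e, 16);
			if (s == e || *e != (i == 5 ? '\0' : ':'))
				return -1;	/* mirrors the -ENOENT above */
			s = e + 1;
		}
		return 0;
	}

	int main(void)
	{
		unsigned char mac[6];

		if (parse_mac("00:16:3e:12:34:56", mac) == 0)
			printf("%02x...%02x\n", mac[0], mac[5]);
		return 0;
	}
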
 +
-+void __init smp_prepare_cpus(unsigned int max_cpus)
++/* Common code used when first setting up, and when resuming. */
++static int talk_to_backend(struct xenbus_device *dev,
++			   struct netfront_info *info)
 +{
-+	int cpu;
-+	struct task_struct *idle;
-+#ifdef __x86_64__
-+	struct desc_ptr *gdt_descr;
-+#else
-+	struct Xgt_desc_struct *gdt_descr;
-+#endif
++	const char *message;
++	struct xenbus_transaction xbt;
++	int err;
 +
-+	boot_cpu_data.apicid = 0;
-+	cpu_data[0] = boot_cpu_data;
++	/* Read mac only in the first setup. */
++	if (!is_valid_ether_addr(info->mac)) {
++		err = xen_net_read_mac(dev, info->mac);
++		if (err) {
++			xenbus_dev_fatal(dev, err, "parsing %s/mac",
++					 dev->nodename);
++			goto out;
++		}
++	}
 +
-+	cpu_2_logical_apicid[0] = 0;
-+	x86_cpu_to_apicid[0] = 0;
++	/* Create shared ring, alloc event channel. */
++	err = setup_device(dev, info);
++	if (err)
++		goto out;
 +
-+	current_thread_info()->cpu = 0;
++	/* This will load an accelerator, if one is configured, when
++	 * the watch fires. */
++	netfront_accelerator_add_watch(info);
 +
-+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-+		cpus_clear(cpu_sibling_map[cpu]);
-+		cpus_clear(cpu_core_map[cpu]);
++again:
++	err = xenbus_transaction_start(&xbt);
++	if (err) {
++		xenbus_dev_fatal(dev, err, "starting transaction");
++		goto destroy_ring;
 +	}
 +
-+	set_cpu_sibling_map(0);
++	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u",
++			    info->tx_ring_ref);
++	if (err) {
++		message = "writing tx ring-ref";
++		goto abort_transaction;
++	}
++	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u",
++			    info->rx_ring_ref);
++	if (err) {
++		message = "writing rx ring-ref";
++		goto abort_transaction;
++	}
++	err = xenbus_printf(xbt, dev->nodename,
++			    "event-channel", "%u",
++			    irq_to_evtchn_port(info->irq));
++	if (err) {
++		message = "writing event-channel";
++		goto abort_transaction;
++	}
 +
-+	if (xen_smp_intr_init(0))
-+		BUG();
++	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
++			    info->copying_receiver);
++	if (err) {
++		message = "writing request-rx-copy";
++		goto abort_transaction;
++	}
 +
-+	cpu_initialized_map = cpumask_of_cpu(0);
++	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
++	if (err) {
++		message = "writing feature-rx-notify";
++		goto abort_transaction;
++	}
 +
-+	/* Restrict the possible_map according to max_cpus. */
-+	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
-+		for (cpu = NR_CPUS-1; !cpu_isset(cpu, cpu_possible_map); cpu--)
-+			continue;
-+		cpu_clear(cpu, cpu_possible_map);
++	err = xenbus_printf(xbt, dev->nodename, "feature-no-csum-offload",
++			    "%d", !HAVE_CSUM_OFFLOAD);
++	if (err) {
++		message = "writing feature-no-csum-offload";
++		goto abort_transaction;
 +	}
 +
-+	for_each_possible_cpu (cpu) {
-+		if (cpu == 0)
-+			continue;
++	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
++	if (err) {
++		message = "writing feature-sg";
++		goto abort_transaction;
++	}
 +
-+#ifdef __x86_64__
-+		gdt_descr = &cpu_gdt_descr[cpu];
-+#else
-+		gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
-+#endif
-+		gdt_descr->address = get_zeroed_page(GFP_KERNEL);
-+		if (unlikely(!gdt_descr->address)) {
-+			printk(KERN_CRIT "CPU%d failed to allocate GDT\n",
-+			       cpu);
-+			continue;
-+		}
-+		gdt_descr->size = GDT_SIZE;
-+		memcpy((void *)gdt_descr->address, cpu_gdt_table, GDT_SIZE);
-+		make_page_readonly(
-+			(void *)gdt_descr->address,
-+			XENFEAT_writable_descriptor_tables);
++	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d",
++			    HAVE_TSO);
++	if (err) {
++		message = "writing feature-gso-tcpv4";
++		goto abort_transaction;
++	}
 +
-+		cpu_data[cpu] = boot_cpu_data;
-+		cpu_data[cpu].apicid = cpu;
++	err = xenbus_transaction_end(xbt, 0);
++	if (err) {
++		if (err == -EAGAIN)
++			goto again;
++		xenbus_dev_fatal(dev, err, "completing transaction");
++		goto destroy_ring;
++	}
 +
-+		cpu_2_logical_apicid[cpu] = cpu;
-+		x86_cpu_to_apicid[cpu] = cpu;
++	return 0;
 +
-+		idle = fork_idle(cpu);
-+		if (IS_ERR(idle))
-+			panic("failed fork for CPU %d", cpu);
++ abort_transaction:
++	xenbus_transaction_end(xbt, 1);
++	xenbus_dev_fatal(dev, err, "%s", message);
++ destroy_ring:
++	netfront_accelerator_call_remove(info, dev);
++	netif_disconnect_backend(info);
++ out:
++	return err;
++}
 +
-+#ifdef __x86_64__
-+		cpu_pda(cpu)->pcurrent = idle;
-+		cpu_pda(cpu)->cpunumber = cpu;
-+		clear_ti_thread_flag(idle->thread_info, TIF_FORK);
-+#endif
++static int setup_device(struct xenbus_device *dev, struct netfront_info *info)
++{
++	struct netif_tx_sring *txs;
++	struct netif_rx_sring *rxs;
++	int err;
++	struct net_device *netdev = info->netdev;
 +
-+		irq_ctx_init(cpu);
++	info->tx_ring_ref = GRANT_INVALID_REF;
++	info->rx_ring_ref = GRANT_INVALID_REF;
++	info->rx.sring = NULL;
++	info->tx.sring = NULL;
++	info->irq = 0;
 +
-+#ifdef CONFIG_HOTPLUG_CPU
-+		if (is_initial_xendomain())
-+			cpu_set(cpu, cpu_present_map);
-+#else
-+		cpu_set(cpu, cpu_present_map);
-+#endif
++	txs = (struct netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
++	if (!txs) {
++		err = -ENOMEM;
++		xenbus_dev_fatal(dev, err, "allocating tx ring page");
++		goto fail;
 +	}
++	SHARED_RING_INIT(txs);
++	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
 +
-+	init_xenbus_allowed_cpumask();
++	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
++	if (err < 0) {
++		free_page((unsigned long)txs);
++		goto fail;
++	}
++	info->tx_ring_ref = err;
 +
-+#ifdef CONFIG_X86_IO_APIC
-+	/*
-+	 * Here we can be sure that there is an IO-APIC in the system. Let's
-+	 * go and set it up:
-+	 */
-+	if (!skip_ioapic_setup && nr_ioapics)
-+		setup_IO_APIC();
-+#endif
-+}
++	rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
++	if (!rxs) {
++		err = -ENOMEM;
++		xenbus_dev_fatal(dev, err, "allocating rx ring page");
++		goto fail;
++	}
++	SHARED_RING_INIT(rxs);
++	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
 +
-+void __devinit smp_prepare_boot_cpu(void)
-+{
-+	prefill_possible_map();
-+}
++	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
++	if (err < 0) {
++		free_page((unsigned long)rxs);
++		goto fail;
++	}
++	info->rx_ring_ref = err;
 +
-+#ifdef CONFIG_HOTPLUG_CPU
++	memcpy(netdev->dev_addr, info->mac, ETH_ALEN);
++
++	err = bind_listening_port_to_irqhandler(
++		dev->otherend_id, netif_int, SA_SAMPLE_RANDOM, netdev->name,
++		netdev);
++	if (err < 0)
++		goto fail;
++	info->irq = err;
 +
-+/*
-+ * Initialize cpu_present_map late to skip SMP boot code in init/main.c.
-+ * But do it early enough to catch critical for_each_present_cpu() loops
-+ * in i386-specific code.
-+ */
-+static int __init initialize_cpu_present_map(void)
-+{
-+	cpu_present_map = cpu_possible_map;
 +	return 0;
++
++ fail:
++	return err;
 +}
-+core_initcall(initialize_cpu_present_map);
 +
-+int __cpu_disable(void)
++/**
++ * Callback received when the backend's state changes.
++ */
++static void backend_changed(struct xenbus_device *dev,
++			    enum xenbus_state backend_state)
 +{
-+	cpumask_t map = cpu_online_map;
-+	int cpu = smp_processor_id();
++	struct netfront_info *np = dev->dev.driver_data;
++	struct net_device *netdev = np->netdev;
 +
-+	if (cpu == 0)
-+		return -EBUSY;
++	DPRINTK("%s\n", xenbus_strstate(backend_state));
 +
-+	remove_siblinginfo(cpu);
++	switch (backend_state) {
++	case XenbusStateInitialising:
++	case XenbusStateInitialised:
++	case XenbusStateConnected:
++	case XenbusStateReconfiguring:
++	case XenbusStateReconfigured:
++	case XenbusStateUnknown:
++	case XenbusStateClosed:
++		break;
 +
-+	cpu_clear(cpu, map);
-+	fixup_irqs(map);
-+	cpu_clear(cpu, cpu_online_map);
++	case XenbusStateInitWait:
++		if (dev->state != XenbusStateInitialising)
++			break;
++		if (network_connect(netdev) != 0)
++			break;
++		xenbus_switch_state(dev, XenbusStateConnected);
++		send_fake_arp(netdev);
++		break;
 +
-+	return 0;
++	case XenbusStateClosing:
++		xenbus_frontend_closed(dev);
++		break;
++	}
 +}
 +
-+void __cpu_die(unsigned int cpu)
++/** Send a packet on a net device to encourage switches to learn the
++ * MAC. We send a fake (gratuitous) ARP reply.
++ *
++ * @param dev device
++ */
++static void send_fake_arp(struct net_device *dev)
 +{
-+	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
-+		current->state = TASK_UNINTERRUPTIBLE;
-+		schedule_timeout(HZ/10);
-+	}
++#ifdef CONFIG_INET
++	struct sk_buff *skb;
++	u32             src_ip, dst_ip;
 +
-+	xen_smp_intr_exit(cpu);
++	dst_ip = INADDR_BROADCAST;
++	src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
 +
-+	if (num_online_cpus() == 1)
-+		alternatives_smp_switch(0);
-+}
++	/* No IP? Then nothing to do. */
++	if (src_ip == 0)
++		return;
++
++	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
++			 dst_ip, dev, src_ip,
++			 /*dst_hw*/ NULL, /*src_hw*/ NULL,
++			 /*target_hw*/ dev->dev_addr);
++	if (skb == NULL)
++		return;
 +
-+#else /* !CONFIG_HOTPLUG_CPU */
++	dev_queue_xmit(skb);
++#endif
++}
 +
-+int __cpu_disable(void)
++static inline int netfront_tx_slot_available(struct netfront_info *np)
 +{
-+	return -ENOSYS;
++	return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
++		(TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
 +}
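
To put numbers on the headroom test above (values assumed from netfront.h and the 2.6.18 skbuff headers, outside this hunk): with TX_MAX_TARGET = 256 and MAX_SKB_FRAGS = 18 on 4 KiB pages, the slot check reads

	req_prod_pvt - rsp_cons < 256 - 18 - 2 = 236

which keeps enough ring space for one worst-case skb: up to MAX_SKB_FRAGS + 1 request slots (see the frag accounting in network_start_xmit() below) plus one extra-info slot for GSO.
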
 +
-+void __cpu_die(unsigned int cpu)
++
++static inline void network_maybe_wake_tx(struct net_device *dev)
 +{
-+	BUG();
++	struct netfront_info *np = netdev_priv(dev);
++
++	if (unlikely(netif_queue_stopped(dev)) &&
++	    netfront_tx_slot_available(np) &&
++	    likely(netif_running(dev)) &&
++	    netfront_check_accelerator_queue_ready(dev, np))
++		netif_wake_queue(dev);
 +}
 +
-+#endif /* CONFIG_HOTPLUG_CPU */
 +
-+int __devinit __cpu_up(unsigned int cpu)
++int netfront_check_queue_ready(struct net_device *dev)
 +{
-+	int rc;
++	struct netfront_info *np = netdev_priv(dev);
 +
-+	rc = cpu_up_check(cpu);
-+	if (rc)
-+		return rc;
++	return unlikely(netif_queue_stopped(dev)) &&
++		netfront_tx_slot_available(np) &&
++		likely(netif_running(dev));
++}
++EXPORT_SYMBOL(netfront_check_queue_ready);
 +
-+	cpu_initialize_context(cpu);
 +
-+	if (num_online_cpus() == 1)
-+		alternatives_smp_switch(1);
++static int network_open(struct net_device *dev)
++{
++	struct netfront_info *np = netdev_priv(dev);
 +
-+	/* This must be done before setting cpu_online_map */
-+	set_cpu_sibling_map(cpu);
-+	wmb();
++	memset(&np->stats, 0, sizeof(np->stats));
 +
-+	rc = xen_smp_intr_init(cpu);
-+	if (rc) {
-+		remove_siblinginfo(cpu);
-+		return rc;
-+	}
++	spin_lock_bh(&np->rx_lock);
++	if (netfront_carrier_ok(np)) {
++		network_alloc_rx_buffers(dev);
++		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
++		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) {
++			netfront_accelerator_call_stop_napi_irq(np, dev);
 +
-+	cpu_set(cpu, cpu_online_map);
++			netif_rx_schedule(dev);
++		}
++	}
++	spin_unlock_bh(&np->rx_lock);
 +
-+	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
-+	BUG_ON(rc);
++	network_maybe_wake_tx(dev);
 +
 +	return 0;
 +}
 +
-+void __init smp_cpus_done(unsigned int max_cpus)
-+{
-+}
-+
-+#ifndef CONFIG_X86_LOCAL_APIC
-+int setup_profiling_timer(unsigned int multiplier)
++static void network_tx_buf_gc(struct net_device *dev)
 +{
-+	return -EINVAL;
-+}
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/core/xen_proc.c tmp-linux-2.6-xen.patch/drivers/xen/core/xen_proc.c
---- pristine-linux-2.6.18.2/drivers/xen/core/xen_proc.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/core/xen_proc.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,23 @@
-+
-+#include <linux/module.h>
-+#include <linux/proc_fs.h>
-+#include <xen/xen_proc.h>
++	RING_IDX cons, prod;
++	unsigned short id;
++	struct netfront_info *np = netdev_priv(dev);
++	struct sk_buff *skb;
 +
-+static struct proc_dir_entry *xen_base;
++	BUG_ON(!netfront_carrier_ok(np));
 +
-+struct proc_dir_entry *create_xen_proc_entry(const char *name, mode_t mode)
-+{
-+	if ( xen_base == NULL )
-+		if ( (xen_base = proc_mkdir("xen", &proc_root)) == NULL )
-+			panic("Couldn't create /proc/xen");
-+	return create_proc_entry(name, mode, xen_base);
-+}
++	do {
++		prod = np->tx.sring->rsp_prod;
++		rmb(); /* Ensure we see responses up to 'rp'. */
 +
-+EXPORT_SYMBOL_GPL(create_xen_proc_entry); 
++		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
++			struct netif_tx_response *txrsp;
 +
-+void remove_xen_proc_entry(const char *name)
-+{
-+	remove_proc_entry(name, xen_base);
-+}
++			txrsp = RING_GET_RESPONSE(&np->tx, cons);
++			if (txrsp->status == NETIF_RSP_NULL)
++				continue;
 +
-+EXPORT_SYMBOL_GPL(remove_xen_proc_entry); 
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/core/xen_sysfs.c tmp-linux-2.6-xen.patch/drivers/xen/core/xen_sysfs.c
---- pristine-linux-2.6.18.2/drivers/xen/core/xen_sysfs.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/core/xen_sysfs.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,378 @@
-+/*
-+ *  copyright (c) 2006 IBM Corporation
-+ *  Authored by: Mike D. Day <ncmike at us.ibm.com>
-+ *
-+ *  This program is free software; you can redistribute it and/or modify
-+ *  it under the terms of the GNU General Public License version 2 as
-+ *  published by the Free Software Foundation.
-+ */
++			id  = txrsp->id;
++			skb = np->tx_skbs[id];
++			if (unlikely(gnttab_query_foreign_access(
++				np->grant_tx_ref[id]) != 0)) {
++				printk(KERN_ALERT "network_tx_buf_gc: warning "
++				       "-- grant still in use by backend "
++				       "domain.\n");
++				BUG();
++			}
++			gnttab_end_foreign_access_ref(np->grant_tx_ref[id]);
++			gnttab_release_grant_reference(
++				&np->gref_tx_head, np->grant_tx_ref[id]);
++			np->grant_tx_ref[id] = GRANT_INVALID_REF;
++			add_id_to_freelist(np->tx_skbs, id);
++			dev_kfree_skb_irq(skb);
++		}
 +
-+#include <linux/err.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <asm/hypervisor.h>
-+#include <xen/features.h>
-+#include <xen/hypervisor_sysfs.h>
-+#include <xen/xenbus.h>
++		np->tx.rsp_cons = prod;
 +
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Mike D. Day <ncmike at us.ibm.com>");
++		/*
++		 * Set a new event, then check for race with update of tx_cons.
++		 * Note that it is essential to schedule a callback, no matter
++		 * how few buffers are pending. Even if there is space in the
++		 * transmit ring, higher layers may be blocked because too much
++		 * data is outstanding: in such cases notification from Xen is
++		 * likely to be the only kick that we'll get.
++		 */
++		np->tx.sring->rsp_event =
++			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
++		mb();
++	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
 +
-+static ssize_t type_show(struct hyp_sysfs_attr *attr, char *buffer)
-+{
-+	return sprintf(buffer, "xen\n");
++	network_maybe_wake_tx(dev);
 +}
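
The rsp_event assignment in network_tx_buf_gc() implements interrupt mitigation: the backend notifies again only after roughly half of the outstanding requests have completed. With hypothetical ring indexes:

	/* prod = rsp_prod = 100, req_prod = 140: 40 requests in flight.
	 * rsp_event = 100 + ((140 - 100) >> 1) + 1 = 121,
	 * so the next notification arrives only after 21 more responses;
	 * the do/while re-reads rsp_prod to close the race window. */
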
 +
-+HYPERVISOR_ATTR_RO(type);
-+
-+static int __init xen_sysfs_type_init(void)
++static void rx_refill_timeout(unsigned long data)
 +{
-+	return sysfs_create_file(&hypervisor_subsys.kset.kobj, &type_attr.attr);
-+}
++	struct net_device *dev = (struct net_device *)data;
++	struct netfront_info *np = netdev_priv(dev);
 +
-+static void xen_sysfs_type_destroy(void)
-+{
-+	sysfs_remove_file(&hypervisor_subsys.kset.kobj, &type_attr.attr);
++	netfront_accelerator_call_stop_napi_irq(np, dev);
++
++	netif_rx_schedule(dev);
 +}
 +
-+/* xen version attributes */
-+static ssize_t major_show(struct hyp_sysfs_attr *attr, char *buffer)
++static void network_alloc_rx_buffers(struct net_device *dev)
 +{
-+	int version = HYPERVISOR_xen_version(XENVER_version, NULL);
-+	if (version)
-+		return sprintf(buffer, "%d\n", version >> 16);
-+	return -ENODEV;
-+}
++	unsigned short id;
++	struct netfront_info *np = netdev_priv(dev);
++	struct sk_buff *skb;
++	struct page *page;
++	int i, batch_target, notify;
++	RING_IDX req_prod = np->rx.req_prod_pvt;
++	struct xen_memory_reservation reservation;
++	grant_ref_t ref;
++ 	unsigned long pfn;
++ 	void *vaddr;
++	int nr_flips;
++	netif_rx_request_t *req;
 +
-+HYPERVISOR_ATTR_RO(major);
++	if (unlikely(!netfront_carrier_ok(np)))
++		return;
 +
-+static ssize_t minor_show(struct hyp_sysfs_attr *attr, char *buffer)
-+{
-+	int version = HYPERVISOR_xen_version(XENVER_version, NULL);
-+	if (version)
-+		return sprintf(buffer, "%d\n", version & 0xff);
-+	return -ENODEV;
-+}
++	/*
++	 * Allocate skbuffs greedily, even though we batch updates to the
++	 * receive ring. This creates a less bursty demand on the memory
++	 * allocator, so should reduce the chance of failed allocation requests
++	 * both for ourselves and for other kernel subsystems.
++	 */
++	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
++	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
++		/*
++		 * Allocate an skb and a page. Do not use __dev_alloc_skb as
++		 * that will allocate page-sized buffers which is not
++		 * necessary here.
++		 * 16 bytes added as necessary headroom for netif_receive_skb.
++		 */
++		skb = alloc_skb(RX_COPY_THRESHOLD + 16 + NET_IP_ALIGN,
++				GFP_ATOMIC | __GFP_NOWARN);
++		if (unlikely(!skb))
++			goto no_skb;
 +
-+HYPERVISOR_ATTR_RO(minor);
++		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
++		if (!page) {
++			kfree_skb(skb);
++no_skb:
++			/* Any skbuffs queued for refill? Force them out. */
++			if (i != 0)
++				goto refill;
++			/* Could not allocate any skbuffs. Try again later. */
++			mod_timer(&np->rx_refill_timer,
++				  jiffies + (HZ/10));
++			break;
++		}
 +
-+static ssize_t extra_show(struct hyp_sysfs_attr *attr, char *buffer)
-+{
-+	int ret = -ENOMEM;
-+	char *extra;
++		skb_reserve(skb, 16 + NET_IP_ALIGN); /* mimic dev_alloc_skb() */
++		skb_shinfo(skb)->frags[0].page = page;
++		skb_shinfo(skb)->nr_frags = 1;
++		__skb_queue_tail(&np->rx_batch, skb);
++	}
 +
-+	extra = kmalloc(XEN_EXTRAVERSION_LEN, GFP_KERNEL);
-+	if (extra) {
-+		ret = HYPERVISOR_xen_version(XENVER_extraversion, extra);
-+		if (!ret)
-+			ret = sprintf(buffer, "%s\n", extra);
-+		kfree(extra);
++	/* Is the batch large enough to be worthwhile? */
++	if (i < (np->rx_target/2)) {
++		if (req_prod > np->rx.sring->req_prod)
++			goto push;
++		return;
 +	}
 +
-+	return ret;
-+}
++	/* Adjust our fill target if we risked running out of buffers. */
++	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
++	    ((np->rx_target *= 2) > np->rx_max_target))
++		np->rx_target = np->rx_max_target;
 +
-+HYPERVISOR_ATTR_RO(extra);
++ refill:
++	for (nr_flips = i = 0; ; i++) {
++		if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
++			break;
 +
-+static struct attribute *version_attrs[] = {
-+	&major_attr.attr,
-+	&minor_attr.attr,
-+	&extra_attr.attr,
-+	NULL
-+};
++		skb->dev = dev;
 +
-+static struct attribute_group version_group = {
-+	.name = "version",
-+	.attrs = version_attrs,
-+};
++		id = xennet_rxidx(req_prod + i);
 +
-+static int __init xen_sysfs_version_init(void)
-+{
-+	return sysfs_create_group(&hypervisor_subsys.kset.kobj,
-+				  &version_group);
-+}
++		BUG_ON(np->rx_skbs[id]);
++		np->rx_skbs[id] = skb;
 +
-+static void xen_sysfs_version_destroy(void)
-+{
-+	sysfs_remove_group(&hypervisor_subsys.kset.kobj, &version_group);
-+}
++		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
++		BUG_ON((signed short)ref < 0);
++		np->grant_rx_ref[id] = ref;
 +
-+/* UUID */
++		pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
++		vaddr = page_address(skb_shinfo(skb)->frags[0].page);
 +
-+static ssize_t uuid_show(struct hyp_sysfs_attr *attr, char *buffer)
-+{
-+	char *vm, *val;
-+	int ret;
++		req = RING_GET_REQUEST(&np->rx, req_prod + i);
++		if (!np->copying_receiver) {
++			gnttab_grant_foreign_transfer_ref(ref,
++							  np->xbdev->otherend_id,
++							  pfn);
++			np->rx_pfn_array[nr_flips] = pfn_to_mfn(pfn);
++			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++				/* Remove this page before passing
++				 * back to Xen. */
++				set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
++				MULTI_update_va_mapping(np->rx_mcl+i,
++							(unsigned long)vaddr,
++							__pte(0), 0);
++			}
++			nr_flips++;
++		} else {
++			gnttab_grant_foreign_access_ref(ref,
++							np->xbdev->otherend_id,
++							pfn_to_mfn(pfn),
++							0);
++		}
 +
-+	vm = xenbus_read(XBT_NIL, "vm", "", NULL);
-+	if (IS_ERR(vm))
-+		return PTR_ERR(vm);
-+	val = xenbus_read(XBT_NIL, vm, "uuid", NULL);
-+	kfree(vm);
-+	if (IS_ERR(val))
-+		return PTR_ERR(val);
-+	ret = sprintf(buffer, "%s\n", val);
-+	kfree(val);
-+	return ret;
-+}
++		req->id = id;
++		req->gref = ref;
++	}
 +
-+HYPERVISOR_ATTR_RO(uuid);
++	if (nr_flips != 0) {
++		/* Tell the balloon driver what is going on. */
++		balloon_update_driver_allowance(i);
 +
-+static int __init xen_sysfs_uuid_init(void)
-+{
-+	return sysfs_create_file(&hypervisor_subsys.kset.kobj, &uuid_attr.attr);
-+}
++		set_xen_guest_handle(reservation.extent_start,
++				     np->rx_pfn_array);
++		reservation.nr_extents   = nr_flips;
++		reservation.extent_order = 0;
++		reservation.address_bits = 0;
++		reservation.domid        = DOMID_SELF;
 +
-+static void xen_sysfs_uuid_destroy(void)
-+{
-+	sysfs_remove_file(&hypervisor_subsys.kset.kobj, &uuid_attr.attr);
-+}
++		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++			/* After all PTEs have been zapped, flush the TLB. */
++			np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
++				UVMF_TLB_FLUSH|UVMF_ALL;
 +
-+/* xen compilation attributes */
++			/* Give away a batch of pages. */
++			np->rx_mcl[i].op = __HYPERVISOR_memory_op;
++			np->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
++			np->rx_mcl[i].args[1] = (unsigned long)&reservation;
 +
-+static ssize_t compiler_show(struct hyp_sysfs_attr *attr, char *buffer)
-+{
-+	int ret = -ENOMEM;
-+	struct xen_compile_info *info;
++			/* Zap PTEs and give away pages in one big
++			 * multicall. */
++			if (unlikely(HYPERVISOR_multicall(np->rx_mcl, i+1)))
++				BUG();
 +
-+	info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
-+	if (info) {
-+		ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
-+		if (!ret)
-+			ret = sprintf(buffer, "%s\n", info->compiler);
-+		kfree(info);
++			/* Check return status of HYPERVISOR_memory_op(). */
++			if (unlikely(np->rx_mcl[i].result != i))
++				panic("Unable to reduce memory reservation\n");
++			while (nr_flips--)
++				BUG_ON(np->rx_mcl[nr_flips].result);
++		} else {
++			if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++						 &reservation) != i)
++				panic("Unable to reduce memory reservation\n");
++		}
++	} else {
++		wmb();
 +	}
 +
-+	return ret;
++	/* Above is a suitable barrier to ensure backend will see requests. */
++	np->rx.req_prod_pvt = req_prod + i;
++ push:
++	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
++	if (notify)
++		notify_remote_via_irq(np->irq);
 +}
 +
-+HYPERVISOR_ATTR_RO(compiler);
-+
-+static ssize_t compiled_by_show(struct hyp_sysfs_attr *attr, char *buffer)
++static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
++			      struct netif_tx_request *tx)
 +{
-+	int ret = -ENOMEM;
-+	struct xen_compile_info *info;
-+
-+	info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
-+	if (info) {
-+		ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
-+		if (!ret)
-+			ret = sprintf(buffer, "%s\n", info->compile_by);
-+		kfree(info);
-+	}
++	struct netfront_info *np = netdev_priv(dev);
++	char *data = skb->data;
++	unsigned long mfn;
++	RING_IDX prod = np->tx.req_prod_pvt;
++	int frags = skb_shinfo(skb)->nr_frags;
++	unsigned int offset = offset_in_page(data);
++	unsigned int len = skb_headlen(skb);
++	unsigned int id;
++	grant_ref_t ref;
++	int i;
 +
-+	return ret;
-+}
++	while (len > PAGE_SIZE - offset) {
++		tx->size = PAGE_SIZE - offset;
++		tx->flags |= NETTXF_more_data;
++		len -= tx->size;
++		data += tx->size;
++		offset = 0;
 +
-+HYPERVISOR_ATTR_RO(compiled_by);
++		id = get_id_from_freelist(np->tx_skbs);
++		np->tx_skbs[id] = skb_get(skb);
++		tx = RING_GET_REQUEST(&np->tx, prod++);
++		tx->id = id;
++		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
++		BUG_ON((signed short)ref < 0);
 +
-+static ssize_t compile_date_show(struct hyp_sysfs_attr *attr, char *buffer)
-+{
-+	int ret = -ENOMEM;
-+	struct xen_compile_info *info;
++		mfn = virt_to_mfn(data);
++		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
++						mfn, GTF_readonly);
 +
-+	info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
-+	if (info) {
-+		ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
-+		if (!ret)
-+			ret = sprintf(buffer, "%s\n", info->compile_date);
-+		kfree(info);
++		tx->gref = np->grant_tx_ref[id] = ref;
++		tx->offset = offset;
++		tx->size = len;
++		tx->flags = 0;
 +	}
 +
-+	return ret;
-+}
++	for (i = 0; i < frags; i++) {
++		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 +
-+HYPERVISOR_ATTR_RO(compile_date);
++		tx->flags |= NETTXF_more_data;
++
++		id = get_id_from_freelist(np->tx_skbs);
++		np->tx_skbs[id] = skb_get(skb);
++		tx = RING_GET_REQUEST(&np->tx, prod++);
++		tx->id = id;
++		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
++		BUG_ON((signed short)ref < 0);
 +
-+static struct attribute *xen_compile_attrs[] = {
-+	&compiler_attr.attr,
-+	&compiled_by_attr.attr,
-+	&compile_date_attr.attr,
-+	NULL
-+};
++		mfn = pfn_to_mfn(page_to_pfn(frag->page));
++		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
++						mfn, GTF_readonly);
 +
-+static struct attribute_group xen_compilation_group = {
-+	.name = "compilation",
-+	.attrs = xen_compile_attrs,
-+};
++		tx->gref = np->grant_tx_ref[id] = ref;
++		tx->offset = frag->page_offset;
++		tx->size = frag->size;
++		tx->flags = 0;
++	}
 +
-+int __init static xen_compilation_init(void)
-+{
-+	return sysfs_create_group(&hypervisor_subsys.kset.kobj,
-+				  &xen_compilation_group);
++	np->tx.req_prod_pvt = prod;
 +}
 +
-+static void xen_compilation_destroy(void)
++static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
 +{
-+	sysfs_remove_group(&hypervisor_subsys.kset.kobj,
-+			   &xen_compilation_group);
-+}
++	unsigned short id;
++	struct netfront_info *np = netdev_priv(dev);
++	struct netif_tx_request *tx;
++	struct netif_extra_info *extra;
++	char *data = skb->data;
++	RING_IDX i;
++	grant_ref_t ref;
++	unsigned long mfn;
++	int notify;
++	int frags = skb_shinfo(skb)->nr_frags;
++	unsigned int offset = offset_in_page(data);
++	unsigned int len = skb_headlen(skb);
 +
-+/* xen properties info */
++	/* Check the fast path, if hooks are available */
++ 	if (np->accel_vif_state.hooks && 
++ 	    np->accel_vif_state.hooks->start_xmit(skb, dev)) { 
++ 		/* Fast path has sent this packet */ 
++ 		return 0; 
++ 	} 
 +
-+static ssize_t capabilities_show(struct hyp_sysfs_attr *attr, char *buffer)
-+{
-+	int ret = -ENOMEM;
-+	char *caps;
++	frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
++	if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
++		printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
++		       frags);
++		dump_stack();
++		goto drop;
++	}
 +
-+	caps = kmalloc(XEN_CAPABILITIES_INFO_LEN, GFP_KERNEL);
-+	if (caps) {
-+		ret = HYPERVISOR_xen_version(XENVER_capabilities, caps);
-+		if (!ret)
-+			ret = sprintf(buffer, "%s\n", caps);
-+		kfree(caps);
++	spin_lock_irq(&np->tx_lock);
++
++	if (unlikely(!netfront_carrier_ok(np) ||
++		     (frags > 1 && !xennet_can_sg(dev)) ||
++		     netif_needs_gso(dev, skb))) {
++		spin_unlock_irq(&np->tx_lock);
++		goto drop;
 +	}
 +
-+	return ret;
-+}
++	i = np->tx.req_prod_pvt;
 +
-+HYPERVISOR_ATTR_RO(capabilities);
++	id = get_id_from_freelist(np->tx_skbs);
++	np->tx_skbs[id] = skb;
 +
-+static ssize_t changeset_show(struct hyp_sysfs_attr *attr, char *buffer)
-+{
-+	int ret = -ENOMEM;
-+	char *cset;
++	tx = RING_GET_REQUEST(&np->tx, i);
 +
-+	cset = kmalloc(XEN_CHANGESET_INFO_LEN, GFP_KERNEL);
-+	if (cset) {
-+		ret = HYPERVISOR_xen_version(XENVER_changeset, cset);
-+		if (!ret)
-+			ret = sprintf(buffer, "%s\n", cset);
-+		kfree(cset);
-+	}
++	tx->id   = id;
++	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
++	BUG_ON((signed short)ref < 0);
++	mfn = virt_to_mfn(data);
++	gnttab_grant_foreign_access_ref(
++		ref, np->xbdev->otherend_id, mfn, GTF_readonly);
++	tx->gref = np->grant_tx_ref[id] = ref;
++	tx->offset = offset;
++	tx->size = len;
 +
-+	return ret;
-+}
++	tx->flags = 0;
++	extra = NULL;
 +
-+HYPERVISOR_ATTR_RO(changeset);
++	if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
++		tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
++#ifdef CONFIG_XEN
++	if (skb->proto_data_valid) /* remote but checksummed? */
++		tx->flags |= NETTXF_data_validated;
++#endif
 +
-+static ssize_t virtual_start_show(struct hyp_sysfs_attr *attr, char *buffer)
-+{
-+	int ret = -ENOMEM;
-+	struct xen_platform_parameters *parms;
++#if HAVE_TSO
++	if (skb_shinfo(skb)->gso_size) {
++		struct netif_extra_info *gso = (struct netif_extra_info *)
++			RING_GET_REQUEST(&np->tx, ++i);
 +
-+	parms = kmalloc(sizeof(struct xen_platform_parameters), GFP_KERNEL);
-+	if (parms) {
-+		ret = HYPERVISOR_xen_version(XENVER_platform_parameters,
-+					     parms);
-+		if (!ret)
-+			ret = sprintf(buffer, "%lx\n", parms->virt_start);
-+		kfree(parms);
++		if (extra)
++			extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
++		else
++			tx->flags |= NETTXF_extra_info;
++
++		gso->u.gso.size = skb_shinfo(skb)->gso_size;
++		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
++		gso->u.gso.pad = 0;
++		gso->u.gso.features = 0;
++
++		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
++		gso->flags = 0;
++		extra = gso;
 +	}
++#endif
 +
-+	return ret;
-+}
++	np->tx.req_prod_pvt = i + 1;
 +
-+HYPERVISOR_ATTR_RO(virtual_start);
++	xennet_make_frags(skb, dev, tx);
++	tx->size = skb->len;
 +
-+static ssize_t pagesize_show(struct hyp_sysfs_attr *attr, char *buffer)
-+{
-+	int ret;
++	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
++	if (notify)
++		notify_remote_via_irq(np->irq);
 +
-+	ret = HYPERVISOR_xen_version(XENVER_pagesize, NULL);
-+	if (ret > 0)
-+		ret = sprintf(buffer, "%x\n", ret);
++	np->stats.tx_bytes += skb->len;
++	np->stats.tx_packets++;
++	dev->trans_start = jiffies;
 +
-+	return ret;
-+}
++	/* Note: It is not safe to access skb after network_tx_buf_gc()! */
++	network_tx_buf_gc(dev);
 +
-+HYPERVISOR_ATTR_RO(pagesize);
++	if (!netfront_tx_slot_available(np))
++		netif_stop_queue(dev);
 +
-+/* eventually there will be several more features to export */
-+static ssize_t xen_feature_show(int index, char *buffer)
-+{
-+	int ret = -ENOMEM;
-+	struct xen_feature_info *info;
++	spin_unlock_irq(&np->tx_lock);
 +
-+	info = kmalloc(sizeof(struct xen_feature_info), GFP_KERNEL);
-+	if (info) {
-+		info->submap_idx = index;
-+		ret = HYPERVISOR_xen_version(XENVER_get_features, info);
-+		if (!ret)
-+			ret = sprintf(buffer, "%d\n", info->submap);
-+		kfree(info);
-+	}
++	return 0;
 +
-+	return ret;
++ drop:
++	np->stats.tx_dropped++;
++	dev_kfree_skb(skb);
++	return 0;
 +}
 +
-+static ssize_t writable_pt_show(struct hyp_sysfs_attr *attr, char *buffer)
++static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
 +{
-+	return xen_feature_show(XENFEAT_writable_page_tables, buffer);
-+}
++	struct net_device *dev = dev_id;
++	struct netfront_info *np = netdev_priv(dev);
++	unsigned long flags;
 +
-+HYPERVISOR_ATTR_RO(writable_pt);
++	spin_lock_irqsave(&np->tx_lock, flags);
 +
-+static struct attribute *xen_properties_attrs[] = {
-+	&capabilities_attr.attr,
-+	&changeset_attr.attr,
-+	&virtual_start_attr.attr,
-+	&pagesize_attr.attr,
-+	&writable_pt_attr.attr,
-+	NULL
-+};
++	if (likely(netfront_carrier_ok(np))) {
++		network_tx_buf_gc(dev);
++		/* Under tx_lock: protects access to rx shared-ring indexes. */
++		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) {
++			netfront_accelerator_call_stop_napi_irq(np, dev);
 +
-+static struct attribute_group xen_properties_group = {
-+	.name = "properties",
-+	.attrs = xen_properties_attrs,
-+};
++			netif_rx_schedule(dev);
++			dev->last_rx = jiffies;
++		}
++	}
 +
-+static int __init xen_properties_init(void)
-+{
-+	return sysfs_create_group(&hypervisor_subsys.kset.kobj,
-+				  &xen_properties_group);
-+}
++	spin_unlock_irqrestore(&np->tx_lock, flags);
 +
-+static void xen_properties_destroy(void)
-+{
-+	sysfs_remove_group(&hypervisor_subsys.kset.kobj,
-+			   &xen_properties_group);
++	return IRQ_HANDLED;
 +}
 +
-+static int __init hyper_sysfs_init(void)
++static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
++				grant_ref_t ref)
 +{
-+	int ret;
-+
-+	if (!is_running_on_xen())
-+		return -ENODEV;
-+
-+	ret = xen_sysfs_type_init();
-+	if (ret)
-+		goto out;
-+	ret = xen_sysfs_version_init();
-+	if (ret)
-+		goto version_out;
-+	ret = xen_compilation_init();
-+	if (ret)
-+		goto comp_out;
-+	ret = xen_sysfs_uuid_init();
-+	if (ret)
-+		goto uuid_out;
-+	ret = xen_properties_init();
-+	if (!ret)
-+		goto out;
++	int new = xennet_rxidx(np->rx.req_prod_pvt);
 +
-+	xen_sysfs_uuid_destroy();
-+uuid_out:
-+	xen_compilation_destroy();
-+comp_out:
-+	xen_sysfs_version_destroy();
-+version_out:
-+	xen_sysfs_type_destroy();
-+out:
-+	return ret;
++	BUG_ON(np->rx_skbs[new]);
++	np->rx_skbs[new] = skb;
++	np->grant_rx_ref[new] = ref;
++	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
++	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
++	np->rx.req_prod_pvt++;
 +}
 +
-+static void hyper_sysfs_exit(void)
++int xennet_get_extras(struct netfront_info *np,
++		      struct netif_extra_info *extras, RING_IDX rp)
 +{
-+	xen_properties_destroy();
-+	xen_compilation_destroy();
-+	xen_sysfs_uuid_destroy();
-+	xen_sysfs_version_destroy();
-+	xen_sysfs_type_destroy();
++	struct netif_extra_info *extra;
++	RING_IDX cons = np->rx.rsp_cons;
++	int err = 0;
 +
-+}
++	do {
++		struct sk_buff *skb;
++		grant_ref_t ref;
 +
-+module_init(hyper_sysfs_init);
-+module_exit(hyper_sysfs_exit);
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/evtchn/evtchn.c tmp-linux-2.6-xen.patch/drivers/xen/evtchn/evtchn.c
---- pristine-linux-2.6.18.2/drivers/xen/evtchn/evtchn.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/evtchn/evtchn.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,469 @@
-+/******************************************************************************
-+ * evtchn.c
-+ * 
-+ * Driver for receiving and demuxing event-channel signals.
-+ * 
-+ * Copyright (c) 2004-2005, K A Fraser
-+ * Multi-process extensions Copyright (c) 2004, Steven Smith
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
++		if (unlikely(cons + 1 == rp)) {
++			if (net_ratelimit())
++				WPRINTK("Missing extra info\n");
++			err = -EBADR;
++			break;
++		}
 +
-+#include <linux/module.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/fs.h>
-+#include <linux/errno.h>
-+#include <linux/miscdevice.h>
-+#include <linux/major.h>
-+#include <linux/proc_fs.h>
-+#include <linux/stat.h>
-+#include <linux/poll.h>
-+#include <linux/irq.h>
-+#include <linux/init.h>
-+#include <linux/gfp.h>
-+#include <linux/mutex.h>
-+#include <xen/evtchn.h>
-+#include <xen/public/evtchn.h>
++		extra = (struct netif_extra_info *)
++			RING_GET_RESPONSE(&np->rx, ++cons);
 +
-+struct per_user_data {
-+	/* Notification ring, accessed via /dev/xen/evtchn. */
-+#define EVTCHN_RING_SIZE     (PAGE_SIZE / sizeof(evtchn_port_t))
-+#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
-+	evtchn_port_t *ring;
-+	unsigned int ring_cons, ring_prod, ring_overflow;
-+	struct mutex ring_cons_mutex; /* protect against concurrent readers */
++		if (unlikely(!extra->type ||
++			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
++			if (net_ratelimit())
++				WPRINTK("Invalid extra type: %d\n",
++					extra->type);
++			err = -EINVAL;
++		} else {
++			memcpy(&extras[extra->type - 1], extra,
++			       sizeof(*extra));
++		}
 +
-+	/* Processes wait on this queue when ring is empty. */
-+	wait_queue_head_t evtchn_wait;
-+	struct fasync_struct *evtchn_async_queue;
-+};
++		skb = xennet_get_rx_skb(np, cons);
++		ref = xennet_get_rx_ref(np, cons);
++		xennet_move_rx_slot(np, skb, ref);
++	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
 +
-+/* Who's bound to each port? */
-+static struct per_user_data *port_user[NR_EVENT_CHANNELS];
-+static spinlock_t port_user_lock;
++	np->rx.rsp_cons = cons;
++	return err;
++}
 +
-+void evtchn_device_upcall(int port)
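++/*
++ * Gather every response slot making up one packet onto 'list',
++ * reclaiming each buffer by ending the foreign transfer (flip mode,
++ * queueing any needed remapping) or the foreign access (copy mode).
++ */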
++static int xennet_get_responses(struct netfront_info *np,
++				struct netfront_rx_info *rinfo, RING_IDX rp,
++				struct sk_buff_head *list,
++				int *pages_flipped_p)
 +{
-+	struct per_user_data *u;
++	int pages_flipped = *pages_flipped_p;
++	struct mmu_update *mmu;
++	struct multicall_entry *mcl;
++	struct netif_rx_response *rx = &rinfo->rx;
++	struct netif_extra_info *extras = rinfo->extras;
++	RING_IDX cons = np->rx.rsp_cons;
++	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
++	grant_ref_t ref = xennet_get_rx_ref(np, cons);
++	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
++	int frags = 1;
++	int err = 0;
++	unsigned long ret;
 +
-+	spin_lock(&port_user_lock);
++	if (rx->flags & NETRXF_extra_info) {
++		err = xennet_get_extras(np, extras, rp);
++		cons = np->rx.rsp_cons;
++	}
 +
-+	mask_evtchn(port);
-+	clear_evtchn(port);
++	for (;;) {
++		unsigned long mfn;
 +
-+	if ((u = port_user[port]) != NULL) {
-+		if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
-+			u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port;
-+			if (u->ring_cons == u->ring_prod++) {
-+				wake_up_interruptible(&u->evtchn_wait);
-+				kill_fasync(&u->evtchn_async_queue,
-+					    SIGIO, POLL_IN);
-+			}
-+		} else {
-+			u->ring_overflow = 1;
++		if (unlikely(rx->status < 0 ||
++			     rx->offset + rx->status > PAGE_SIZE)) {
++			if (net_ratelimit())
++				WPRINTK("rx->offset: %x, size: %u\n",
++					rx->offset, rx->status);
++			xennet_move_rx_slot(np, skb, ref);
++			err = -EINVAL;
++			goto next;
 +		}
-+	}
-+
-+	spin_unlock(&port_user_lock);
-+}
 +
-+static ssize_t evtchn_read(struct file *file, char __user *buf,
-+			   size_t count, loff_t *ppos)
-+{
-+	int rc;
-+	unsigned int c, p, bytes1 = 0, bytes2 = 0;
-+	struct per_user_data *u = file->private_data;
++		/*
++		 * This definitely indicates a bug, either in this driver or in
++		 * the backend driver. In the future this should flag the bad
++		 * situation to the system controller so it can reboot the backend.
++		 */
++		if (ref == GRANT_INVALID_REF) {
++			if (net_ratelimit())
++				WPRINTK("Bad rx response id %d.\n", rx->id);
++			err = -EINVAL;
++			goto next;
++		}
 +
-+	/* Whole number of ports. */
-+	count &= ~(sizeof(evtchn_port_t)-1);
++		if (!np->copying_receiver) {
++			/* The transfer can fail: memory pressure,
++			 * insufficient buffer headroom, ... */
++			if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) {
++				if (net_ratelimit())
++					WPRINTK("Unfulfilled rx req "
++						"(id=%d, st=%d).\n",
++						rx->id, rx->status);
++				xennet_move_rx_slot(np, skb, ref);
++				err = -ENOMEM;
++				goto next;
++			}
 +
-+	if (count == 0)
-+		return 0;
++			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++				/* Remap the page. */
++				struct page *page =
++					skb_shinfo(skb)->frags[0].page;
++				unsigned long pfn = page_to_pfn(page);
++				void *vaddr = page_address(page);
 +
-+	if (count > PAGE_SIZE)
-+		count = PAGE_SIZE;
++				mcl = np->rx_mcl + pages_flipped;
++				mmu = np->rx_mmu + pages_flipped;
 +
-+	for (;;) {
-+		mutex_lock(&u->ring_cons_mutex);
++				MULTI_update_va_mapping(mcl,
++							(unsigned long)vaddr,
++							pfn_pte_ma(mfn,
++								   PAGE_KERNEL),
++							0);
++				mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
++					| MMU_MACHPHYS_UPDATE;
++				mmu->val = pfn;
 +
-+		rc = -EFBIG;
-+		if (u->ring_overflow)
-+			goto unlock_out;
++				set_phys_to_machine(pfn, mfn);
++			}
++			pages_flipped++;
++		} else {
++			ret = gnttab_end_foreign_access_ref(ref);
++			BUG_ON(!ret);
++		}
 +
-+		if ((c = u->ring_cons) != (p = u->ring_prod))
-+			break;
++		gnttab_release_grant_reference(&np->gref_rx_head, ref);
 +
-+		mutex_unlock(&u->ring_cons_mutex);
++		__skb_queue_tail(list, skb);
 +
-+		if (file->f_flags & O_NONBLOCK)
-+			return -EAGAIN;
++next:
++		if (!(rx->flags & NETRXF_more_data))
++			break;
 +
-+		rc = wait_event_interruptible(
-+			u->evtchn_wait, u->ring_cons != u->ring_prod);
-+		if (rc)
-+			return rc;
-+	}
++		if (cons + frags == rp) {
++			if (net_ratelimit())
++				WPRINTK("Need more frags\n");
++			err = -ENOENT;
++			break;
++		}
 +
-+	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
-+	if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
-+		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
-+			sizeof(evtchn_port_t);
-+		bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t);
-+	} else {
-+		bytes1 = (p - c) * sizeof(evtchn_port_t);
-+		bytes2 = 0;
++		rx = RING_GET_RESPONSE(&np->rx, cons + frags);
++		skb = xennet_get_rx_skb(np, cons + frags);
++		ref = xennet_get_rx_ref(np, cons + frags);
++		frags++;
 +	}
-+
-+	/* Truncate chunks according to caller's maximum byte count. */
-+	if (bytes1 > count) {
-+		bytes1 = count;
-+		bytes2 = 0;
-+	} else if ((bytes1 + bytes2) > count) {
-+		bytes2 = count - bytes1;
++
++	if (unlikely(frags > max)) {
++		if (net_ratelimit())
++			WPRINTK("Too many frags\n");
++		err = -E2BIG;
 +	}
 +
-+	rc = -EFAULT;
-+	if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
-+	    ((bytes2 != 0) &&
-+	     copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
-+		goto unlock_out;
++	if (unlikely(err))
++		np->rx.rsp_cons = cons + frags;
 +
-+	u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
-+	rc = bytes1 + bytes2;
++	*pages_flipped_p = pages_flipped;
 +
-+ unlock_out:
-+	mutex_unlock(&u->ring_cons_mutex);
-+	return rc;
++	return err;
 +}
 +
-+static ssize_t evtchn_write(struct file *file, const char __user *buf,
-+			    size_t count, loff_t *ppos)
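++/*
++ * Attach the pages of the queued fragment skbs as frags of the head
++ * skb and return the updated response-consumer index.
++ */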
++static RING_IDX xennet_fill_frags(struct netfront_info *np,
++				  struct sk_buff *skb,
++				  struct sk_buff_head *list)
 +{
-+	int rc, i;
-+	evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
-+	struct per_user_data *u = file->private_data;
-+
-+	if (kbuf == NULL)
-+		return -ENOMEM;
-+
-+	/* Whole number of ports. */
-+	count &= ~(sizeof(evtchn_port_t)-1);
-+
-+	rc = 0;
-+	if (count == 0)
-+		goto out;
++	struct skb_shared_info *shinfo = skb_shinfo(skb);
++	int nr_frags = shinfo->nr_frags;
++	RING_IDX cons = np->rx.rsp_cons;
++	skb_frag_t *frag = shinfo->frags + nr_frags;
++	struct sk_buff *nskb;
 +
-+	if (count > PAGE_SIZE)
-+		count = PAGE_SIZE;
++	while ((nskb = __skb_dequeue(list))) {
++		struct netif_rx_response *rx =
++			RING_GET_RESPONSE(&np->rx, ++cons);
 +
-+	rc = -EFAULT;
-+	if (copy_from_user(kbuf, buf, count) != 0)
-+		goto out;
++		frag->page = skb_shinfo(nskb)->frags[0].page;
++		frag->page_offset = rx->offset;
++		frag->size = rx->status;
 +
-+	spin_lock_irq(&port_user_lock);
-+	for (i = 0; i < (count/sizeof(evtchn_port_t)); i++)
-+		if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u))
-+			unmask_evtchn(kbuf[i]);
-+	spin_unlock_irq(&port_user_lock);
++		skb->data_len += rx->status;
 +
-+	rc = count;
++		skb_shinfo(nskb)->nr_frags = 0;
++		kfree_skb(nskb);
 +
-+ out:
-+	free_page((unsigned long)kbuf);
-+	return rc;
-+}
++		frag++;
++		nr_frags++;
++	}
 +
-+static void evtchn_bind_to_user(struct per_user_data *u, int port)
-+{
-+	spin_lock_irq(&port_user_lock);
-+	BUG_ON(port_user[port] != NULL);
-+	port_user[port] = u;
-+	unmask_evtchn(port);
-+	spin_unlock_irq(&port_user_lock);
++	shinfo->nr_frags = nr_frags;
++	return cons;
 +}
 +
-+static long evtchn_ioctl(struct file *file,
-+			 unsigned int cmd, unsigned long arg)
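++/*
++ * Apply GSO parameters from an extra-info slot to the skb. Only TCPv4
++ * segmentation is accepted, and only when the kernel has TSO support.
++ */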
++static int xennet_set_skb_gso(struct sk_buff *skb,
++			      struct netif_extra_info *gso)
 +{
-+	int rc;
-+	struct per_user_data *u = file->private_data;
-+	void __user *uarg = (void __user *) arg;
-+
-+	switch (cmd) {
-+	case IOCTL_EVTCHN_BIND_VIRQ: {
-+		struct ioctl_evtchn_bind_virq bind;
-+		struct evtchn_bind_virq bind_virq;
++	if (!gso->u.gso.size) {
++		if (net_ratelimit())
++			WPRINTK("GSO size must not be zero.\n");
++		return -EINVAL;
++	}
 +
-+		rc = -EFAULT;
-+		if (copy_from_user(&bind, uarg, sizeof(bind)))
-+			break;
++	/* Currently only TCPv4 segmentation offload (S.O.) is supported. */
++	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
++		if (net_ratelimit())
++			WPRINTK("Bad GSO type %d.\n", gso->u.gso.type);
++		return -EINVAL;
++	}
 +
-+		bind_virq.virq = bind.virq;
-+		bind_virq.vcpu = 0;
-+		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
-+						 &bind_virq);
-+		if (rc != 0)
-+			break;
++#if HAVE_TSO
++	skb_shinfo(skb)->gso_size = gso->u.gso.size;
++#if HAVE_GSO
++	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 +
-+		rc = bind_virq.port;
-+		evtchn_bind_to_user(u, rc);
-+		break;
-+	}
++	/* Header must be checked, and gso_segs computed. */
++	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
++#endif
++	skb_shinfo(skb)->gso_segs = 0;
 +
-+	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
-+		struct ioctl_evtchn_bind_interdomain bind;
-+		struct evtchn_bind_interdomain bind_interdomain;
++	return 0;
++#else
++	if (net_ratelimit())
++		WPRINTK("GSO unsupported by this kernel.\n");
++	return -EINVAL;
++#endif
++}
 +
-+		rc = -EFAULT;
-+		if (copy_from_user(&bind, uarg, sizeof(bind)))
-+			break;
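++/*
++ * NAPI poll handler: consume up to 'budget' rx responses, apply any
++ * deferred page remappings in one multicall (flip mode), then hand
++ * completed skbs up the stack. Returns nonzero while work remains.
++ */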
++static int netif_poll(struct net_device *dev, int *pbudget)
++{
++	struct netfront_info *np = netdev_priv(dev);
++	struct sk_buff *skb;
++	struct netfront_rx_info rinfo;
++	struct netif_rx_response *rx = &rinfo.rx;
++	struct netif_extra_info *extras = rinfo.extras;
++	RING_IDX i, rp;
++	struct multicall_entry *mcl;
++	int work_done, budget, more_to_do = 1, accel_more_to_do = 1;
++	struct sk_buff_head rxq;
++	struct sk_buff_head errq;
++	struct sk_buff_head tmpq;
++	unsigned long flags;
++	unsigned int len;
++	int pages_flipped = 0;
++	int err;
 +
-+		bind_interdomain.remote_dom  = bind.remote_domain;
-+		bind_interdomain.remote_port = bind.remote_port;
-+		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
-+						 &bind_interdomain);
-+		if (rc != 0)
-+			break;
++	spin_lock(&np->rx_lock); /* no need for spin_lock_bh() in ->poll() */
 +
-+		rc = bind_interdomain.local_port;
-+		evtchn_bind_to_user(u, rc);
-+		break;
++	if (unlikely(!netfront_carrier_ok(np))) {
++		spin_unlock(&np->rx_lock);
++		return 0;
 +	}
 +
-+	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
-+		struct ioctl_evtchn_bind_unbound_port bind;
-+		struct evtchn_alloc_unbound alloc_unbound;
++	skb_queue_head_init(&rxq);
++	skb_queue_head_init(&errq);
++	skb_queue_head_init(&tmpq);
 +
-+		rc = -EFAULT;
-+		if (copy_from_user(&bind, uarg, sizeof(bind)))
-+			break;
++	if ((budget = *pbudget) > dev->quota)
++		budget = dev->quota;
++	rp = np->rx.sring->rsp_prod;
++	rmb(); /* Ensure we see queued responses up to 'rp'. */
 +
-+		alloc_unbound.dom        = DOMID_SELF;
-+		alloc_unbound.remote_dom = bind.remote_domain;
-+		rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
-+						 &alloc_unbound);
-+		if (rc != 0)
-+			break;
++	i = np->rx.rsp_cons;
++	work_done = 0;
++	while ((i != rp) && (work_done < budget)) {
++		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
++		memset(extras, 0, sizeof(rinfo.extras));
 +
-+		rc = alloc_unbound.port;
-+		evtchn_bind_to_user(u, rc);
-+		break;
-+	}
++		err = xennet_get_responses(np, &rinfo, rp, &tmpq,
++					   &pages_flipped);
 +
-+	case IOCTL_EVTCHN_UNBIND: {
-+		struct ioctl_evtchn_unbind unbind;
-+		struct evtchn_close close;
-+		int ret;
++		if (unlikely(err)) {
++err:
++			while ((skb = __skb_dequeue(&tmpq)))
++				__skb_queue_tail(&errq, skb);
++			np->stats.rx_errors++;
++			i = np->rx.rsp_cons;
++			continue;
++		}
 +
-+		rc = -EFAULT;
-+		if (copy_from_user(&unbind, uarg, sizeof(unbind)))
-+			break;
++		skb = __skb_dequeue(&tmpq);
 +
-+		rc = -EINVAL;
-+		if (unbind.port >= NR_EVENT_CHANNELS)
-+			break;
++		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
++			struct netif_extra_info *gso;
++			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
 +
-+		spin_lock_irq(&port_user_lock);
-+    
-+		rc = -ENOTCONN;
-+		if (port_user[unbind.port] != u) {
-+			spin_unlock_irq(&port_user_lock);
-+			break;
++			if (unlikely(xennet_set_skb_gso(skb, gso))) {
++				__skb_queue_head(&tmpq, skb);
++				np->rx.rsp_cons += skb_queue_len(&tmpq);
++				goto err;
++			}
 +		}
 +
-+		port_user[unbind.port] = NULL;
-+		mask_evtchn(unbind.port);
++		NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page;
++		NETFRONT_SKB_CB(skb)->offset = rx->offset;
 +
-+		spin_unlock_irq(&port_user_lock);
++		len = rx->status;
++		if (len > RX_COPY_THRESHOLD)
++			len = RX_COPY_THRESHOLD;
++		skb_put(skb, len);
 +
-+		close.port = unbind.port;
-+		ret = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
-+		BUG_ON(ret);
++		if (rx->status > len) {
++			skb_shinfo(skb)->frags[0].page_offset =
++				rx->offset + len;
++			skb_shinfo(skb)->frags[0].size = rx->status - len;
++			skb->data_len = rx->status - len;
++		} else {
++			skb_shinfo(skb)->frags[0].page = NULL;
++			skb_shinfo(skb)->nr_frags = 0;
++		}
 +
-+		rc = 0;
-+		break;
-+	}
++		i = xennet_fill_frags(np, skb, &tmpq);
 +
-+	case IOCTL_EVTCHN_NOTIFY: {
-+		struct ioctl_evtchn_notify notify;
++		/*
++		 * Truesize must approximate the size of true data plus
++		 * any supervisor overheads. Adding hypervisor overheads
++		 * has been shown to significantly reduce achievable
++		 * bandwidth with the default receive buffer size. It is
++		 * therefore not wise to account for it here.
++		 *
++		 * After alloc_skb(RX_COPY_THRESHOLD), truesize is set to
++		 * RX_COPY_THRESHOLD + the supervisor overheads. Here, we
++		 * add the size of the data pulled in xennet_fill_frags().
++		 *
++		 * We also adjust for any unused space in the main data
++		 * area by subtracting (RX_COPY_THRESHOLD - len). This is
++		 * especially important with drivers which split incoming
++		 * packets into header and data, using only 66 bytes of
++		 * the main data area (see the e1000 driver for example.)
++		 * On such systems, without this last adjustment, our
++		 * achievable receive throughput using the standard receive
++		 * buffer size was cut by 25%(!!!).
++		 */
++		skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
++		skb->len += skb->data_len;
 +
-+		rc = -EFAULT;
-+		if (copy_from_user(&notify, uarg, sizeof(notify)))
-+			break;
++		/*
++		 * Old backends do not assert data_validated but we
++		 * can infer it from csum_blank so test both flags.
++		 */
++		if (rx->flags & (NETRXF_data_validated|NETRXF_csum_blank))
++			skb->ip_summed = CHECKSUM_UNNECESSARY;
++		else
++			skb->ip_summed = CHECKSUM_NONE;
++#ifdef CONFIG_XEN
++		skb->proto_data_valid = (skb->ip_summed != CHECKSUM_NONE);
++		skb->proto_csum_blank = !!(rx->flags & NETRXF_csum_blank);
++#endif
++		np->stats.rx_packets++;
++		np->stats.rx_bytes += skb->len;
 +
-+		if (notify.port >= NR_EVENT_CHANNELS) {
-+			rc = -EINVAL;
-+		} else if (port_user[notify.port] != u) {
-+			rc = -ENOTCONN;
-+		} else {
-+			notify_remote_via_evtchn(notify.port);
-+			rc = 0;
-+		}
-+		break;
-+	}
++		__skb_queue_tail(&rxq, skb);
 +
-+	case IOCTL_EVTCHN_RESET: {
-+		/* Initialise the ring to empty. Clear errors. */
-+		mutex_lock(&u->ring_cons_mutex);
-+		spin_lock_irq(&port_user_lock);
-+		u->ring_cons = u->ring_prod = u->ring_overflow = 0;
-+		spin_unlock_irq(&port_user_lock);
-+		mutex_unlock(&u->ring_cons_mutex);
-+		rc = 0;
-+		break;
++		np->rx.rsp_cons = ++i;
++		work_done++;
 +	}
 +
-+	default:
-+		rc = -ENOSYS;
-+		break;
++	if (pages_flipped) {
++		/* Some pages are no longer absent... */
++		balloon_update_driver_allowance(-pages_flipped);
++
++		/* Do all the remapping work and M2P updates. */
++		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++			mcl = np->rx_mcl + pages_flipped;
++			mcl->op = __HYPERVISOR_mmu_update;
++			mcl->args[0] = (unsigned long)np->rx_mmu;
++			mcl->args[1] = pages_flipped;
++			mcl->args[2] = 0;
++			mcl->args[3] = DOMID_SELF;
++			err = HYPERVISOR_multicall_check(np->rx_mcl,
++							 pages_flipped + 1,
++							 NULL);
++			BUG_ON(err);
++		}
 +	}
 +
-+	return rc;
-+}
++	while ((skb = __skb_dequeue(&errq)))
++		kfree_skb(skb);
 +
-+static unsigned int evtchn_poll(struct file *file, poll_table *wait)
-+{
-+	unsigned int mask = POLLOUT | POLLWRNORM;
-+	struct per_user_data *u = file->private_data;
++	while ((skb = __skb_dequeue(&rxq)) != NULL) {
++		struct page *page = NETFRONT_SKB_CB(skb)->page;
++		void *vaddr = page_address(page);
++		unsigned offset = NETFRONT_SKB_CB(skb)->offset;
 +
-+	poll_wait(file, &u->evtchn_wait, wait);
-+	if (u->ring_cons != u->ring_prod)
-+		mask |= POLLIN | POLLRDNORM;
-+	if (u->ring_overflow)
-+		mask = POLLERR;
-+	return mask;
-+}
++		memcpy(skb->data, vaddr + offset, skb_headlen(skb));
 +
-+static int evtchn_fasync(int fd, struct file *filp, int on)
-+{
-+	struct per_user_data *u = filp->private_data;
-+	return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
-+}
++		if (page != skb_shinfo(skb)->frags[0].page)
++			__free_page(page);
 +
-+static int evtchn_open(struct inode *inode, struct file *filp)
-+{
-+	struct per_user_data *u;
++		/* Ethernet work: Delayed to here as it peeks the header. */
++		skb->protocol = eth_type_trans(skb, dev);
 +
-+	if ((u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL)
-+		return -ENOMEM;
++		/* Pass it up. */
++		netif_receive_skb(skb);
++		dev->last_rx = jiffies;
++	}
 +
-+	memset(u, 0, sizeof(*u));
-+	init_waitqueue_head(&u->evtchn_wait);
++	/* If we get a callback with very few responses, reduce fill target. */
++	/* NB. Note exponential increase, linear decrease. */
++	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
++	     ((3*np->rx_target) / 4)) &&
++	    (--np->rx_target < np->rx_min_target))
++		np->rx_target = np->rx_min_target;
 +
-+	u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
-+	if (u->ring == NULL) {
-+		kfree(u);
-+		return -ENOMEM;
++	network_alloc_rx_buffers(dev);
++
++	if (work_done < budget) {
++		/* there's some spare capacity, try the accelerated path */
++		int accel_budget = budget - work_done;
++		int accel_budget_start = accel_budget;
++
++		if (np->accel_vif_state.hooks) {
++			accel_more_to_do =
++				np->accel_vif_state.hooks->netdev_poll
++				(dev, &accel_budget);
++			work_done += (accel_budget_start - accel_budget);
++		} else
++			accel_more_to_do = 0;
 +	}
 +
-+	mutex_init(&u->ring_cons_mutex);
++	*pbudget   -= work_done;
++	dev->quota -= work_done;
 +
-+	filp->private_data = u;
++	if (work_done < budget) {
++		local_irq_save(flags);
 +
-+	return 0;
++		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
++
++		if (!more_to_do && !accel_more_to_do &&
++		    np->accel_vif_state.hooks) {
++			/*
++			 * The slow path has nothing more to do; check
++			 * whether the accelerated fast path does either.
++			 */
++			accel_more_to_do =
++				np->accel_vif_state.hooks->start_napi_irq(dev);
++		}
++
++		if (!more_to_do && !accel_more_to_do)
++			__netif_rx_complete(dev);
++
++		local_irq_restore(flags);
++	}
++
++	spin_unlock(&np->rx_lock);
++
++	return more_to_do | accel_more_to_do;
 +}
 +
-+static int evtchn_release(struct inode *inode, struct file *filp)
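++/* Reclaim all in-flight tx buffers: revoke their grants, free the skbs. */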
++static void netif_release_tx_bufs(struct netfront_info *np)
 +{
++	struct sk_buff *skb;
 +	int i;
-+	struct per_user_data *u = filp->private_data;
-+	struct evtchn_close close;
 +
-+	spin_lock_irq(&port_user_lock);
++	for (i = 1; i <= NET_TX_RING_SIZE; i++) {
++		if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET)
++			continue;
 +
-+	free_page((unsigned long)u->ring);
++		skb = np->tx_skbs[i];
++		gnttab_end_foreign_access_ref(np->grant_tx_ref[i]);
++		gnttab_release_grant_reference(
++			&np->gref_tx_head, np->grant_tx_ref[i]);
++		np->grant_tx_ref[i] = GRANT_INVALID_REF;
++		add_id_to_freelist(np->tx_skbs, i);
++		dev_kfree_skb_irq(skb);
++	}
++}
 +
-+	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
-+		int ret;
-+		if (port_user[i] != u)
-+			continue;
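++/*
++ * Reclaim rx buffers granted in flipping mode: end the foreign
++ * transfers, remap any recovered pages in one multicall and hand pages
++ * the backend never returned back to the balloon driver.
++ */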
++static void netif_release_rx_bufs_flip(struct netfront_info *np)
++{
++	struct mmu_update      *mmu = np->rx_mmu;
++	struct multicall_entry *mcl = np->rx_mcl;
++	struct sk_buff_head free_list;
++	struct sk_buff *skb;
++	unsigned long mfn;
++	int xfer = 0, noxfer = 0, unused = 0;
++	int id, ref, rc;
 +
-+		port_user[i] = NULL;
-+		mask_evtchn(i);
++	skb_queue_head_init(&free_list);
 +
-+		close.port = i;
-+		ret = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
-+		BUG_ON(ret);
-+	}
++	spin_lock_bh(&np->rx_lock);
 +
-+	spin_unlock_irq(&port_user_lock);
++	for (id = 0; id < NET_RX_RING_SIZE; id++) {
++		if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) {
++			unused++;
++			continue;
++		}
 +
-+	kfree(u);
++		skb = np->rx_skbs[id];
++		mfn = gnttab_end_foreign_transfer_ref(ref);
++		gnttab_release_grant_reference(&np->gref_rx_head, ref);
++		np->grant_rx_ref[id] = GRANT_INVALID_REF;
++		add_id_to_freelist(np->rx_skbs, id);
 +
-+	return 0;
-+}
++		if (0 == mfn) {
++			struct page *page = skb_shinfo(skb)->frags[0].page;
++			balloon_release_driver_page(page);
++			skb_shinfo(skb)->nr_frags = 0;
++			dev_kfree_skb(skb);
++			noxfer++;
++			continue;
++		}
 +
-+static const struct file_operations evtchn_fops = {
-+	.owner   = THIS_MODULE,
-+	.read    = evtchn_read,
-+	.write   = evtchn_write,
-+	.unlocked_ioctl = evtchn_ioctl,
-+	.poll    = evtchn_poll,
-+	.fasync  = evtchn_fasync,
-+	.open    = evtchn_open,
-+	.release = evtchn_release,
-+};
++		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++			/* Remap the page. */
++			struct page *page = skb_shinfo(skb)->frags[0].page;
++			unsigned long pfn = page_to_pfn(page);
++			void *vaddr = page_address(page);
 +
-+static struct miscdevice evtchn_miscdev = {
-+	.minor        = MISC_DYNAMIC_MINOR,
-+	.name         = "evtchn",
-+	.fops         = &evtchn_fops,
-+};
++			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
++						pfn_pte_ma(mfn, PAGE_KERNEL),
++						0);
++			mcl++;
++			mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
++				| MMU_MACHPHYS_UPDATE;
++			mmu->val = pfn;
++			mmu++;
 +
-+static int __init evtchn_init(void)
-+{
-+	int err;
++			set_phys_to_machine(pfn, mfn);
++		}
++		__skb_queue_tail(&free_list, skb);
++		xfer++;
++	}
 +
-+	if (!is_running_on_xen())
-+		return -ENODEV;
++	DPRINTK("%s: %d xfer, %d noxfer, %d unused\n",
++		__FUNCTION__, xfer, noxfer, unused);
 +
-+	spin_lock_init(&port_user_lock);
-+	memset(port_user, 0, sizeof(port_user));
++	if (xfer) {
++		/* Some pages are no longer absent... */
++		balloon_update_driver_allowance(-xfer);
 +
-+	/* Create '/dev/misc/evtchn'. */
-+	err = misc_register(&evtchn_miscdev);
-+	if (err != 0) {
-+		printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
-+		return err;
++		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++			/* Do all the remapping work and M2P updates. */
++			mcl->op = __HYPERVISOR_mmu_update;
++			mcl->args[0] = (unsigned long)np->rx_mmu;
++			mcl->args[1] = mmu - np->rx_mmu;
++			mcl->args[2] = 0;
++			mcl->args[3] = DOMID_SELF;
++			mcl++;
++			rc = HYPERVISOR_multicall_check(
++				np->rx_mcl, mcl - np->rx_mcl, NULL);
++			BUG_ON(rc);
++		}
 +	}
 +
-+	printk("Event-channel device installed.\n");
++	while ((skb = __skb_dequeue(&free_list)) != NULL)
++		dev_kfree_skb(skb);
 +
-+	return 0;
++	spin_unlock_bh(&np->rx_lock);
 +}
 +
-+static void evtchn_cleanup(void)
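++/*
++ * Reclaim rx buffers granted in copying mode: end foreign access and
++ * free each skb, counting any references the backend still holds.
++ */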
++static void netif_release_rx_bufs_copy(struct netfront_info *np)
 +{
-+	misc_deregister(&evtchn_miscdev);
-+}
++	struct sk_buff *skb;
++	int i, ref;
++	int busy = 0, inuse = 0;
 +
-+module_init(evtchn_init);
-+module_exit(evtchn_cleanup);
++	spin_lock_bh(&np->rx_lock);
 +
-+MODULE_LICENSE("Dual BSD/GPL");
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/evtchn/Makefile tmp-linux-2.6-xen.patch/drivers/xen/evtchn/Makefile
---- pristine-linux-2.6.18.2/drivers/xen/evtchn/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/evtchn/Makefile	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,2 @@
++	for (i = 0; i < NET_RX_RING_SIZE; i++) {
++		ref = np->grant_rx_ref[i];
 +
-+obj-y	:= evtchn.o
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/fbfront/Makefile tmp-linux-2.6-xen.patch/drivers/xen/fbfront/Makefile
---- pristine-linux-2.6.18.2/drivers/xen/fbfront/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/fbfront/Makefile	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,2 @@
-+obj-$(CONFIG_XEN_FRAMEBUFFER)	:= xenfb.o
-+obj-$(CONFIG_XEN_KEYBOARD)	+= xenkbd.o
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/fbfront/xenfb.c tmp-linux-2.6-xen.patch/drivers/xen/fbfront/xenfb.c
---- pristine-linux-2.6.18.2/drivers/xen/fbfront/xenfb.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/fbfront/xenfb.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,753 @@
-+/*
-+ * linux/drivers/video/xenfb.c -- Xen para-virtual frame buffer device
-+ *
-+ * Copyright (C) 2005-2006 Anthony Liguori <aliguori at us.ibm.com>
-+ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru at redhat.com>
-+ *
-+ *  Based on linux/drivers/video/q40fb.c
-+ *
-+ *  This file is subject to the terms and conditions of the GNU General Public
-+ *  License. See the file COPYING in the main directory of this archive for
-+ *  more details.
-+ */
++		if (ref == GRANT_INVALID_REF)
++			continue;
 +
-+/*
-+ * TODO:
-+ *
-+ * Switch to grant tables when they become capable of dealing with the
-+ * frame buffer.
-+ */
++		inuse++;
 +
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/fb.h>
-+#include <linux/module.h>
-+#include <linux/vmalloc.h>
-+#include <linux/mm.h>
-+#include <linux/mutex.h>
-+#include <asm/hypervisor.h>
-+#include <xen/evtchn.h>
-+#include <xen/interface/io/fbif.h>
-+#include <xen/interface/io/protocols.h>
-+#include <xen/xenbus.h>
-+#include <linux/kthread.h>
++		skb = np->rx_skbs[i];
 +
-+struct xenfb_mapping
-+{
-+	struct list_head	link;
-+	struct vm_area_struct	*vma;
-+	atomic_t		map_refs;
-+	int			faults;
-+	struct xenfb_info	*info;
-+};
++		if (!gnttab_end_foreign_access_ref(ref)) {
++			busy++;
++			continue;
++		}
 +
-+struct xenfb_info
-+{
-+	struct task_struct	*kthread;
-+	wait_queue_head_t	wq;
++		gnttab_release_grant_reference(&np->gref_rx_head, ref);
++		np->grant_rx_ref[i] = GRANT_INVALID_REF;
++		add_id_to_freelist(np->rx_skbs, i);
 +
-+	unsigned char		*fb;
-+	struct fb_info		*fb_info;
-+	struct timer_list	refresh;
-+	int			dirty;
-+	int			x1, y1, x2, y2;	/* dirty rectangle,
-+						   protected by dirty_lock */
-+	spinlock_t		dirty_lock;
-+	struct mutex		mm_lock;
-+	int			nr_pages;
-+	struct page		**pages;
-+	struct list_head	mappings; /* protected by mm_lock */
++		dev_kfree_skb(skb);
++	}
 +
-+	int			irq;
-+	struct xenfb_page	*page;
-+	unsigned long 		*mfns;
-+	int			update_wanted; /* XENFB_TYPE_UPDATE wanted */
++	if (busy)
++		DPRINTK("%s: Unable to release %d of %d in-use grant references out of %ld total.\n",
++			__FUNCTION__, busy, inuse, NET_RX_RING_SIZE);
 +
-+	struct xenbus_device	*xbdev;
-+};
++	spin_unlock_bh(&np->rx_lock);
++}
 +
-+/*
-+ * How the locks work together
-+ *
-+ * There are two locks: spinlock dirty_lock protecting the dirty
-+ * rectangle, and mutex mm_lock protecting mappings.
-+ *
-+ * The problem is that dirty rectangle and mappings aren't
-+ * independent: the dirty rectangle must cover all faulted pages in
-+ * mappings.  We need to prove that our locking maintains this
-+ * invariant.
-+ *
-+ * There are several kinds of critical regions:
-+ *
-+ * 1. Holding only dirty_lock: xenfb_refresh().  May run in
-+ *    interrupts.  Extends the dirty rectangle.  Trivially preserves
-+ *    invariant.
-+ *
-+ * 2. Holding only mm_lock: xenfb_mmap() and xenfb_vm_close().  Touch
-+ *    only mappings.  The former creates unfaulted pages.  Preserves
-+ *    invariant.  The latter removes pages.  Preserves invariant.
-+ *
-+ * 3. Holding both locks: xenfb_vm_nopage().  Extends the dirty
-+ *    rectangle and updates mappings consistently.  Preserves
-+ *    invariant.
-+ *
-+ * 4. The ugliest one: xenfb_update_screen().  Clear the dirty
-+ *    rectangle and update mappings consistently.
-+ *
-+ *    We can't simply hold both locks, because zap_page_range() cannot
-+ *    be called with a spinlock held.
-+ *
-+ *    Therefore, we first clear the dirty rectangle with both locks
-+ *    held.  Then we unlock dirty_lock and update the mappings.
-+ *    Critical regions that hold only dirty_lock may interfere with
-+ *    that.  This can only be region 1: xenfb_refresh().  But that
-+ *    just extends the dirty rectangle, which can't harm the
-+ *    invariant.
-+ *
-+ * But FIXME: the invariant is too weak.  It misses that the fault
-+ * record in mappings must be consistent with the mapping of pages in
-+ * the associated address space!  do_no_page() updates the PTE after
-+ * xenfb_vm_nopage() returns, i.e. outside the critical region.  This
-+ * allows the following race:
-+ *
-+ * X writes to some address in the Xen frame buffer
-+ * Fault - call do_no_page()
-+ *     call xenfb_vm_nopage()
-+ *         grab mm_lock
-+ *         map->faults++;
-+ *         release mm_lock
-+ *     return back to do_no_page()
-+ * (preempted, or SMP)
-+ * Xen worker thread runs.
-+ *      grab mm_lock
-+ *      look at mappings
-+ *          find this mapping, zaps its pages (but page not in pte yet)
-+ *          clear map->faults
-+ *      releases mm_lock
-+ * (back to X process)
-+ *     put page in X's pte
-+ *
-+ * Oh well, we wont be updating the writes to this page anytime soon.
-+ */
++static int network_close(struct net_device *dev)
++{
++	struct netfront_info *np = netdev_priv(dev);
++	netif_stop_queue(np->netdev);
++	return 0;
++}
 +
-+static int xenfb_fps = 20;
-+static unsigned long xenfb_mem_len = XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8;
 +
-+static int xenfb_remove(struct xenbus_device *);
-+static void xenfb_init_shared_page(struct xenfb_info *);
-+static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
-+static void xenfb_disconnect_backend(struct xenfb_info *);
++static struct net_device_stats *network_get_stats(struct net_device *dev)
++{
++	struct netfront_info *np = netdev_priv(dev);
 +
-+static void xenfb_do_update(struct xenfb_info *info,
-+			    int x, int y, int w, int h)
++	netfront_accelerator_call_get_stats(np, dev);
++	return &np->stats;
++}
++
++static int xennet_set_mac_address(struct net_device *dev, void *p)
 +{
-+	union xenfb_out_event event;
-+	__u32 prod;
++	struct netfront_info *np = netdev_priv(dev);
++	struct sockaddr *addr = p;
 +
-+	event.type = XENFB_TYPE_UPDATE;
-+	event.update.x = x;
-+	event.update.y = y;
-+	event.update.width = w;
-+	event.update.height = h;
++	if (netif_running(dev))
++		return -EBUSY;
 +
-+	prod = info->page->out_prod;
-+	/* caller ensures !xenfb_queue_full() */
-+	mb();			/* ensure ring space available */
-+	XENFB_OUT_RING_REF(info->page, prod) = event;
-+	wmb();			/* ensure ring contents visible */
-+	info->page->out_prod = prod + 1;
++	if (!is_valid_ether_addr(addr->sa_data))
++		return -EADDRNOTAVAIL;
 +
-+	notify_remote_via_irq(info->irq);
++	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
++	memcpy(np->mac, addr->sa_data, ETH_ALEN);
++
++	return 0;
 +}
 +
-+static int xenfb_queue_full(struct xenfb_info *info)
++static int xennet_change_mtu(struct net_device *dev, int mtu)
 +{
-+	__u32 cons, prod;
++	int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
 +
-+	prod = info->page->out_prod;
-+	cons = info->page->out_cons;
-+	return prod - cons == XENFB_OUT_RING_LEN;
++	if (mtu > max)
++		return -EINVAL;
++	dev->mtu = mtu;
++	return 0;
 +}
 +
-+static void xenfb_update_screen(struct xenfb_info *info)
++static int xennet_set_sg(struct net_device *dev, u32 data)
 +{
-+	unsigned long flags;
-+	int y1, y2, x1, x2;
-+	struct xenfb_mapping *map;
++	if (data) {
++		struct netfront_info *np = netdev_priv(dev);
++		int val;
 +
-+	if (!info->update_wanted)
-+		return;
-+	if (xenfb_queue_full(info))
-+		return;
++		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
++				 "%d", &val) < 0)
++			val = 0;
++		if (!val)
++			return -ENOSYS;
++	} else if (dev->mtu > ETH_DATA_LEN)
++		dev->mtu = ETH_DATA_LEN;
 +
-+	mutex_lock(&info->mm_lock);
++	return ethtool_op_set_sg(dev, data);
++}
 +
-+	spin_lock_irqsave(&info->dirty_lock, flags);
-+	y1 = info->y1;
-+	y2 = info->y2;
-+	x1 = info->x1;
-+	x2 = info->x2;
-+	info->x1 = info->y1 = INT_MAX;
-+	info->x2 = info->y2 = 0;
-+	spin_unlock_irqrestore(&info->dirty_lock, flags);
++static int xennet_set_tso(struct net_device *dev, u32 data)
++{
++	if (data) {
++		struct netfront_info *np = netdev_priv(dev);
++		int val;
 +
-+	list_for_each_entry(map, &info->mappings, link) {
-+		if (!map->faults)
-+			continue;
-+		zap_page_range(map->vma, map->vma->vm_start,
-+			       map->vma->vm_end - map->vma->vm_start, NULL);
-+		map->faults = 0;
++		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
++				 "feature-gso-tcpv4", "%d", &val) < 0)
++			val = 0;
++		if (!val)
++			return -ENOSYS;
 +	}
 +
-+	mutex_unlock(&info->mm_lock);
-+
-+	xenfb_do_update(info, x1, y1, x2 - x1, y2 - y1);
++	return ethtool_op_set_tso(dev, data);
 +}
 +
-+static int xenfb_thread(void *data)
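++/*
++ * Reset the device's offload features, then re-enable scatter/gather
++ * and TSO where checksum offload is available and the backend
++ * advertises support.
++ */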
++static void xennet_set_features(struct net_device *dev)
 +{
-+	struct xenfb_info *info = data;
++	dev_disable_gso_features(dev);
++	xennet_set_sg(dev, 0);
 +
-+	while (!kthread_should_stop()) {
-+		if (info->dirty) {
-+			info->dirty = 0;
-+			xenfb_update_screen(info);
-+		}
-+		wait_event_interruptible(info->wq,
-+			kthread_should_stop() || info->dirty);
-+		try_to_freeze();
-+	}
-+	return 0;
++	/* We need checksum offload to enable scatter/gather and TSO. */
++	if (!(dev->features & NETIF_F_IP_CSUM))
++		return;
++
++	if (xennet_set_sg(dev, 1))
++		return;
++
++	/* Before 2.6.9 TSO seems to be unreliable, so do not enable it
++	 * on older kernels.
++	 */
++	if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9))
++		xennet_set_tso(dev, 1);
 +}
 +
-+static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
-+			   unsigned blue, unsigned transp,
-+			   struct fb_info *info)
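++/*
++ * (Re)connect to the backend after probe, resume or migration:
++ * negotiate copy vs. flip receive mode, rebuild the rx ring from the
++ * surviving skbs and restart the tx and rx paths.
++ */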
++static int network_connect(struct net_device *dev)
 +{
-+	u32 v;
++	struct netfront_info *np = netdev_priv(dev);
++	int i, requeue_idx, err;
++	struct sk_buff *skb;
++	grant_ref_t ref;
++	netif_rx_request_t *req;
++	unsigned int feature_rx_copy, feature_rx_flip;
 +
-+	if (regno > info->cmap.len)
-+		return 1;
++	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
++			   "feature-rx-copy", "%u", &feature_rx_copy);
++	if (err != 1)
++		feature_rx_copy = 0;
++	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
++			   "feature-rx-flip", "%u", &feature_rx_flip);
++	if (err != 1)
++		feature_rx_flip = 1;
 +
-+	red   >>= (16 - info->var.red.length);
-+	green >>= (16 - info->var.green.length);
-+	blue  >>= (16 - info->var.blue.length);
++	/*
++	 * Copy packets on receive path if:
++	 *  (a) This was requested by the user, and the backend supports it; or
++	 *  (b) Flipping was requested, but this is unsupported by the backend.
++	 */
++	np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) ||
++				(MODPARM_rx_flip && !feature_rx_flip));
 +
-+	v = (red << info->var.red.offset) |
-+	    (green << info->var.green.offset) |
-+	    (blue << info->var.blue.offset);
++	err = talk_to_backend(np->xbdev, np);
++	if (err)
++		return err;
 +
-+	/* FIXME is this sane?  check against xxxfb_setcolreg()!  */
-+	switch (info->var.bits_per_pixel) {
-+	case 16:
-+	case 24:
-+	case 32:
-+		((u32 *)info->pseudo_palette)[regno] = v;
-+		break;
-+	}
-+	
-+	return 0;
-+}
++	xennet_set_features(dev);
 +
-+static void xenfb_timer(unsigned long data)
-+{
-+	struct xenfb_info *info = (struct xenfb_info *)data;
-+	info->dirty = 1;
-+	wake_up(&info->wq);
-+}
++	DPRINTK("device %s has %sing receive path.\n",
++		dev->name, np->copying_receiver ? "copy" : "flipp");
 +
-+static void __xenfb_refresh(struct xenfb_info *info,
-+			    int x1, int y1, int w, int h)
-+{
-+	int y2, x2;
++	spin_lock_bh(&np->rx_lock);
++	spin_lock_irq(&np->tx_lock);
 +
-+	y2 = y1 + h;
-+	x2 = x1 + w;
++	/*
++	 * Recovery procedure:
++	 *  NB. Freelist index entries are always going to be less than
++	 *  PAGE_OFFSET, whereas pointers to skbs will always be equal or
++	 *  greater than PAGE_OFFSET: we use this property to distinguish
++	 *  them.
++	 */
 +
-+	if (info->y1 > y1)
-+		info->y1 = y1;
-+	if (info->y2 < y2)
-+		info->y2 = y2;
-+	if (info->x1 > x1)
-+		info->x1 = x1;
-+	if (info->x2 < x2)
-+		info->x2 = x2;
++	/* Step 1: Discard all pending TX packet fragments. */
++	netif_release_tx_bufs(np);
 +
-+	if (timer_pending(&info->refresh))
-+		return;
++	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
++	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
++		if (!np->rx_skbs[i])
++			continue;
 +
-+	mod_timer(&info->refresh, jiffies + HZ/xenfb_fps);
-+}
++		skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
++		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
++		req = RING_GET_REQUEST(&np->rx, requeue_idx);
 +
-+static void xenfb_refresh(struct xenfb_info *info,
-+			  int x1, int y1, int w, int h)
-+{
-+	unsigned long flags;
++		if (!np->copying_receiver) {
++			gnttab_grant_foreign_transfer_ref(
++				ref, np->xbdev->otherend_id,
++				page_to_pfn(skb_shinfo(skb)->frags->page));
++		} else {
++			gnttab_grant_foreign_access_ref(
++				ref, np->xbdev->otherend_id,
++				pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->
++						       frags->page)),
++				0);
++		}
++		req->gref = ref;
++		req->id   = requeue_idx;
 +
-+	spin_lock_irqsave(&info->dirty_lock, flags);
-+	__xenfb_refresh(info, x1, y1, w, h);
-+	spin_unlock_irqrestore(&info->dirty_lock, flags);
-+}
++		requeue_idx++;
++	}
 +
-+static void xenfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
-+{
-+	struct xenfb_info *info = p->par;
++	np->rx.req_prod_pvt = requeue_idx;
 +
-+	cfb_fillrect(p, rect);
-+	xenfb_refresh(info, rect->dx, rect->dy, rect->width, rect->height);
-+}
++	/*
++	 * Step 3: All public and private state should now be sane.  Get
++	 * ready to start sending and receiving packets and give the driver
++	 * domain a kick because we've probably just requeued some
++	 * packets.
++	 */
++	netfront_carrier_on(np);
++	notify_remote_via_irq(np->irq);
++	network_tx_buf_gc(dev);
++	network_alloc_rx_buffers(dev);
 +
-+static void xenfb_imageblit(struct fb_info *p, const struct fb_image *image)
-+{
-+	struct xenfb_info *info = p->par;
++	spin_unlock_irq(&np->tx_lock);
++	spin_unlock_bh(&np->rx_lock);
 +
-+	cfb_imageblit(p, image);
-+	xenfb_refresh(info, image->dx, image->dy, image->width, image->height);
++	return 0;
 +}
 +
-+static void xenfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
++static void netif_uninit(struct net_device *dev)
 +{
-+	struct xenfb_info *info = p->par;
-+
-+	cfb_copyarea(p, area);
-+	xenfb_refresh(info, area->dx, area->dy, area->width, area->height);
++	struct netfront_info *np = netdev_priv(dev);
++	netif_release_tx_bufs(np);
++	if (np->copying_receiver)
++		netif_release_rx_bufs_copy(np);
++	else
++		netif_release_rx_bufs_flip(np);
++	gnttab_free_grant_references(np->gref_tx_head);
++	gnttab_free_grant_references(np->gref_rx_head);
 +}
 +
-+static void xenfb_vm_open(struct vm_area_struct *vma)
++static struct ethtool_ops network_ethtool_ops =
 +{
-+	struct xenfb_mapping *map = vma->vm_private_data;
-+	atomic_inc(&map->map_refs);
-+}
++	.get_tx_csum = ethtool_op_get_tx_csum,
++	.set_tx_csum = ethtool_op_set_tx_csum,
++	.get_sg = ethtool_op_get_sg,
++	.set_sg = xennet_set_sg,
++#if HAVE_TSO
++	.get_tso = ethtool_op_get_tso,
++	.set_tso = xennet_set_tso,
++#endif
++	.get_link = ethtool_op_get_link,
++};
 +
-+static void xenfb_vm_close(struct vm_area_struct *vma)
++#ifdef CONFIG_SYSFS
++static ssize_t show_rxbuf_min(struct class_device *cd, char *buf)
 +{
-+	struct xenfb_mapping *map = vma->vm_private_data;
-+	struct xenfb_info *info = map->info;
++	struct net_device *netdev = container_of(cd, struct net_device,
++						 class_dev);
++	struct netfront_info *info = netdev_priv(netdev);
 +
-+	mutex_lock(&info->mm_lock);
-+	if (atomic_dec_and_test(&map->map_refs)) {
-+		list_del(&map->link);
-+		kfree(map);
-+	}
-+	mutex_unlock(&info->mm_lock);
++	return sprintf(buf, "%u\n", info->rx_min_target);
 +}
 +
-+static struct page *xenfb_vm_nopage(struct vm_area_struct *vma,
-+				    unsigned long vaddr, int *type)
++static ssize_t store_rxbuf_min(struct class_device *cd,
++			       const char *buf, size_t len)
 +{
-+	struct xenfb_mapping *map = vma->vm_private_data;
-+	struct xenfb_info *info = map->info;
-+	int pgnr = (vaddr - vma->vm_start) >> PAGE_SHIFT;
-+	unsigned long flags;
-+	struct page *page;
-+	int y1, y2;
++	struct net_device *netdev = container_of(cd, struct net_device,
++						 class_dev);
++	struct netfront_info *np = netdev_priv(netdev);
++	char *endp;
++	unsigned long target;
 +
-+	if (pgnr >= info->nr_pages)
-+		return NOPAGE_SIGBUS;
++	if (!capable(CAP_NET_ADMIN))
++		return -EPERM;
++
++	target = simple_strtoul(buf, &endp, 0);
++	if (endp == buf)
++		return -EBADMSG;
++
++	if (target < RX_MIN_TARGET)
++		target = RX_MIN_TARGET;
++	if (target > RX_MAX_TARGET)
++		target = RX_MAX_TARGET;
++
++	spin_lock_bh(&np->rx_lock);
++	if (target > np->rx_max_target)
++		np->rx_max_target = target;
++	np->rx_min_target = target;
++	if (target > np->rx_target)
++		np->rx_target = target;
 +
-+	mutex_lock(&info->mm_lock);
-+	spin_lock_irqsave(&info->dirty_lock, flags);
-+	page = info->pages[pgnr];
-+	get_page(page);
-+	map->faults++;
++	network_alloc_rx_buffers(netdev);
 +
-+	y1 = pgnr * PAGE_SIZE / info->fb_info->fix.line_length;
-+	y2 = (pgnr * PAGE_SIZE + PAGE_SIZE - 1) / info->fb_info->fix.line_length;
-+	if (y2 > info->fb_info->var.yres)
-+		y2 = info->fb_info->var.yres;
-+	__xenfb_refresh(info, 0, y1, info->fb_info->var.xres, y2 - y1);
-+	spin_unlock_irqrestore(&info->dirty_lock, flags);
-+	mutex_unlock(&info->mm_lock);
++	spin_unlock_bh(&np->rx_lock);
++	return len;
++}
 +
-+	if (type)
-+		*type = VM_FAULT_MINOR;
++static ssize_t show_rxbuf_max(struct class_device *cd, char *buf)
++{
++	struct net_device *netdev = container_of(cd, struct net_device,
++						 class_dev);
++	struct netfront_info *info = netdev_priv(netdev);
 +
-+	return page;
++	return sprintf(buf, "%u\n", info->rx_max_target);
 +}
 +
-+static struct vm_operations_struct xenfb_vm_ops = {
-+	.open	= xenfb_vm_open,
-+	.close	= xenfb_vm_close,
-+	.nopage	= xenfb_vm_nopage,
-+};
-+
-+static int xenfb_mmap(struct fb_info *fb_info, struct vm_area_struct *vma)
++static ssize_t store_rxbuf_max(struct class_device *cd,
++			       const char *buf, size_t len)
 +{
-+	struct xenfb_info *info = fb_info->par;
-+	struct xenfb_mapping *map;
-+	int map_pages;
++	struct net_device *netdev = container_of(cd, struct net_device,
++						 class_dev);
++	struct netfront_info *np = netdev_priv(netdev);
++	char *endp;
++	unsigned long target;
 +
-+	if (!(vma->vm_flags & VM_WRITE))
-+		return -EINVAL;
-+	if (!(vma->vm_flags & VM_SHARED))
-+		return -EINVAL;
-+	if (vma->vm_pgoff != 0)
-+		return -EINVAL;
++	if (!capable(CAP_NET_ADMIN))
++		return -EPERM;
 +
-+	map_pages = (vma->vm_end - vma->vm_start + PAGE_SIZE-1) >> PAGE_SHIFT;
-+	if (map_pages > info->nr_pages)
-+		return -EINVAL;
++	target = simple_strtoul(buf, &endp, 0);
++	if (endp == buf)
++		return -EBADMSG;
 +
-+	map = kzalloc(sizeof(*map), GFP_KERNEL);
-+	if (map == NULL)
-+		return -ENOMEM;
++	if (target < RX_MIN_TARGET)
++		target = RX_MIN_TARGET;
++	if (target > RX_MAX_TARGET)
++		target = RX_MAX_TARGET;
 +
-+	map->vma = vma;
-+	map->faults = 0;
-+	map->info = info;
-+	atomic_set(&map->map_refs, 1);
++	spin_lock_bh(&np->rx_lock);
++	if (target < np->rx_min_target)
++		np->rx_min_target = target;
++	np->rx_max_target = target;
++	if (target < np->rx_target)
++		np->rx_target = target;
 +
-+	mutex_lock(&info->mm_lock);
-+	list_add(&map->link, &info->mappings);
-+	mutex_unlock(&info->mm_lock);
++	network_alloc_rx_buffers(netdev);
 +
-+	vma->vm_ops = &xenfb_vm_ops;
-+	vma->vm_flags |= (VM_DONTEXPAND | VM_RESERVED);
-+	vma->vm_private_data = map;
++	spin_unlock_bh(&np->rx_lock);
++	return len;
++}
 +
-+	return 0;
++static ssize_t show_rxbuf_cur(struct class_device *cd, char *buf)
++{
++	struct net_device *netdev = container_of(cd, struct net_device,
++						 class_dev);
++	struct netfront_info *info = netdev_priv(netdev);
++
++	return sprintf(buf, "%u\n", info->rx_target);
 +}
 +
-+static struct fb_ops xenfb_fb_ops = {
-+	.owner		= THIS_MODULE,
-+	.fb_setcolreg	= xenfb_setcolreg,
-+	.fb_fillrect	= xenfb_fillrect,
-+	.fb_copyarea	= xenfb_copyarea,
-+	.fb_imageblit	= xenfb_imageblit,
-+	.fb_mmap	= xenfb_mmap,
++static const struct class_device_attribute xennet_attrs[] = {
++	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
++	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
++	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
 +};
 +
-+static irqreturn_t xenfb_event_handler(int rq, void *dev_id,
-+				       struct pt_regs *regs)
++static int xennet_sysfs_addif(struct net_device *netdev)
 +{
-+	/*
-+	 * No in events recognized, simply ignore them all.
-+	 * If you need to recognize some, see xenbkd's input_handler()
-+	 * for how to do that.
-+	 */
-+	struct xenfb_info *info = dev_id;
-+	struct xenfb_page *page = info->page;
++	int i;
++	int error = 0;
 +
-+	if (page->in_cons != page->in_prod) {
-+		info->page->in_cons = info->page->in_prod;
-+		notify_remote_via_irq(info->irq);
++	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
++		error = class_device_create_file(&netdev->class_dev,
++						 &xennet_attrs[i]);
++		if (error)
++			goto fail;
 +	}
-+	return IRQ_HANDLED;
-+}
++	return 0;
 +
-+static unsigned long vmalloc_to_mfn(void *address)
-+{
-+	return pfn_to_mfn(vmalloc_to_pfn(address));
++ fail:
++	while (--i >= 0)
++		class_device_remove_file(&netdev->class_dev,
++					 &xennet_attrs[i]);
++	return error;
 +}
 +
-+static int __devinit xenfb_probe(struct xenbus_device *dev,
-+				 const struct xenbus_device_id *id)
++static void xennet_sysfs_delif(struct net_device *netdev)
 +{
-+	struct xenfb_info *info;
-+	struct fb_info *fb_info;
-+	int ret;
++	int i;
 +
-+	info = kzalloc(sizeof(*info), GFP_KERNEL);
-+	if (info == NULL) {
-+		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
-+		return -ENOMEM;
++	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
++		class_device_remove_file(&netdev->class_dev,
++					 &xennet_attrs[i]);
 +	}
-+	dev->dev.driver_data = info;
-+	info->xbdev = dev;
-+	info->irq = -1;
-+	info->x1 = info->y1 = INT_MAX;
-+	spin_lock_init(&info->dirty_lock);
-+	mutex_init(&info->mm_lock);
-+	init_waitqueue_head(&info->wq);
-+	init_timer(&info->refresh);
-+	info->refresh.function = xenfb_timer;
-+	info->refresh.data = (unsigned long)info;
-+	INIT_LIST_HEAD(&info->mappings);
-+
-+	info->fb = vmalloc(xenfb_mem_len);
-+	if (info->fb == NULL)
-+		goto error_nomem;
-+	memset(info->fb, 0, xenfb_mem_len);
-+
-+	info->nr_pages = (xenfb_mem_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-+
-+	info->pages = kmalloc(sizeof(struct page *) * info->nr_pages,
-+			      GFP_KERNEL);
-+	if (info->pages == NULL)
-+		goto error_nomem;
++}
 +
-+	info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
-+	if (!info->mfns)
-+		goto error_nomem;
++#endif /* CONFIG_SYSFS */
 +
-+	/* set up shared page */
-+	info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-+	if (!info->page)
-+		goto error_nomem;
 +
-+	xenfb_init_shared_page(info);
++/*
++ * Nothing to do here. The virtual interface is point-to-point and the
++ * physical interface is probably promiscuous anyway.
++ */
++static void network_set_multicast_list(struct net_device *dev)
++{
++}
 +
-+	fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
-+				/* see fishy hackery below */
-+	if (fb_info == NULL)
-+		goto error_nomem;
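++/*
++ * Allocate and initialise the net_device and its netfront_info:
++ * tx/rx freelists, grant references, refill timer and netdev ops.
++ */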
++static struct net_device * __devinit create_netdev(struct xenbus_device *dev)
++{
++	int i, err = 0;
++	struct net_device *netdev = NULL;
++	struct netfront_info *np = NULL;
 +
-+	/* FIXME fishy hackery */
-+	fb_info->pseudo_palette = fb_info->par;
-+	fb_info->par = info;
-+	/* /FIXME */
-+	fb_info->screen_base = info->fb;
++	netdev = alloc_etherdev(sizeof(struct netfront_info));
++	if (!netdev) {
++		printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
++		       __FUNCTION__);
++		return ERR_PTR(-ENOMEM);
++	}
 +
-+	fb_info->fbops = &xenfb_fb_ops;
-+	fb_info->var.xres_virtual = fb_info->var.xres = info->page->width;
-+	fb_info->var.yres_virtual = fb_info->var.yres = info->page->height;
-+	fb_info->var.bits_per_pixel = info->page->depth;
++	np                   = netdev_priv(netdev);
++	np->xbdev            = dev;
 +
-+	fb_info->var.red = (struct fb_bitfield){16, 8, 0};
-+	fb_info->var.green = (struct fb_bitfield){8, 8, 0};
-+	fb_info->var.blue = (struct fb_bitfield){0, 8, 0};
++	spin_lock_init(&np->tx_lock);
++	spin_lock_init(&np->rx_lock);
 +
-+	fb_info->var.activate = FB_ACTIVATE_NOW;
-+	fb_info->var.height = -1;
-+	fb_info->var.width = -1;
-+	fb_info->var.vmode = FB_VMODE_NONINTERLACED;
++	init_accelerator_vif(np, dev);
 +
-+	fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
-+	fb_info->fix.line_length = info->page->line_length;
-+	fb_info->fix.smem_start = 0;
-+	fb_info->fix.smem_len = xenfb_mem_len;
-+	strcpy(fb_info->fix.id, "xen");
-+	fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
-+	fb_info->fix.accel = FB_ACCEL_NONE;
++	skb_queue_head_init(&np->rx_batch);
++	np->rx_target     = RX_DFL_MIN_TARGET;
++	np->rx_min_target = RX_DFL_MIN_TARGET;
++	np->rx_max_target = RX_MAX_TARGET;
 +
-+	fb_info->flags = FBINFO_FLAG_DEFAULT;
++	init_timer(&np->rx_refill_timer);
++	np->rx_refill_timer.data = (unsigned long)netdev;
++	np->rx_refill_timer.function = rx_refill_timeout;
 +
-+	ret = fb_alloc_cmap(&fb_info->cmap, 256, 0);
-+	if (ret < 0) {
-+		framebuffer_release(fb_info);
-+		xenbus_dev_fatal(dev, ret, "fb_alloc_cmap");
-+		goto error;
++	/* Initialise {tx,rx}_skbs as a free chain containing every entry. */
++	for (i = 0; i <= NET_TX_RING_SIZE; i++) {
++		np->tx_skbs[i] = (void *)((unsigned long) i+1);
++		np->grant_tx_ref[i] = GRANT_INVALID_REF;
 +	}
 +
-+	ret = register_framebuffer(fb_info);
-+	if (ret) {
-+		fb_dealloc_cmap(&info->fb_info->cmap);
-+		framebuffer_release(fb_info);
-+		xenbus_dev_fatal(dev, ret, "register_framebuffer");
-+		goto error;
++	for (i = 0; i < NET_RX_RING_SIZE; i++) {
++		np->rx_skbs[i] = NULL;
++		np->grant_rx_ref[i] = GRANT_INVALID_REF;
 +	}
-+	info->fb_info = fb_info;
 +
-+	/* FIXME should this be delayed until backend XenbusStateConnected? */
-+	info->kthread = kthread_run(xenfb_thread, info, "xenfb thread");
-+	if (IS_ERR(info->kthread)) {
-+		ret = PTR_ERR(info->kthread);
-+		info->kthread = NULL;
-+		xenbus_dev_fatal(dev, ret, "register_framebuffer");
-+		goto error;
++	/* A grant for every tx ring slot */
++	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
++					  &np->gref_tx_head) < 0) {
++		printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
++		err = -ENOMEM;
++		goto exit;
++	}
++	/* A grant for every rx ring slot */
++	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
++					  &np->gref_rx_head) < 0) {
++		printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
++		err = -ENOMEM;
++		goto exit_free_tx;
 +	}
 +
-+	ret = xenfb_connect_backend(dev, info);
-+	if (ret < 0)
-+		goto error;
-+
-+	return 0;
-+
-+ error_nomem:
-+	ret = -ENOMEM;
-+	xenbus_dev_fatal(dev, ret, "allocating device memory");
-+ error:
-+	xenfb_remove(dev);
-+	return ret;
-+}
++	netdev->open            = network_open;
++	netdev->hard_start_xmit = network_start_xmit;
++	netdev->stop            = network_close;
++	netdev->get_stats       = network_get_stats;
++	netdev->poll            = netif_poll;
++	netdev->set_multicast_list = network_set_multicast_list;
++	netdev->uninit          = netif_uninit;
++	netdev->set_mac_address	= xennet_set_mac_address;
++	netdev->change_mtu	= xennet_change_mtu;
++	netdev->weight          = 64;
++	netdev->features        = NETIF_F_IP_CSUM;
 +
-+static int xenfb_resume(struct xenbus_device *dev)
-+{
-+	struct xenfb_info *info = dev->dev.driver_data;
++	SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
++	SET_MODULE_OWNER(netdev);
++	SET_NETDEV_DEV(netdev, &dev->dev);
 +
-+	xenfb_disconnect_backend(info);
-+	xenfb_init_shared_page(info);
-+	return xenfb_connect_backend(dev, info);
-+}
++	np->netdev = netdev;
 +
-+static int xenfb_remove(struct xenbus_device *dev)
-+{
-+	struct xenfb_info *info = dev->dev.driver_data;
++	netfront_carrier_off(np);
 +
-+	del_timer(&info->refresh);
-+	if (info->kthread)
-+		kthread_stop(info->kthread);
-+	xenfb_disconnect_backend(info);
-+	if (info->fb_info) {
-+		unregister_framebuffer(info->fb_info);
-+		fb_dealloc_cmap(&info->fb_info->cmap);
-+		framebuffer_release(info->fb_info);
-+	}
-+	free_page((unsigned long)info->page);
-+	vfree(info->mfns);
-+	kfree(info->pages);
-+	vfree(info->fb);
-+	kfree(info);
++	return netdev;
 +
-+	return 0;
++ exit_free_tx:
++	gnttab_free_grant_references(np->gref_tx_head);
++ exit:
++	free_netdev(netdev);
++	return ERR_PTR(err);
 +}
 +
-+static void xenfb_init_shared_page(struct xenfb_info *info)
++#ifdef CONFIG_INET
++/*
++ * We use this notifier to send out a fake ARP reply to reset switches and
++ * router ARP caches when an IP interface is brought up on a VIF.
++ */
++static int
++inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
 +{
-+	int i;
-+
-+	for (i = 0; i < info->nr_pages; i++)
-+		info->pages[i] = vmalloc_to_page(info->fb + i * PAGE_SIZE);
++	struct in_ifaddr  *ifa = (struct in_ifaddr *)ptr;
++	struct net_device *dev = ifa->ifa_dev->dev;
 +
-+	for (i = 0; i < info->nr_pages; i++)
-+		info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);
++	/* Is this an UP event on one of our devices? */
++	if (event == NETDEV_UP && dev->open == network_open)
++		send_fake_arp(dev);
 +
-+	info->page->pd[0] = vmalloc_to_mfn(info->mfns);
-+	info->page->pd[1] = 0;
-+	info->page->width = XENFB_WIDTH;
-+	info->page->height = XENFB_HEIGHT;
-+	info->page->depth = XENFB_DEPTH;
-+	info->page->line_length = (info->page->depth / 8) * info->page->width;
-+	info->page->mem_length = xenfb_mem_len;
-+	info->page->in_cons = info->page->in_prod = 0;
-+	info->page->out_cons = info->page->out_prod = 0;
++	return NOTIFY_DONE;
 +}
 +
-+static int xenfb_connect_backend(struct xenbus_device *dev,
-+				 struct xenfb_info *info)
-+{
-+	int ret;
-+	struct xenbus_transaction xbt;
++static struct notifier_block notifier_inetdev = {
++	.notifier_call  = inetdev_notify,
++	.next           = NULL,
++	.priority       = 0
++};
++#endif
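
A sketch of the idea behind send_fake_arp(), whose real body lives
earlier in netfront.c and is not shown here: broadcast an unsolicited
ARP reply for the interface's own address so that switches and peer ARP
caches relearn which port the MAC now sits behind (the demo_ name and
the exact arp_send() arguments are assumptions):

#include <net/arp.h>

static void demo_fake_arp(struct net_device *dev, __be32 ip)
{
	/* A NULL dest_hw makes arp_send() use the broadcast address. */
	arp_send(ARPOP_REPLY, ETH_P_ARP, ip, dev, ip,
		 NULL, dev->dev_addr, dev->dev_addr);
}
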
 +
-+	ret = bind_listening_port_to_irqhandler(
-+		dev->otherend_id, xenfb_event_handler, 0, "xenfb", info);
-+	if (ret < 0) {
-+		xenbus_dev_fatal(dev, ret,
-+				 "bind_listening_port_to_irqhandler");
-+		return ret;
-+	}
-+	info->irq = ret;
 +
-+ again:
-+	ret = xenbus_transaction_start(&xbt);
-+	if (ret) {
-+		xenbus_dev_fatal(dev, ret, "starting transaction");
-+		return ret;
-+	}
-+	ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
-+			    virt_to_mfn(info->page));
-+	if (ret)
-+		goto error_xenbus;
-+	ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
-+			    irq_to_evtchn_port(info->irq));
-+	if (ret)
-+		goto error_xenbus;
-+	ret = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
-+			    XEN_IO_PROTO_ABI_NATIVE);
-+	if (ret)
-+		goto error_xenbus;
-+	ret = xenbus_printf(xbt, dev->nodename, "feature-update", "1");
-+	if (ret)
-+		goto error_xenbus;
-+	ret = xenbus_transaction_end(xbt, 0);
-+	if (ret) {
-+		if (ret == -EAGAIN)
-+			goto again;
-+		xenbus_dev_fatal(dev, ret, "completing transaction");
-+		return ret;
-+	}
++static void netif_disconnect_backend(struct netfront_info *info)
++{
++	/* Stop old i/f to prevent errors whilst we rebuild the state. */
++	spin_lock_bh(&info->rx_lock);
++	spin_lock_irq(&info->tx_lock);
++	netfront_carrier_off(info);
++	spin_unlock_irq(&info->tx_lock);
++	spin_unlock_bh(&info->rx_lock);
 +
-+	xenbus_switch_state(dev, XenbusStateInitialised);
-+	return 0;
++	if (info->irq)
++		unbind_from_irqhandler(info->irq, info->netdev);
++	info->irq = 0;
 +
-+ error_xenbus:
-+	xenbus_transaction_end(xbt, 1);
-+	xenbus_dev_fatal(dev, ret, "writing xenstore");
-+	return ret;
++	end_access(info->tx_ring_ref, info->tx.sring);
++	end_access(info->rx_ring_ref, info->rx.sring);
++	info->tx_ring_ref = GRANT_INVALID_REF;
++	info->rx_ring_ref = GRANT_INVALID_REF;
++	info->tx.sring = NULL;
++	info->rx.sring = NULL;
 +}
 +
-+static void xenfb_disconnect_backend(struct xenfb_info *info)
-+{
-+	if (info->irq >= 0)
-+		unbind_from_irqhandler(info->irq, info);
-+	info->irq = -1;
-+}
 +
-+static void xenfb_backend_changed(struct xenbus_device *dev,
-+				  enum xenbus_state backend_state)
++static void end_access(int ref, void *page)
 +{
-+	struct xenfb_info *info = dev->dev.driver_data;
-+	int val;
-+
-+	switch (backend_state) {
-+	case XenbusStateInitialising:
-+	case XenbusStateInitialised:
-+	case XenbusStateUnknown:
-+	case XenbusStateClosed:
-+		break;
-+
-+	case XenbusStateInitWait:
-+	InitWait:
-+		xenbus_switch_state(dev, XenbusStateConnected);
-+		break;
++	if (ref != GRANT_INVALID_REF)
++		gnttab_end_foreign_access(ref, (unsigned long)page);
++}
 +
-+	case XenbusStateConnected:
-+		/*
-+		 * Work around xenbus race condition: If backend goes
-+		 * through InitWait to Connected fast enough, we can
-+		 * get Connected twice here.
-+		 */
-+		if (dev->state != XenbusStateConnected)
-+			goto InitWait; /* no InitWait seen yet, fudge it */
 +
-+		if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-+				 "request-update", "%d", &val) < 0)
-+			val = 0;
-+		if (val)
-+			info->update_wanted = 1;
-+		break;
++/* ** Driver registration ** */
 +
-+	case XenbusStateClosing:
-+		// FIXME is this safe in any dev->state?
-+		xenbus_frontend_closed(dev);
-+		break;
-+	}
-+}
 +
-+static struct xenbus_device_id xenfb_ids[] = {
-+	{ "vfb" },
++static const struct xenbus_device_id netfront_ids[] = {
++	{ "vif" },
 +	{ "" }
 +};
-+MODULE_ALIAS("xen:vfb");
++MODULE_ALIAS("xen:vif");
 +
-+static struct xenbus_driver xenfb = {
-+	.name = "vfb",
++
++static struct xenbus_driver netfront_driver = {
++	.name = "vif",
 +	.owner = THIS_MODULE,
-+	.ids = xenfb_ids,
-+	.probe = xenfb_probe,
-+	.remove = xenfb_remove,
-+	.resume = xenfb_resume,
-+	.otherend_changed = xenfb_backend_changed,
++	.ids = netfront_ids,
++	.probe = netfront_probe,
++	.remove = __devexit_p(netfront_remove),
++	.suspend = netfront_suspend,
++	.suspend_cancel = netfront_suspend_cancel,
++	.resume = netfront_resume,
++	.otherend_changed = backend_changed,
 +};
 +
-+static int __init xenfb_init(void)
++
++static int __init netif_init(void)
 +{
 +	if (!is_running_on_xen())
 +		return -ENODEV;
 +
-+	/* Nothing to do if running in dom0. */
-+	if (is_initial_xendomain())
-+		return -ENODEV;
++#ifdef CONFIG_XEN
++	if (MODPARM_rx_flip && MODPARM_rx_copy) {
++		WPRINTK("Cannot specify both rx_copy and rx_flip.\n");
++		return -EINVAL;
++	}
++
++	if (!MODPARM_rx_flip && !MODPARM_rx_copy)
++		MODPARM_rx_flip = 1; /* Default is to flip. */
++#endif
++
++	netif_init_accel();
++
++	IPRINTK("Initialising virtual ethernet driver.\n");
++
++#ifdef CONFIG_INET
++	(void)register_inetaddr_notifier(&notifier_inetdev);
++#endif
 +
-+	return xenbus_register_frontend(&xenfb);
++	return xenbus_register_frontend(&netfront_driver);
 +}
++module_init(netif_init);
 +
-+static void __exit xenfb_cleanup(void)
++
++static void __exit netif_exit(void)
 +{
-+	return xenbus_unregister_driver(&xenfb);
-+}
++#ifdef CONFIG_INET
++	unregister_inetaddr_notifier(&notifier_inetdev);
++#endif
 +
-+module_init(xenfb_init);
-+module_exit(xenfb_cleanup);
++	netif_exit_accel();
 +
-+MODULE_LICENSE("GPL");
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/fbfront/xenkbd.c tmp-linux-2.6-xen.patch/drivers/xen/fbfront/xenkbd.c
---- pristine-linux-2.6.18.2/drivers/xen/fbfront/xenkbd.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/fbfront/xenkbd.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,334 @@
-+/*
-+ * linux/drivers/input/keyboard/xenkbd.c -- Xen para-virtual input device
++	return xenbus_unregister_driver(&netfront_driver);
++}
++module_exit(netif_exit);
++
++MODULE_LICENSE("Dual BSD/GPL");
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/netfront/netfront.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/netfront/netfront.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,274 @@
++/******************************************************************************
++ * Virtual network driver for conversing with remote driver backends.
 + *
-+ * Copyright (C) 2005 Anthony Liguori <aliguori at us.ibm.com>
-+ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru at redhat.com>
++ * Copyright (c) 2002-2005, K A Fraser
++ * Copyright (c) 2005, XenSource Ltd
++ * Copyright (C) 2007 Solarflare Communications, Inc.
 + *
-+ *  Based on linux/drivers/input/mouse/sermouse.c
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
 + *
-+ *  This file is subject to the terms and conditions of the GNU General Public
-+ *  License. See the file COPYING in the main directory of this archive for
-+ *  more details.
-+ */
-+
-+/*
-+ * TODO:
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
 + *
-+ * Switch to grant tables together with xenfb.c.
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
 + */
 +
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/module.h>
-+#include <linux/input.h>
-+#include <asm/hypervisor.h>
-+#include <xen/evtchn.h>
-+#include <xen/interface/io/fbif.h>
-+#include <xen/interface/io/kbdif.h>
++#ifndef NETFRONT_H
++#define NETFRONT_H
++
++#include <xen/interface/io/netif.h>
++#include <linux/netdevice.h>
++#include <linux/skbuff.h>
++#include <linux/list.h>
++
++#define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE)
++#define NET_RX_RING_SIZE __RING_SIZE((struct netif_rx_sring *)0, PAGE_SIZE)
++
 +#include <xen/xenbus.h>
 +
-+struct xenkbd_info
-+{
-+	struct input_dev *kbd;
-+	struct input_dev *ptr;
-+	struct xenkbd_page *page;
-+	int irq;
-+	struct xenbus_device *xbdev;
-+	char phys[32];
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++/* 
++ * Function pointer table for hooks into a network acceleration
++ * plugin.  These are called at appropriate points from the netfront
++ * driver 
++ */
++struct netfront_accel_hooks {
++	/* 
++	 * new_device: Accelerator hook to ask the plugin to support a
++	 * new network interface
++	 */
++	int (*new_device)(struct net_device *net_dev, struct xenbus_device *dev);
++	/*
++	 * remove: Opposite of new_device
++	 */
++	int (*remove)(struct xenbus_device *dev);
++	/*
++	 * The net_device is being polled, check the accelerated
++	 * hardware for any pending packets
++	 */
++	int (*netdev_poll)(struct net_device *dev, int *pbudget);
++	/*
++	 * start_xmit: Used to give the accelerated plugin the option
++	 * of sending a packet.  Returns non-zero if it has done so, or
++	 * zero to decline and force the packet onto the normal send
++	 * path.
++	 */
++	int (*start_xmit)(struct sk_buff *skb, struct net_device *dev);
++	/* 
++	 * start/stop_napi_irq: Used by netfront to indicate
++	 * when napi interrupts should be enabled or disabled 
++	 */
++	int (*start_napi_irq)(struct net_device *dev);
++	void (*stop_napi_irq)(struct net_device *dev);
++	/* 
++	 * Called before re-enabling the TX queue to check the fast
++	 * path has slots too
++	 */
++	int (*check_ready)(struct net_device *dev);
++	/*
++	 * Get the fastpath network statistics
++	 */
++	int (*get_stats)(struct net_device *dev,
++			 struct net_device_stats *stats);
 +};
 +
-+static int xenkbd_remove(struct xenbus_device *);
-+static int xenkbd_connect_backend(struct xenbus_device *, struct xenkbd_info *);
-+static void xenkbd_disconnect_backend(struct xenkbd_info *);
 +
-+/*
-+ * Note: if you need to send out events, see xenfb_do_update() for how
-+ * to do that.
-+ */
++/* Version of the API/protocol supported for communication between
++   netfront and an acceleration plugin */
++#define NETFRONT_ACCEL_VERSION 0x00010003
 +
-+static irqreturn_t input_handler(int rq, void *dev_id, struct pt_regs *regs)
-+{
-+	struct xenkbd_info *info = dev_id;
-+	struct xenkbd_page *page = info->page;
-+	__u32 cons, prod;
++/* 
++ * Per-netfront device state for the accelerator.  This is used to
++ * allow efficient per-netfront device access to the accelerator
++ * hooks 
++ */
++struct netfront_accel_vif_state {
++	struct list_head link;
 +
-+	prod = page->in_prod;
-+	if (prod == page->out_cons)
-+		return IRQ_HANDLED;
-+	rmb();			/* ensure we see ring contents up to prod */
-+	for (cons = page->in_cons; cons != prod; cons++) {
-+		union xenkbd_in_event *event;
-+		struct input_dev *dev;
-+		event = &XENKBD_IN_RING_REF(page, cons);
++	struct xenbus_device *dev;
++	struct netfront_info *np;
++	struct netfront_accel_hooks *hooks;
 +
-+		dev = info->ptr;
-+		switch (event->type) {
-+		case XENKBD_TYPE_MOTION:
-+			input_report_rel(dev, REL_X, event->motion.rel_x);
-+			input_report_rel(dev, REL_Y, event->motion.rel_y);
-+			break;
-+		case XENKBD_TYPE_KEY:
-+			dev = NULL;
-+			if (test_bit(event->key.keycode, info->kbd->keybit))
-+				dev = info->kbd;
-+			if (test_bit(event->key.keycode, info->ptr->keybit))
-+				dev = info->ptr;
-+			if (dev)
-+				input_report_key(dev, event->key.keycode,
-+						 event->key.pressed);
-+			else
-+				printk("xenkbd: unhandled keycode 0x%x\n",
-+				       event->key.keycode);
-+			break;
-+		case XENKBD_TYPE_POS:
-+			input_report_abs(dev, ABS_X, event->pos.abs_x);
-+			input_report_abs(dev, ABS_Y, event->pos.abs_y);
-+			break;
-+		}
-+		if (dev)
-+			input_sync(dev);
-+	}
-+	mb();			/* ensure we got ring contents */
-+	page->in_cons = cons;
-+	notify_remote_via_irq(info->irq);
++	/* Watch on the accelerator configuration value */
++	struct xenbus_watch accel_watch;
++	/* Work item to process change in accelerator */
++	struct work_struct accel_work;
++	/* The string from xenbus last time accel_watch fired */
++	char *accel_frontend;
++}; 
 +
-+	return IRQ_HANDLED;
-+}
++/* 
++ * Per-accelerator state stored in netfront.  These form a list that
++ * is used to track which devices are accelerated by which plugins,
++ * and what plugins are available/have been requested 
++ */
++struct netfront_accelerator {
++	/* Used to make a list */
++	struct list_head link;
++	/* ID of the accelerator */
++	int id;
++	/*
++	 * String describing the accelerator.  Currently this is the
++	 * name of the accelerator module.  This is provided by the
++	 * backend accelerator through xenstore 
++	 */
++	char *frontend;
++	/* The hooks into the accelerator plugin module */
++	struct netfront_accel_hooks *hooks;
 +
-+int __devinit xenkbd_probe(struct xenbus_device *dev,
-+			   const struct xenbus_device_id *id)
-+{
-+	int ret, i;
-+	struct xenkbd_info *info;
-+	struct input_dev *kbd, *ptr;
++	/* 
++	 * List of per-netfront device state (struct
++	 * netfront_accel_vif_state) for each netfront device that is
++	 * using this accelerator
++	 */
++	struct list_head vif_states;
++	spinlock_t vif_states_lock;
++};
 +
-+	info = kzalloc(sizeof(*info), GFP_KERNEL);
-+	if (!info) {
-+		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
-+		return -ENOMEM;
-+	}
-+	dev->dev.driver_data = info;
-+	info->xbdev = dev;
-+	snprintf(info->phys, sizeof(info->phys), "xenbus/%s", dev->nodename);
++struct netfront_info {
++	struct list_head list;
++	struct net_device *netdev;
 +
-+	info->page = (void *)__get_free_page(GFP_KERNEL);
-+	if (!info->page)
-+		goto error_nomem;
-+	info->page->in_cons = info->page->in_prod = 0;
-+	info->page->out_cons = info->page->out_prod = 0;
++	struct net_device_stats stats;
 +
-+	/* keyboard */
-+	kbd = input_allocate_device();
-+	if (!kbd)
-+		goto error_nomem;
-+	kbd->name = "Xen Virtual Keyboard";
-+	kbd->phys = info->phys;
-+	kbd->id.bustype = BUS_PCI;
-+	kbd->id.vendor = 0x5853;
-+	kbd->id.product = 0xffff;
-+	kbd->evbit[0] = BIT(EV_KEY);
-+	for (i = KEY_ESC; i < KEY_UNKNOWN; i++)
-+		set_bit(i, kbd->keybit);
-+	for (i = KEY_OK; i < KEY_MAX; i++)
-+		set_bit(i, kbd->keybit);
++	struct netif_tx_front_ring tx;
++	struct netif_rx_front_ring rx;
 +
-+	ret = input_register_device(kbd);
-+	if (ret) {
-+		input_free_device(kbd);
-+		xenbus_dev_fatal(dev, ret, "input_register_device(kbd)");
-+		goto error;
-+	}
-+	info->kbd = kbd;
++	spinlock_t   tx_lock;
++	spinlock_t   rx_lock;
 +
-+	/* pointing device */
-+	ptr = input_allocate_device();
-+	if (!ptr)
-+		goto error_nomem;
-+	ptr->name = "Xen Virtual Pointer";
-+	ptr->phys = info->phys;
-+	ptr->id.bustype = BUS_PCI;
-+	ptr->id.vendor = 0x5853;
-+	ptr->id.product = 0xfffe;
-+	ptr->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS);
-+	for (i = BTN_LEFT; i <= BTN_TASK; i++)
-+		set_bit(i, ptr->keybit);
-+	ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y);
-+	input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0);
-+	input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0);
++	unsigned int irq;
++	unsigned int copying_receiver;
++	unsigned int carrier;
 +
-+	ret = input_register_device(ptr);
-+	if (ret) {
-+		input_free_device(ptr);
-+		xenbus_dev_fatal(dev, ret, "input_register_device(ptr)");
-+		goto error;
-+	}
-+	info->ptr = ptr;
++	/* Receive-ring batched refills. */
++#define RX_MIN_TARGET 8
++#define RX_DFL_MIN_TARGET 64
++#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
++	unsigned rx_min_target, rx_max_target, rx_target;
++	struct sk_buff_head rx_batch;
 +
-+	ret = xenkbd_connect_backend(dev, info);
-+	if (ret < 0)
-+		goto error;
++	struct timer_list rx_refill_timer;
 +
-+	return 0;
++	/*
++	 * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs
++	 * is an index into a chain of free entries.
++	 */
++	struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1];
++	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
 +
-+ error_nomem:
-+	ret = -ENOMEM;
-+	xenbus_dev_fatal(dev, ret, "allocating device memory");
-+ error:
-+	xenkbd_remove(dev);
-+	return ret;
-+}
++#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
++	grant_ref_t gref_tx_head;
++	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
++	grant_ref_t gref_rx_head;
++	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
 +
-+static int xenkbd_resume(struct xenbus_device *dev)
-+{
-+	struct xenkbd_info *info = dev->dev.driver_data;
++	struct xenbus_device *xbdev;
++	int tx_ring_ref;
++	int rx_ring_ref;
++	u8 mac[ETH_ALEN];
 +
-+	xenkbd_disconnect_backend(info);
-+	return xenkbd_connect_backend(dev, info);
-+}
++	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
++	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
++	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
 +
-+static int xenkbd_remove(struct xenbus_device *dev)
-+{
-+	struct xenkbd_info *info = dev->dev.driver_data;
++	/* Private pointer to state internal to accelerator module */
++	void *accel_priv;
++	/* The accelerator used by this netfront device */
++	struct netfront_accelerator *accelerator;
++	/* The accelerator state for this netfront device */
++	struct netfront_accel_vif_state accel_vif_state;
++};
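
The tx_skbs comment above describes a compact freelist: unused slots do
double duty as links, with slot 0 acting as the head, so no separate
freelist structure is needed.  A sketch of the idiom (the demo_ helpers
are invented; netfront's own helpers in netfront.c follow this pattern):

static inline void demo_add_id_to_freelist(struct sk_buff **list,
					   unsigned short id)
{
	list[id] = list[0];			/* chain old head behind us */
	list[0]  = (void *)(unsigned long)id;	/* we are the new head */
}

static inline unsigned short demo_get_id_from_freelist(struct sk_buff **list)
{
	unsigned short id = (unsigned short)(unsigned long)list[0];

	list[0] = list[id];			/* unlink */
	return id;
}
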
 +
-+	xenkbd_disconnect_backend(info);
-+	input_unregister_device(info->kbd);
-+	input_unregister_device(info->ptr);
-+	free_page((unsigned long)info->page);
-+	kfree(info);
-+	return 0;
-+}
 +
-+static int xenkbd_connect_backend(struct xenbus_device *dev,
-+				  struct xenkbd_info *info)
-+{
-+	int ret;
-+	struct xenbus_transaction xbt;
++/* Exported Functions */
 +
-+	ret = bind_listening_port_to_irqhandler(
-+		dev->otherend_id, input_handler, 0, "xenkbd", info);
-+	if (ret < 0) {
-+		xenbus_dev_fatal(dev, ret,
-+				 "bind_listening_port_to_irqhandler");
-+		return ret;
-+	}
-+	info->irq = ret;
++/*
++ * Called by an accelerator plugin module when it has loaded.
++ *
++ * frontend: the string describing the accelerator, currently the module name 
++ * hooks: the hooks for netfront to use to call into the accelerator
++ * version: the version of API between frontend and plugin requested
++ * 
++ * return: 0 on success, <0 on error, >0 (with version supported) on
++ * version mismatch
++ */
++extern int netfront_accelerator_loaded(int version, const char *frontend, 
++				       struct netfront_accel_hooks *hooks);
 +
-+ again:
-+	ret = xenbus_transaction_start(&xbt);
-+	if (ret) {
-+		xenbus_dev_fatal(dev, ret, "starting transaction");
-+		return ret;
-+	}
-+	ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
-+			    virt_to_mfn(info->page));
-+	if (ret)
-+		goto error_xenbus;
-+	ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
-+			    irq_to_evtchn_port(info->irq));
-+	if (ret)
-+		goto error_xenbus;
-+	ret = xenbus_transaction_end(xbt, 0);
-+	if (ret) {
-+		if (ret == -EAGAIN)
-+			goto again;
-+		xenbus_dev_fatal(dev, ret, "completing transaction");
-+		return ret;
-+	}
++/* 
++ * Called by an accelerator plugin module when it is about to unload.
++ *
++ * frontend: the string describing the accelerator.  Must match the
++ * one passed to netfront_accelerator_loaded()
++ */ 
++extern void netfront_accelerator_stop(const char *frontend);
 +
-+	xenbus_switch_state(dev, XenbusStateInitialised);
-+	return 0;
++/* 
++ * Called by an accelerator before waking the net device's TX queue to
++ * ensure the slow path has available slots.  Returns true if OK to
++ * wake, false if still busy 
++ */
++extern int netfront_check_queue_ready(struct net_device *net_dev);
 +
-+ error_xenbus:
-+	xenbus_transaction_end(xbt, 1);
-+	xenbus_dev_fatal(dev, ret, "writing xenstore");
-+	return ret;
-+}
 +
-+static void xenkbd_disconnect_backend(struct xenkbd_info *info)
-+{
-+	if (info->irq >= 0)
-+		unbind_from_irqhandler(info->irq, info);
-+	info->irq = -1;
-+}
++/* Internal-to-netfront Functions */
 +
-+static void xenkbd_backend_changed(struct xenbus_device *dev,
-+				   enum xenbus_state backend_state)
-+{
-+	struct xenkbd_info *info = dev->dev.driver_data;
-+	int ret, val;
++/* 
++ * Call into accelerator and check to see if it has tx space before we
++ * wake the net device's TX queue.  Returns true if OK to wake, false
++ * if still busy
++ */ 
++extern 
++int netfront_check_accelerator_queue_ready(struct net_device *dev,
++					   struct netfront_info *np);
++extern
++int netfront_accelerator_call_remove(struct netfront_info *np,
++				     struct xenbus_device *dev);
++extern
++int netfront_accelerator_suspend(struct netfront_info *np,
++				 struct xenbus_device *dev);
++extern
++int netfront_accelerator_suspend_cancel(struct netfront_info *np,
++					struct xenbus_device *dev);
++extern
++void netfront_accelerator_resume(struct netfront_info *np,
++				 struct xenbus_device *dev);
++extern
++void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np,
++					     struct net_device *dev);
++extern
++int netfront_accelerator_call_get_stats(struct netfront_info *np,
++					struct net_device *dev);
++extern
++void netfront_accelerator_add_watch(struct netfront_info *np);
++
++extern
++void netif_init_accel(void);
++extern
++void netif_exit_accel(void);
++
++extern
++void init_accelerator_vif(struct netfront_info *np,
++			  struct xenbus_device *dev);
++#endif /* NETFRONT_H */
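
The header above is the complete contract between netfront and an
acceleration plugin.  A minimal sketch of how a plugin module would use
it (all demo_ names are invented and the hook bodies are elided):

static struct netfront_accel_hooks demo_hooks = {
	.new_device	= demo_new_device,
	.remove		= demo_remove,
	.netdev_poll	= demo_netdev_poll,
	.start_xmit	= demo_start_xmit,
	.start_napi_irq	= demo_start_napi_irq,
	.stop_napi_irq	= demo_stop_napi_irq,
	.check_ready	= demo_check_ready,
	.get_stats	= demo_get_stats,
};

static int __init demo_accel_init(void)
{
	int err = netfront_accelerator_loaded(NETFRONT_ACCEL_VERSION,
					      "demo_accel", &demo_hooks);

	/* A positive return reports the version netfront does support. */
	return err > 0 ? -EPROTO : err;
}
module_init(demo_accel_init);

static void __exit demo_accel_exit(void)
{
	netfront_accelerator_stop("demo_accel");
}
module_exit(demo_accel_exit);
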
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/pciback/Makefile
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/pciback/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,17 @@
++obj-$(CONFIG_XEN_PCIDEV_BACKEND) += pciback.o
 +
-+	switch (backend_state) {
-+	case XenbusStateInitialising:
-+	case XenbusStateInitialised:
-+	case XenbusStateUnknown:
-+	case XenbusStateClosed:
-+		break;
++pciback-y := pci_stub.o pciback_ops.o xenbus.o
++pciback-y += conf_space.o conf_space_header.o \
++	     conf_space_capability.o \
++	     conf_space_capability_vpd.o \
++	     conf_space_capability_pm.o \
++             conf_space_quirks.o
++pciback-$(CONFIG_PCI_MSI) += conf_space_capability_msi.o
++pciback-$(CONFIG_XEN_PCIDEV_BACKEND_VPCI) += vpci.o
++pciback-$(CONFIG_XEN_PCIDEV_BACKEND_SLOT) += slot.o
++pciback-$(CONFIG_XEN_PCIDEV_BACKEND_PASS) += passthrough.o
++pciback-$(CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER) += controller.o
 +
-+	case XenbusStateInitWait:
-+	InitWait:
-+		ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-+				   "feature-abs-pointer", "%d", &val);
-+		if (ret < 0)
-+			val = 0;
-+		if (val) {
-+			ret = xenbus_printf(XBT_NIL, info->xbdev->nodename,
-+					    "request-abs-pointer", "1");
-+			if (ret)
-+				; /* FIXME */
-+		}
-+		xenbus_switch_state(dev, XenbusStateConnected);
-+		break;
++ifeq ($(CONFIG_XEN_PCIDEV_BE_DEBUG),y)
++EXTRA_CFLAGS += -DDEBUG
++endif
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/pciback/conf_space.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/pciback/conf_space.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,426 @@
++/*
++ * PCI Backend - Functions for creating a virtual configuration space for
++ *               exported PCI Devices.
++ *               It's dangerous to allow PCI Driver Domains to change their
++ *               device's resources (memory, i/o ports, interrupts). We need to
++ *               restrict changes to certain PCI Configuration registers:
++ *               BARs, INTERRUPT_PIN, most registers in the header...
++ *
++ * Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
++ */
 +
-+	case XenbusStateConnected:
-+		/*
-+		 * Work around xenbus race condition: If backend goes
-+		 * through InitWait to Connected fast enough, we can
-+		 * get Connected twice here.
-+		 */
-+		if (dev->state != XenbusStateConnected)
-+			goto InitWait; /* no InitWait seen yet, fudge it */
-+		break;
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include "pciback.h"
++#include "conf_space.h"
++#include "conf_space_quirks.h"
 +
-+	case XenbusStateClosing:
-+		xenbus_frontend_closed(dev);
-+		break;
-+	}
++#define DEFINE_PCI_CONFIG(op,size,type) 			\
++int pciback_##op##_config_##size 				\
++(struct pci_dev *dev, int offset, type value, void *data)	\
++{								\
++	return pci_##op##_config_##size (dev, offset, value);	\
 +}
 +
-+static struct xenbus_device_id xenkbd_ids[] = {
-+	{ "vkbd" },
-+	{ "" }
-+};
-+MODULE_ALIAS("xen:vkbd");
++DEFINE_PCI_CONFIG(read, byte, u8 *)
++DEFINE_PCI_CONFIG(read, word, u16 *)
++DEFINE_PCI_CONFIG(read, dword, u32 *)
 +
-+static struct xenbus_driver xenkbd = {
-+	.name = "vkbd",
-+	.owner = THIS_MODULE,
-+	.ids = xenkbd_ids,
-+	.probe = xenkbd_probe,
-+	.remove = xenkbd_remove,
-+	.resume = xenkbd_resume,
-+	.otherend_changed = xenkbd_backend_changed,
-+};
++DEFINE_PCI_CONFIG(write, byte, u8)
++DEFINE_PCI_CONFIG(write, word, u16)
++DEFINE_PCI_CONFIG(write, dword, u32)
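
For readers unpicking the token pasting: each instantiation above
generates a thin wrapper that adapts the kernel's pci_*_config_*
accessors to the conf_space hook signatures, ignoring the trailing
void *data argument.  The first read instantiation, for example,
expands to:

int pciback_read_config_byte(struct pci_dev *dev, int offset,
			     u8 *value, void *data)
{
	return pci_read_config_byte(dev, offset, value);
}
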
 +
-+static int __init xenkbd_init(void)
++static int conf_space_read(struct pci_dev *dev,
++			   struct config_field_entry *entry, int offset,
++			   u32 * value)
 +{
-+	if (!is_running_on_xen())
-+		return -ENODEV;
++	int ret = 0;
++	struct config_field *field = entry->field;
 +
-+	/* Nothing to do if running in dom0. */
-+	if (is_initial_xendomain())
-+		return -ENODEV;
++	*value = 0;
 +
-+	return xenbus_register_frontend(&xenkbd);
++	switch (field->size) {
++	case 1:
++		if (field->u.b.read)
++			ret = field->u.b.read(dev, offset, (u8 *) value,
++					      entry->data);
++		break;
++	case 2:
++		if (field->u.w.read)
++			ret = field->u.w.read(dev, offset, (u16 *) value,
++					      entry->data);
++		break;
++	case 4:
++		if (field->u.dw.read)
++			ret = field->u.dw.read(dev, offset, value, entry->data);
++		break;
++	}
++	return ret;
 +}
 +
-+static void __exit xenkbd_cleanup(void)
++static int conf_space_write(struct pci_dev *dev,
++			    struct config_field_entry *entry, int offset,
++			    u32 value)
 +{
-+	return xenbus_unregister_driver(&xenkbd);
-+}
-+
-+module_init(xenkbd_init);
-+module_exit(xenkbd_cleanup);
-+
-+MODULE_LICENSE("GPL");
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/gntdev/gntdev.c tmp-linux-2.6-xen.patch/drivers/xen/gntdev/gntdev.c
---- pristine-linux-2.6.18.2/drivers/xen/gntdev/gntdev.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/gntdev/gntdev.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,973 @@
-+/******************************************************************************
-+ * gntdev.c
-+ * 
-+ * Device for accessing (in user-space) pages that have been granted by other
-+ * domains.
-+ *
-+ * Copyright (c) 2006-2007, D G Murray.
-+ * 
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ * 
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-+ */
++	int ret = 0;
++	struct config_field *field = entry->field;
 +
-+#include <asm/atomic.h>
-+#include <linux/module.h>
-+#include <linux/kernel.h>
-+#include <linux/init.h>
-+#include <linux/fs.h>
-+#include <linux/device.h>
-+#include <linux/mm.h>
-+#include <linux/mman.h>
-+#include <asm/uaccess.h>
-+#include <asm/io.h>
-+#include <xen/gnttab.h>
-+#include <asm/hypervisor.h>
-+#include <xen/balloon.h>
-+#include <xen/evtchn.h>
-+#include <xen/driver_util.h>
++	switch (field->size) {
++	case 1:
++		if (field->u.b.write)
++			ret = field->u.b.write(dev, offset, (u8) value,
++					       entry->data);
++		break;
++	case 2:
++		if (field->u.w.write)
++			ret = field->u.w.write(dev, offset, (u16) value,
++					       entry->data);
++		break;
++	case 4:
++		if (field->u.dw.write)
++			ret = field->u.dw.write(dev, offset, value,
++						entry->data);
++		break;
++	}
++	return ret;
++}
 +
-+#include <linux/types.h>
-+#include <xen/public/gntdev.h>
++static inline u32 get_mask(int size)
++{
++	if (size == 1)
++		return 0xff;
++	else if (size == 2)
++		return 0xffff;
++	else
++		return 0xffffffff;
++}
 +
++static inline int valid_request(int offset, int size)
++{
++	/* Validate request (no un-aligned requests) */
++	if ((size == 1 || size == 2 || size == 4) && (offset % size) == 0)
++		return 1;
++	return 0;
++}
 +
-+#define DRIVER_AUTHOR "Derek G. Murray <Derek.Murray at cl.cam.ac.uk>"
-+#define DRIVER_DESC   "User-space granted page access driver"
++static inline u32 merge_value(u32 val, u32 new_val, u32 new_val_mask,
++			      int offset)
++{
++	if (offset >= 0) {
++		new_val_mask <<= (offset * 8);
++		new_val <<= (offset * 8);
++	} else {
++		new_val_mask >>= (offset * -8);
++		new_val >>= (offset * -8);
++	}
++	val = (val & ~new_val_mask) | (new_val & new_val_mask);
 +
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR(DRIVER_AUTHOR);
-+MODULE_DESCRIPTION(DRIVER_DESC);
++	return val;
++}
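
merge_value() overlays new_val onto val with byte granularity: offset is
the byte distance between field and request, and a negative offset
shifts the other way for a request that starts inside a field.  A worked
example with invented values, for a 2-byte virtual field at config
offset 0x06 merged into a 4-byte read at 0x04 (offset = 2):

u32 merged = merge_value(0x11223344, 0xBEEF, get_mask(2), 2);
/* mask   = 0x0000ffff << 16 = 0xffff0000
 * value  = 0xBEEF     << 16 = 0xBEEF0000
 * merged = (0x11223344 & ~0xffff0000) | 0xBEEF0000 = 0xBEEF3344
 */
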
 +
-+#define MAX_GRANTS 128
++static int pcibios_err_to_errno(int err)
++{
++	switch (err) {
++	case PCIBIOS_SUCCESSFUL:
++		return XEN_PCI_ERR_success;
++	case PCIBIOS_DEVICE_NOT_FOUND:
++		return XEN_PCI_ERR_dev_not_found;
++	case PCIBIOS_BAD_REGISTER_NUMBER:
++		return XEN_PCI_ERR_invalid_offset;
++	case PCIBIOS_FUNC_NOT_SUPPORTED:
++		return XEN_PCI_ERR_not_implemented;
++	case PCIBIOS_SET_FAILED:
++		return XEN_PCI_ERR_access_denied;
++	}
++	return err;
++}
 +
-+/* A slot can be in one of three states:
-+ *
-+ * 0. GNTDEV_SLOT_INVALID:
-+ *    This slot is not associated with a grant reference, and is therefore free
-+ *    to be overwritten by a new grant reference.
-+ *
-+ * 1. GNTDEV_SLOT_NOT_YET_MAPPED:
-+ *    This slot is associated with a grant reference (via the 
-+ *    IOCTL_GNTDEV_MAP_GRANT_REF ioctl), but it has not yet been mmap()-ed.
-+ *
-+ * 2. GNTDEV_SLOT_MAPPED:
-+ *    This slot is associated with a grant reference, and has been mmap()-ed.
-+ */
-+typedef enum gntdev_slot_state {
-+	GNTDEV_SLOT_INVALID = 0,
-+	GNTDEV_SLOT_NOT_YET_MAPPED,
-+	GNTDEV_SLOT_MAPPED
-+} gntdev_slot_state_t;
++int pciback_config_read(struct pci_dev *dev, int offset, int size,
++			u32 * ret_val)
++{
++	int err = 0;
++	struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++	struct config_field_entry *cfg_entry;
++	struct config_field *field;
++	int req_start, req_end, field_start, field_end;
++	/* if read fails for any reason, return 0 (as if device didn't respond) */
++	u32 value = 0, tmp_val;
 +
-+#define GNTDEV_INVALID_HANDLE    -1
-+#define GNTDEV_FREE_LIST_INVALID -1
-+/* Each opened instance of gntdev is associated with a list of grants,
-+ * represented by an array of elements of the following type,
-+ * gntdev_grant_info_t.
-+ */
-+typedef struct gntdev_grant_info {
-+	gntdev_slot_state_t state;
-+	union {
-+		uint32_t free_list_index;
-+		struct {
-+			domid_t domid;
-+			grant_ref_t ref;
-+			grant_handle_t kernel_handle;
-+			grant_handle_t user_handle;
-+			uint64_t dev_bus_addr;
-+		} valid;
-+	} u;
-+} gntdev_grant_info_t;
++	if (unlikely(verbose_request))
++		printk(KERN_DEBUG "pciback: %s: read %d bytes at 0x%x\n",
++		       pci_name(dev), size, offset);
 +
-+/* Private data structure, which is stored in the file pointer for files
-+ * associated with this device.
-+ */
-+typedef struct gntdev_file_private_data {
-+  
-+	/* Array of grant information. */
-+	gntdev_grant_info_t grants[MAX_GRANTS];
++	if (!valid_request(offset, size)) {
++		err = XEN_PCI_ERR_invalid_offset;
++		goto out;
++	}
 +
-+	/* Read/write semaphore used to protect the grants array. */
-+	struct rw_semaphore grants_sem;
++	/* Get the real value first, then modify as appropriate */
++	switch (size) {
++	case 1:
++		err = pci_read_config_byte(dev, offset, (u8 *) & value);
++		break;
++	case 2:
++		err = pci_read_config_word(dev, offset, (u16 *) & value);
++		break;
++	case 4:
++		err = pci_read_config_dword(dev, offset, &value);
++		break;
++	}
 +
-+	/* An array of indices of free slots in the grants array.
-+	 * N.B. An entry in this list may temporarily have the value
-+	 * GNTDEV_FREE_LIST_INVALID if the corresponding slot has been removed
-+	 * from the list by the contiguous allocator, but the list has not yet
-+	 * been compressed. However, this is not visible across invocations of
-+	 * the device.
-+	 */
-+	int32_t free_list[MAX_GRANTS];
-+	
-+	/* The number of free slots in the grants array. */
-+	uint32_t free_list_size;
++	list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
++		field = cfg_entry->field;
 +
-+	/* Read/write semaphore used to protect the free list. */
-+	struct rw_semaphore free_list_sem;
-+	
-+	/* Index of the next slot after the most recent contiguous allocation, 
-+	 * for use in a next-fit allocator.
-+	 */
-+	uint32_t next_fit_index;
++		req_start = offset;
++		req_end = offset + size;
++		field_start = OFFSET(cfg_entry);
++		field_end = OFFSET(cfg_entry) + field->size;
 +
-+	/* Used to map grants into the kernel, before mapping them into user
-+	 * space.
-+	 */
-+	struct page **foreign_pages;
++		if ((req_start >= field_start && req_start < field_end)
++		    || (req_end > field_start && req_end <= field_end)) {
++			err = conf_space_read(dev, cfg_entry, field_start,
++					      &tmp_val);
++			if (err)
++				goto out;
 +
-+} gntdev_file_private_data_t;
++			value = merge_value(value, tmp_val,
++					    get_mask(field->size),
++					    field_start - req_start);
++		}
++	}
 +
-+/* Module lifecycle operations. */
-+static int __init gntdev_init(void);
-+static void __exit gntdev_exit(void);
++      out:
++	if (unlikely(verbose_request))
++		printk(KERN_DEBUG "pciback: %s: read %d bytes at 0x%x = %x\n",
++		       pci_name(dev), size, offset, value);
 +
-+module_init(gntdev_init);
-+module_exit(gntdev_exit);
++	*ret_val = value;
++	return pcibios_err_to_errno(err);
++}
 +
-+/* File operations. */
-+static int gntdev_open(struct inode *inode, struct file *flip);
-+static int gntdev_release(struct inode *inode, struct file *flip);
-+static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma);
-+static long gntdev_ioctl(struct file *flip,
-+			 unsigned int cmd, unsigned long arg);
++int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value)
++{
++	int err = 0, handled = 0;
++	struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++	struct config_field_entry *cfg_entry;
++	struct config_field *field;
++	u32 tmp_val;
++	int req_start, req_end, field_start, field_end;
 +
-+static struct file_operations gntdev_fops = {
-+	.owner = THIS_MODULE,
-+	.open = gntdev_open,
-+	.release = gntdev_release,
-+	.mmap = gntdev_mmap,
-+	.unlocked_ioctl = gntdev_ioctl
-+};
++	if (unlikely(verbose_request))
++		printk(KERN_DEBUG
++		       "pciback: %s: write request %d bytes at 0x%x = %x\n",
++		       pci_name(dev), size, offset, value);
 +
-+/* VM operations. */
-+static void gntdev_vma_close(struct vm_area_struct *vma);
-+static pte_t gntdev_clear_pte(struct vm_area_struct *vma, unsigned long addr,
-+			      pte_t *ptep, int is_fullmm);
++	if (!valid_request(offset, size))
++		return XEN_PCI_ERR_invalid_offset;
 +
-+static struct vm_operations_struct gntdev_vmops = {
-+	.close = gntdev_vma_close,
-+	.zap_pte = gntdev_clear_pte
-+};
++	list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
++		field = cfg_entry->field;
 +
-+/* Global variables. */
++		req_start = offset;
++		req_end = offset + size;
++		field_start = OFFSET(cfg_entry);
++		field_end = OFFSET(cfg_entry) + field->size;
 +
-+/* The driver major number, for use when unregistering the driver. */
-+static int gntdev_major;
++		if ((req_start >= field_start && req_start < field_end)
++		    || (req_end > field_start && req_end <= field_end)) {
++			tmp_val = 0;
 +
-+#define GNTDEV_NAME "gntdev"
++			err = pciback_config_read(dev, field_start,
++						  field->size, &tmp_val);
++			if (err)
++				break;
 +
-+/* Memory mapping functions
-+ * ------------------------
-+ *
-+ * Every granted page is mapped into both kernel and user space, and the two
-+ * following functions return the respective virtual addresses of these pages.
-+ *
-+ * When shadow paging is disabled, the granted page is mapped directly into
-+ * user space; when it is enabled, it is mapped into the kernel and remapped
-+ * into user space using vm_insert_page() (see gntdev_mmap(), below).
-+ */
++			tmp_val = merge_value(tmp_val, value, get_mask(size),
++					      req_start - field_start);
 +
-+/* Returns the virtual address (in user space) of the @page_index'th page
-+ * in the given VM area.
-+ */
-+static inline unsigned long get_user_vaddr (struct vm_area_struct *vma,
-+					    int page_index)
-+{
-+	return (unsigned long) vma->vm_start + (page_index << PAGE_SHIFT);
-+}
++			err = conf_space_write(dev, cfg_entry, field_start,
++					       tmp_val);
 +
-+/* Returns the virtual address (in kernel space) of the @slot_index'th page
-+ * mapped by the gntdev instance that owns the given private data struct.
-+ */
-+static inline unsigned long get_kernel_vaddr (gntdev_file_private_data_t *priv,
-+					      int slot_index)
-+{
-+	unsigned long pfn;
-+	void *kaddr;
-+	pfn = page_to_pfn(priv->foreign_pages[slot_index]);
-+	kaddr = pfn_to_kaddr(pfn);
-+	return (unsigned long) kaddr;
-+}
++			/* handled is set true here, but not every byte
++			 * may have been written! Properly detecting if
++			 * every byte is handled is unnecessary as the
++			 * flag is used to detect devices that need
++			 * special helpers to work correctly.
++			 */
++			handled = 1;
++		}
++	}
 +
-+/* Helper functions. */
++	if (!handled && !err) {
++		/* By default, anything not specifically handled above is
++		 * read-only. The permissive flag changes this behavior so
++		 * that anything not specifically handled above is writable.
++		 * This means that some fields may still be read-only because
++		 * they have entries in the config_field list that intercept
++		 * the write and do nothing. */
++		if (dev_data->permissive) {
++			switch (size) {
++			case 1:
++				err = pci_write_config_byte(dev, offset,
++							    (u8) value);
++				break;
++			case 2:
++				err = pci_write_config_word(dev, offset,
++							    (u16) value);
++				break;
++			case 4:
++				err = pci_write_config_dword(dev, offset,
++							     (u32) value);
++				break;
++			}
++		} else if (!dev_data->warned_on_write) {
++			dev_data->warned_on_write = 1;
++			dev_warn(&dev->dev, "Driver tried to write to a "
++				 "read-only configuration space field at offset "
++				 "0x%x, size %d. This may be harmless, but if "
++				 "you have problems with your device:\n"
++				 "1) see permissive attribute in sysfs\n"
++				 "2) report problems to the xen-devel "
++				 "mailing list along with details of your "
++				 "device obtained from lspci.\n", offset, size);
++		}
++	}
 +
-+/* Adds information about a grant reference to the list of grants in the file's
-+ * private data structure. Returns non-zero on failure. On success, sets the
-+ * value of *offset to the offset that should be mmap()-ed in order to map the
-+ * grant reference.
-+ */
-+static int add_grant_reference(struct file *flip,
-+			       struct ioctl_gntdev_grant_ref *op,
-+			       uint64_t *offset)
++	return pcibios_err_to_errno(err);
++}
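
Note the read-modify-write in the loop above: a write that only
partially covers a virtual field is first widened by reading the field's
current value, then merged, then written back whole.  An illustrative
trace with invented offsets, for a 2-byte write of `value' at 0x06
landing in a 4-byte field at 0x04:

err = pciback_config_read(dev, 0x04, 4, &tmp_val);	/* current value  */
tmp_val = merge_value(tmp_val, value, get_mask(2),
		      0x06 - 0x04);			/* overlay 2 bytes */
err = conf_space_write(dev, cfg_entry, 0x04, tmp_val);	/* write it whole */
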
++
++void pciback_config_free_dyn_fields(struct pci_dev *dev)
 +{
-+	gntdev_file_private_data_t *private_data 
-+		= (gntdev_file_private_data_t *) flip->private_data;
++	struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++	struct config_field_entry *cfg_entry, *t;
++	struct config_field *field;
 +
-+	uint32_t slot_index;
++	dev_dbg(&dev->dev,
++		"free-ing dynamically allocated virtual configuration space fields\n");
 +
-+	if (unlikely(private_data->free_list_size == 0)) {
-+		return -ENOMEM;
-+	}
++	list_for_each_entry_safe(cfg_entry, t, &dev_data->config_fields, list) {
++		field = cfg_entry->field;
 +
-+	slot_index = private_data->free_list[--private_data->free_list_size];
++		if (field->clean) {
++			field->clean(field);
 +
-+	/* Copy the grant information into file's private data. */
-+	private_data->grants[slot_index].state = GNTDEV_SLOT_NOT_YET_MAPPED;
-+	private_data->grants[slot_index].u.valid.domid = op->domid;
-+	private_data->grants[slot_index].u.valid.ref = op->ref;
++			if (cfg_entry->data)
++				kfree(cfg_entry->data);
 +
-+	/* The offset is calculated as the index of the chosen entry in the
-+	 * file's private data's array of grant information. This is then
-+	 * shifted to give an offset into the virtual "file address space".
-+	 */
-+	*offset = slot_index << PAGE_SHIFT;
++			list_del(&cfg_entry->list);
++			kfree(cfg_entry);
++		}
 +
-+	return 0;
++	}
 +}
 +
-+/* Adds the @count grant references to the contiguous range in the slot array
-+ * beginning at @first_slot. It is assumed that @first_slot was returned by a
-+ * previous invocation of find_contiguous_free_range(), during the same
-+ * invocation of the driver.
-+ */
-+static int add_grant_references(struct file *flip,
-+				int count,
-+				struct ioctl_gntdev_grant_ref *ops,
-+				uint32_t first_slot)
++void pciback_config_reset_dev(struct pci_dev *dev)
 +{
-+	gntdev_file_private_data_t *private_data 
-+		= (gntdev_file_private_data_t *) flip->private_data;
-+	int i;
-+	
-+	for (i = 0; i < count; ++i) {
-+
-+		/* First, mark the slot's entry in the free list as invalid. */
-+		int free_list_index = 
-+			private_data->grants[first_slot+i].u.free_list_index;
-+		private_data->free_list[free_list_index] = 
-+			GNTDEV_FREE_LIST_INVALID;
++	struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++	struct config_field_entry *cfg_entry;
++	struct config_field *field;
 +
-+		/* Now, update the slot. */
-+		private_data->grants[first_slot+i].state = 
-+			GNTDEV_SLOT_NOT_YET_MAPPED;
-+		private_data->grants[first_slot+i].u.valid.domid =
-+			ops[i].domid;
-+		private_data->grants[first_slot+i].u.valid.ref = ops[i].ref;
-+	}
++	dev_dbg(&dev->dev, "resetting virtual configuration space\n");
 +
-+	return 0;	
-+}
++	list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
++		field = cfg_entry->field;
 +
-+/* Scans through the free list for @flip, removing entries that are marked as
-+ * GNTDEV_SLOT_INVALID. This will reduce the recorded size of the free list to
-+ * the number of valid entries.
-+ */
-+static void compress_free_list(struct file *flip) 
-+{
-+	gntdev_file_private_data_t *private_data 
-+		= (gntdev_file_private_data_t *) flip->private_data;
-+	int i, j = 0, old_size;
-+	
-+	old_size = private_data->free_list_size;
-+	for (i = 0; i < old_size; ++i) {
-+		if (private_data->free_list[i] != GNTDEV_FREE_LIST_INVALID) {
-+			private_data->free_list[j] = 
-+				private_data->free_list[i];
-+			++j;
-+		} else {
-+			--private_data->free_list_size;
-+		}
++		if (field->reset)
++			field->reset(dev, OFFSET(cfg_entry), cfg_entry->data);
 +	}
 +}
 +
-+/* Searches the grant array in the private data of @flip for a range of
-+ * @num_slots contiguous slots in the GNTDEV_SLOT_INVALID state.
-+ *
-+ * Returns the index of the first slot if a range is found, otherwise -ENOMEM.
-+ */
-+static int find_contiguous_free_range(struct file *flip,
-+				      uint32_t num_slots) 
++void pciback_config_free_dev(struct pci_dev *dev)
 +{
-+	gntdev_file_private_data_t *private_data 
-+		= (gntdev_file_private_data_t *) flip->private_data;
-+	
-+	int i;
-+	int start_index = private_data->next_fit_index;
-+	int range_start = 0, range_length;
++	struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++	struct config_field_entry *cfg_entry, *t;
++	struct config_field *field;
 +
-+	if (private_data->free_list_size < num_slots) {
-+		return -ENOMEM;
-+	}
++	dev_dbg(&dev->dev, "free-ing virtual configuration space fields\n");
 +
-+	/* First search from the start_index to the end of the array. */
-+	range_length = 0;
-+	for (i = start_index; i < MAX_GRANTS; ++i) {
-+		if (private_data->grants[i].state == GNTDEV_SLOT_INVALID) {
-+			if (range_length == 0) {
-+				range_start = i;
-+			}
-+			++range_length;
-+			if (range_length == num_slots) {
-+				return range_start;
-+			}
-+		}
-+	}
-+	
-+	/* Now search from the start of the array to the start_index. */
-+	range_length = 0;
-+	for (i = 0; i < start_index; ++i) {
-+		if (private_data->grants[i].state == GNTDEV_SLOT_INVALID) {
-+			if (range_length == 0) {
-+				range_start = i;
-+			}
-+			++range_length;
-+			if (range_length == num_slots) {
-+				return range_start;
-+			}
-+		}
++	list_for_each_entry_safe(cfg_entry, t, &dev_data->config_fields, list) {
++		list_del(&cfg_entry->list);
++
++		field = cfg_entry->field;
++
++		if (field->release)
++			field->release(dev, OFFSET(cfg_entry), cfg_entry->data);
++
++		kfree(cfg_entry);
 +	}
-+	
-+	return -ENOMEM;
 +}
 +
-+/* Interface functions. */
-+
-+/* Initialises the driver. Called when the module is loaded. */
-+static int __init gntdev_init(void)
++int pciback_config_add_field_offset(struct pci_dev *dev,
++				    struct config_field *field,
++				    unsigned int base_offset)
 +{
-+	struct class *class;
-+	struct class_device *device;
++	int err = 0;
++	struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++	struct config_field_entry *cfg_entry;
++	void *tmp;
 +
-+	if (!is_running_on_xen()) {
-+		printk(KERN_ERR "You must be running Xen to use gntdev\n");
-+		return -ENODEV;
++	cfg_entry = kmalloc(sizeof(*cfg_entry), GFP_KERNEL);
++	if (!cfg_entry) {
++		err = -ENOMEM;
++		goto out;
 +	}
 +
-+	gntdev_major = register_chrdev(0, GNTDEV_NAME, &gntdev_fops);
-+	if (gntdev_major < 0)
-+	{
-+		printk(KERN_ERR "Could not register gntdev device\n");
-+		return -ENOMEM;
-+	}
++	cfg_entry->data = NULL;
++	cfg_entry->field = field;
++	cfg_entry->base_offset = base_offset;
 +
-+	/* Note that if the sysfs code fails, we will still initialise the
-+	 * device, and output the major number so that the device can be
-+	 * created manually using mknod.
-+	 */
-+	if ((class = get_xen_class()) == NULL) {
-+		printk(KERN_ERR "Error setting up xen_class\n");
-+		printk(KERN_ERR "gntdev created with major number = %d\n", 
-+		       gntdev_major);
-+		return 0;
-+	}
++	/* silently ignore duplicate fields */
++	err = pciback_field_is_dup(dev, OFFSET(cfg_entry));
++	if (err)
++		goto out;
++
++	if (field->init) {
++		tmp = field->init(dev, OFFSET(cfg_entry));
++
++		if (IS_ERR(tmp)) {
++			err = PTR_ERR(tmp);
++			goto out;
++		}
 +
-+	device = class_device_create(class, NULL, MKDEV(gntdev_major, 0),
-+				     NULL, GNTDEV_NAME);
-+	if (IS_ERR(device)) {
-+		printk(KERN_ERR "Error creating gntdev device in xen_class\n");
-+		printk(KERN_ERR "gntdev created with major number = %d\n",
-+		       gntdev_major);
-+		return 0;
++		cfg_entry->data = tmp;
 +	}
 +
-+	return 0;
-+}
++	dev_dbg(&dev->dev, "added config field at offset 0x%02x\n",
++		OFFSET(cfg_entry));
++	list_add_tail(&cfg_entry->list, &dev_data->config_fields);
 +
-+/* Cleans up and unregisters the driver. Called when the driver is unloaded.
-+ */
-+static void __exit gntdev_exit(void)
-+{
-+	struct class *class;
-+	if ((class = get_xen_class()) != NULL)
-+		class_device_destroy(class, MKDEV(gntdev_major, 0));
-+	unregister_chrdev(gntdev_major, GNTDEV_NAME);
++      out:
++	if (err)
++		kfree(cfg_entry);
++
++	return err;
 +}
 +
-+/* Called when the device is opened. */
-+static int gntdev_open(struct inode *inode, struct file *flip)
++/* This sets up the device's virtual configuration space to keep track of 
++ * certain registers (like the base address registers (BARs)) so that we can
++ * keep the client from manipulating them directly.
++ */
++int pciback_config_init_dev(struct pci_dev *dev)
 +{
-+	gntdev_file_private_data_t *private_data;
-+	int i;
-+
-+	try_module_get(THIS_MODULE);
-+
-+	/* Allocate space for the per-instance private data. */
-+	private_data = kmalloc(sizeof(*private_data), GFP_KERNEL);
-+	if (!private_data)
-+		goto nomem_out;
++	int err = 0;
++	struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
 +
-+	/* Allocate space for the kernel-mapping of granted pages. */
-+	private_data->foreign_pages = 
-+		alloc_empty_pages_and_pagevec(MAX_GRANTS);
-+	if (!private_data->foreign_pages)
-+		goto nomem_out2;
++	dev_dbg(&dev->dev, "initializing virtual configuration space\n");
 +
-+	/* Initialise the free-list, which contains all slots at first.
-+	 */
-+	for (i = 0; i < MAX_GRANTS; ++i) {
-+		private_data->free_list[MAX_GRANTS - i - 1] = i;
-+		private_data->grants[i].state = GNTDEV_SLOT_INVALID;
-+		private_data->grants[i].u.free_list_index = MAX_GRANTS - i - 1;
-+	}
-+	private_data->free_list_size = MAX_GRANTS;
-+	private_data->next_fit_index = 0;
++	INIT_LIST_HEAD(&dev_data->config_fields);
 +
-+	init_rwsem(&private_data->grants_sem);
-+	init_rwsem(&private_data->free_list_sem);
++	err = pciback_config_header_add_fields(dev);
++	if (err)
++		goto out;
 +
-+	flip->private_data = private_data;
++	err = pciback_config_capability_add_fields(dev);
++	if (err)
++		goto out;
 +
-+	return 0;
++	err = pciback_config_quirks_init(dev);
 +
-+nomem_out2:
-+	kfree(private_data);
-+nomem_out:
-+	return -ENOMEM;
++      out:
++	return err;
 +}
 +
-+/* Called when the device is closed.
-+ */
-+static int gntdev_release(struct inode *inode, struct file *flip)
++int pciback_config_init(void)
 +{
-+	if (flip->private_data) {
-+		gntdev_file_private_data_t *private_data = 
-+			(gntdev_file_private_data_t *) flip->private_data;
-+		if (private_data->foreign_pages) {
-+			free_empty_pages_and_pagevec
-+				(private_data->foreign_pages, MAX_GRANTS);
-+		}
-+		kfree(private_data);
-+	}
-+	module_put(THIS_MODULE);
-+	return 0;
++	return pciback_config_capability_init();
 +}
-+
-+/* Called when an attempt is made to mmap() the device. The private data from
-+ * @flip contains the list of grant references that can be mapped. The vm_pgoff
-+ * field of @vma contains the index into that list that refers to the grant
-+ * reference that will be mapped. Only mappings that are a multiple of
-+ * PAGE_SIZE are handled.
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/pciback/conf_space.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/pciback/conf_space.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,126 @@
++/*
++ * PCI Backend - Common data structures for overriding the configuration space
++ *
++ * Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
 + */
-+static int gntdev_mmap (struct file *flip, struct vm_area_struct *vma) 
-+{
-+	struct gnttab_map_grant_ref op;
-+	unsigned long slot_index = vma->vm_pgoff;
-+	unsigned long kernel_vaddr, user_vaddr;
-+	uint32_t size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-+	uint64_t ptep;
-+	int ret;
-+	int flags;
-+	int i;
-+	struct page *page;
-+	gntdev_file_private_data_t *private_data = flip->private_data;
 +
-+	if (unlikely(!private_data)) {
-+		printk(KERN_ERR "File's private data is NULL.\n");
-+		return -EINVAL;
-+	}
++#ifndef __XEN_PCIBACK_CONF_SPACE_H__
++#define __XEN_PCIBACK_CONF_SPACE_H__
 +
-+	if (unlikely((size <= 0) || (size + slot_index) > MAX_GRANTS)) {
-+		printk(KERN_ERR "Invalid number of pages or offset"
-+		       "(num_pages = %d, first_slot = %ld).\n",
-+		       size, slot_index);
-+		return -ENXIO;
-+	}
++#include <linux/list.h>
++#include <linux/err.h>
 +
-+	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) {
-+		printk(KERN_ERR "Writable mappings must be shared.\n");
-+		return -EINVAL;
-+	}
++/* conf_field_init can return an errno in a ptr with ERR_PTR() */
++typedef void *(*conf_field_init) (struct pci_dev * dev, int offset);
++typedef void (*conf_field_reset) (struct pci_dev * dev, int offset, void *data);
++typedef void (*conf_field_free) (struct pci_dev * dev, int offset, void *data);
 +
-+	/* Slots must be in the NOT_YET_MAPPED state. */
-+	down_write(&private_data->grants_sem);
-+	for (i = 0; i < size; ++i) {
-+		if (private_data->grants[slot_index + i].state != 
-+		    GNTDEV_SLOT_NOT_YET_MAPPED) {
-+			printk(KERN_ERR "Slot (index = %ld) is in the wrong "
-+			       "state (%d).\n", slot_index + i, 
-+			       private_data->grants[slot_index + i].state);
-+			up_write(&private_data->grants_sem);
-+			return -EINVAL;
-+		}
-+	}
++typedef int (*conf_dword_write) (struct pci_dev * dev, int offset, u32 value,
++				 void *data);
++typedef int (*conf_word_write) (struct pci_dev * dev, int offset, u16 value,
++				void *data);
++typedef int (*conf_byte_write) (struct pci_dev * dev, int offset, u8 value,
++				void *data);
++typedef int (*conf_dword_read) (struct pci_dev * dev, int offset, u32 * value,
++				void *data);
++typedef int (*conf_word_read) (struct pci_dev * dev, int offset, u16 * value,
++			       void *data);
++typedef int (*conf_byte_read) (struct pci_dev * dev, int offset, u8 * value,
++			       void *data);
 +
-+	/* Install the hook for unmapping. */
-+	vma->vm_ops = &gntdev_vmops;
-+    
-+	/* The VM area contains pages from another VM. */
-+	vma->vm_flags |= VM_FOREIGN;
-+	vma->vm_private_data = kzalloc(size * sizeof(struct page_struct *), 
-+				       GFP_KERNEL);
-+	if (vma->vm_private_data == NULL) {
-+		printk(KERN_ERR "Couldn't allocate mapping structure for VM "
-+		       "area.\n");
-+		return -ENOMEM;
-+	}
++/* These are the fields within the configuration space which we
++ * are interested in intercepting reads/writes to and changing their
++ * values.
++ */
++struct config_field {
++	unsigned int offset;
++	unsigned int size;
++	unsigned int mask;
++	conf_field_init init;
++	conf_field_reset reset;
++	conf_field_free release;
++	void (*clean) (struct config_field * field);
++	union {
++		struct {
++			conf_dword_write write;
++			conf_dword_read read;
++		} dw;
++		struct {
++			conf_word_write write;
++			conf_word_read read;
++		} w;
++		struct {
++			conf_byte_write write;
++			conf_byte_read read;
++		} b;
++	} u;
++	struct list_head list;
++};
 +
-+	/* This flag prevents Bad PTE errors when the memory is unmapped. */
-+	vma->vm_flags |= VM_RESERVED;
++struct config_field_entry {
++	struct list_head list;
++	struct config_field *field;
++	unsigned int base_offset;
++	void *data;
++};
 +
-+	/* This flag prevents this VM area being copied on a fork(). A better
-+	 * behaviour might be to explicitly carry out the appropriate mappings
-+	 * on fork(), but I don't know if there's a hook for this.
-+	 */
-+	vma->vm_flags |= VM_DONTCOPY;
++#define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
 +
-+#ifdef CONFIG_X86
-+	/* This flag ensures that the page tables are not unpinned before the
-+	 * VM area is unmapped. Therefore Xen still recognises the PTE as
-+	 * belonging to an L1 pagetable, and the grant unmap operation will
-+	 * succeed, even if the process does not exit cleanly.
-+	 */
-+	vma->vm_mm->context.has_foreign_mappings = 1;
-+#endif
++/* Add fields to a device - the add_fields helpers expect a pointer to
++ * the first entry in an array (whose end is marked by size==0)
++ */
++int pciback_config_add_field_offset(struct pci_dev *dev,
++				    struct config_field *field,
++				    unsigned int offset);
 +
-+	for (i = 0; i < size; ++i) {
++static inline int pciback_config_add_field(struct pci_dev *dev,
++					   struct config_field *field)
++{
++	return pciback_config_add_field_offset(dev, field, 0);
++}
 +
-+		flags = GNTMAP_host_map;
-+		if (!(vma->vm_flags & VM_WRITE))
-+			flags |= GNTMAP_readonly;
++static inline int pciback_config_add_fields(struct pci_dev *dev,
++					    struct config_field *field)
++{
++	int i, err = 0;
++	for (i = 0; field[i].size != 0; i++) {
++		err = pciback_config_add_field(dev, &field[i]);
++		if (err)
++			break;
++	}
++	return err;
++}
 +
-+		kernel_vaddr = get_kernel_vaddr(private_data, slot_index + i);
-+		user_vaddr = get_user_vaddr(vma, i);
-+		page = pfn_to_page(__pa(kernel_vaddr) >> PAGE_SHIFT);
++static inline int pciback_config_add_fields_offset(struct pci_dev *dev,
++						   struct config_field *field,
++						   unsigned int offset)
++{
++	int i, err = 0;
++	for (i = 0; field[i].size != 0; i++) {
++		err = pciback_config_add_field_offset(dev, &field[i], offset);
++		if (err)
++			break;
++	}
++	return err;
++}
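++/* Both helpers above walk their array until the .size == 0 sentinel
++ * entry; see e.g. caplist_header in conf_space_capability.c for the
++ * expected array shape.
++ */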
 +
-+		gnttab_set_map_op(&op, kernel_vaddr, flags,   
-+				  private_data->grants[slot_index+i]
-+				  .u.valid.ref, 
-+				  private_data->grants[slot_index+i]
-+				  .u.valid.domid);
++/* Read/Write the real configuration space */
++int pciback_read_config_byte(struct pci_dev *dev, int offset, u8 * value,
++			     void *data);
++int pciback_read_config_word(struct pci_dev *dev, int offset, u16 * value,
++			     void *data);
++int pciback_read_config_dword(struct pci_dev *dev, int offset, u32 * value,
++			      void *data);
++int pciback_write_config_byte(struct pci_dev *dev, int offset, u8 value,
++			      void *data);
++int pciback_write_config_word(struct pci_dev *dev, int offset, u16 value,
++			      void *data);
++int pciback_write_config_dword(struct pci_dev *dev, int offset, u32 value,
++			       void *data);
 +
-+		/* Carry out the mapping of the grant reference. */
-+		ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, 
-+						&op, 1);
-+		BUG_ON(ret);
-+		if (op.status) {
-+			printk(KERN_ERR "Error mapping the grant reference "
-+			       "into the kernel (%d). domid = %d; ref = %d\n",
-+			       op.status,
-+			       private_data->grants[slot_index+i]
-+			       .u.valid.domid,
-+			       private_data->grants[slot_index+i]
-+			       .u.valid.ref);
-+			goto undo_map_out;
-+		}
++int pciback_config_capability_init(void);
 +
-+		/* Store a reference to the page that will be mapped into user
-+		 * space.
-+		 */
-+		((struct page **) vma->vm_private_data)[i] = page;
++int pciback_config_header_add_fields(struct pci_dev *dev);
++int pciback_config_capability_add_fields(struct pci_dev *dev);
 +
-+		/* Mark mapped page as reserved. */
-+		SetPageReserved(page);
++#endif				/* __XEN_PCIBACK_CONF_SPACE_H__ */
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/pciback/conf_space_capability.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/pciback/conf_space_capability.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,71 @@
++/*
++ * PCI Backend - Handles the virtual fields found on the capability lists
++ *               in the configuration space.
++ *
++ * Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
++ */
 +
-+		/* Record the grant handle, for use in the unmap operation. */
-+		private_data->grants[slot_index+i].u.valid.kernel_handle = 
-+			op.handle;
-+		private_data->grants[slot_index+i].u.valid.dev_bus_addr = 
-+			op.dev_bus_addr;
-+		
-+		private_data->grants[slot_index+i].state = GNTDEV_SLOT_MAPPED;
-+		private_data->grants[slot_index+i].u.valid.user_handle =
-+			GNTDEV_INVALID_HANDLE;
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include "pciback.h"
++#include "conf_space.h"
++#include "conf_space_capability.h"
 +
-+		/* Now perform the mapping to user space. */
-+		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++static LIST_HEAD(capabilities);
 +
-+			/* NOT USING SHADOW PAGE TABLES. */
-+			/* In this case, we map the grant(s) straight into user
-+			 * space.
-+			 */
++static struct config_field caplist_header[] = {
++	{
++	 .offset    = PCI_CAP_LIST_ID,
++	 .size      = 2, /* encompass PCI_CAP_LIST_ID & PCI_CAP_LIST_NEXT */
++	 .u.w.read  = pciback_read_config_word,
++	 .u.w.write = NULL,
++	},
++	{
++	 .size = 0,
++	},
++};
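++/* This overlay is attached at each capability's discovered offset: the
++ * guest reads the real ID/next-pointer bytes, but with no write handler
++ * installed its writes to them are not forwarded to the device.
++ */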
 +
-+			/* Get the machine address of the PTE for the user 
-+			 *  page.
-+			 */
-+			if ((ret = create_lookup_pte_addr(vma->vm_mm, 
-+							  vma->vm_start 
-+							  + (i << PAGE_SHIFT), 
-+							  &ptep)))
-+			{
-+				printk(KERN_ERR "Error obtaining PTE pointer "
-+				       "(%d).\n", ret);
-+				goto undo_map_out;
-+			}
-+			
-+			/* Configure the map operation. */
-+		
-+			/* The reference is to be used by host CPUs. */
-+			flags = GNTMAP_host_map;
-+			
-+			/* Specifies a user space mapping. */
-+			flags |= GNTMAP_application_map;
-+			
-+			/* The map request contains the machine address of the
-+			 * PTE to update.
-+			 */
-+			flags |= GNTMAP_contains_pte;
-+			
-+			if (!(vma->vm_flags & VM_WRITE))
-+				flags |= GNTMAP_readonly;
++static inline void register_capability(struct pciback_config_capability *cap)
++{
++	list_add_tail(&cap->cap_list, &capabilities);
++}
 +
-+			gnttab_set_map_op(&op, ptep, flags, 
-+					  private_data->grants[slot_index+i]
-+					  .u.valid.ref, 
-+					  private_data->grants[slot_index+i]
-+					  .u.valid.domid);
++int pciback_config_capability_add_fields(struct pci_dev *dev)
++{
++	int err = 0;
++	struct pciback_config_capability *cap;
++	int cap_offset;
 +
-+			/* Carry out the mapping of the grant reference. */
-+			ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
-+							&op, 1);
-+			BUG_ON(ret);
-+			if (op.status) {
-+				printk(KERN_ERR "Error mapping the grant "
-+				       "reference into user space (%d). domid "
-+				       "= %d; ref = %d\n", op.status,
-+				       private_data->grants[slot_index+i].u
-+				       .valid.domid,
-+				       private_data->grants[slot_index+i].u
-+				       .valid.ref);
-+				goto undo_map_out;
-+			}
-+			
-+			/* Record the grant handle, for use in the unmap 
-+			 * operation. 
-+			 */
-+			private_data->grants[slot_index+i].u.
-+				valid.user_handle = op.handle;
++	list_for_each_entry(cap, &capabilities, cap_list) {
++		cap_offset = pci_find_capability(dev, cap->capability);
++		if (cap_offset) {
++			dev_dbg(&dev->dev, "Found capability 0x%x at 0x%x\n",
++				cap->capability, cap_offset);
 +
-+			/* Update p2m structure with the new mapping. */
-+			set_phys_to_machine(__pa(kernel_vaddr) >> PAGE_SHIFT,
-+					    FOREIGN_FRAME(private_data->
-+							  grants[slot_index+i]
-+							  .u.valid.dev_bus_addr
-+							  >> PAGE_SHIFT));
-+		} else {
-+			/* USING SHADOW PAGE TABLES. */
-+			/* In this case, we simply insert the page into the VM
-+			 * area. */
-+			ret = vm_insert_page(vma, user_vaddr, page);
++			err = pciback_config_add_fields_offset(dev,
++							       caplist_header,
++							       cap_offset);
++			if (err)
++				goto out;
++			err = pciback_config_add_fields_offset(dev,
++							       cap->fields,
++							       cap_offset);
++			if (err)
++				goto out;
 +		}
-+
 +	}
 +
-+	up_write(&private_data->grants_sem);
-+	return 0;
-+
-+undo_map_out:
-+	/* If we have a mapping failure, the unmapping will be taken care of
-+	 * by do_mmap_pgoff(), which will eventually call gntdev_clear_pte().
-+	 * All we need to do here is free the vma_private_data.
-+	 */
-+	kfree(vma->vm_private_data);
-+
-+	/* THIS IS VERY UNPLEASANT: do_mmap_pgoff() will set the vma->vm_file
-+	 * to NULL on failure. However, we need this in gntdev_clear_pte() to
-+	 * unmap the grants. Therefore, we smuggle a reference to the file's
-+	 * private data in the VM area's private data pointer.
-+	 */
-+	vma->vm_private_data = private_data;
-+	
-+	up_write(&private_data->grants_sem);
-+
-+	return -ENOMEM;
++      out:
++	return err;
 +}
 +
-+static pte_t gntdev_clear_pte(struct vm_area_struct *vma, unsigned long addr,
-+			      pte_t *ptep, int is_fullmm)
-+{
-+	int slot_index, ret;
-+	pte_t copy;
-+	struct gnttab_unmap_grant_ref op;
-+	gntdev_file_private_data_t *private_data;
-+
-+	/* THIS IS VERY UNPLEASANT: do_mmap_pgoff() will set the vma->vm_file
-+	 * to NULL on failure. However, we need this in gntdev_clear_pte() to
-+	 * unmap the grants. Therefore, we smuggle a reference to the file's
-+	 * private data in the VM area's private data pointer.
-+	 */
-+	if (vma->vm_file) {
-+		private_data = (gntdev_file_private_data_t *)
-+			vma->vm_file->private_data;
-+	} else if (vma->vm_private_data) {
-+		private_data = (gntdev_file_private_data_t *)
-+			vma->vm_private_data;
-+	} else {
-+		private_data = NULL; /* gcc warning */
-+		BUG();
-+	}
++extern struct pciback_config_capability pciback_config_capability_vpd;
++extern struct pciback_config_capability pciback_config_capability_pm;
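++/* Defined in conf_space_capability_vpd.c and conf_space_capability_pm.c
++ * respectively.
++ */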
 +
-+	/* Copy the existing value of the PTE for returning. */
-+	copy = *ptep;
++int pciback_config_capability_init(void)
++{
++	register_capability(&pciback_config_capability_vpd);
++	register_capability(&pciback_config_capability_pm);
 +
-+	/* Calculate the grant relating to this PTE. */
-+	slot_index = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
++	return 0;
++}
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/pciback/conf_space_capability.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/pciback/conf_space_capability.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,23 @@
++/*
++ * PCI Backend - Data structures for special overlays for structures on
++ *               the capability list.
++ *
++ * Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
++ */
 +
-+	/* Only unmap grants if the slot has been mapped. This could be being
-+	 * called from a failing mmap().
-+	 */
-+	if (private_data->grants[slot_index].state == GNTDEV_SLOT_MAPPED) {
++#ifndef __PCIBACK_CONFIG_CAPABILITY_H__
++#define __PCIBACK_CONFIG_CAPABILITY_H__
 +
-+		/* First, we clear the user space mapping, if it has been made.
-+		 */
-+		if (private_data->grants[slot_index].u.valid.user_handle !=
-+		    GNTDEV_INVALID_HANDLE && 
-+		    !xen_feature(XENFEAT_auto_translated_physmap)) {
-+			/* NOT USING SHADOW PAGE TABLES. */
-+			gnttab_set_unmap_op(&op, virt_to_machine(ptep), 
-+					    GNTMAP_contains_pte,
-+					    private_data->grants[slot_index]
-+					    .u.valid.user_handle);
-+			ret = HYPERVISOR_grant_table_op(
-+				GNTTABOP_unmap_grant_ref, &op, 1);
-+			BUG_ON(ret);
-+			if (op.status)
-+				printk("User unmap grant status = %d\n", 
-+				       op.status);
-+		} else {
-+			/* USING SHADOW PAGE TABLES. */
-+			pte_clear_full(vma->vm_mm, addr, ptep, is_fullmm);
-+		}
++#include <linux/pci.h>
++#include <linux/list.h>
 +
-+		/* Finally, we unmap the grant from kernel space. */
-+		gnttab_set_unmap_op(&op, 
-+				    get_kernel_vaddr(private_data, slot_index),
-+				    GNTMAP_host_map, 
-+				    private_data->grants[slot_index].u.valid
-+				    .kernel_handle);
-+		ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, 
-+						&op, 1);
-+		BUG_ON(ret);
-+		if (op.status)
-+			printk("Kernel unmap grant status = %d\n", op.status);
++struct pciback_config_capability {
++	struct list_head cap_list;
 +
++	int capability;
 +
-+		/* Return slot to the not-yet-mapped state, so that it may be
-+		 * mapped again, or removed by a subsequent ioctl.
-+		 */
-+		private_data->grants[slot_index].state = 
-+			GNTDEV_SLOT_NOT_YET_MAPPED;
++	/* If the device has the capability found above, add these fields */
++	struct config_field *fields;
++};
 +
-+		/* Invalidate the physical to machine mapping for this page. */
-+		set_phys_to_machine(__pa(get_kernel_vaddr(private_data, 
-+							  slot_index)) 
-+				    >> PAGE_SHIFT, INVALID_P2M_ENTRY);
++#endif
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/pciback/conf_space_capability_msi.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/pciback/conf_space_capability_msi.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,79 @@
++/*
++ * PCI Backend -- Configuration overlay for MSI capability
++ */
++#include <linux/pci.h>
++#include <linux/slab.h>
++#include "conf_space.h"
++#include "conf_space_capability.h"
++#include <xen/interface/io/pciif.h>
++#include "pciback.h"
 +
-+	} else {
-+		pte_clear_full(vma->vm_mm, addr, ptep, is_fullmm);
++int pciback_enable_msi(struct pciback_device *pdev,
++		struct pci_dev *dev, struct xen_pci_op *op)
++{
++	int otherend = pdev->xdev->otherend_id;
++	int status;
++
++	status = pci_enable_msi(dev);
++
++	if (status) {
++		printk(KERN_ERR "pciback: error enabling MSI for guest %x, "
++		       "status %x\n", otherend, status);
++		op->value = 0;
++		return XEN_PCI_ERR_op_failed;
 +	}
 +
-+	return copy;
++	op->value = dev->irq;
++	return 0;
 +}
 +
-+/* "Destructor" for a VM area.
-+ */
-+static void gntdev_vma_close(struct vm_area_struct *vma) {
-+	if (vma->vm_private_data) {
-+		kfree(vma->vm_private_data);
-+	}
++int pciback_disable_msi(struct pciback_device *pdev,
++		struct pci_dev *dev, struct xen_pci_op *op)
++{
++	pci_disable_msi(dev);
++
++	op->value = dev->irq;
++	return 0;
 +}
 +
-+/* Called when an ioctl is made on the device.
-+ */
-+static long gntdev_ioctl(struct file *flip,
-+			 unsigned int cmd, unsigned long arg)
++int pciback_enable_msix(struct pciback_device *pdev,
++		struct pci_dev *dev, struct xen_pci_op *op)
 +{
-+	int rc = 0;
-+	gntdev_file_private_data_t *private_data = 
-+		(gntdev_file_private_data_t *) flip->private_data;
++	int i, result;
++	struct msix_entry *entries;
 +
-+	switch (cmd) {
-+	case IOCTL_GNTDEV_MAP_GRANT_REF:
-+	{
-+		struct ioctl_gntdev_map_grant_ref op;
-+		down_write(&private_data->grants_sem);
-+		down_write(&private_data->free_list_sem);
++	if (op->value > SH_INFO_MAX_VEC)
++		return -EINVAL;
 +
-+		if ((rc = copy_from_user(&op, (void __user *) arg, 
-+					 sizeof(op)))) {
-+			rc = -EFAULT;
-+			goto map_out;
-+		}
-+		if (unlikely(op.count <= 0)) {
-+			rc = -EINVAL;
-+			goto map_out;
-+		}
++	entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
++	if (entries == NULL)
++		return -ENOMEM;
 +
-+		if (op.count == 1) {
-+			if ((rc = add_grant_reference(flip, &op.refs[0],
-+						      &op.index)) < 0) {
-+				printk(KERN_ERR "Adding grant reference "
-+				       "failed (%d).\n", rc);
-+				goto map_out;
-+			}
-+		} else {
-+			struct ioctl_gntdev_grant_ref *refs, *u;
-+			refs = kmalloc(op.count * sizeof(*refs), GFP_KERNEL);
-+			if (!refs) {
-+				rc = -ENOMEM;
-+				goto map_out;
-+			}
-+			u = ((struct ioctl_gntdev_map_grant_ref *)arg)->refs;
-+			if ((rc = copy_from_user(refs,
-+						 (void __user *)u,
-+						 sizeof(*refs) * op.count))) {
-+				printk(KERN_ERR "Copying refs from user failed"
-+				       " (%d).\n", rc);
-+				rc = -EINVAL;
-+				goto map_out;
-+			}
-+			if ((rc = find_contiguous_free_range(flip, op.count))
-+			    < 0) {
-+				printk(KERN_ERR "Finding contiguous range "
-+				       "failed (%d).\n", rc);
-+				kfree(refs);
-+				goto map_out;
-+			}
-+			op.index = rc << PAGE_SHIFT;
-+			if ((rc = add_grant_references(flip, op.count,
-+						       refs, rc))) {
-+				printk(KERN_ERR "Adding grant references "
-+				       "failed (%d).\n", rc);
-+				kfree(refs);
-+				goto map_out;
-+			}
-+			compress_free_list(flip);
-+			kfree(refs);
-+		}
-+		if ((rc = copy_to_user((void __user *) arg, 
-+				       &op, 
-+				       sizeof(op)))) {
-+			printk(KERN_ERR "Copying result back to user failed "
-+			       "(%d)\n", rc);
-+			rc = -EFAULT;
-+			goto map_out;
-+		}
-+	map_out:
-+		up_write(&private_data->grants_sem);
-+		up_write(&private_data->free_list_sem);
-+		return rc;
++	for (i = 0; i < op->value; i++) {
++		entries[i].entry = op->msix_entries[i].entry;
++		entries[i].vector = op->msix_entries[i].vector;
 +	}
-+	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
-+	{
-+		struct ioctl_gntdev_unmap_grant_ref op;
-+		int i, start_index;
-+
-+		down_write(&private_data->grants_sem);
-+		down_write(&private_data->free_list_sem);
 +
-+		if ((rc = copy_from_user(&op, 
-+					 (void __user *) arg, 
-+					 sizeof(op)))) {
-+			rc = -EFAULT;
-+			goto unmap_out;
-+		}
++	result = pci_enable_msix(dev, entries, op->value);
 +
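++	/* Copy the vector assignments chosen by the PCI core back out so
++	 * the frontend can see them. */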
-+		start_index = op.index >> PAGE_SHIFT;
++	for (i = 0; i < op->value; i++) {
++		op->msix_entries[i].entry = entries[i].entry;
++		op->msix_entries[i].vector = entries[i].vector;
++	}
 +
-+		/* First, check that all pages are in the NOT_YET_MAPPED
-+		 * state.
-+		 */
-+		for (i = 0; i < op.count; ++i) {
-+			if (unlikely
-+			    (private_data->grants[start_index + i].state
-+			     != GNTDEV_SLOT_NOT_YET_MAPPED)) {
-+				if (private_data->grants[start_index + i].state
-+				    == GNTDEV_SLOT_INVALID) {
-+					printk(KERN_ERR
-+					       "Tried to remove an invalid "
-+					       "grant at offset 0x%x.",
-+					       (start_index + i) 
-+					       << PAGE_SHIFT);
-+					rc = -EINVAL;
-+				} else {
-+					printk(KERN_ERR
-+					       "Tried to remove a grant which "
-+					       "is currently mmap()-ed at "
-+					       "offset 0x%x.",
-+					       (start_index + i) 
-+					       << PAGE_SHIFT);
-+					rc = -EBUSY;
-+				}
-+				goto unmap_out;
-+			}
-+		}
++	kfree(entries);
 +
-+		/* Unmap pages and add them to the free list.
-+		 */
-+		for (i = 0; i < op.count; ++i) {
-+			private_data->grants[start_index+i].state = 
-+				GNTDEV_SLOT_INVALID;
-+			private_data->grants[start_index+i].u.free_list_index =
-+				private_data->free_list_size;
-+			private_data->free_list[private_data->free_list_size] =
-+				start_index + i;
-+			++private_data->free_list_size;
-+		}
-+		compress_free_list(flip);
++	op->value = result;
 +
-+	unmap_out:
-+		up_write(&private_data->grants_sem);
-+		up_write(&private_data->free_list_sem);
-+		return rc;
-+	}
-+	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
-+	{
-+		struct ioctl_gntdev_get_offset_for_vaddr op;
-+		struct vm_area_struct *vma;
-+		unsigned long vaddr;
++	return result;
++}
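++/* Note: pci_enable_msix() returns 0 on success, a negative errno on
++ * failure, or a positive count of available vectors when the request
++ * exceeds what the device offers; either way the result is propagated
++ * to the frontend unchanged via op->value.
++ */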
 +
-+		if ((rc = copy_from_user(&op, 
-+					 (void __user *) arg, 
-+					 sizeof(op)))) {
-+			rc = -EFAULT;
-+			goto get_offset_out;
-+		}
-+		vaddr = (unsigned long)op.vaddr;
++int pciback_disable_msix(struct pciback_device *pdev,
++		struct pci_dev *dev, struct xen_pci_op *op)
++{
 +
-+		down_read(&current->mm->mmap_sem);		
-+		vma = find_vma(current->mm, vaddr);
-+		if (vma == NULL) {
-+			rc = -EFAULT;
-+			goto get_offset_unlock_out;
-+		}
-+		if ((!vma->vm_ops) || (vma->vm_ops != &gntdev_vmops)) {
-+			printk(KERN_ERR "The vaddr specified does not belong "
-+			       "to a gntdev instance: %#lx\n", vaddr);
-+			rc = -EFAULT;
-+			goto get_offset_unlock_out;
-+		}
-+		if (vma->vm_start != vaddr) {
-+			printk(KERN_ERR "The vaddr specified in an "
-+			       "IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR must be at "
-+			       "the start of the VM area. vma->vm_start = "
-+			       "%#lx; vaddr = %#lx\n",
-+			       vma->vm_start, vaddr);
-+			rc = -EFAULT;
-+			goto get_offset_unlock_out;
-+		}
-+		op.offset = vma->vm_pgoff << PAGE_SHIFT;
-+		op.count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-+		up_read(&current->mm->mmap_sem);
-+		if ((rc = copy_to_user((void __user *) arg, 
-+				       &op, 
-+				       sizeof(op)))) {
-+			rc = -EFAULT;
-+			goto get_offset_out;
-+		}
-+		goto get_offset_out;
-+	get_offset_unlock_out:
-+		up_read(&current->mm->mmap_sem);
-+	get_offset_out:
-+		return rc;
-+	}
-+	default:
-+		return -ENOIOCTLCMD;
-+	}
++	pci_disable_msix(dev);
 +
++	op->value = dev->irq;
 +	return 0;
 +}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/gntdev/Makefile tmp-linux-2.6-xen.patch/drivers/xen/gntdev/Makefile
---- pristine-linux-2.6.18.2/drivers/xen/gntdev/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/gntdev/Makefile	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1 @@
-+obj-y	:= gntdev.o
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/Kconfig tmp-linux-2.6-xen.patch/drivers/xen/Kconfig
---- pristine-linux-2.6.18.2/drivers/xen/Kconfig	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/Kconfig	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,277 @@
-+#
-+# This Kconfig describe xen options
-+#
 +
-+mainmenu "Xen Configuration"
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/pciback/conf_space_capability_pm.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/pciback/conf_space_capability_pm.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,128 @@
++/*
++ * PCI Backend - Configuration space overlay for power management
++ *
++ * Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
++ */
 +
-+config XEN
-+	bool
-+	default y if X86_XEN || X86_64_XEN
-+	help
-+	  This is the Linux Xen port.
++#include <linux/pci.h>
++#include "conf_space.h"
++#include "conf_space_capability.h"
 +
-+if XEN
-+config XEN_INTERFACE_VERSION
-+	hex
-+	default 0x00030205
++static int pm_caps_read(struct pci_dev *dev, int offset, u16 *value,
++			void *data)
++{
++	int err;
++	u16 real_value;
 +
-+menu "XEN"
++	err = pci_read_config_word(dev, offset, &real_value);
++	if (err)
++		goto out;
 +
-+config XEN_PRIVILEGED_GUEST
-+	bool "Privileged Guest (domain 0)"
-+	depends XEN
-+	default n
-+	help
-+	  Support for privileged operation (domain 0)
++	*value = real_value & ~PCI_PM_CAP_PME_MASK;
 +
-+config XEN_UNPRIVILEGED_GUEST
-+	bool
-+	default !XEN_PRIVILEGED_GUEST
++      out:
++	return err;
++}
 +
-+config XEN_PRIVCMD
-+	bool
-+	depends on PROC_FS
-+	default y
++/* PM_OK_BITS specifies the bits that the driver domain is allowed to change.
++ * Can't allow driver domain to enable PMEs - they're shared */
++#define PM_OK_BITS (PCI_PM_CTRL_PME_STATUS|PCI_PM_CTRL_DATA_SEL_MASK)
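++/* For example: a guest write that tries to set PCI_PM_CTRL_PME_ENABLE
++ * keeps that bit's old value (cleared by pm_ctrl_init below); only the
++ * PME-status and data-select bits are taken from the guest's value.
++ */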
 +
-+config XEN_XENBUS_DEV
-+	bool
-+	depends on PROC_FS
-+	default y
++static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
++			 void *data)
++{
++	int err;
++	u16 old_value;
++	pci_power_t new_state, old_state;
 +
-+config XEN_BACKEND
-+        tristate "Backend driver support"
-+        default y
-+        help
-+          Support for backend device drivers that provide I/O services
-+          to other virtual machines.
++	err = pci_read_config_word(dev, offset, &old_value);
++	if (err)
++		goto out;
 +
-+config XEN_BLKDEV_BACKEND
-+	tristate "Block-device backend driver"
-+        depends on XEN_BACKEND
-+	default y
-+	help
-+	  The block-device backend driver allows the kernel to export its
-+	  block devices to other guests via a high-performance shared-memory
-+	  interface.
++	old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
++	new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
 +
-+config XEN_BLKDEV_TAP
-+	tristate "Block-device tap backend driver"
-+	depends on XEN_BACKEND
-+	default XEN_PRIVILEGED_GUEST
-+	help
-+	  The block tap driver is an alternative to the block back driver 
-+          and allows VM block requests to be redirected to userspace through
-+          a device interface.  The tap allows user-space development of 
-+          high-performance block backends, where disk images may be implemented
-+          as files, in memory, or on other hosts across the network.  This 
-+	  driver can safely coexist with the existing blockback driver.
++	new_value &= PM_OK_BITS;
++	if ((old_value & PM_OK_BITS) != new_value) {
++		new_value = (old_value & ~PM_OK_BITS) | new_value;
++		err = pci_write_config_word(dev, offset, new_value);
++		if (err)
++			goto out;
++	}
 +
-+config XEN_NETDEV_BACKEND
-+	tristate "Network-device backend driver"
-+        depends on XEN_BACKEND && NET
-+	default y
-+	help
-+	  The network-device backend driver allows the kernel to export its
-+	  network devices to other guests via a high-performance shared-memory
-+	  interface.
++	/* Let pci core handle the power management change */
++	dev_dbg(&dev->dev, "set power state to %x\n", new_state);
++	err = pci_set_power_state(dev, new_state);
++	if (err) {
++		err = PCIBIOS_SET_FAILED;
++		goto out;
++	}
 +
-+config XEN_NETDEV_PIPELINED_TRANSMITTER
-+	bool "Pipelined transmitter (DANGEROUS)"
-+	depends on XEN_NETDEV_BACKEND
-+	default n
-+	help
-+	  If the net backend is a dumb domain, such as a transparent Ethernet
-+	  bridge with no local IP interface, it is safe to say Y here to get
-+	  slightly lower network overhead.
-+	  If the backend has a local IP interface; or may be doing smart things
-+	  like reassembling packets to perform firewall filtering; or if you
-+	  are unsure; or if you experience network hangs when this option is
-+	  enabled; then you must say N here.
++	/*
++	 * Device may lose PCI config info on D3->D0 transition. This
++	 * is a problem for some guests which will not reset BARs. Even
++	 * those that have a go will be foiled by our BAR-write handler
++	 * which will discard the write! Since Linux won't re-init
++	 * the config space automatically in all cases, we do it here.
++	 * Future: Should we re-initialise all first 64 bytes of config space?
++	 */
++	if (new_state == PCI_D0 &&
++	    (old_state == PCI_D3hot || old_state == PCI_D3cold) &&
++	    !(old_value & PCI_PM_CTRL_NO_SOFT_RESET))
++		pci_restore_bars(dev);
 +
-+config XEN_NETDEV_LOOPBACK
-+	tristate "Network-device loopback driver"
-+	depends on XEN_NETDEV_BACKEND
-+	default y
-+	help
-+	  A two-interface loopback device to emulate a local netfront-netback
-+	  connection.
++ out:
++	return err;
++}
 +
-+config XEN_PCIDEV_BACKEND
-+	tristate "PCI-device backend driver"
-+	depends on PCI && XEN_BACKEND
-+	default XEN_PRIVILEGED_GUEST
-+	help
-+	  The PCI device backend driver allows the kernel to export arbitrary
-+	  PCI devices to other guests. If you select this to be a module, you
-+	  will need to make sure no other driver has bound to the device(s)
-+	  you want to make visible to other guests.
++/* Ensure PMEs are disabled */
++static void *pm_ctrl_init(struct pci_dev *dev, int offset)
++{
++	int err;
++	u16 value;
 +
-+choice
-+	prompt "PCI Backend Mode"
-+	depends on XEN_PCIDEV_BACKEND
-+	default XEN_PCIDEV_BACKEND_VPCI if !IA64
-+	default XEN_PCIDEV_BACKEND_CONTROLLER if IA64
++	err = pci_read_config_word(dev, offset, &value);
++	if (err)
++		goto out;
 +
-+config XEN_PCIDEV_BACKEND_VPCI
-+	bool "Virtual PCI"
-+	---help---
-+	  This PCI Backend hides the true PCI topology and makes the frontend
-+	  think there is a single PCI bus with only the exported devices on it.
-+	  For example, a device at 03:05.0 will be re-assigned to 00:00.0. A
-+	  second device at 02:1a.1 will be re-assigned to 00:01.1.
++	if (value & PCI_PM_CTRL_PME_ENABLE) {
++		value &= ~PCI_PM_CTRL_PME_ENABLE;
++		err = pci_write_config_word(dev, offset, value);
++	}
 +
-+config XEN_PCIDEV_BACKEND_PASS
-+	bool "Passthrough"
-+	---help---
-+	  This PCI Backend provides a real view of the PCI topology to the
-+	  frontend (for example, a device at 06:01.b will still appear at
-+	  06:01.b to the frontend). This is similar to how Xen 2.0.x exposed
-+	  PCI devices to its driver domains. This may be required for drivers
-+	  which depend on finding their hardward in certain bus/slot
-+	  locations.
++      out:
++	return ERR_PTR(err);
++}
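++/* Note: when err == 0, ERR_PTR(err) is NULL, i.e. this field keeps no
++ * private data; a real error reaches the caller as an ERR_PTR value.
++ */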
 +
-+config XEN_PCIDEV_BACKEND_SLOT
-+	bool "Slot"
-+	---help---
-+	  This PCI Backend hides the true PCI topology and makes the frontend
-+	  think there is a single PCI bus with only the exported devices on it.
-+	  Contrary to the virtual PCI backend, a function becomes a new slot.
-+	  For example, a device at 03:05.2 will be re-assigned to 00:00.0. A
-+	  second device at 02:1a.1 will be re-assigned to 00:01.0.
++static struct config_field caplist_pm[] = {
++	{
++		.offset     = PCI_PM_PMC,
++		.size       = 2,
++		.u.w.read   = pm_caps_read,
++	},
++	{
++		.offset     = PCI_PM_CTRL,
++		.size       = 2,
++		.init       = pm_ctrl_init,
++		.u.w.read   = pciback_read_config_word,
++		.u.w.write  = pm_ctrl_write,
++	},
++	{
++		.offset     = PCI_PM_PPB_EXTENSIONS,
++		.size       = 1,
++		.u.b.read   = pciback_read_config_byte,
++	},
++	{
++		.offset     = PCI_PM_DATA_REGISTER,
++		.size       = 1,
++		.u.b.read   = pciback_read_config_byte,
++	},
++	{
++		.size = 0,
++	},
++};
++
++struct pciback_config_capability pciback_config_capability_pm = {
++	.capability = PCI_CAP_ID_PM,
++	.fields = caplist_pm,
++};
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/pciback/conf_space_capability_vpd.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/pciback/conf_space_capability_vpd.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,42 @@
++/*
++ * PCI Backend - Configuration space overlay for Vital Product Data
++ *
++ * Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
++ */
++
++#include <linux/pci.h>
++#include "conf_space.h"
++#include "conf_space_capability.h"
++
++static int vpd_address_write(struct pci_dev *dev, int offset, u16 value,
++			     void *data)
++{
++	/* Disallow writes to the vital product data */
++	if (value & PCI_VPD_ADDR_F)
++		return PCIBIOS_SET_FAILED;
++	else
++		return pci_write_config_word(dev, offset, value);
++}
++
++static struct config_field caplist_vpd[] = {
++	{
++	 .offset    = PCI_VPD_ADDR,
++	 .size      = 2,
++	 .u.w.read  = pciback_read_config_word,
++	 .u.w.write = vpd_address_write,
++	 },
++	{
++	 .offset     = PCI_VPD_DATA,
++	 .size       = 4,
++	 .u.dw.read  = pciback_read_config_dword,
++	 .u.dw.write = NULL,
++	 },
++	{
++	 .size = 0,
++	 },
++};
++
++struct pciback_config_capability pciback_config_capability_vpd = {
++	.capability = PCI_CAP_ID_VPD,
++	.fields = caplist_vpd,
++};
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/pciback/conf_space_header.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/pciback/conf_space_header.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,323 @@
++/*
++ * PCI Backend - Handles the virtual fields in the configuration space headers.
++ *
++ * Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
++ */
 +
-+config XEN_PCIDEV_BACKEND_CONTROLLER
-+	bool "Controller"
-+	depends on IA64
-+	---help---
-+	  This PCI backend virtualizes the PCI bus topology by providing a
-+	  virtual bus per PCI root device.  Devices which are physically under
-+	  the same root bus will appear on the same virtual bus.  For systems
-+	  with complex I/O addressing, this is the only backend which supports
-+	  extended I/O port spaces and MMIO translation offsets.  This backend
-+	  also supports slot virtualization.  For example, a device at
-+	  0000:01:02.1 will be re-assigned to 0000:00:00.0.  A second device
-+	  at 0000:02:05.0 (behind a P2P bridge on bus 0000:01) will be
-+	  re-assigned to 0000:00:01.0.  A third device at 0000:16:05.0 (under
-+	  a different PCI root bus) will be re-assigned to 0000:01:00.0.
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include "pciback.h"
++#include "conf_space.h"
 +
-+endchoice
++struct pci_bar_info {
++	u32 val;
++	u32 len_val;
++	int which;
++};
 +
-+config XEN_PCIDEV_BE_DEBUG
-+	bool "PCI Backend Debugging"
-+	depends on XEN_PCIDEV_BACKEND
-+	default n
++#define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
++#define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
 +
-+config XEN_TPMDEV_BACKEND
-+	tristate "TPM-device backend driver"
-+        depends on XEN_BACKEND
-+	default n
-+	help
-+	  The TPM-device backend driver
++static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
++{
++	int err;
 +
-+config XEN_BLKDEV_FRONTEND
-+	tristate "Block-device frontend driver"
-+	depends on XEN
-+	default y
-+	help
-+	  The block-device frontend driver allows the kernel to access block
-+	  devices mounted within another guest OS. Unless you are building a
-+	  dedicated device-driver domain, or your master control domain
-+	  (domain 0), then you almost certainly want to say Y here.
++	if (!dev->is_enabled && is_enable_cmd(value)) {
++		if (unlikely(verbose_request))
++			printk(KERN_DEBUG "pciback: %s: enable\n",
++			       pci_name(dev));
++		err = pci_enable_device(dev);
++		if (err)
++			return err;
++	} else if (dev->is_enabled && !is_enable_cmd(value)) {
++		if (unlikely(verbose_request))
++			printk(KERN_DEBUG "pciback: %s: disable\n",
++			       pci_name(dev));
++		pci_disable_device(dev);
++	}
 +
-+config XEN_NETDEV_FRONTEND
-+	tristate "Network-device frontend driver"
-+	depends on XEN && NET
-+	default y
-+	help
-+	  The network-device frontend driver allows the kernel to access
-+	  network interfaces within another guest OS. Unless you are building a
-+	  dedicated device-driver domain, or your master control domain
-+	  (domain 0), then you almost certainly want to say Y here.
++	if (!dev->is_busmaster && is_master_cmd(value)) {
++		if (unlikely(verbose_request))
++			printk(KERN_DEBUG "pciback: %s: set bus master\n",
++			       pci_name(dev));
++		pci_set_master(dev);
++	}
 +
-+config XEN_FRAMEBUFFER
-+	tristate "Framebuffer-device frontend driver"
-+	depends on XEN && FB
-+	select FB_CFB_FILLRECT
-+	select FB_CFB_COPYAREA
-+	select FB_CFB_IMAGEBLIT
-+	default y
-+	help
-+	  The framebuffer-device frontend drivers allows the kernel to create a
-+	  virtual framebuffer.  This framebuffer can be viewed in another
-+	  domain.  Unless this domain has access to a real video card, you
-+	  probably want to say Y here.
++	if (value & PCI_COMMAND_INVALIDATE) {
++		if (unlikely(verbose_request))
++			printk(KERN_DEBUG
++			       "pciback: %s: enable memory-write-invalidate\n",
++			       pci_name(dev));
++		err = pci_set_mwi(dev);
++		if (err) {
++			printk(KERN_WARNING
++			       "pciback: %s: cannot enable memory-write-invalidate (%d)\n",
++			       pci_name(dev), err);
++			value &= ~PCI_COMMAND_INVALIDATE;
++		}
++	}
 +
-+config XEN_KEYBOARD
-+	tristate "Keyboard-device frontend driver"
-+	depends on XEN && XEN_FRAMEBUFFER && INPUT
-+	default y
-+	help
-+	  The keyboard-device frontend driver allows the kernel to create a
-+	  virtual keyboard.  This keyboard can then be driven by another
-+	  domain.  If you've said Y to CONFIG_XEN_FRAMEBUFFER, you probably
-+	  want to say Y here.
++	return pci_write_config_word(dev, offset, value);
++}
 +
-+config XEN_SCRUB_PAGES
-+	bool "Scrub memory before freeing it to Xen"
-+	default y
-+	help
-+	  Erase memory contents before freeing it back to Xen's global
-+	  pool. This ensures that any secrets contained within that
-+	  memory (e.g., private keys) cannot be found by other guests that
-+	  may be running on the machine. Most people will want to say Y here.
-+	  If security is not a concern then you may increase performance by
-+	  saying N.
++static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
++{
++	struct pci_bar_info *bar = data;
 +
-+config XEN_DISABLE_SERIAL
-+	bool "Disable serial port drivers"
-+	default y
-+	help
-+	  Disable serial port drivers, allowing the Xen console driver
-+	  to provide a serial console at ttyS0.
++	if (unlikely(!bar)) {
++		printk(KERN_WARNING "pciback: driver data not found for %s\n",
++		       pci_name(dev));
++		return XEN_PCI_ERR_op_failed;
++	}
 +
-+config XEN_SYSFS
-+	tristate "Export Xen attributes in sysfs"
-+	depends on SYSFS
-+	select SYS_HYPERVISOR
-+	default y
-+	help
-+	  Xen hypervisor attributes will show up under /sys/hypervisor/.
++	/* A write to obtain the length must happen as a 32-bit write.
++	 * This does not (yet) support writing individual bytes
++	 */
++	if (value == ~PCI_ROM_ADDRESS_ENABLE)
++		bar->which = 1;
++	else {
++		u32 tmpval;
++		pci_read_config_dword(dev, offset, &tmpval);
++		if (tmpval != bar->val && value == bar->val) {
++			/* Allow restoration of bar value. */
++			pci_write_config_dword(dev, offset, bar->val);
++		}
++		bar->which = 0;
++	}
 +
-+choice
-+	prompt "Xen version compatibility"
-+	default XEN_COMPAT_030002_AND_LATER
++	/* Do we need to support enabling/disabling the rom address here? */
 +
-+	config XEN_COMPAT_030002_AND_LATER
-+		bool "3.0.2 and later"
++	return 0;
++}
 +
-+	config XEN_COMPAT_030004_AND_LATER
-+		bool "3.0.4 and later"
++/* For the BARs, only allow writes which write ~0 or
++ * the correct resource information
++ * (Needed for when the driver probes the resource usage)
++ */
++static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data)
++{
++	struct pci_bar_info *bar = data;
 +
-+	config XEN_COMPAT_LATEST_ONLY
-+		bool "no compatibility code"
++	if (unlikely(!bar)) {
++		printk(KERN_WARNING "pciback: driver data not found for %s\n",
++		       pci_name(dev));
++		return XEN_PCI_ERR_op_failed;
++	}
 +
-+endchoice
++	/* A write to obtain the length must happen as a 32-bit write.
++	 * This does not (yet) support writing individual bytes
++	 */
++	if (value == ~0)
++		bar->which = 1;
++	else {
++		u32 tmpval;
++		pci_read_config_dword(dev, offset, &tmpval);
++		if (tmpval != bar->val && value == bar->val) {
++			/* Allow restoration of bar value. */
++			pci_write_config_dword(dev, offset, bar->val);
++		}
++		bar->which = 0;
++	}
 +
-+config XEN_COMPAT
-+	hex
-+	default 0xffffff if XEN_COMPAT_LATEST_ONLY
-+	default 0x030004 if XEN_COMPAT_030004_AND_LATER
-+	default 0x030002 if XEN_COMPAT_030002_AND_LATER
-+	default 0
++	return 0;
++}
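++/* The sizing handshake as the guest sees it:
++ *   write 0xffffffff         -> bar->which = 1
++ *   read                     -> bar->len_val (the size mask)
++ *   write the original value -> hardware restored, bar->which = 0
++ * Any other write is discarded without touching the hardware.
++ */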
 +
-+endmenu
++static int bar_read(struct pci_dev *dev, int offset, u32 * value, void *data)
++{
++	struct pci_bar_info *bar = data;
 +
-+config HAVE_IRQ_IGNORE_UNHANDLED
-+	bool
-+	default y
++	if (unlikely(!bar)) {
++		printk(KERN_WARNING "pciback: driver data not found for %s\n",
++		       pci_name(dev));
++		return XEN_PCI_ERR_op_failed;
++	}
 +
-+config NO_IDLE_HZ
-+	bool
-+	default y
++	*value = bar->which ? bar->len_val : bar->val;
 +
-+config XEN_SMPBOOT
-+	bool
-+	default y
-+	depends on SMP
++	return 0;
++}
 +
-+endif
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/Makefile tmp-linux-2.6-xen.patch/drivers/xen/Makefile
---- pristine-linux-2.6.18.2/drivers/xen/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/Makefile	2007-07-30 16:35:11.000000000 +0200
-@@ -0,0 +1,20 @@
-+obj-y	+= core/
-+obj-y	+= console/
-+obj-y	+= evtchn/
-+obj-y	+= privcmd/
-+obj-y	+= xenbus/
-+obj-y	+= gntdev/
-+obj-y	+= balloon/
-+obj-y	+= char/
++static inline void read_dev_bar(struct pci_dev *dev,
++				struct pci_bar_info *bar_info, int offset,
++				u32 len_mask)
++{
++	pci_read_config_dword(dev, offset, &bar_info->val);
++	pci_write_config_dword(dev, offset, len_mask);
++	pci_read_config_dword(dev, offset, &bar_info->len_val);
++	pci_write_config_dword(dev, offset, bar_info->val);
++}
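++/* The standard BAR sizing probe (write all-ones, read back the size
++ * mask, restore the original value), done once at init so later guest
++ * probes can be answered from the cached val/len_val.
++ */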
 +
-+obj-y	+= util.o
-+obj-$(CONFIG_XEN_BLKDEV_BACKEND)	+= blkback/
-+obj-$(CONFIG_XEN_BLKDEV_TAP)		+= blktap/
-+obj-$(CONFIG_XEN_NETDEV_BACKEND)	+= netback/
-+obj-$(CONFIG_XEN_TPMDEV_BACKEND)	+= tpmback/
-+obj-$(CONFIG_XEN_BLKDEV_FRONTEND)	+= blkfront/
-+obj-$(CONFIG_XEN_NETDEV_FRONTEND)	+= netfront/
-+obj-$(CONFIG_XEN_PCIDEV_BACKEND)	+= pciback/
-+obj-$(CONFIG_XEN_PCIDEV_FRONTEND)	+= pcifront/
-+obj-$(CONFIG_XEN_FRAMEBUFFER)		+= fbfront/
-+obj-$(CONFIG_XEN_KEYBOARD)		+= fbfront/
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/netback/common.h tmp-linux-2.6-xen.patch/drivers/xen/netback/common.h
---- pristine-linux-2.6.18.2/drivers/xen/netback/common.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/netback/common.h	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,157 @@
-+/******************************************************************************
-+ * arch/xen/drivers/netif/backend/common.h
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
++static void *bar_init(struct pci_dev *dev, int offset)
++{
++	struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
 +
-+#ifndef __NETIF__BACKEND__COMMON_H__
-+#define __NETIF__BACKEND__COMMON_H__
++	if (!bar)
++		return ERR_PTR(-ENOMEM);
 +
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/interrupt.h>
-+#include <linux/slab.h>
-+#include <linux/ip.h>
-+#include <linux/in.h>
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/wait.h>
-+#include <xen/evtchn.h>
-+#include <xen/interface/io/netif.h>
-+#include <asm/io.h>
-+#include <asm/pgalloc.h>
-+#include <xen/interface/grant_table.h>
-+#include <xen/gnttab.h>
-+#include <xen/driver_util.h>
++	read_dev_bar(dev, bar, offset, ~0);
++	bar->which = 0;
 +
-+#define DPRINTK(_f, _a...)			\
-+	pr_debug("(file=%s, line=%d) " _f,	\
-+		 __FILE__ , __LINE__ , ## _a )
-+#define IPRINTK(fmt, args...)				\
-+	printk(KERN_INFO "xen_net: " fmt, ##args)
-+#define WPRINTK(fmt, args...)				\
-+	printk(KERN_WARNING "xen_net: " fmt, ##args)
++	return bar;
++}
 +
-+typedef struct netif_st {
-+	/* Unique identifier for this interface. */
-+	domid_t          domid;
-+	unsigned int     handle;
++static void *rom_init(struct pci_dev *dev, int offset)
++{
++	struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
 +
-+	u8               fe_dev_addr[6];
++	if (!bar)
++		return ERR_PTR(-ENOMEM);
 +
-+	/* Physical parameters of the comms window. */
-+	grant_handle_t   tx_shmem_handle;
-+	grant_ref_t      tx_shmem_ref;
-+	grant_handle_t   rx_shmem_handle;
-+	grant_ref_t      rx_shmem_ref;
-+	unsigned int     irq;
++	read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE);
++	bar->which = 0;
 +
-+	/* The shared rings and indexes. */
-+	netif_tx_back_ring_t tx;
-+	netif_rx_back_ring_t rx;
-+	struct vm_struct *tx_comms_area;
-+	struct vm_struct *rx_comms_area;
++	return bar;
++}
 +
-+	/* Set of features that can be turned on in dev->features. */
-+	int features;
++static void bar_reset(struct pci_dev *dev, int offset, void *data)
++{
++	struct pci_bar_info *bar = data;
 +
-+	/* Internal feature information. */
-+	u8 can_queue:1;	/* can queue packets for receiver? */
-+	u8 copying_receiver:1;	/* copy packets to receiver?       */
++	bar->which = 0;
++}
 +
-+	/* Allow netif_be_start_xmit() to peek ahead in the rx request ring. */
-+	RING_IDX rx_req_cons_peek;
++static void bar_release(struct pci_dev *dev, int offset, void *data)
++{
++	kfree(data);
++}
 +
-+	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
-+	unsigned long   credit_bytes;
-+	unsigned long   credit_usec;
-+	unsigned long   remaining_credit;
-+	struct timer_list credit_timeout;
++static int interrupt_read(struct pci_dev *dev, int offset, u8 * value,
++			  void *data)
++{
++	*value = (u8) dev->irq;
 +
-+	/* Enforce draining of the transmit queue. */
-+	struct timer_list tx_queue_timeout;
++	return 0;
++}
 +
-+	/* Miscellaneous private stuff. */
-+	struct list_head list;  /* scheduling list */
-+	atomic_t         refcnt;
-+	struct net_device *dev;
-+	struct net_device_stats stats;
++static int bist_write(struct pci_dev *dev, int offset, u8 value, void *data)
++{
++	u8 cur_value;
++	int err;
 +
-+	unsigned int carrier;
++	err = pci_read_config_byte(dev, offset, &cur_value);
++	if (err)
++		goto out;
 +
-+	wait_queue_head_t waiting_to_free;
-+} netif_t;
++	if ((cur_value & ~PCI_BIST_START) == (value & ~PCI_BIST_START)
++	    || value == PCI_BIST_START)
++		err = pci_write_config_byte(dev, offset, value);
 +
-+/*
-+ * Implement our own carrier flag: the network stack's version causes delays
-+ * when the carrier is re-enabled (in particular, dev_activate() may not
-+ * immediately be called, which can cause packet loss; also the etherbridge
-+ * can be rather lazy in activating its port).
-+ */
-+#define netback_carrier_on(netif)	((netif)->carrier = 1)
-+#define netback_carrier_off(netif)	((netif)->carrier = 0)
-+#define netback_carrier_ok(netif)	((netif)->carrier)
++      out:
++	return err;
++}
 +
-+#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
-+#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
++static struct config_field header_common[] = {
++	{
++	 .offset    = PCI_COMMAND,
++	 .size      = 2,
++	 .u.w.read  = pciback_read_config_word,
++	 .u.w.write = command_write,
++	},
++	{
++	 .offset    = PCI_INTERRUPT_LINE,
++	 .size      = 1,
++	 .u.b.read  = interrupt_read,
++	},
++	{
++	 .offset    = PCI_INTERRUPT_PIN,
++	 .size      = 1,
++	 .u.b.read  = pciback_read_config_byte,
++	},
++	{
++	 /* Any side effects of letting driver domain control cache line? */
++	 .offset    = PCI_CACHE_LINE_SIZE,
++	 .size      = 1,
++	 .u.b.read  = pciback_read_config_byte,
++	 .u.b.write = pciback_write_config_byte,
++	},
++	{
++	 .offset    = PCI_LATENCY_TIMER,
++	 .size      = 1,
++	 .u.b.read  = pciback_read_config_byte,
++	},
++	{
++	 .offset    = PCI_BIST,
++	 .size      = 1,
++	 .u.b.read  = pciback_read_config_byte,
++	 .u.b.write = bist_write,
++	},
++	{
++	 .size = 0,
++	},
++};
 +
-+void netif_disconnect(netif_t *netif);
++#define CFG_FIELD_BAR(reg_offset) 			\
++	{ 						\
++	 .offset     = reg_offset, 			\
++	 .size       = 4, 				\
++	 .init       = bar_init, 			\
++	 .reset      = bar_reset, 			\
++	 .release    = bar_release, 			\
++	 .u.dw.read  = bar_read, 			\
++	 .u.dw.write = bar_write, 			\
++	 }
 +
-+netif_t *netif_alloc(domid_t domid, unsigned int handle);
-+int netif_map(netif_t *netif, unsigned long tx_ring_ref,
-+	      unsigned long rx_ring_ref, unsigned int evtchn);
++#define CFG_FIELD_ROM(reg_offset) 			\
++	{ 						\
++	 .offset     = reg_offset, 			\
++	 .size       = 4, 				\
++	 .init       = rom_init, 			\
++	 .reset      = bar_reset, 			\
++	 .release    = bar_release, 			\
++	 .u.dw.read  = bar_read, 			\
++	 .u.dw.write = rom_write, 			\
++	 }
 +
-+#define netif_get(_b) (atomic_inc(&(_b)->refcnt))
-+#define netif_put(_b)						\
-+	do {							\
-+		if ( atomic_dec_and_test(&(_b)->refcnt) )	\
-+			wake_up(&(_b)->waiting_to_free);	\
-+	} while (0)
++static struct config_field header_0[] = {
++	CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
++	CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
++	CFG_FIELD_BAR(PCI_BASE_ADDRESS_2),
++	CFG_FIELD_BAR(PCI_BASE_ADDRESS_3),
++	CFG_FIELD_BAR(PCI_BASE_ADDRESS_4),
++	CFG_FIELD_BAR(PCI_BASE_ADDRESS_5),
++	CFG_FIELD_ROM(PCI_ROM_ADDRESS),
++	{
++	 .size = 0,
++	},
++};
 +
-+void netif_xenbus_init(void);
++static struct config_field header_1[] = {
++	CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
++	CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
++	CFG_FIELD_ROM(PCI_ROM_ADDRESS1),
++	{
++	 .size = 0,
++	},
++};
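++/* header_0 covers the six type 0 BARs plus the expansion ROM; header_1
++ * covers the two type 1 (bridge) BARs and the bridge ROM register.
++ */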
 +
-+#define netif_schedulable(netif)				\
-+	(netif_running((netif)->dev) && netback_carrier_ok(netif))
++int pciback_config_header_add_fields(struct pci_dev *dev)
++{
++	int err;
 +
-+void netif_schedule_work(netif_t *netif);
-+void netif_deschedule_work(netif_t *netif);
++	err = pciback_config_add_fields(dev, header_common);
++	if (err)
++		goto out;
 +
-+int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
-+struct net_device_stats *netif_be_get_stats(struct net_device *dev);
-+irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++	switch (dev->hdr_type) {
++	case PCI_HEADER_TYPE_NORMAL:
++		err = pciback_config_add_fields(dev, header_0);
++		break;
++
++	case PCI_HEADER_TYPE_BRIDGE:
++		err = pciback_config_add_fields(dev, header_1);
++		break;
 +
-+static inline int netbk_can_queue(struct net_device *dev)
-+{
-+	netif_t *netif = netdev_priv(dev);
-+	return netif->can_queue;
-+}
++	default:
++		err = -EINVAL;
++		printk(KERN_ERR "pciback: %s: Unsupported header type %d!\n",
++		       pci_name(dev), dev->hdr_type);
++		break;
++	}
 +
-+static inline int netbk_can_sg(struct net_device *dev)
-+{
-+	netif_t *netif = netdev_priv(dev);
-+	return netif->features & NETIF_F_SG;
++      out:
++	return err;
 +}
-+
-+#endif /* __NETIF__BACKEND__COMMON_H__ */
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/netback/interface.c tmp-linux-2.6-xen.patch/drivers/xen/netback/interface.c
---- pristine-linux-2.6.18.2/drivers/xen/netback/interface.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/netback/interface.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,336 @@
-+/******************************************************************************
-+ * arch/xen/drivers/netif/backend/interface.c
-+ * 
-+ * Network-device interface management.
-+ * 
-+ * Copyright (c) 2004-2005, Keir Fraser
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/pciback/conf_space_quirks.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/pciback/conf_space_quirks.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,126 @@
++/*
++ * PCI Backend - Handle special overlays for broken devices.
++ *
++ * Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
++ * Author: Chris Bookholt <hap10 at epoch.ncsc.mil>
 + */
 +
-+#include "common.h"
-+#include <linux/ethtool.h>
-+#include <linux/rtnetlink.h>
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include "pciback.h"
++#include "conf_space.h"
++#include "conf_space_quirks.h"
 +
-+/*
-+ * Module parameter 'queue_length':
-+ * 
-+ * Enables queuing in the network stack when a client has run out of receive
-+ * descriptors. Although this feature can improve receive bandwidth by avoiding
-+ * packet loss, it can also result in packets sitting in the 'tx_queue' for
-+ * unbounded time. This is bad if those packets hold onto foreign resources.
-+ * For example, consider a packet that holds onto resources belonging to the
-+ * guest for which it is queued (e.g., packet received on vif1.0, destined for
-+ * vif1.1 which is not activated in the guest): in this situation the guest
-+ * will never be destroyed, unless vif1.1 is taken down. To avoid this, we
-+ * run a timer (tx_queue_timeout) to drain the queue when the interface is
-+ * blocked.
-+ */
-+static unsigned long netbk_queue_length = 32;
-+module_param_named(queue_length, netbk_queue_length, ulong, 0);
++LIST_HEAD(pciback_quirks);
 +
-+static void __netif_up(netif_t *netif)
++struct pciback_config_quirk *pciback_find_quirk(struct pci_dev *dev)
 +{
-+	enable_irq(netif->irq);
-+	netif_schedule_work(netif);
++	struct pciback_config_quirk *tmp_quirk;
++
++	list_for_each_entry(tmp_quirk, &pciback_quirks, quirks_list)
++	    if (pci_match_id(&tmp_quirk->devid, dev))
++		goto out;
++	tmp_quirk = NULL;
++	printk(KERN_DEBUG
++	       "pciback: device does not match any registered quirk\n");
++      out:
++	return tmp_quirk;
 +}
 +
-+static void __netif_down(netif_t *netif)
++static inline void register_quirk(struct pciback_config_quirk *quirk)
 +{
-+	disable_irq(netif->irq);
-+	netif_deschedule_work(netif);
++	list_add_tail(&quirk->quirks_list, &pciback_quirks);
 +}
 +
-+static int net_open(struct net_device *dev)
++int pciback_field_is_dup(struct pci_dev *dev, unsigned int reg)
 +{
-+	netif_t *netif = netdev_priv(dev);
-+	if (netback_carrier_ok(netif)) {
-+		__netif_up(netif);
-+		netif_start_queue(dev);
++	int ret = 0;
++	struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++	struct config_field_entry *cfg_entry;
++
++	list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
++		if (OFFSET(cfg_entry) == reg) {
++			ret = 1;
++			break;
++		}
 +	}
-+	return 0;
++	return ret;
 +}
 +
-+static int net_close(struct net_device *dev)
++int pciback_config_quirks_add_field(struct pci_dev *dev, struct config_field
++				    *field)
 +{
-+	netif_t *netif = netdev_priv(dev);
-+	if (netback_carrier_ok(netif))
-+		__netif_down(netif);
-+	netif_stop_queue(dev);
-+	return 0;
-+}
++	int err = 0;
 +
-+static int netbk_change_mtu(struct net_device *dev, int mtu)
-+{
-+	int max = netbk_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
++	switch (field->size) {
++	case 1:
++		field->u.b.read = pciback_read_config_byte;
++		field->u.b.write = pciback_write_config_byte;
++		break;
++	case 2:
++		field->u.w.read = pciback_read_config_word;
++		field->u.w.write = pciback_write_config_word;
++		break;
++	case 4:
++		field->u.dw.read = pciback_read_config_dword;
++		field->u.dw.write = pciback_write_config_dword;
++		break;
++	default:
++		err = -EINVAL;
++		goto out;
++	}
 +
-+	if (mtu > max)
-+		return -EINVAL;
-+	dev->mtu = mtu;
-+	return 0;
++	pciback_config_add_field(dev, field);
++
++      out:
++	return err;
 +}
 +
-+static int netbk_set_sg(struct net_device *dev, u32 data)
++int pciback_config_quirks_init(struct pci_dev *dev)
 +{
-+	if (data) {
-+		netif_t *netif = netdev_priv(dev);
++	struct pciback_config_quirk *quirk;
++	int ret = 0;
 +
-+		if (!(netif->features & NETIF_F_SG))
-+			return -ENOSYS;
++	quirk = kzalloc(sizeof(*quirk), GFP_ATOMIC);
++	if (!quirk) {
++		ret = -ENOMEM;
++		goto out;
 +	}
 +
-+	return ethtool_op_set_sg(dev, data);
++	quirk->devid.vendor = dev->vendor;
++	quirk->devid.device = dev->device;
++	quirk->devid.subvendor = dev->subsystem_vendor;
++	quirk->devid.subdevice = dev->subsystem_device;
++	quirk->devid.class = 0;
++	quirk->devid.class_mask = 0;
++	quirk->devid.driver_data = 0UL;
++
++	quirk->pdev = dev;
++
++	register_quirk(quirk);
++      out:
++	return ret;
 +}
 +
-+static int netbk_set_tso(struct net_device *dev, u32 data)
++void pciback_config_field_free(struct config_field *field)
 +{
-+	if (data) {
-+		netif_t *netif = netdev_priv(dev);
++	kfree(field);
++}
 +
-+		if (!(netif->features & NETIF_F_TSO))
-+			return -ENOSYS;
++int pciback_config_quirk_release(struct pci_dev *dev)
++{
++	struct pciback_config_quirk *quirk;
++	int ret = 0;
++
++	quirk = pciback_find_quirk(dev);
++	if (!quirk) {
++		ret = -ENXIO;
++		goto out;
 +	}
 +
-+	return ethtool_op_set_tso(dev, data);
++	list_del(&quirk->quirks_list);
++	kfree(quirk);
++
++      out:
++	return ret;
 +}
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/pciback/conf_space_quirks.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/pciback/conf_space_quirks.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,35 @@
++/*
++ * PCI Backend - Data structures for special overlays for broken devices.
++ *
++ * Ryan Wilson <hap9 at epoch.ncsc.mil>
++ * Chris Bookholt <hap10 at epoch.ncsc.mil>
++ */
 +
-+static struct ethtool_ops network_ethtool_ops =
-+{
-+	.get_tx_csum = ethtool_op_get_tx_csum,
-+	.set_tx_csum = ethtool_op_set_tx_csum,
-+	.get_sg = ethtool_op_get_sg,
-+	.set_sg = netbk_set_sg,
-+	.get_tso = ethtool_op_get_tso,
-+	.set_tso = netbk_set_tso,
-+	.get_link = ethtool_op_get_link,
++#ifndef __XEN_PCIBACK_CONF_SPACE_QUIRKS_H__
++#define __XEN_PCIBACK_CONF_SPACE_QUIRKS_H__
++
++#include <linux/pci.h>
++#include <linux/list.h>
++
++struct pciback_config_quirk {
++	struct list_head quirks_list;
++	struct pci_device_id devid;
++	struct pci_dev *pdev;
 +};
 +
-+netif_t *netif_alloc(domid_t domid, unsigned int handle)
-+{
-+	int err = 0;
-+	struct net_device *dev;
-+	netif_t *netif;
-+	char name[IFNAMSIZ] = {};
++struct pciback_config_quirk *pciback_find_quirk(struct pci_dev *dev);
 +
-+	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
-+	dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
-+	if (dev == NULL) {
-+		DPRINTK("Could not create netif: out of memory\n");
-+		return ERR_PTR(-ENOMEM);
-+	}
++int pciback_config_quirks_add_field(struct pci_dev *dev, struct config_field
++				    *field);
 +
-+	netif = netdev_priv(dev);
-+	memset(netif, 0, sizeof(*netif));
-+	netif->domid  = domid;
-+	netif->handle = handle;
-+	atomic_set(&netif->refcnt, 1);
-+	init_waitqueue_head(&netif->waiting_to_free);
-+	netif->dev = dev;
++int pciback_config_quirks_remove_field(struct pci_dev *dev, int reg);
 +
-+	netback_carrier_off(netif);
++int pciback_config_quirks_init(struct pci_dev *dev);
 +
-+	netif->credit_bytes = netif->remaining_credit = ~0UL;
-+	netif->credit_usec  = 0UL;
-+	init_timer(&netif->credit_timeout);
-+	/* Initialize 'expires' now: it's used to track the credit window. */
-+	netif->credit_timeout.expires = jiffies;
++void pciback_config_field_free(struct config_field *field);
 +
-+	init_timer(&netif->tx_queue_timeout);
++int pciback_config_quirk_release(struct pci_dev *dev);
 +
-+	dev->hard_start_xmit = netif_be_start_xmit;
-+	dev->get_stats       = netif_be_get_stats;
-+	dev->open            = net_open;
-+	dev->stop            = net_close;
-+	dev->change_mtu	     = netbk_change_mtu;
-+	dev->features        = NETIF_F_IP_CSUM;
++int pciback_field_is_dup(struct pci_dev *dev, unsigned int reg);
 +
-+	SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
++#endif
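
Note: controller.c below compacts the host topology: every pci_controller
gets one virtual domain:bus, and the devices under it receive consecutive
virtual slots by advancing next_devfn in PCI_DEVFN(1, 0) steps. For
reference, the standard <linux/pci.h> encoding this arithmetic relies on,
with the slot in bits 7:3 of devfn and the function in bits 2:0:

	/* Standard kernel macros, reproduced for reference:
	 * PCI_DEVFN(1, 0) == 0x08, i.e. next slot, function 0. */
	#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
	#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
	#define PCI_FUNC(devfn)		((devfn) & 0x07)
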
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/pciback/controller.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/pciback/controller.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,408 @@
++/*
++ * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
++ *      Alex Williamson <alex.williamson at hp.com>
++ *
++ * PCI "Controller" Backend - virtualize PCI bus topology based on PCI
++ * controllers.  Devices under the same PCI controller are exposed on the
++ * same virtual domain:bus.  Within a bus, device slots are virtualized
++ * to compact the bus.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ */
 +
-+	dev->tx_queue_len = netbk_queue_length;
++#include <linux/acpi.h>
++#include <linux/list.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include "pciback.h"
 +
-+	/*
-+	 * Initialise a dummy MAC address. We choose the numerically
-+	 * largest non-broadcast address to prevent the address getting
-+	 * stolen by an Ethernet bridge for STP purposes.
-+	 * (FE:FF:FF:FF:FF:FF)
-+	 */ 
-+	memset(dev->dev_addr, 0xFF, ETH_ALEN);
-+	dev->dev_addr[0] &= ~0x01;
++#define PCI_MAX_BUSSES	255
++#define PCI_MAX_SLOTS	32
 +
-+	rtnl_lock();
-+	err = register_netdevice(dev);
-+	rtnl_unlock();
-+	if (err) {
-+		DPRINTK("Could not register new net device %s: err=%d\n",
-+			dev->name, err);
-+		free_netdev(dev);
-+		return ERR_PTR(err);
++struct controller_dev_entry {
++	struct list_head list;
++	struct pci_dev *dev;
++	unsigned int devfn;
++};
++
++struct controller_list_entry {
++	struct list_head list;
++	struct pci_controller *controller;
++	unsigned int domain;
++	unsigned int bus;
++	unsigned int next_devfn;
++	struct list_head dev_list;
++};
++
++struct controller_dev_data {
++	struct list_head list;
++	unsigned int next_domain;
++	unsigned int next_bus;
++	spinlock_t lock;
++};
++
++struct walk_info {
++	struct pciback_device *pdev;
++	int resource_count;
++	int root_num;
++};
++
++struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
++				    unsigned int domain, unsigned int bus,
++				    unsigned int devfn)
++{
++	struct controller_dev_data *dev_data = pdev->pci_dev_data;
++	struct controller_dev_entry *dev_entry;
++	struct controller_list_entry *cntrl_entry;
++	struct pci_dev *dev = NULL;
++	unsigned long flags;
++
++	spin_lock_irqsave(&dev_data->lock, flags);
++
++	list_for_each_entry(cntrl_entry, &dev_data->list, list) {
++		if (cntrl_entry->domain != domain ||
++		    cntrl_entry->bus != bus)
++			continue;
++
++		list_for_each_entry(dev_entry, &cntrl_entry->dev_list, list) {
++			if (devfn == dev_entry->devfn) {
++				dev = dev_entry->dev;
++				goto found;
++			}
++		}
 +	}
++found:
++	spin_unlock_irqrestore(&dev_data->lock, flags);
 +
-+	DPRINTK("Successfully created netif\n");
-+	return netif;
++	return dev;
 +}
 +
-+static int map_frontend_pages(
-+	netif_t *netif, grant_ref_t tx_ring_ref, grant_ref_t rx_ring_ref)
++int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
++			int devid, publish_pci_dev_cb publish_cb)
 +{
-+	struct gnttab_map_grant_ref op;
++	struct controller_dev_data *dev_data = pdev->pci_dev_data;
++	struct controller_dev_entry *dev_entry;
++	struct controller_list_entry *cntrl_entry;
++	struct pci_controller *dev_controller = PCI_CONTROLLER(dev);
++	unsigned long flags;
++	int ret = 0, found = 0;
 +
-+	gnttab_set_map_op(&op, (unsigned long)netif->tx_comms_area->addr,
-+			  GNTMAP_host_map, tx_ring_ref, netif->domid);
-+    
-+	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-+		BUG();
++	spin_lock_irqsave(&dev_data->lock, flags);
 +
-+	if (op.status) { 
-+		DPRINTK(" Gnttab failure mapping tx_ring_ref!\n");
-+		return op.status;
++	/* Look to see if we already have a domain:bus for this controller */
++	list_for_each_entry(cntrl_entry, &dev_data->list, list) {
++		if (cntrl_entry->controller == dev_controller) {
++			found = 1;
++			break;
++		}
 +	}
 +
-+	netif->tx_shmem_ref    = tx_ring_ref;
-+	netif->tx_shmem_handle = op.handle;
++	if (!found) {
++		cntrl_entry = kmalloc(sizeof(*cntrl_entry), GFP_ATOMIC);
++		if (!cntrl_entry) {
++			ret =  -ENOMEM;
++			goto out;
++		}
 +
-+	gnttab_set_map_op(&op, (unsigned long)netif->rx_comms_area->addr,
-+			  GNTMAP_host_map, rx_ring_ref, netif->domid);
++		cntrl_entry->controller = dev_controller;
++		cntrl_entry->next_devfn = PCI_DEVFN(0, 0);
 +
-+	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-+		BUG();
++		cntrl_entry->domain = dev_data->next_domain;
++		cntrl_entry->bus = dev_data->next_bus++;
++		if (dev_data->next_bus > PCI_MAX_BUSSES) {
++			dev_data->next_domain++;
++			dev_data->next_bus = 0;
++		}
 +
-+	if (op.status) {
-+		DPRINTK(" Gnttab failure mapping rx_ring_ref!\n");
-+		return op.status;
++		INIT_LIST_HEAD(&cntrl_entry->dev_list);
++
++		list_add_tail(&cntrl_entry->list, &dev_data->list);
 +	}
 +
-+	netif->rx_shmem_ref    = rx_ring_ref;
-+	netif->rx_shmem_handle = op.handle;
++	if (PCI_SLOT(cntrl_entry->next_devfn) > PCI_MAX_SLOTS) {
++		/*
++		 * While it seems unlikely, this can actually happen if
++		 * a controller has P2P bridges under it.
++		 */
++		xenbus_dev_fatal(pdev->xdev, -ENOSPC, "Virtual bus %04x:%02x "
++				 "is full, no room to export %04x:%02x:%02x.%x",
++				 cntrl_entry->domain, cntrl_entry->bus,
++				 pci_domain_nr(dev->bus), dev->bus->number,
++				 PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
++		ret = -ENOSPC;
++		goto out;
++	}
 +
-+	return 0;
-+}
++	dev_entry = kmalloc(sizeof(*dev_entry), GFP_ATOMIC);
++	if (!dev_entry) {
++		if (list_empty(&cntrl_entry->dev_list)) {
++			list_del(&cntrl_entry->list);
++			kfree(cntrl_entry);
++		}
++		ret = -ENOMEM;
++		goto out;
++	}
 +
-+static void unmap_frontend_pages(netif_t *netif)
-+{
-+	struct gnttab_unmap_grant_ref op;
++	dev_entry->dev = dev;
++	dev_entry->devfn = cntrl_entry->next_devfn;
 +
-+	gnttab_set_unmap_op(&op, (unsigned long)netif->tx_comms_area->addr,
-+			    GNTMAP_host_map, netif->tx_shmem_handle);
++	list_add_tail(&dev_entry->list, &cntrl_entry->dev_list);
 +
-+	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
-+		BUG();
++	cntrl_entry->next_devfn += PCI_DEVFN(1, 0);
 +
-+	gnttab_set_unmap_op(&op, (unsigned long)netif->rx_comms_area->addr,
-+			    GNTMAP_host_map, netif->rx_shmem_handle);
++out:
++	spin_unlock_irqrestore(&dev_data->lock, flags);
 +
-+	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
-+		BUG();
++	/* TODO: Publish virtual domain:bus:slot.func here. */
++
++	return ret;
 +}
 +
-+int netif_map(netif_t *netif, unsigned long tx_ring_ref,
-+	      unsigned long rx_ring_ref, unsigned int evtchn)
++void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
 +{
-+	int err = -ENOMEM;
-+	netif_tx_sring_t *txs;
-+	netif_rx_sring_t *rxs;
++	struct controller_dev_data *dev_data = pdev->pci_dev_data;
++	struct controller_list_entry *cntrl_entry;
++	struct controller_dev_entry *dev_entry = NULL;
++	struct pci_dev *found_dev = NULL;
++	unsigned long flags;
 +
-+	/* Already connected through? */
-+	if (netif->irq)
-+		return 0;
++	spin_lock_irqsave(&dev_data->lock, flags);
 +
-+	netif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
-+	if (netif->tx_comms_area == NULL)
-+		return -ENOMEM;
-+	netif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
-+	if (netif->rx_comms_area == NULL)
-+		goto err_rx;
++	list_for_each_entry(cntrl_entry, &dev_data->list, list) {
++		if (cntrl_entry->controller != PCI_CONTROLLER(dev))
++			continue;
++
++		list_for_each_entry(dev_entry, &cntrl_entry->dev_list, list) {
++			if (dev_entry->dev == dev) {
++				found_dev = dev_entry->dev;
++				break;
++			}
++		}
++	}
++
++	if (!found_dev) {
++		spin_unlock_irqrestore(&dev_data->lock, flags);
++		return;
++	}
++
++	list_del(&dev_entry->list);
++	kfree(dev_entry);
 +
-+	err = map_frontend_pages(netif, tx_ring_ref, rx_ring_ref);
-+	if (err)
-+		goto err_map;
++	if (list_empty(&cntrl_entry->dev_list)) {
++		list_del(&cntrl_entry->list);
++		kfree(cntrl_entry);
++	}
 +
-+	err = bind_interdomain_evtchn_to_irqhandler(
-+		netif->domid, evtchn, netif_be_int, 0,
-+		netif->dev->name, netif);
-+	if (err < 0)
-+		goto err_hypervisor;
-+	netif->irq = err;
-+	disable_irq(netif->irq);
++	spin_unlock_irqrestore(&dev_data->lock, flags);
++	pcistub_put_pci_dev(found_dev);
++}
 +
-+	txs = (netif_tx_sring_t *)netif->tx_comms_area->addr;
-+	BACK_RING_INIT(&netif->tx, txs, PAGE_SIZE);
++int pciback_init_devices(struct pciback_device *pdev)
++{
++	struct controller_dev_data *dev_data;
 +
-+	rxs = (netif_rx_sring_t *)
-+		((char *)netif->rx_comms_area->addr);
-+	BACK_RING_INIT(&netif->rx, rxs, PAGE_SIZE);
++	dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
++	if (!dev_data)
++		return -ENOMEM;
 +
-+	netif->rx_req_cons_peek = 0;
++	spin_lock_init(&dev_data->lock);
 +
-+	netif_get(netif);
++	INIT_LIST_HEAD(&dev_data->list);
 +
-+	rtnl_lock();
-+	netback_carrier_on(netif);
-+	if (netif_running(netif->dev))
-+		__netif_up(netif);
-+	rtnl_unlock();
++	/* Starting domain:bus numbers */
++	dev_data->next_domain = 0;
++	dev_data->next_bus = 0;
++
++	pdev->pci_dev_data = dev_data;
 +
 +	return 0;
-+err_hypervisor:
-+	unmap_frontend_pages(netif);
-+err_map:
-+	free_vm_area(netif->rx_comms_area);
-+err_rx:
-+	free_vm_area(netif->tx_comms_area);
-+	return err;
 +}
 +
-+void netif_disconnect(netif_t *netif)
++static acpi_status write_xenbus_resource(struct acpi_resource *res, void *data)
 +{
-+	if (netback_carrier_ok(netif)) {
-+		rtnl_lock();
-+		netback_carrier_off(netif);
-+		netif_carrier_off(netif->dev); /* discard queued packets */
-+		if (netif_running(netif->dev))
-+			__netif_down(netif);
-+		rtnl_unlock();
-+		netif_put(netif);
-+	}
++	struct walk_info *info = data;
++	struct acpi_resource_address64 addr;
++	acpi_status status;
++	int i, len, err;
++	char str[32], tmp[3];
++	unsigned char *ptr, *buf;
 +
-+	atomic_dec(&netif->refcnt);
-+	wait_event(netif->waiting_to_free, atomic_read(&netif->refcnt) == 0);
++	status = acpi_resource_to_address64(res, &addr);
 +
-+	del_timer_sync(&netif->credit_timeout);
-+	del_timer_sync(&netif->tx_queue_timeout);
++	/* Do we care about this range?  Let's check. */
++	if (!ACPI_SUCCESS(status) ||
++	    !(addr.resource_type == ACPI_MEMORY_RANGE ||
++	      addr.resource_type == ACPI_IO_RANGE) ||
++	    !addr.address_length || addr.producer_consumer != ACPI_PRODUCER)
++		return AE_OK;
 +
-+	if (netif->irq)
-+		unbind_from_irqhandler(netif->irq, netif);
-+	
-+	unregister_netdev(netif->dev);
++	/*
++	 * Furthermore, we really only care to tell the guest about
++	 * address ranges that require address translation of some sort.
++	 */
++	if (!(addr.resource_type == ACPI_MEMORY_RANGE &&
++	      addr.info.mem.translation) &&
++	    !(addr.resource_type == ACPI_IO_RANGE &&
++	      addr.info.io.translation))
++		return AE_OK;
++
++	/* Store the resource in xenbus for the guest */
++	len = snprintf(str, sizeof(str), "root-%d-resource-%d",
++		       info->root_num, info->resource_count);
++	if (unlikely(len >= (sizeof(str) - 1)))
++		return AE_OK;
 +
-+	if (netif->tx.sring) {
-+		unmap_frontend_pages(netif);
-+		free_vm_area(netif->tx_comms_area);
-+		free_vm_area(netif->rx_comms_area);
-+	}
++	buf = kzalloc((sizeof(*res) * 2) + 1, GFP_KERNEL);
++	if (!buf)
++		return AE_OK;
 +
-+	free_netdev(netif->dev);
-+}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/netback/loopback.c tmp-linux-2.6-xen.patch/drivers/xen/netback/loopback.c
---- pristine-linux-2.6.18.2/drivers/xen/netback/loopback.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/netback/loopback.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,324 @@
-+/******************************************************************************
-+ * netback/loopback.c
-+ * 
-+ * A two-interface loopback device to emulate a local netfront-netback
-+ * connection. This ensures that local packet delivery looks identical
-+ * to inter-domain delivery. Most importantly, packets delivered locally
-+ * originating from other domains will get *copied* when they traverse this
-+ * driver. This prevents unbounded delays in socket-buffer queues from
-+ * causing the netback driver to "seize up".
-+ * 
-+ * This driver creates a symmetric pair of loopback interfaces with names
-+ * vif0.0 and veth0. The intention is that 'vif0.0' is bound to an Ethernet
-+ * bridge, just like a proper netback interface, while a local IP interface
-+ * is configured on 'veth0'.
-+ * 
-+ * As with a real netback interface, vif0.0 is configured with a suitable
-+ * dummy MAC address. No default is provided for veth0: a reasonable strategy
-+ * is to transfer eth0's MAC address to veth0, and give eth0 a dummy address
-+ * (to avoid confusing the Etherbridge).
-+ * 
-+ * Copyright (c) 2005 K A Fraser
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
++	/* Clean out resource_source */
++	res->data.address64.resource_source.index = 0xFF;
++	res->data.address64.resource_source.string_length = 0;
++	res->data.address64.resource_source.string_ptr = NULL;
 +
-+#include <linux/module.h>
-+#include <linux/netdevice.h>
-+#include <linux/inetdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/skbuff.h>
-+#include <linux/ethtool.h>
-+#include <net/dst.h>
-+#include <net/xfrm.h>		/* secpath_reset() */
-+#include <asm/hypervisor.h>	/* is_initial_xendomain() */
++	ptr = (unsigned char *)res;
 +
-+static int nloopbacks = -1;
-+module_param(nloopbacks, int, 0);
-+MODULE_PARM_DESC(nloopbacks, "Number of netback-loopback devices to create");
++	/* Turn the acpi_resource into an ASCII byte stream */
++	for (i = 0; i < sizeof(*res); i++) {
++		snprintf(tmp, sizeof(tmp), "%02x", ptr[i]);
++		strncat(buf, tmp, 2);
++	}
 +
-+struct net_private {
-+	struct net_device *loopback_dev;
-+	struct net_device_stats stats;
-+};
++	err = xenbus_printf(XBT_NIL, info->pdev->xdev->nodename,
++			    str, "%s", buf);
 +
-+static int loopback_open(struct net_device *dev)
-+{
-+	struct net_private *np = netdev_priv(dev);
-+	memset(&np->stats, 0, sizeof(np->stats));
-+	netif_start_queue(dev);
-+	return 0;
-+}
++	if (!err)
++		info->resource_count++;
 +
-+static int loopback_close(struct net_device *dev)
-+{
-+	netif_stop_queue(dev);
-+	return 0;
-+}
++	kfree(buf);
 +
-+#ifdef CONFIG_X86
-+static int is_foreign(unsigned long pfn)
-+{
-+	/* NB. Play it safe for auto-translation mode. */
-+	return (xen_feature(XENFEAT_auto_translated_physmap) ||
-+		(phys_to_machine_mapping[pfn] & FOREIGN_FRAME_BIT));
++	return AE_OK;
 +}
-+#else
-+/* How to detect a foreign mapping? Play it safe. */
-+#define is_foreign(pfn)	(1)
-+#endif
 +
-+static int skb_remove_foreign_references(struct sk_buff *skb)
++int pciback_publish_pci_roots(struct pciback_device *pdev,
++			      publish_pci_root_cb publish_root_cb)
 +{
-+	struct page *page;
-+	unsigned long pfn;
-+	int i, off;
-+	char *vaddr;
-+
-+	BUG_ON(skb_shinfo(skb)->frag_list);
++	struct controller_dev_data *dev_data = pdev->pci_dev_data;
++	struct controller_list_entry *cntrl_entry;
++	int i, root_num, len, err = 0;
++	unsigned int domain, bus;
++	char str[64];
++	struct walk_info info;
 +
-+	if (skb_cloned(skb) &&
-+	    unlikely(pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
-+		return 0;
++	spin_lock(&dev_data->lock);
 +
-+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+		pfn = page_to_pfn(skb_shinfo(skb)->frags[i].page);
-+		if (!is_foreign(pfn))
-+			continue;
-+		
-+		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
-+		if (unlikely(!page))
-+			return 0;
++	list_for_each_entry(cntrl_entry, &dev_data->list, list) {
++		/* First publish all the domain:bus info */
++		err = publish_root_cb(pdev, cntrl_entry->domain,
++				      cntrl_entry->bus);
++		if (err)
++			goto out;
 +
-+		vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
-+		off = skb_shinfo(skb)->frags[i].page_offset;
-+		memcpy(page_address(page) + off,
-+		       vaddr + off,
-+		       skb_shinfo(skb)->frags[i].size);
-+		kunmap_skb_frag(vaddr);
++		/*
++		 * Now figure out which root-%d this belongs to
++		 * so we can associate resources with it.
++		 */
++		err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
++				   "root_num", "%d", &root_num);
 +
-+		put_page(skb_shinfo(skb)->frags[i].page);
-+		skb_shinfo(skb)->frags[i].page = page;
-+	}
++		if (err != 1)
++			goto out;
 +
-+	return 1;
-+}
++		for (i = 0; i < root_num; i++) {
++			len = snprintf(str, sizeof(str), "root-%d", i);
++			if (unlikely(len >= (sizeof(str) - 1))) {
++				err = -ENOMEM;
++				goto out;
++			}
 +
-+static int loopback_start_xmit(struct sk_buff *skb, struct net_device *dev)
-+{
-+	struct net_private *np = netdev_priv(dev);
++			err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
++					   str, "%x:%x", &domain, &bus);
++			if (err != 2)
++				goto out;
 +
-+	if (!skb_remove_foreign_references(skb)) {
-+		np->stats.tx_dropped++;
-+		dev_kfree_skb(skb);
-+		return 0;
-+	}
++			/* Is this the one we just published? */
++			if (domain == cntrl_entry->domain &&
++			    bus == cntrl_entry->bus)
++				break;
++		}
 +
-+	dst_release(skb->dst);
-+	skb->dst = NULL;
++		if (i == root_num)
++			goto out;
 +
-+	skb_orphan(skb);
++		info.pdev = pdev;
++		info.resource_count = 0;
++		info.root_num = i;
 +
-+	np->stats.tx_bytes += skb->len;
-+	np->stats.tx_packets++;
++		/* Let ACPI do the heavy lifting on decoding resources */
++		acpi_walk_resources(cntrl_entry->controller->acpi_handle,
++				    METHOD_NAME__CRS, write_xenbus_resource,
++				    &info);
 +
-+	/* Switch to loopback context. */
-+	dev = np->loopback_dev;
-+	np  = netdev_priv(dev);
++		/* No resources.  OK.  On to the next one */
++		if (!info.resource_count)
++			continue;
 +
-+	np->stats.rx_bytes += skb->len;
-+	np->stats.rx_packets++;
++		/* Store the number of resources we wrote for this root-%d */
++		len = snprintf(str, sizeof(str), "root-%d-resources", i);
++		if (unlikely(len >= (sizeof(str) - 1))) {
++			err = -ENOMEM;
++			goto out;
++		}
 +
-+	if (skb->ip_summed == CHECKSUM_HW) {
-+		/* Defer checksum calculation. */
-+		skb->proto_csum_blank = 1;
-+		/* Must be a local packet: assert its integrity. */
-+		skb->proto_data_valid = 1;
++		err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
++				    "%d", info.resource_count);
++		if (err)
++			goto out;
 +	}
 +
-+	skb->ip_summed = skb->proto_data_valid ?
-+		CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
-+
-+	skb->pkt_type = PACKET_HOST; /* overridden by eth_type_trans() */
-+	skb->protocol = eth_type_trans(skb, dev);
-+	skb->dev      = dev;
-+	dev->last_rx  = jiffies;
++	/* Finally, write some magic to synchronize with the guest. */
++	len = snprintf(str, sizeof(str), "root-resource-magic");
++	if (unlikely(len >= (sizeof(str) - 1))) {
++		err = -ENOMEM;
++		goto out;
++	}
 +
-+	/* Flush netfilter context: rx'ed skbuffs not expected to have any. */
-+	nf_reset(skb);
-+	secpath_reset(skb);
++	err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
++			    "%lx", (sizeof(struct acpi_resource) * 2) + 1);
 +
-+	netif_rx(skb);
++out:
++	spin_unlock(&dev_data->lock);
 +
-+	return 0;
++	return err;
 +}
 +
-+static struct net_device_stats *loopback_get_stats(struct net_device *dev)
++void pciback_release_devices(struct pciback_device *pdev)
 +{
-+	struct net_private *np = netdev_priv(dev);
-+	return &np->stats;
-+}
++	struct controller_dev_data *dev_data = pdev->pci_dev_data;
++	struct controller_list_entry *cntrl_entry, *c;
++	struct controller_dev_entry *dev_entry, *d;
 +
-+static struct ethtool_ops network_ethtool_ops =
-+{
-+	.get_tx_csum = ethtool_op_get_tx_csum,
-+	.set_tx_csum = ethtool_op_set_tx_csum,
-+	.get_sg = ethtool_op_get_sg,
-+	.set_sg = ethtool_op_set_sg,
-+	.get_tso = ethtool_op_get_tso,
-+	.set_tso = ethtool_op_set_tso,
-+	.get_link = ethtool_op_get_link,
-+};
++	list_for_each_entry_safe(cntrl_entry, c, &dev_data->list, list) {
++		list_for_each_entry_safe(dev_entry, d,
++					 &cntrl_entry->dev_list, list) {
++			list_del(&dev_entry->list);
++			pcistub_put_pci_dev(dev_entry->dev);
++			kfree(dev_entry);
++		}
++		list_del(&cntrl_entry->list);
++		kfree(cntrl_entry);
++	}
 +
++	kfree(dev_data);
++	pdev->pci_dev_data = NULL;
++}
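
Note: write_xenbus_resource() above serializes each translated ACPI window
by dumping the raw struct acpi_resource, two hex digits per byte, into a
root-%d-resource-%d node; the root-resource-magic value published at the
end of pciback_publish_pci_roots() is the expected string length, letting
the frontend verify that both sides agree on the struct layout. A
hypothetical frontend-side decoder, shown only to make the encoding
concrete (no such code is part of this patch):

	/* Sketch: reverse of the "%02x"-per-byte stream written above.
	 * hex is the string read back from xenbus; assumes both sides
	 * share the same struct acpi_resource layout. */
	static int decode_resource_sketch(const char *hex,
					  struct acpi_resource *res)
	{
		unsigned char *out = (unsigned char *)res;
		unsigned int byte;
		size_t i;

		for (i = 0; i < sizeof(*res); i++) {
			if (sscanf(hex + 2 * i, "%2x", &byte) != 1)
				return -EINVAL;
			out[i] = byte;
		}
		return 0;
	}
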
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/pciback/passthrough.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/pciback/passthrough.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,166 @@
 +/*
-+ * Nothing to do here. Virtual interface is point-to-point and the
-+ * physical interface is probably promiscuous anyway.
++ * PCI Backend - Provides restricted access to the real PCI bus topology
++ *               to the frontend
++ *
++ *   Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
 + */
-+static void loopback_set_multicast_list(struct net_device *dev)
-+{
-+}
 +
-+static void loopback_construct(struct net_device *dev, struct net_device *lo)
-+{
-+	struct net_private *np = netdev_priv(dev);
++#include <linux/list.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include "pciback.h"
 +
-+	np->loopback_dev     = lo;
++struct passthrough_dev_data {
++	/* Access to dev_list must be protected by lock */
++	struct list_head dev_list;
++	spinlock_t lock;
++};
 +
-+	dev->open            = loopback_open;
-+	dev->stop            = loopback_close;
-+	dev->hard_start_xmit = loopback_start_xmit;
-+	dev->get_stats       = loopback_get_stats;
-+	dev->set_multicast_list = loopback_set_multicast_list;
-+	dev->change_mtu	     = NULL; /* allow arbitrary mtu */
++struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
++				    unsigned int domain, unsigned int bus,
++				    unsigned int devfn)
++{
++	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
++	struct pci_dev_entry *dev_entry;
++	struct pci_dev *dev = NULL;
++	unsigned long flags;
 +
-+	dev->tx_queue_len    = 0;
++	spin_lock_irqsave(&dev_data->lock, flags);
 +
-+	dev->features        = (NETIF_F_HIGHDMA |
-+				NETIF_F_LLTX |
-+				NETIF_F_TSO |
-+				NETIF_F_SG |
-+				NETIF_F_IP_CSUM);
++	list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
++		if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
++		    && bus == (unsigned int)dev_entry->dev->bus->number
++		    && devfn == dev_entry->dev->devfn) {
++			dev = dev_entry->dev;
++			break;
++		}
++	}
 +
-+	SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
++	spin_unlock_irqrestore(&dev_data->lock, flags);
 +
-+	/*
-+	 * We do not set a jumbo MTU on the interface. Otherwise the network
-+	 * stack will try to send large packets that will get dropped by the
-+	 * Ethernet bridge (unless the physical Ethernet interface is
-+	 * configured to transfer jumbo packets). If a larger MTU is desired
-+	 * then the system administrator can specify it using the 'ifconfig'
-+	 * command.
-+	 */
-+	/*dev->mtu             = 16*1024;*/
++	return dev;
 +}
 +
-+static int __init make_loopback(int i)
++int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
++			int devid, publish_pci_dev_cb publish_cb)
 +{
-+	struct net_device *dev1, *dev2;
-+	char dev_name[IFNAMSIZ];
-+	int err = -ENOMEM;
-+
-+	sprintf(dev_name, "vif0.%d", i);
-+	dev1 = alloc_netdev(sizeof(struct net_private), dev_name, ether_setup);
-+	if (!dev1)
-+		return err;
-+
-+	sprintf(dev_name, "veth%d", i);
-+	dev2 = alloc_netdev(sizeof(struct net_private), dev_name, ether_setup);
-+	if (!dev2)
-+		goto fail_netdev2;
-+
-+	loopback_construct(dev1, dev2);
-+	loopback_construct(dev2, dev1);
-+
-+	/*
-+	 * Initialise a dummy MAC address for the 'dummy backend' interface. We
-+	 * choose the numerically largest non-broadcast address to prevent the
-+	 * address getting stolen by an Ethernet bridge for STP purposes.
-+	 */
-+	memset(dev1->dev_addr, 0xFF, ETH_ALEN);
-+	dev1->dev_addr[0] &= ~0x01;
++	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
++	struct pci_dev_entry *dev_entry;
++	unsigned long flags;
++	unsigned int domain, bus, devfn;
++	int err;
 +
-+	if ((err = register_netdev(dev1)) != 0)
-+		goto fail;
++	dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
++	if (!dev_entry)
++		return -ENOMEM;
++	dev_entry->dev = dev;
 +
-+	if ((err = register_netdev(dev2)) != 0) {
-+		unregister_netdev(dev1);
-+		goto fail;
-+	}
++	spin_lock_irqsave(&dev_data->lock, flags);
++	list_add_tail(&dev_entry->list, &dev_data->dev_list);
++	spin_unlock_irqrestore(&dev_data->lock, flags);
 +
-+	return 0;
++	/* Publish this device. */
++	domain = (unsigned int)pci_domain_nr(dev->bus);
++	bus = (unsigned int)dev->bus->number;
++	devfn = dev->devfn;
++	err = publish_cb(pdev, domain, bus, devfn, devid);
 +
-+ fail:
-+	free_netdev(dev2);
-+ fail_netdev2:
-+	free_netdev(dev1);
 +	return err;
 +}
 +
-+static void __exit clean_loopback(int i)
++void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
 +{
-+	struct net_device *dev1, *dev2;
-+	char dev_name[IFNAMSIZ];
++	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
++	struct pci_dev_entry *dev_entry, *t;
++	struct pci_dev *found_dev = NULL;
++	unsigned long flags;
 +
-+	sprintf(dev_name, "vif0.%d", i);
-+	dev1 = dev_get_by_name(dev_name);
-+	sprintf(dev_name, "veth%d", i);
-+	dev2 = dev_get_by_name(dev_name);
-+	if (dev1 && dev2) {
-+		unregister_netdev(dev2);
-+		unregister_netdev(dev1);
-+		free_netdev(dev2);
-+		free_netdev(dev1);
++	spin_lock_irqsave(&dev_data->lock, flags);
++
++	list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
++		if (dev_entry->dev == dev) {
++			list_del(&dev_entry->list);
++			found_dev = dev_entry->dev;
++			kfree(dev_entry);
++		}
 +	}
++
++	spin_unlock_irqrestore(&dev_data->lock, flags);
++
++	if (found_dev)
++		pcistub_put_pci_dev(found_dev);
 +}
 +
-+static int __init loopback_init(void)
++int pciback_init_devices(struct pciback_device *pdev)
 +{
-+	int i, err = 0;
-+
-+	if (nloopbacks == -1)
-+		nloopbacks = is_initial_xendomain() ? 4 : 0;
++	struct passthrough_dev_data *dev_data;
 +
-+	for (i = 0; i < nloopbacks; i++)
-+		if ((err = make_loopback(i)) != 0)
-+			break;
++	dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
++	if (!dev_data)
++		return -ENOMEM;
 +
-+	return err;
-+}
++	spin_lock_init(&dev_data->lock);
 +
-+module_init(loopback_init);
++	INIT_LIST_HEAD(&dev_data->dev_list);
 +
-+static void __exit loopback_exit(void)
-+{
-+	int i;
++	pdev->pci_dev_data = dev_data;
 +
-+	for (i = nloopbacks; i-- > 0; )
-+		clean_loopback(i);
++	return 0;
 +}
 +
-+module_exit(loopback_exit);
++int pciback_publish_pci_roots(struct pciback_device *pdev,
++			      publish_pci_root_cb publish_root_cb)
++{
++	int err = 0;
++	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
++	struct pci_dev_entry *dev_entry, *e;
++	struct pci_dev *dev;
++	int found;
++	unsigned int domain, bus;
 +
-+MODULE_LICENSE("Dual BSD/GPL");
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/netback/Makefile tmp-linux-2.6-xen.patch/drivers/xen/netback/Makefile
---- pristine-linux-2.6.18.2/drivers/xen/netback/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/netback/Makefile	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,5 @@
-+obj-$(CONFIG_XEN_NETDEV_BACKEND) := netbk.o
-+obj-$(CONFIG_XEN_NETDEV_LOOPBACK) += netloop.o
++	spin_lock(&dev_data->lock);
 +
-+netbk-y   := netback.o xenbus.o interface.o
-+netloop-y := loopback.o
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/netback/netback.c tmp-linux-2.6-xen.patch/drivers/xen/netback/netback.c
---- pristine-linux-2.6.18.2/drivers/xen/netback/netback.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/netback/netback.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,1496 @@
-+/******************************************************************************
-+ * drivers/xen/netback/netback.c
-+ * 
-+ * Back-end of the driver for virtual network devices. This portion of the
-+ * driver exports a 'unified' network-device interface that can be accessed
-+ * by any operating system that implements a compatible front end. A 
-+ * reference front-end implementation can be found in:
-+ *  drivers/xen/netfront/netfront.c
-+ * 
-+ * Copyright (c) 2002-2005, K A Fraser
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
++	list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
++		/* Only publish this device as a root if none of its
++		 * parent bridges are exported
++		 */
++		found = 0;
++		dev = dev_entry->dev->bus->self;
++		for (; !found && dev != NULL; dev = dev->bus->self) {
++			list_for_each_entry(e, &dev_data->dev_list, list) {
++				if (dev == e->dev) {
++					found = 1;
++					break;
++				}
++			}
++		}
 +
-+#include "common.h"
-+#include <xen/balloon.h>
-+#include <xen/interface/memory.h>
++		domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus);
++		bus = (unsigned int)dev_entry->dev->bus->number;
 +
-+/*define NETBE_DEBUG_INTERRUPT*/
++		if (!found) {
++			err = publish_root_cb(pdev, domain, bus);
++			if (err)
++				break;
++		}
++	}
 +
-+/* extra field used in struct page */
-+#define netif_page_index(pg) (*(long *)&(pg)->mapping)
++	spin_unlock(&dev_data->lock);
 +
-+struct netbk_rx_meta {
-+	skb_frag_t frag;
-+	int id;
-+	u8 copy:1;
-+};
++	return err;
++}
 +
-+static void netif_idx_release(u16 pending_idx);
-+static void netif_page_release(struct page *page);
-+static void make_tx_response(netif_t *netif, 
-+			     netif_tx_request_t *txp,
-+			     s8       st);
-+static netif_rx_response_t *make_rx_response(netif_t *netif, 
-+					     u16      id, 
-+					     s8       st,
-+					     u16      offset,
-+					     u16      size,
-+					     u16      flags);
++void pciback_release_devices(struct pciback_device *pdev)
++{
++	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
++	struct pci_dev_entry *dev_entry, *t;
 +
-+static void net_tx_action(unsigned long unused);
-+static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
++	list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
++		list_del(&dev_entry->list);
++		pcistub_put_pci_dev(dev_entry->dev);
++		kfree(dev_entry);
++	}
 +
-+static void net_rx_action(unsigned long unused);
-+static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);
++	kfree(dev_data);
++	pdev->pci_dev_data = NULL;
++}
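
Note: pci_stub.c below seizes the devices named in its "hide" module
parameter; pcistub_match() accepts a device if it, or any bridge above it,
appears in pcistub_device_ids. The code that parses the parameter string
into those entries lies beyond the end of this excerpt; a minimal sketch
of handling one token, assuming the conventional (dddd:bb:ss.f) or
(bb:ss.f) syntax:

	/* Sketch: parse one "hide" token such as "(0000:03:00.0)".
	 * The real parser is outside the excerpt and may differ. */
	static int parse_hide_token_sketch(const char *tok,
					   struct pcistub_device_id *id)
	{
		int domain, bus, slot, func;

		if (sscanf(tok, " (%x:%x:%x.%x)",
			   &domain, &bus, &slot, &func) != 4) {
			domain = 0;	/* no domain given: assume 0 */
			if (sscanf(tok, " (%x:%x.%x)",
				   &bus, &slot, &func) != 3)
				return -EINVAL;
		}

		id->domain = domain;
		id->bus = bus;
		id->devfn = PCI_DEVFN(slot, func);
		return 0;
	}
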
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/pciback/pci_stub.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/pciback/pci_stub.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,948 @@
++/*
++ * PCI Stub Driver - Grabs devices in backend to be exported later
++ *
++ * Ryan Wilson <hap9 at epoch.ncsc.mil>
++ * Chris Bookholt <hap10 at epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/kref.h>
++#include <asm/atomic.h>
++#include "pciback.h"
++#include "conf_space.h"
++#include "conf_space_quirks.h"
 +
-+static struct timer_list net_timer;
++static char *pci_devs_to_hide = NULL;
++module_param_named(hide, pci_devs_to_hide, charp, 0444);
 +
-+#define MAX_PENDING_REQS 256
++struct pcistub_device_id {
++	struct list_head slot_list;
++	int domain;
++	unsigned char bus;
++	unsigned int devfn;
++};
++static LIST_HEAD(pcistub_device_ids);
++static DEFINE_SPINLOCK(device_ids_lock);
 +
-+static struct sk_buff_head rx_queue;
++struct pcistub_device {
++	struct kref kref;
++	struct list_head dev_list;
++	spinlock_t lock;
 +
-+static struct page **mmap_pages;
-+static inline unsigned long idx_to_kaddr(unsigned int idx)
-+{
-+	return (unsigned long)pfn_to_kaddr(page_to_pfn(mmap_pages[idx]));
-+}
++	struct pci_dev *dev;
++	struct pciback_device *pdev;	/* non-NULL if struct pci_dev is in use */
++};
 +
-+#define PKT_PROT_LEN 64
++/* Access to pcistub_devices & seized_devices lists and the initialize_devices
++ * flag must be protected by pcistub_devices_lock
++ */
++static DEFINE_SPINLOCK(pcistub_devices_lock);
++static LIST_HEAD(pcistub_devices);
 +
-+static struct pending_tx_info {
-+	netif_tx_request_t req;
-+	netif_t *netif;
-+} pending_tx_info[MAX_PENDING_REQS];
-+static u16 pending_ring[MAX_PENDING_REQS];
-+typedef unsigned int PEND_RING_IDX;
-+#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
-+static PEND_RING_IDX pending_prod, pending_cons;
-+#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
++/* wait for device_initcall before initializing our devices
++ * (see pcistub_init_devices_late)
++ */
++static int initialize_devices = 0;
++static LIST_HEAD(seized_devices);
 +
-+/* Freed TX SKBs get batched on this ring before return to pending_ring. */
-+static u16 dealloc_ring[MAX_PENDING_REQS];
-+static PEND_RING_IDX dealloc_prod, dealloc_cons;
++static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
++{
++	struct pcistub_device *psdev;
 +
-+static struct sk_buff_head tx_queue;
++	dev_dbg(&dev->dev, "pcistub_device_alloc\n");
 +
-+static grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
-+static gnttab_unmap_grant_ref_t tx_unmap_ops[MAX_PENDING_REQS];
-+static gnttab_map_grant_ref_t tx_map_ops[MAX_PENDING_REQS];
++	psdev = kzalloc(sizeof(*psdev), GFP_ATOMIC);
++	if (!psdev)
++		return NULL;
 +
-+static struct list_head net_schedule_list;
-+static spinlock_t net_schedule_list_lock;
++	psdev->dev = pci_dev_get(dev);
++	if (!psdev->dev) {
++		kfree(psdev);
++		return NULL;
++	}
 +
-+#define MAX_MFN_ALLOC 64
-+static unsigned long mfn_list[MAX_MFN_ALLOC];
-+static unsigned int alloc_index = 0;
++	kref_init(&psdev->kref);
++	spin_lock_init(&psdev->lock);
 +
-+static inline unsigned long alloc_mfn(void)
-+{
-+	BUG_ON(alloc_index == 0);
-+	return mfn_list[--alloc_index];
++	return psdev;
 +}
 +
-+static int check_mfn(int nr)
++/* Don't call this directly as it's called by pcistub_device_put */
++static void pcistub_device_release(struct kref *kref)
 +{
-+	struct xen_memory_reservation reservation = {
-+		.extent_order = 0,
-+		.domid        = DOMID_SELF
-+	};
++	struct pcistub_device *psdev;
 +
-+	if (likely(alloc_index >= nr))
-+		return 0;
++	psdev = container_of(kref, struct pcistub_device, kref);
 +
-+	set_xen_guest_handle(reservation.extent_start, mfn_list + alloc_index);
-+	reservation.nr_extents = MAX_MFN_ALLOC - alloc_index;
-+	alloc_index += HYPERVISOR_memory_op(XENMEM_increase_reservation,
-+					    &reservation);
++	dev_dbg(&psdev->dev->dev, "pcistub_device_release\n");
 +
-+	return alloc_index >= nr ? 0 : -ENOMEM;
++	/* Clean-up the device */
++	pciback_reset_device(psdev->dev);
++	pciback_config_free_dyn_fields(psdev->dev);
++	pciback_config_free_dev(psdev->dev);
++	kfree(pci_get_drvdata(psdev->dev));
++	pci_set_drvdata(psdev->dev, NULL);
++
++	pci_dev_put(psdev->dev);
++
++	kfree(psdev);
 +}
 +
-+static inline void maybe_schedule_tx_action(void)
++static inline void pcistub_device_get(struct pcistub_device *psdev)
 +{
-+	smp_mb();
-+	if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
-+	    !list_empty(&net_schedule_list))
-+		tasklet_schedule(&net_tx_tasklet);
++	kref_get(&psdev->kref);
 +}
 +
-+static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
++static inline void pcistub_device_put(struct pcistub_device *psdev)
 +{
-+	struct skb_shared_info *ninfo;
-+	struct sk_buff *nskb;
-+	unsigned long offset;
-+	int ret;
-+	int len;
-+	int headlen;
-+
-+	BUG_ON(skb_shinfo(skb)->frag_list != NULL);
-+
-+	nskb = alloc_skb(SKB_MAX_HEAD(0), GFP_ATOMIC | __GFP_NOWARN);
-+	if (unlikely(!nskb))
-+		goto err;
-+
-+	skb_reserve(nskb, 16 + NET_IP_ALIGN);
-+	headlen = nskb->end - nskb->data;
-+	if (headlen > skb_headlen(skb))
-+		headlen = skb_headlen(skb);
-+	ret = skb_copy_bits(skb, 0, __skb_put(nskb, headlen), headlen);
-+	BUG_ON(ret);
-+
-+	ninfo = skb_shinfo(nskb);
-+	ninfo->gso_size = skb_shinfo(skb)->gso_size;
-+	ninfo->gso_type = skb_shinfo(skb)->gso_type;
-+
-+	offset = headlen;
-+	len = skb->len - headlen;
++	kref_put(&psdev->kref, pcistub_device_release);
++}
 +
-+	nskb->len = skb->len;
-+	nskb->data_len = len;
-+	nskb->truesize += len;
++static struct pcistub_device *pcistub_device_find(int domain, int bus,
++						  int slot, int func)
++{
++	struct pcistub_device *psdev = NULL;
++	unsigned long flags;
 +
-+	while (len) {
-+		struct page *page;
-+		int copy;
-+		int zero;
++	spin_lock_irqsave(&pcistub_devices_lock, flags);
 +
-+		if (unlikely(ninfo->nr_frags >= MAX_SKB_FRAGS)) {
-+			dump_stack();
-+			goto err_free;
++	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
++		if (psdev->dev != NULL
++		    && domain == pci_domain_nr(psdev->dev->bus)
++		    && bus == psdev->dev->bus->number
++		    && PCI_DEVFN(slot, func) == psdev->dev->devfn) {
++			pcistub_device_get(psdev);
++			goto out;
 +		}
++	}
 +
-+		copy = len >= PAGE_SIZE ? PAGE_SIZE : len;
-+		zero = len >= PAGE_SIZE ? 0 : __GFP_ZERO;
++	/* didn't find it */
++	psdev = NULL;
 +
-+		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN | zero);
-+		if (unlikely(!page))
-+			goto err_free;
++      out:
++	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++	return psdev;
++}
 +
-+		ret = skb_copy_bits(skb, offset, page_address(page), copy);
-+		BUG_ON(ret);
++static struct pci_dev *pcistub_device_get_pci_dev(struct pciback_device *pdev,
++						  struct pcistub_device *psdev)
++{
++	struct pci_dev *pci_dev = NULL;
++	unsigned long flags;
 +
-+		ninfo->frags[ninfo->nr_frags].page = page;
-+		ninfo->frags[ninfo->nr_frags].page_offset = 0;
-+		ninfo->frags[ninfo->nr_frags].size = copy;
-+		ninfo->nr_frags++;
++	pcistub_device_get(psdev);
 +
-+		offset += copy;
-+		len -= copy;
++	spin_lock_irqsave(&psdev->lock, flags);
++	if (!psdev->pdev) {
++		psdev->pdev = pdev;
++		pci_dev = psdev->dev;
 +	}
++	spin_unlock_irqrestore(&psdev->lock, flags);
 +
-+	offset = nskb->data - skb->data;
-+
-+	nskb->h.raw = skb->h.raw + offset;
-+	nskb->nh.raw = skb->nh.raw + offset;
-+	nskb->mac.raw = skb->mac.raw + offset;
-+
-+	return nskb;
++	if (!pci_dev)
++		pcistub_device_put(psdev);
 +
-+ err_free:
-+	kfree_skb(nskb);
-+ err:
-+	return NULL;
++	return pci_dev;
 +}
 +
-+static inline int netbk_max_required_rx_slots(netif_t *netif)
++struct pci_dev *pcistub_get_pci_dev_by_slot(struct pciback_device *pdev,
++					    int domain, int bus,
++					    int slot, int func)
 +{
-+	if (netif->features & (NETIF_F_SG|NETIF_F_TSO))
-+		return MAX_SKB_FRAGS + 2; /* header + extra_info + frags */
-+	return 1; /* all in one */
-+}
++	struct pcistub_device *psdev;
++	struct pci_dev *found_dev = NULL;
++	unsigned long flags;
 +
-+static inline int netbk_queue_full(netif_t *netif)
-+{
-+	RING_IDX peek   = netif->rx_req_cons_peek;
-+	RING_IDX needed = netbk_max_required_rx_slots(netif);
++	spin_lock_irqsave(&pcistub_devices_lock, flags);
 +
-+	return ((netif->rx.sring->req_prod - peek) < needed) ||
-+	       ((netif->rx.rsp_prod_pvt + NET_RX_RING_SIZE - peek) < needed);
-+}
++	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
++		if (psdev->dev != NULL
++		    && domain == pci_domain_nr(psdev->dev->bus)
++		    && bus == psdev->dev->bus->number
++		    && PCI_DEVFN(slot, func) == psdev->dev->devfn) {
++			found_dev = pcistub_device_get_pci_dev(pdev, psdev);
++			break;
++		}
++	}
 +
-+static void tx_queue_callback(unsigned long data)
-+{
-+	netif_t *netif = (netif_t *)data;
-+	if (netif_schedulable(netif))
-+		netif_wake_queue(netif->dev);
++	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++	return found_dev;
 +}
 +
-+int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
++struct pci_dev *pcistub_get_pci_dev(struct pciback_device *pdev,
++				    struct pci_dev *dev)
 +{
-+	netif_t *netif = netdev_priv(dev);
-+
-+	BUG_ON(skb->dev != dev);
++	struct pcistub_device *psdev;
++	struct pci_dev *found_dev = NULL;
++	unsigned long flags;
 +
-+	/* Drop the packet if the target domain has no receive buffers. */
-+	if (unlikely(!netif_schedulable(netif) || netbk_queue_full(netif)))
-+		goto drop;
++	spin_lock_irqsave(&pcistub_devices_lock, flags);
 +
-+	/*
-+	 * Copy the packet here if it's destined for a flipping interface
-+	 * but isn't flippable (e.g. extra references to data).
-+	 * XXX For now we also copy skbuffs whose head crosses a page
-+	 * boundary, because netbk_gop_skb can't handle them.
-+	 */
-+	if (!netif->copying_receiver ||
-+	    ((skb_headlen(skb) + offset_in_page(skb->data)) >= PAGE_SIZE)) {
-+		struct sk_buff *nskb = netbk_copy_skb(skb);
-+		if ( unlikely(nskb == NULL) )
-+			goto drop;
-+		/* Copy only the header fields we use in this driver. */
-+		nskb->dev = skb->dev;
-+		nskb->ip_summed = skb->ip_summed;
-+		nskb->proto_data_valid = skb->proto_data_valid;
-+		dev_kfree_skb(skb);
-+		skb = nskb;
++	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
++		if (psdev->dev == dev) {
++			found_dev = pcistub_device_get_pci_dev(pdev, psdev);
++			break;
++		}
 +	}
 +
-+	netif->rx_req_cons_peek += skb_shinfo(skb)->nr_frags + 1 +
-+				   !!skb_shinfo(skb)->gso_size;
-+	netif_get(netif);
++	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++	return found_dev;
++}
 +
-+	if (netbk_can_queue(dev) && netbk_queue_full(netif)) {
-+		netif->rx.sring->req_event = netif->rx_req_cons_peek +
-+			netbk_max_required_rx_slots(netif);
-+		mb(); /* request notification /then/ check & stop the queue */
-+		if (netbk_queue_full(netif)) {
-+			netif_stop_queue(dev);
-+			/*
-+			 * Schedule 500ms timeout to restart the queue, thus
-+			 * ensuring that an inactive queue will be drained.
-+			 * Packets will immediately be dropped until more
-+			 * receive buffers become available (see
-+			 * netbk_queue_full() check above).
-+			 */
-+			netif->tx_queue_timeout.data = (unsigned long)netif;
-+			netif->tx_queue_timeout.function = tx_queue_callback;
-+			__mod_timer(&netif->tx_queue_timeout, jiffies + HZ/2);
++void pcistub_put_pci_dev(struct pci_dev *dev)
++{
++	struct pcistub_device *psdev, *found_psdev = NULL;
++	unsigned long flags;
++
++	spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
++		if (psdev->dev == dev) {
++			found_psdev = psdev;
++			break;
 +		}
 +	}
 +
-+	skb_queue_tail(&rx_queue, skb);
-+	tasklet_schedule(&net_rx_tasklet);
++	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
 +
-+	return 0;
++	/* Clean up our device
++	 * (so it's ready for the next domain)
++	 */
++	pciback_reset_device(found_psdev->dev);
++	pciback_config_free_dyn_fields(found_psdev->dev);
++	pciback_config_reset_dev(found_psdev->dev);
 +
-+ drop:
-+	netif->stats.tx_dropped++;
-+	dev_kfree_skb(skb);
-+	return 0;
-+}
++	spin_lock_irqsave(&found_psdev->lock, flags);
++	found_psdev->pdev = NULL;
++	spin_unlock_irqrestore(&found_psdev->lock, flags);
 +
-+#if 0
-+static void xen_network_done_notify(void)
-+{
-+	static struct net_device *eth0_dev = NULL;
-+	if (unlikely(eth0_dev == NULL))
-+		eth0_dev = __dev_get_by_name("eth0");
-+	netif_rx_schedule(eth0_dev);
-+}
-+/* 
-+ * Add following to poll() function in NAPI driver (Tigon3 is example):
-+ *  if ( xen_network_done() )
-+ *      tg3_enable_ints(tp);
-+ */
-+int xen_network_done(void)
-+{
-+	return skb_queue_empty(&rx_queue);
++	pcistub_device_put(found_psdev);
 +}
-+#endif
-+
-+struct netrx_pending_operations {
-+	unsigned trans_prod, trans_cons;
-+	unsigned mmu_prod, mmu_cons;
-+	unsigned mcl_prod, mcl_cons;
-+	unsigned copy_prod, copy_cons;
-+	unsigned meta_prod, meta_cons;
-+	mmu_update_t *mmu;
-+	gnttab_transfer_t *trans;
-+	gnttab_copy_t *copy;
-+	multicall_entry_t *mcl;
-+	struct netbk_rx_meta *meta;
-+};
 +
-+/* Set up the grant operations for this fragment.  If it's a flipping
-+   interface, we also set up the unmap request from here. */
-+static u16 netbk_gop_frag(netif_t *netif, struct netbk_rx_meta *meta,
-+			  int i, struct netrx_pending_operations *npo,
-+			  struct page *page, unsigned long size,
-+			  unsigned long offset)
++static int __devinit pcistub_match_one(struct pci_dev *dev,
++				       struct pcistub_device_id *pdev_id)
 +{
-+	mmu_update_t *mmu;
-+	gnttab_transfer_t *gop;
-+	gnttab_copy_t *copy_gop;
-+	multicall_entry_t *mcl;
-+	netif_rx_request_t *req;
-+	unsigned long old_mfn, new_mfn;
-+
-+	old_mfn = virt_to_mfn(page_address(page));
++	/* Match the specified device by domain, bus, slot, func and also if
++	 * any of the device's parent bridges match.
++	 */
++	for (; dev != NULL; dev = dev->bus->self) {
++		if (pci_domain_nr(dev->bus) == pdev_id->domain
++		    && dev->bus->number == pdev_id->bus
++		    && dev->devfn == pdev_id->devfn)
++			return 1;
 +
-+	req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons + i);
-+	if (netif->copying_receiver) {
-+		/* The fragment needs to be copied rather than
-+		   flipped. */
-+		meta->copy = 1;
-+		copy_gop = npo->copy + npo->copy_prod++;
-+		copy_gop->flags = GNTCOPY_dest_gref;
-+		if (PageForeign(page)) {
-+			struct pending_tx_info *src_pend =
-+				&pending_tx_info[netif_page_index(page)];
-+			copy_gop->source.domid = src_pend->netif->domid;
-+			copy_gop->source.u.ref = src_pend->req.gref;
-+			copy_gop->flags |= GNTCOPY_source_gref;
-+		} else {
-+			copy_gop->source.domid = DOMID_SELF;
-+			copy_gop->source.u.gmfn = old_mfn;
-+		}
-+		copy_gop->source.offset = offset;
-+		copy_gop->dest.domid = netif->domid;
-+		copy_gop->dest.offset = 0;
-+		copy_gop->dest.u.ref = req->gref;
-+		copy_gop->len = size;
-+	} else {
-+		meta->copy = 0;
-+		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+			new_mfn = alloc_mfn();
++		/* Sometimes topmost bridge links to itself. */
++		if (dev == dev->bus->self)
++			break;
++	}
 +
-+			/*
-+			 * Set the new P2M table entry before
-+			 * reassigning the old data page. Heed the
-+			 * comment in pgtable-2level.h:pte_page(). :-)
-+			 */
-+			set_phys_to_machine(page_to_pfn(page), new_mfn);
++	return 0;
++}
 +
-+			mcl = npo->mcl + npo->mcl_prod++;
-+			MULTI_update_va_mapping(mcl,
-+					     (unsigned long)page_address(page),
-+					     pfn_pte_ma(new_mfn, PAGE_KERNEL),
-+					     0);
++static int __devinit pcistub_match(struct pci_dev *dev)
++{
++	struct pcistub_device_id *pdev_id;
++	unsigned long flags;
++	int found = 0;
 +
-+			mmu = npo->mmu + npo->mmu_prod++;
-+			mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
-+				MMU_MACHPHYS_UPDATE;
-+			mmu->val = page_to_pfn(page);
++	spin_lock_irqsave(&device_ids_lock, flags);
++	list_for_each_entry(pdev_id, &pcistub_device_ids, slot_list) {
++		if (pcistub_match_one(dev, pdev_id)) {
++			found = 1;
++			break;
 +		}
-+
-+		gop = npo->trans + npo->trans_prod++;
-+		gop->mfn = old_mfn;
-+		gop->domid = netif->domid;
-+		gop->ref = req->gref;
 +	}
-+	return req->id;
++	spin_unlock_irqrestore(&device_ids_lock, flags);
++
++	return found;
 +}
 +
-+static void netbk_gop_skb(struct sk_buff *skb,
-+			  struct netrx_pending_operations *npo)
++static int __devinit pcistub_init_device(struct pci_dev *dev)
 +{
-+	netif_t *netif = netdev_priv(skb->dev);
-+	int nr_frags = skb_shinfo(skb)->nr_frags;
-+	int i;
-+	int extra;
-+	struct netbk_rx_meta *head_meta, *meta;
++	struct pciback_dev_data *dev_data;
++	int err = 0;
 +
-+	head_meta = npo->meta + npo->meta_prod++;
-+	head_meta->frag.page_offset = skb_shinfo(skb)->gso_type;
-+	head_meta->frag.size = skb_shinfo(skb)->gso_size;
-+	extra = !!head_meta->frag.size + 1;
++	dev_dbg(&dev->dev, "initializing...\n");
 +
-+	for (i = 0; i < nr_frags; i++) {
-+		meta = npo->meta + npo->meta_prod++;
-+		meta->frag = skb_shinfo(skb)->frags[i];
-+		meta->id = netbk_gop_frag(netif, meta, i + extra, npo,
-+					  meta->frag.page,
-+					  meta->frag.size,
-+					  meta->frag.page_offset);
++	/* The PCI backend is not intended to be a module (or to work with
++	 * removable PCI devices) yet. If it were, pciback_config_free()
++	 * would need to be called somewhere to free the memory allocated
++	 * here and then to call kfree(pci_get_drvdata(psdev->dev)).
++	 */
++	dev_data = kzalloc(sizeof(*dev_data), GFP_ATOMIC);
++	if (!dev_data) {
++		err = -ENOMEM;
++		goto out;
 +	}
++	pci_set_drvdata(dev, dev_data);
 +
-+	/*
-+	 * This must occur at the end to ensure that we don't trash skb_shinfo
-+	 * until we're done. We know that the head doesn't cross a page
-+	 * boundary because such packets get copied in netif_be_start_xmit.
++	dev_dbg(&dev->dev, "initializing config\n");
++	err = pciback_config_init_dev(dev);
++	if (err)
++		goto out;
++
++	/* HACK: Force device (& ACPI) to determine what IRQ it's on - we
++	 * must do this here because pcibios_enable_device may specify
++	 * the pci device's true irq (and possibly its other resources)
++	 * if they differ from what's in the configuration space.
++	 * This makes the assumption that the device's resources won't
++	 * change after this point (otherwise this code may break!)
 +	 */
-+	head_meta->id = netbk_gop_frag(netif, head_meta, 0, npo,
-+				       virt_to_page(skb->data),
-+				       skb_headlen(skb),
-+				       offset_in_page(skb->data));
++	dev_dbg(&dev->dev, "enabling device\n");
++	err = pci_enable_device(dev);
++	if (err)
++		goto config_release;
 +
-+	netif->rx.req_cons += nr_frags + extra;
-+}
++	/* Now disable the device (this also ensures some private device
++	 * data is setup before we export)
++	 */
++	dev_dbg(&dev->dev, "reset device\n");
++	pciback_reset_device(dev);
 +
-+static inline void netbk_free_pages(int nr_frags, struct netbk_rx_meta *meta)
-+{
-+	int i;
++	return 0;
 +
-+	for (i = 0; i < nr_frags; i++)
-+		put_page(meta[i].frag.page);
++      config_release:
++	pciback_config_free_dev(dev);
++
++      out:
++	pci_set_drvdata(dev, NULL);
++	kfree(dev_data);
++	return err;
 +}
 +
-+/* This is a twin to netbk_gop_skb.  Assume that netbk_gop_skb was
-+   used to set up the operations on the top of
-+   netrx_pending_operations, which have since been done.  Check that
-+   they didn't give any errors and advance over them. */
-+static int netbk_check_gop(int nr_frags, domid_t domid,
-+			   struct netrx_pending_operations *npo)
++/*
++ * Because some initialization still happens on
++ * devices during fs_initcall, we need to defer
++ * full initialization of our devices until
++ * device_initcall.
++ */
++static int __init pcistub_init_devices_late(void)
 +{
-+	multicall_entry_t *mcl;
-+	gnttab_transfer_t *gop;
-+	gnttab_copy_t     *copy_op;
-+	int status = NETIF_RSP_OKAY;
-+	int i;
++	struct pcistub_device *psdev;
++	unsigned long flags;
++	int err = 0;
 +
-+	for (i = 0; i <= nr_frags; i++) {
-+		if (npo->meta[npo->meta_cons + i].copy) {
-+			copy_op = npo->copy + npo->copy_cons++;
-+			if (copy_op->status != GNTST_okay) {
-+				DPRINTK("Bad status %d from copy to DOM%d.\n",
-+					copy_op->status, domid);
-+				status = NETIF_RSP_ERROR;
-+			}
-+		} else {
-+			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+				mcl = npo->mcl + npo->mcl_cons++;
-+				/* The update_va_mapping() must not fail. */
-+				BUG_ON(mcl->result != 0);
-+			}
++	pr_debug("pciback: pcistub_init_devices_late\n");
 +
-+			gop = npo->trans + npo->trans_cons++;
-+			/* Check the reassignment error code. */
-+			if (gop->status != 0) {
-+				DPRINTK("Bad status %d from grant transfer to DOM%u\n",
-+					gop->status, domid);
-+				/*
-+				 * Page no longer belongs to us unless
-+				 * GNTST_bad_page, but that should be
-+				 * a fatal error anyway.
-+				 */
-+				BUG_ON(gop->status == GNTST_bad_page);
-+				status = NETIF_RSP_ERROR;
-+			}
++	spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++	while (!list_empty(&seized_devices)) {
++		psdev = container_of(seized_devices.next,
++				     struct pcistub_device, dev_list);
++		list_del(&psdev->dev_list);
++
++		spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++		err = pcistub_init_device(psdev->dev);
++		if (err) {
++			dev_err(&psdev->dev->dev,
++				"error %d initializing device\n", err);
++			kfree(psdev);
++			psdev = NULL;
 +		}
++
++		spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++		if (psdev)
++			list_add_tail(&psdev->dev_list, &pcistub_devices);
 +	}
 +
-+	return status;
++	initialize_devices = 1;
++
++	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++	return 0;
 +}
 +
-+static void netbk_add_frag_responses(netif_t *netif, int status,
-+				     struct netbk_rx_meta *meta, int nr_frags)
++static int __devinit pcistub_seize(struct pci_dev *dev)
 +{
-+	int i;
-+	unsigned long offset;
++	struct pcistub_device *psdev;
++	unsigned long flags;
++	int err = 0;
 +
-+	for (i = 0; i < nr_frags; i++) {
-+		int id = meta[i].id;
-+		int flags = (i == nr_frags - 1) ? 0 : NETRXF_more_data;
++	psdev = pcistub_device_alloc(dev);
++	if (!psdev)
++		return -ENOMEM;
 +
-+		if (meta[i].copy)
-+			offset = 0;
-+		else
-+			offset = meta[i].frag.page_offset;
-+		make_rx_response(netif, id, status, offset,
-+				 meta[i].frag.size, flags);
++	spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++	if (initialize_devices) {
++		spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++		/* don't want irqs disabled when calling pcistub_init_device */
++		err = pcistub_init_device(psdev->dev);
++
++		spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++		if (!err)
++			list_add(&psdev->dev_list, &pcistub_devices);
++	} else {
++		dev_dbg(&dev->dev, "deferring initialization\n");
++		list_add(&psdev->dev_list, &seized_devices);
 +	}
++
++	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++	if (err)
++		pcistub_device_put(psdev);
++
++	return err;
 +}
 +
-+static void net_rx_action(unsigned long unused)
++static int __devinit pcistub_probe(struct pci_dev *dev,
++				   const struct pci_device_id *id)
 +{
-+	netif_t *netif = NULL;
-+	s8 status;
-+	u16 id, irq, flags;
-+	netif_rx_response_t *resp;
-+	multicall_entry_t *mcl;
-+	struct sk_buff_head rxq;
-+	struct sk_buff *skb;
-+	int notify_nr = 0;
-+	int ret;
-+	int nr_frags;
-+	int count;
-+	unsigned long offset;
++	int err = 0;
 +
-+	/*
-+	 * Putting hundreds of bytes on the stack is considered rude.
-+	 * Static works because a tasklet can only be on one CPU at any time.
-+	 */
-+	static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+3];
-+	static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
-+	static gnttab_transfer_t grant_trans_op[NET_RX_RING_SIZE];
-+	static gnttab_copy_t grant_copy_op[NET_RX_RING_SIZE];
-+	static unsigned char rx_notify[NR_IRQS];
-+	static u16 notify_list[NET_RX_RING_SIZE];
-+	static struct netbk_rx_meta meta[NET_RX_RING_SIZE];
++	dev_dbg(&dev->dev, "probing...\n");
 +
-+	struct netrx_pending_operations npo = {
-+		mmu: rx_mmu,
-+		trans: grant_trans_op,
-+		copy: grant_copy_op,
-+		mcl: rx_mcl,
-+		meta: meta};
++	if (pcistub_match(dev)) {
 +
-+	skb_queue_head_init(&rxq);
++		if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL
++		    && dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
++			dev_err(&dev->dev, "can't export pci devices that "
++				"don't have a normal (0) or bridge (1) "
++				"header type!\n");
++			err = -ENODEV;
++			goto out;
++		}
 +
-+	count = 0;
++		dev_info(&dev->dev, "seizing device\n");
++		err = pcistub_seize(dev);
++	} else
++		/* Device was not on the seize list */
++		err = -ENODEV;
 +
-+	while ((skb = skb_dequeue(&rx_queue)) != NULL) {
-+		nr_frags = skb_shinfo(skb)->nr_frags;
-+		*(int *)skb->cb = nr_frags;
++      out:
++	return err;
++}
 +
-+		if (!xen_feature(XENFEAT_auto_translated_physmap) &&
-+		    !((netif_t *)netdev_priv(skb->dev))->copying_receiver &&
-+		    check_mfn(nr_frags + 1)) {
-+			/* Memory squeeze? Back off for an arbitrary while. */
-+			if ( net_ratelimit() )
-+				WPRINTK("Memory squeeze in netback "
-+					"driver.\n");
-+			mod_timer(&net_timer, jiffies + HZ);
-+			skb_queue_head(&rx_queue, skb);
-+			break;
-+		}
++static void pcistub_remove(struct pci_dev *dev)
++{
++	struct pcistub_device *psdev, *found_psdev = NULL;
++	unsigned long flags;
 +
-+		netbk_gop_skb(skb, &npo);
++	dev_dbg(&dev->dev, "removing\n");
 +
-+		count += nr_frags + 1;
++	spin_lock_irqsave(&pcistub_devices_lock, flags);
 +
-+		__skb_queue_tail(&rxq, skb);
++	pciback_config_quirk_release(dev);
 +
-+		/* Filled the batch queue? */
-+		if (count + MAX_SKB_FRAGS >= NET_RX_RING_SIZE)
++	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
++		if (psdev->dev == dev) {
++			found_psdev = psdev;
 +			break;
++		}
 +	}
 +
-+	if (npo.mcl_prod &&
-+	    !xen_feature(XENFEAT_auto_translated_physmap)) {
-+		mcl = npo.mcl + npo.mcl_prod++;
++	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
 +
-+		BUG_ON(mcl[-1].op != __HYPERVISOR_update_va_mapping);
-+		mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
++	if (found_psdev) {
++		dev_dbg(&dev->dev, "found device to remove - in use? %p\n",
++			found_psdev->pdev);
 +
-+		mcl->op = __HYPERVISOR_mmu_update;
-+		mcl->args[0] = (unsigned long)rx_mmu;
-+		mcl->args[1] = npo.mmu_prod;
-+		mcl->args[2] = 0;
-+		mcl->args[3] = DOMID_SELF;
-+	}
++		if (found_psdev->pdev) {
++			printk(KERN_WARNING "pciback: ****** removing device "
++			       "%s while still in-use! ******\n",
++			       pci_name(found_psdev->dev));
++			printk(KERN_WARNING "pciback: ****** driver domain may "
++			       "still access this device's i/o resources!\n");
++			printk(KERN_WARNING "pciback: ****** shutdown driver "
++			       "domain before binding device\n");
++			printk(KERN_WARNING "pciback: ****** to other drivers "
++			       "or domains\n");
 +
-+	if (npo.trans_prod) {
-+		mcl = npo.mcl + npo.mcl_prod++;
-+		mcl->op = __HYPERVISOR_grant_table_op;
-+		mcl->args[0] = GNTTABOP_transfer;
-+		mcl->args[1] = (unsigned long)grant_trans_op;
-+		mcl->args[2] = npo.trans_prod;
-+	}
++			pciback_release_pci_dev(found_psdev->pdev,
++						found_psdev->dev);
++		}
 +
-+	if (npo.copy_prod) {
-+		mcl = npo.mcl + npo.mcl_prod++;
-+		mcl->op = __HYPERVISOR_grant_table_op;
-+		mcl->args[0] = GNTTABOP_copy;
-+		mcl->args[1] = (unsigned long)grant_copy_op;
-+		mcl->args[2] = npo.copy_prod;
++		spin_lock_irqsave(&pcistub_devices_lock, flags);
++		list_del(&found_psdev->dev_list);
++		spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++		/* the final put for releasing from the list */
++		pcistub_device_put(found_psdev);
 +	}
++}
 +
-+	/* Nothing to do? */
-+	if (!npo.mcl_prod)
-+		return;
++static struct pci_device_id pcistub_ids[] = {
++	{
++	 .vendor = PCI_ANY_ID,
++	 .device = PCI_ANY_ID,
++	 .subvendor = PCI_ANY_ID,
++	 .subdevice = PCI_ANY_ID,
++	 },
++	{0,},
++};
 +
-+	BUG_ON(npo.copy_prod > NET_RX_RING_SIZE);
-+	BUG_ON(npo.mmu_prod > NET_RX_RING_SIZE);
-+	BUG_ON(npo.trans_prod > NET_RX_RING_SIZE);
-+	BUG_ON(npo.mcl_prod > NET_RX_RING_SIZE+3);
-+	BUG_ON(npo.meta_prod > NET_RX_RING_SIZE);
++/*
++ * Note: There is no MODULE_DEVICE_TABLE entry here because this isn't
++ * for a normal device. I don't want it to be loaded automatically.
++ */
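
For contrast, a hotplug-friendly driver would export the ID table so the module tools can autoload it; the single line deliberately omitted here would be:

MODULE_DEVICE_TABLE(pci, pcistub_ids);

With the wildcard table above, that line would make udev load pciback for every PCI device in the system, which is exactly what is being avoided.
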
 +
-+	ret = HYPERVISOR_multicall(npo.mcl, npo.mcl_prod);
-+	BUG_ON(ret != 0);
++static struct pci_driver pciback_pci_driver = {
++	.name = "pciback",
++	.id_table = pcistub_ids,
++	.probe = pcistub_probe,
++	.remove = pcistub_remove,
++};
 +
-+	while ((skb = __skb_dequeue(&rxq)) != NULL) {
-+		nr_frags = *(int *)skb->cb;
++static inline int str_to_slot(const char *buf, int *domain, int *bus,
++			      int *slot, int *func)
++{
++	int err;
 +
-+		netif = netdev_priv(skb->dev);
-+		/* We can't rely on skb_release_data to release the
-+		   pages used by fragments for us, since it tries to
-+		   touch the pages in the fraglist.  If we're in
-+		   flipping mode, that doesn't work.  In copying mode,
-+		   we still have access to all of the pages, and so
-+		   it's safe to let release_data deal with it. */
-+		/* (Freeing the fragments is safe since we copy
-+		   non-linear skbs destined for flipping interfaces) */
-+		if (!netif->copying_receiver) {
-+			atomic_set(&(skb_shinfo(skb)->dataref), 1);
-+			skb_shinfo(skb)->frag_list = NULL;
-+			skb_shinfo(skb)->nr_frags = 0;
-+			netbk_free_pages(nr_frags, meta + npo.meta_cons + 1);
-+		}
++	err = sscanf(buf, " %x:%x:%x.%x", domain, bus, slot, func);
++	if (err == 4)
++		return 0;
++	else if (err < 0)
++		return -EINVAL;
 +
-+		netif->stats.tx_bytes += skb->len;
-+		netif->stats.tx_packets++;
++	/* try again without domain */
++	*domain = 0;
++	err = sscanf(buf, " %x:%x.%x", bus, slot, func);
++	if (err == 3)
++		return 0;
 +
-+		status = netbk_check_gop(nr_frags, netif->domid, &npo);
++	return -EINVAL;
++}
 +
-+		id = meta[npo.meta_cons].id;
-+		flags = nr_frags ? NETRXF_more_data : 0;
++static inline int str_to_quirk(const char *buf, int *domain, int *bus, int
++			       *slot, int *func, int *reg, int *size, int *mask)
++{
++	int err;
 +
-+		if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
-+			flags |= NETRXF_csum_blank | NETRXF_data_validated;
-+		else if (skb->proto_data_valid) /* remote but checksummed? */
-+			flags |= NETRXF_data_validated;
++	err =
++	    sscanf(buf, " %04x:%02x:%02x.%1x-%08x:%1x:%08x", domain, bus, slot,
++		   func, reg, size, mask);
++	if (err == 7)
++		return 0;
++	return -EINVAL;
++}
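
Both helpers above are plain sscanf() matchers: str_to_slot() takes a bus/device/function triple with an optional leading PCI domain, str_to_quirk() a slot plus a reg:size:mask tuple. A stand-alone userspace rendering of the slot parser and the strings it accepts (illustrative only):

#include <stdio.h>

/* Mirrors str_to_slot(): try "dddd:bb:ss.f" first, then "bb:ss.f". */
static int parse_slot(const char *buf, int *dom, int *bus, int *slot, int *fn)
{
	if (sscanf(buf, " %x:%x:%x.%x", dom, bus, slot, fn) == 4)
		return 0;
	*dom = 0;	/* no domain given: default to domain 0 */
	if (sscanf(buf, " %x:%x.%x", bus, slot, fn) == 3)
		return 0;
	return -1;
}

int main(void)
{
	int dom, bus, slot, fn;

	/* Accepted with an explicit domain... */
	if (!parse_slot("0000:00:1d.7", &dom, &bus, &slot, &fn))
		printf("%04x:%02x:%02x.%x\n", dom, bus, slot, fn);
	/* ...and without one. */
	if (!parse_slot("02:00.0", &dom, &bus, &slot, &fn))
		printf("%04x:%02x:%02x.%x\n", dom, bus, slot, fn);
	return 0;
}

A quirk string, by comparison, looks like 0000:00:1d.7-00000004:2:0000ffff (device, then register offset, access size, and writable-bits mask).
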
 +
-+		if (meta[npo.meta_cons].copy)
-+			offset = 0;
-+		else
-+			offset = offset_in_page(skb->data);
-+		resp = make_rx_response(netif, id, status, offset,
-+					skb_headlen(skb), flags);
++static int pcistub_device_id_add(int domain, int bus, int slot, int func)
++{
++	struct pcistub_device_id *pci_dev_id;
++	unsigned long flags;
++
++	pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
++	if (!pci_dev_id)
++		return -ENOMEM;
++
++	pci_dev_id->domain = domain;
++	pci_dev_id->bus = bus;
++	pci_dev_id->devfn = PCI_DEVFN(slot, func);
++
++	pr_debug("pciback: wants to seize %04x:%02x:%02x.%01x\n",
++		 domain, bus, slot, func);
++
++	spin_lock_irqsave(&device_ids_lock, flags);
++	list_add_tail(&pci_dev_id->slot_list, &pcistub_device_ids);
++	spin_unlock_irqrestore(&device_ids_lock, flags);
 +
-+		if (meta[npo.meta_cons].frag.size) {
-+			struct netif_extra_info *gso =
-+				(struct netif_extra_info *)
-+				RING_GET_RESPONSE(&netif->rx,
-+						  netif->rx.rsp_prod_pvt++);
++	return 0;
++}
 +
-+			resp->flags |= NETRXF_extra_info;
++static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
++{
++	struct pcistub_device_id *pci_dev_id, *t;
++	int devfn = PCI_DEVFN(slot, func);
++	int err = -ENOENT;
++	unsigned long flags;
 +
-+			gso->u.gso.size = meta[npo.meta_cons].frag.size;
-+			gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
-+			gso->u.gso.pad = 0;
-+			gso->u.gso.features = 0;
++	spin_lock_irqsave(&device_ids_lock, flags);
++	list_for_each_entry_safe(pci_dev_id, t, &pcistub_device_ids, slot_list) {
 +
-+			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
-+			gso->flags = 0;
-+		}
++		if (pci_dev_id->domain == domain
++		    && pci_dev_id->bus == bus && pci_dev_id->devfn == devfn) {
++			/* Don't break out of the loop here; the same
++			 * slot could be in the list more than once
++			 */
++			list_del(&pci_dev_id->slot_list);
++			kfree(pci_dev_id);
 +
-+		netbk_add_frag_responses(netif, status,
-+					 meta + npo.meta_cons + 1,
-+					 nr_frags);
++			err = 0;
 +
-+		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, ret);
-+		irq = netif->irq;
-+		if (ret && !rx_notify[irq]) {
-+			rx_notify[irq] = 1;
-+			notify_list[notify_nr++] = irq;
++			pr_debug("pciback: removed %04x:%02x:%02x.%01x from "
++				 "seize list\n", domain, bus, slot, func);
 +		}
++	}
++	spin_unlock_irqrestore(&device_ids_lock, flags);
 +
-+		if (netif_queue_stopped(netif->dev) &&
-+		    netif_schedulable(netif) &&
-+		    !netbk_queue_full(netif))
-+			netif_wake_queue(netif->dev);
++	return err;
++}
 +
-+		netif_put(netif);
-+		dev_kfree_skb(skb);
-+		npo.meta_cons += nr_frags + 1;
++static int pcistub_reg_add(int domain, int bus, int slot, int func, int reg,
++			   int size, int mask)
++{
++	int err = 0;
++	struct pcistub_device *psdev;
++	struct pci_dev *dev;
++	struct config_field *field;
++
++	psdev = pcistub_device_find(domain, bus, slot, func);
++	if (!psdev || !psdev->dev) {
++		err = -ENODEV;
++		goto out;
 +	}
++	dev = psdev->dev;
 +
-+	while (notify_nr != 0) {
-+		irq = notify_list[--notify_nr];
-+		rx_notify[irq] = 0;
-+		notify_remote_via_irq(irq);
++	field = kzalloc(sizeof(*field), GFP_ATOMIC);
++	if (!field) {
++		err = -ENOMEM;
++		goto out;
 +	}
 +
-+	/* More work to do? */
-+	if (!skb_queue_empty(&rx_queue) && !timer_pending(&net_timer))
-+		tasklet_schedule(&net_rx_tasklet);
-+#if 0
-+	else
-+		xen_network_done_notify();
-+#endif
-+}
++	field->offset = reg;
++	field->size = size;
++	field->mask = mask;
++	field->init = NULL;
++	field->reset = NULL;
++	field->release = NULL;
++	field->clean = pciback_config_field_free;
 +
-+static void net_alarm(unsigned long unused)
-+{
-+	tasklet_schedule(&net_rx_tasklet);
++	err = pciback_config_quirks_add_field(dev, field);
++	if (err)
++		kfree(field);
++      out:
++	return err;
 +}
 +
-+struct net_device_stats *netif_be_get_stats(struct net_device *dev)
++static ssize_t pcistub_slot_add(struct device_driver *drv, const char *buf,
++				size_t count)
 +{
-+	netif_t *netif = netdev_priv(dev);
-+	return &netif->stats;
-+}
++	int domain, bus, slot, func;
++	int err;
 +
-+static int __on_net_schedule_list(netif_t *netif)
-+{
-+	return netif->list.next != NULL;
-+}
++	err = str_to_slot(buf, &domain, &bus, &slot, &func);
++	if (err)
++		goto out;
 +
-+static void remove_from_net_schedule_list(netif_t *netif)
-+{
-+	spin_lock_irq(&net_schedule_list_lock);
-+	if (likely(__on_net_schedule_list(netif))) {
-+		list_del(&netif->list);
-+		netif->list.next = NULL;
-+		netif_put(netif);
-+	}
-+	spin_unlock_irq(&net_schedule_list_lock);
++	err = pcistub_device_id_add(domain, bus, slot, func);
++
++      out:
++	if (!err)
++		err = count;
++	return err;
 +}
 +
-+static void add_to_net_schedule_list_tail(netif_t *netif)
++DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add);
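
DRIVER_ATTR(new_slot, ...) declares a struct driver_attribute named driver_attr_new_slot, which pcistub_init() below registers via driver_create_file(). Roughly, it expands to something like this (the real 2.6.18 macro also fills in .attr.owner; sketch only):

struct driver_attribute driver_attr_new_slot = {
	.attr  = { .name = "new_slot", .mode = S_IWUSR },
	.show  = NULL,
	.store = pcistub_slot_add,
};

Once registered, the attribute appears as a write-only file in sysfs, so a device can be handed to pciback from dom0 with e.g. echo 0000:00:1d.7 > /sys/bus/pci/drivers/pciback/new_slot.
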
++
++static ssize_t pcistub_slot_remove(struct device_driver *drv, const char *buf,
++				   size_t count)
 +{
-+	if (__on_net_schedule_list(netif))
-+		return;
++	int domain, bus, slot, func;
++	int err;
 +
-+	spin_lock_irq(&net_schedule_list_lock);
-+	if (!__on_net_schedule_list(netif) &&
-+	    likely(netif_schedulable(netif))) {
-+		list_add_tail(&netif->list, &net_schedule_list);
-+		netif_get(netif);
-+	}
-+	spin_unlock_irq(&net_schedule_list_lock);
++	err = str_to_slot(buf, &domain, &bus, &slot, &func);
++	if (err)
++		goto out;
++
++	err = pcistub_device_id_remove(domain, bus, slot, func);
++
++      out:
++	if (!err)
++		err = count;
++	return err;
 +}
 +
-+/*
-+ * Note on CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER:
-+ * If this driver is pipelining transmit requests then we can be very
-+ * aggressive in avoiding new-packet notifications -- frontend only needs to
-+ * send a notification if there are no outstanding unreceived responses.
-+ * If we may be buffer transmit buffers for any reason then we must be rather
-+ * more conservative and treat this as the final check for pending work.
-+ */
-+void netif_schedule_work(netif_t *netif)
++DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove);
++
++static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)
 +{
-+	int more_to_do;
++	struct pcistub_device_id *pci_dev_id;
++	size_t count = 0;
++	unsigned long flags;
 +
-+#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
-+	more_to_do = RING_HAS_UNCONSUMED_REQUESTS(&netif->tx);
-+#else
-+	RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
-+#endif
++	spin_lock_irqsave(&device_ids_lock, flags);
++	list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
++		if (count >= PAGE_SIZE)
++			break;
 +
-+	if (more_to_do) {
-+		add_to_net_schedule_list_tail(netif);
-+		maybe_schedule_tx_action();
++		count += scnprintf(buf + count, PAGE_SIZE - count,
++				   "%04x:%02x:%02x.%01x\n",
++				   pci_dev_id->domain, pci_dev_id->bus,
++				   PCI_SLOT(pci_dev_id->devfn),
++				   PCI_FUNC(pci_dev_id->devfn));
 +	}
-+}
++	spin_unlock_irqrestore(&device_ids_lock, flags);
 +
-+void netif_deschedule_work(netif_t *netif)
-+{
-+	remove_from_net_schedule_list(netif);
++	return count;
 +}
 +
++DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL);
 +
-+static void tx_add_credit(netif_t *netif)
++static ssize_t pcistub_quirk_add(struct device_driver *drv, const char *buf,
++				 size_t count)
 +{
-+	unsigned long max_burst, max_credit;
++	int domain, bus, slot, func, reg, size, mask;
++	int err;
 +
-+	/*
-+	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
-+	 * Otherwise the interface can seize up due to insufficient credit.
-+	 */
-+	max_burst = RING_GET_REQUEST(&netif->tx, netif->tx.req_cons)->size;
-+	max_burst = min(max_burst, 131072UL);
-+	max_burst = max(max_burst, netif->credit_bytes);
++	err = str_to_quirk(buf, &domain, &bus, &slot, &func, &reg, &size,
++			   &mask);
++	if (err)
++		goto out;
 +
-+	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
-+	max_credit = netif->remaining_credit + netif->credit_bytes;
-+	if (max_credit < netif->remaining_credit)
-+		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
++	err = pcistub_reg_add(domain, bus, slot, func, reg, size, mask);
 +
-+	netif->remaining_credit = min(max_credit, max_burst);
++      out:
++	if (!err)
++		err = count;
++	return err;
 +}
 +
-+static void tx_credit_callback(unsigned long data)
++static ssize_t pcistub_quirk_show(struct device_driver *drv, char *buf)
 +{
-+	netif_t *netif = (netif_t *)data;
-+	tx_add_credit(netif);
-+	netif_schedule_work(netif);
-+}
++	int count = 0;
++	unsigned long flags;
++	extern struct list_head pciback_quirks;
++	struct pciback_config_quirk *quirk;
++	struct pciback_dev_data *dev_data;
++	struct config_field *field;
++	struct config_field_entry *cfg_entry;
 +
-+inline static void net_tx_action_dealloc(void)
-+{
-+	gnttab_unmap_grant_ref_t *gop;
-+	u16 pending_idx;
-+	PEND_RING_IDX dc, dp;
-+	netif_t *netif;
-+	int ret;
++	spin_lock_irqsave(&device_ids_lock, flags);
++	list_for_each_entry(quirk, &pciback_quirks, quirks_list) {
++		if (count >= PAGE_SIZE)
++			goto out;
 +
-+	dc = dealloc_cons;
-+	dp = dealloc_prod;
++		count += scnprintf(buf + count, PAGE_SIZE - count,
++				   "%02x:%02x.%01x\n\t%04x:%04x:%04x:%04x\n",
++				   quirk->pdev->bus->number,
++				   PCI_SLOT(quirk->pdev->devfn),
++				   PCI_FUNC(quirk->pdev->devfn),
++				   quirk->devid.vendor, quirk->devid.device,
++				   quirk->devid.subvendor,
++				   quirk->devid.subdevice);
 +
-+	/* Ensure we see all indexes enqueued by netif_idx_release(). */
-+	smp_rmb();
++		dev_data = pci_get_drvdata(quirk->pdev);
 +
-+	/*
-+	 * Free up any grants we have finished using
-+	 */
-+	gop = tx_unmap_ops;
-+	while (dc != dp) {
-+		pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
-+		gnttab_set_unmap_op(gop, idx_to_kaddr(pending_idx),
-+				    GNTMAP_host_map,
-+				    grant_tx_handle[pending_idx]);
-+		gop++;
-+	}
-+	ret = HYPERVISOR_grant_table_op(
-+		GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops);
-+	BUG_ON(ret);
++		list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
++			field = cfg_entry->field;
++			if (count >= PAGE_SIZE)
++				goto out;
 +
-+	while (dealloc_cons != dp) {
-+		pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
++			count += scnprintf(buf + count, PAGE_SIZE - count,
++					   "\t\t%08x:%01x:%08x\n",
++					   cfg_entry->base_offset + field->offset, 
++					   field->size, field->mask);
++		}
++	}
 +
-+		netif = pending_tx_info[pending_idx].netif;
++      out:
++	spin_unlock_irqrestore(&device_ids_lock, flags);
 +
-+		make_tx_response(netif, &pending_tx_info[pending_idx].req, 
-+				 NETIF_RSP_OKAY);
++	return count;
++}
 +
-+		pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
++DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show, pcistub_quirk_add);
 +
-+		netif_put(netif);
++static ssize_t permissive_add(struct device_driver *drv, const char *buf,
++			      size_t count)
++{
++	int domain, bus, slot, func;
++	int err;
++	struct pcistub_device *psdev;
++	struct pciback_dev_data *dev_data;
++	err = str_to_slot(buf, &domain, &bus, &slot, &func);
++	if (err)
++		goto out;
++	psdev = pcistub_device_find(domain, bus, slot, func);
++	if (!psdev) {
++		err = -ENODEV;
++		goto out;
++	}
++	if (!psdev->dev) {
++		err = -ENODEV;
++		goto release;
++	}
++	dev_data = pci_get_drvdata(psdev->dev);
++	/* the driver data for a device should never be null at this point */
++	if (!dev_data) {
++		err = -ENXIO;
++		goto release;
++	}
++	if (!dev_data->permissive) {
++		dev_data->permissive = 1;
++		/* Let user know that what they're doing could be unsafe */
++		dev_warn(&psdev->dev->dev,
++			 "enabling permissive mode configuration space accesses!\n");
++		dev_warn(&psdev->dev->dev,
++			 "permissive mode is potentially unsafe!\n");
 +	}
++      release:
++	pcistub_device_put(psdev);
++      out:
++	if (!err)
++		err = count;
++	return err;
 +}
 +
-+static void netbk_tx_err(netif_t *netif, netif_tx_request_t *txp, RING_IDX end)
++static ssize_t permissive_show(struct device_driver *drv, char *buf)
 +{
-+	RING_IDX cons = netif->tx.req_cons;
-+
-+	do {
-+		make_tx_response(netif, txp, NETIF_RSP_ERROR);
-+		if (cons >= end)
++	struct pcistub_device *psdev;
++	struct pciback_dev_data *dev_data;
++	size_t count = 0;
++	unsigned long flags;
++	spin_lock_irqsave(&pcistub_devices_lock, flags);
++	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
++		if (count >= PAGE_SIZE)
 +			break;
-+		txp = RING_GET_REQUEST(&netif->tx, cons++);
-+	} while (1);
-+	netif->tx.req_cons = cons;
-+	netif_schedule_work(netif);
-+	netif_put(netif);
++		if (!psdev->dev)
++			continue;
++		dev_data = pci_get_drvdata(psdev->dev);
++		if (!dev_data || !dev_data->permissive)
++			continue;
++		count +=
++		    scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
++			      pci_name(psdev->dev));
++	}
++	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++	return count;
 +}
 +
-+static int netbk_count_requests(netif_t *netif, netif_tx_request_t *first,
-+				netif_tx_request_t *txp, int work_to_do)
-+{
-+	RING_IDX cons = netif->tx.req_cons;
-+	int frags = 0;
-+
-+	if (!(first->flags & NETTXF_more_data))
-+		return 0;
-+
-+	do {
-+		if (frags >= work_to_do) {
-+			DPRINTK("Need more frags\n");
-+			return -frags;
-+		}
++DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show, permissive_add);
 +
-+		if (unlikely(frags >= MAX_SKB_FRAGS)) {
-+			DPRINTK("Too many frags\n");
-+			return -frags;
-+		}
++#ifdef CONFIG_PCI_MSI
 +
-+		memcpy(txp, RING_GET_REQUEST(&netif->tx, cons + frags),
-+		       sizeof(*txp));
-+		if (txp->size > first->size) {
-+			DPRINTK("Frags galore\n");
-+			return -frags;
-+		}
++int pciback_get_owner(struct pci_dev *dev)
++{
++	struct pcistub_device *psdev;
 +
-+		first->size -= txp->size;
-+		frags++;
++	psdev = pcistub_device_find(pci_domain_nr(dev->bus), dev->bus->number,
++			PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
 +
-+		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
-+			DPRINTK("txp->offset: %x, size: %u\n",
-+				txp->offset, txp->size);
-+			return -frags;
-+		}
-+	} while ((txp++)->flags & NETTXF_more_data);
++	if (!psdev || !psdev->pdev)
++		return -1;
 +
-+	return frags;
++	return psdev->pdev->xdev->otherend_id;
 +}
++#endif
 +
-+static gnttab_map_grant_ref_t *netbk_get_requests(netif_t *netif,
-+						  struct sk_buff *skb,
-+						  netif_tx_request_t *txp,
-+						  gnttab_map_grant_ref_t *mop)
++static void pcistub_exit(void)
 +{
-+	struct skb_shared_info *shinfo = skb_shinfo(skb);
-+	skb_frag_t *frags = shinfo->frags;
-+	unsigned long pending_idx = *((u16 *)skb->data);
-+	int i, start;
-+
-+	/* Skip first skb fragment if it is on same page as header fragment. */
-+	start = ((unsigned long)shinfo->frags[0].page == pending_idx);
++	driver_remove_file(&pciback_pci_driver.driver, &driver_attr_new_slot);
++	driver_remove_file(&pciback_pci_driver.driver,
++			   &driver_attr_remove_slot);
++	driver_remove_file(&pciback_pci_driver.driver, &driver_attr_slots);
++	driver_remove_file(&pciback_pci_driver.driver, &driver_attr_quirks);
++	driver_remove_file(&pciback_pci_driver.driver, &driver_attr_permissive);
 +
-+	for (i = start; i < shinfo->nr_frags; i++, txp++) {
-+		pending_idx = pending_ring[MASK_PEND_IDX(pending_cons++)];
++	pci_unregister_driver(&pciback_pci_driver);
++	WARN_ON(unregister_msi_get_owner(pciback_get_owner));
++}
 +
-+		gnttab_set_map_op(mop++, idx_to_kaddr(pending_idx),
-+				  GNTMAP_host_map | GNTMAP_readonly,
-+				  txp->gref, netif->domid);
++static int __init pcistub_init(void)
++{
++	int pos = 0;
++	int err = 0;
++	int domain, bus, slot, func;
++	int parsed;
 +
-+		memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
-+		netif_get(netif);
-+		pending_tx_info[pending_idx].netif = netif;
-+		frags[i].page = (void *)pending_idx;
-+	}
++	if (pci_devs_to_hide && *pci_devs_to_hide) {
++		do {
++			parsed = 0;
 +
-+	return mop;
-+}
++			err = sscanf(pci_devs_to_hide + pos,
++				     " (%x:%x:%x.%x) %n",
++				     &domain, &bus, &slot, &func, &parsed);
++			if (err != 4) {
++				domain = 0;
++				err = sscanf(pci_devs_to_hide + pos,
++					     " (%x:%x.%x) %n",
++					     &bus, &slot, &func, &parsed);
++				if (err != 3)
++					goto parse_error;
++			}
 +
-+static int netbk_tx_check_mop(struct sk_buff *skb,
-+			       gnttab_map_grant_ref_t **mopp)
-+{
-+	gnttab_map_grant_ref_t *mop = *mopp;
-+	int pending_idx = *((u16 *)skb->data);
-+	netif_t *netif = pending_tx_info[pending_idx].netif;
-+	netif_tx_request_t *txp;
-+	struct skb_shared_info *shinfo = skb_shinfo(skb);
-+	int nr_frags = shinfo->nr_frags;
-+	int i, err, start;
++			err = pcistub_device_id_add(domain, bus, slot, func);
++			if (err)
++				goto out;
 +
-+	/* Check status of header. */
-+	err = mop->status;
-+	if (unlikely(err)) {
-+		txp = &pending_tx_info[pending_idx].req;
-+		make_tx_response(netif, txp, NETIF_RSP_ERROR);
-+		pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-+		netif_put(netif);
-+	} else {
-+		set_phys_to_machine(
-+			__pa(idx_to_kaddr(pending_idx)) >> PAGE_SHIFT,
-+			FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT));
-+		grant_tx_handle[pending_idx] = mop->handle;
++			/* if parsed<=0, we've reached the end of the string */
++			pos += parsed;
++		} while (parsed > 0 && pci_devs_to_hide[pos]);
 +	}
 +
-+	/* Skip first skb fragment if it is on same page as header fragment. */
-+	start = ((unsigned long)shinfo->frags[0].page == pending_idx);
++	/* If we're the first PCI Device Driver to register, we're the
++	 * first one to get offered PCI devices as they become
++	 * available (and thus we can be the first to grab them)
++	 */
++	err = pci_register_driver(&pciback_pci_driver);
++	if (err < 0)
++		goto out;
 +
-+	for (i = start; i < nr_frags; i++) {
-+		int j, newerr;
++	err = driver_create_file(&pciback_pci_driver.driver,
++				 &driver_attr_new_slot);
++	if (!err)
++		err = driver_create_file(&pciback_pci_driver.driver,
++					 &driver_attr_remove_slot);
++	if (!err)
++		err = driver_create_file(&pciback_pci_driver.driver,
++					 &driver_attr_slots);
++	if (!err)
++		err = driver_create_file(&pciback_pci_driver.driver,
++					 &driver_attr_quirks);
++	if (!err)
++		err = driver_create_file(&pciback_pci_driver.driver,
++					 &driver_attr_permissive);
 +
-+		pending_idx = (unsigned long)shinfo->frags[i].page;
++	if (!err)
++		err = register_msi_get_owner(pciback_get_owner);
++	if (err)
++		pcistub_exit();
 +
-+		/* Check error status: if okay then remember grant handle. */
-+		newerr = (++mop)->status;
-+		if (likely(!newerr)) {
-+			set_phys_to_machine(
-+				__pa(idx_to_kaddr(pending_idx))>>PAGE_SHIFT,
-+				FOREIGN_FRAME(mop->dev_bus_addr>>PAGE_SHIFT));
-+			grant_tx_handle[pending_idx] = mop->handle;
-+			/* Had a previous error? Invalidate this fragment. */
-+			if (unlikely(err))
-+				netif_idx_release(pending_idx);
-+			continue;
-+		}
++      out:
++	return err;
 +
-+		/* Error on this fragment: respond to client with an error. */
-+		txp = &pending_tx_info[pending_idx].req;
-+		make_tx_response(netif, txp, NETIF_RSP_ERROR);
-+		pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-+		netif_put(netif);
++      parse_error:
++	printk(KERN_ERR "pciback: Error parsing pci_devs_to_hide at \"%s\"\n",
++	       pci_devs_to_hide + pos);
++	return -EINVAL;
++}
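
pcistub_init() walks the pci_devs_to_hide string with the same two formats accepted by str_to_slot(), each wrapped in parentheses, using %n to learn how far each match advanced. The loop behaves like this userspace rendering (the input string is a hypothetical example):

#include <stdio.h>

int main(void)
{
	const char *hide = "(0000:00:1d.7)(02:00.0)";
	int pos = 0, parsed, dom, bus, slot, fn;

	while (hide[pos]) {
		parsed = 0;
		if (sscanf(hide + pos, " (%x:%x:%x.%x) %n",
			   &dom, &bus, &slot, &fn, &parsed) != 4) {
			dom = 0;
			if (sscanf(hide + pos, " (%x:%x.%x) %n",
				   &bus, &slot, &fn, &parsed) != 3)
				break;		/* parse error */
		}
		printf("hide %04x:%02x:%02x.%x\n", dom, bus, slot, fn);
		if (parsed <= 0)
			break;			/* no forward progress */
		pos += parsed;
	}
	return 0;
}

Assuming the parameter is exposed as pciback.hide, as in the stock driver, pciback.hide=(0000:00:1d.7)(02:00.0) on the kernel command line would seize those two devices at boot.
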
 +
-+		/* Not the first error? Preceding frags already invalidated. */
-+		if (err)
-+			continue;
++#ifndef MODULE
++/*
++ * fs_initcall happens before device_initcall, so
++ * pciback *should* get called first (because we
++ * want to grab any device before other drivers
++ * get a chance, by being the first pci device
++ * driver to register)
++ */
++fs_initcall(pcistub_init);
++#endif
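
The trick relies on initcall ordering: initcall sections run in ascending level order at boot, and in 2.6.18 fs_initcall is level 5 while device_initcall — which module_init() becomes for built-in code — is level 6. For a built-in pciback the sequence is therefore fixed:

fs_initcall(pcistub_init);	/* level 5: register the stub driver early,
				 * seizing devices before ordinary PCI
				 * drivers get to probe them */
module_init(pciback_init);	/* level 6 when built in: finishes the
				 * deferred setup via
				 * pcistub_init_devices_late() */
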
 +
-+		/* First error: invalidate header and preceding fragments. */
-+		pending_idx = *((u16 *)skb->data);
-+		netif_idx_release(pending_idx);
-+		for (j = start; j < i; j++) {
-+			pending_idx = (unsigned long)shinfo->frags[i].page;
-+			netif_idx_release(pending_idx);
-+		}
++static int __init pciback_init(void)
++{
++	int err;
 +
-+		/* Remember the error: invalidate all subsequent fragments. */
-+		err = newerr;
-+	}
++	err = pciback_config_init();
++	if (err)
++		return err;
++
++#ifdef MODULE
++	err = pcistub_init();
++	if (err < 0)
++		return err;
++#endif
++
++	pcistub_init_devices_late();
++	err = pciback_xenbus_register();
++	if (err)
++		pcistub_exit();
 +
-+	*mopp = mop + 1;
 +	return err;
 +}
 +
-+static void netbk_fill_frags(struct sk_buff *skb)
++static void __exit pciback_cleanup(void)
 +{
-+	struct skb_shared_info *shinfo = skb_shinfo(skb);
-+	int nr_frags = shinfo->nr_frags;
-+	int i;
++	pciback_xenbus_unregister();
++	pcistub_exit();
++}
 +
-+	for (i = 0; i < nr_frags; i++) {
-+		skb_frag_t *frag = shinfo->frags + i;
-+		netif_tx_request_t *txp;
-+		unsigned long pending_idx;
++module_init(pciback_init);
++module_exit(pciback_cleanup);
 +
-+		pending_idx = (unsigned long)frag->page;
-+		txp = &pending_tx_info[pending_idx].req;
-+		frag->page = virt_to_page(idx_to_kaddr(pending_idx));
-+		frag->size = txp->size;
-+		frag->page_offset = txp->offset;
++MODULE_LICENSE("Dual BSD/GPL");
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/pciback/pciback.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/pciback/pciback.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,111 @@
++/*
++ * PCI Backend Common Data Structures & Function Declarations
++ *
++ *   Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
++ */
++#ifndef __XEN_PCIBACK_H__
++#define __XEN_PCIBACK_H__
 +
-+		skb->len += txp->size;
-+		skb->data_len += txp->size;
-+		skb->truesize += txp->size;
-+	}
-+}
++#include <linux/pci.h>
++#include <linux/interrupt.h>
++#include <xen/xenbus.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/workqueue.h>
++#include <asm/atomic.h>
++#include <xen/interface/io/pciif.h>
 +
-+int netbk_get_extras(netif_t *netif, struct netif_extra_info *extras,
-+		     int work_to_do)
-+{
-+	struct netif_extra_info extra;
-+	RING_IDX cons = netif->tx.req_cons;
++struct pci_dev_entry {
++	struct list_head list;
++	struct pci_dev *dev;
++};
 +
-+	do {
-+		if (unlikely(work_to_do-- <= 0)) {
-+			DPRINTK("Missing extra info\n");
-+			return -EBADR;
-+		}
++#define _PDEVF_op_active 	(0)
++#define PDEVF_op_active 	(1<<(_PDEVF_op_active))
 +
-+		memcpy(&extra, RING_GET_REQUEST(&netif->tx, cons),
-+		       sizeof(extra));
-+		if (unlikely(!extra.type ||
-+			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
-+			netif->tx.req_cons = ++cons;
-+			DPRINTK("Invalid extra type: %d\n", extra.type);
-+			return -EINVAL;
-+		}
++struct pciback_device {
++	void *pci_dev_data;
++	spinlock_t dev_lock;
 +
-+		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
-+		netif->tx.req_cons = ++cons;
-+	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
++	struct xenbus_device *xdev;
 +
-+	return work_to_do;
-+}
++	struct xenbus_watch be_watch;
++	u8 be_watching;
 +
-+static int netbk_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso)
-+{
-+	if (!gso->u.gso.size) {
-+		DPRINTK("GSO size must not be zero.\n");
-+		return -EINVAL;
-+	}
++	int evtchn_irq;
 +
-+	/* Currently only TCPv4 S.O. is supported. */
-+	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
-+		DPRINTK("Bad GSO type %d.\n", gso->u.gso.type);
-+		return -EINVAL;
-+	}
++	struct vm_struct *sh_area;
++	struct xen_pci_sharedinfo *sh_info;
 +
-+	skb_shinfo(skb)->gso_size = gso->u.gso.size;
-+	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
++	unsigned long flags;
 +
-+	/* Header must be checked, and gso_segs computed. */
-+	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
-+	skb_shinfo(skb)->gso_segs = 0;
++	struct work_struct op_work;
++};
 +
-+	return 0;
-+}
++struct pciback_dev_data {
++	struct list_head config_fields;
++	int permissive;
++	int warned_on_write;
++};
 +
-+/* Called after netfront has transmitted */
-+static void net_tx_action(unsigned long unused)
-+{
-+	struct list_head *ent;
-+	struct sk_buff *skb;
-+	netif_t *netif;
-+	netif_tx_request_t txreq;
-+	netif_tx_request_t txfrags[MAX_SKB_FRAGS];
-+	struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
-+	u16 pending_idx;
-+	RING_IDX i;
-+	gnttab_map_grant_ref_t *mop;
-+	unsigned int data_len;
-+	int ret, work_to_do;
++/* Get/Put PCI Devices that are hidden from the PCI Backend Domain */
++struct pci_dev *pcistub_get_pci_dev_by_slot(struct pciback_device *pdev,
++					    int domain, int bus,
++					    int slot, int func);
++struct pci_dev *pcistub_get_pci_dev(struct pciback_device *pdev,
++				    struct pci_dev *dev);
++void pcistub_put_pci_dev(struct pci_dev *dev);
 +
-+	if (dealloc_cons != dealloc_prod)
-+		net_tx_action_dealloc();
++/* Ensure a device is turned off or reset */
++void pciback_reset_device(struct pci_dev *pdev);
 +
-+	mop = tx_map_ops;
-+	while (((NR_PENDING_REQS + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
-+		!list_empty(&net_schedule_list)) {
-+		/* Get a netif from the list with work to do. */
-+		ent = net_schedule_list.next;
-+		netif = list_entry(ent, netif_t, list);
-+		netif_get(netif);
-+		remove_from_net_schedule_list(netif);
++/* Access a virtual configuration space for a PCI device */
++int pciback_config_init(void);
++int pciback_config_init_dev(struct pci_dev *dev);
++void pciback_config_free_dyn_fields(struct pci_dev *dev);
++void pciback_config_reset_dev(struct pci_dev *dev);
++void pciback_config_free_dev(struct pci_dev *dev);
++int pciback_config_read(struct pci_dev *dev, int offset, int size,
++			u32 * ret_val);
++int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value);
 +
-+		RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do);
-+		if (!work_to_do) {
-+			netif_put(netif);
-+			continue;
-+		}
++/* Handle requests for specific devices from the frontend */
++typedef int (*publish_pci_dev_cb) (struct pciback_device *pdev,
++				   unsigned int domain, unsigned int bus,
++				   unsigned int devfn, unsigned int devid);
++typedef int (*publish_pci_root_cb) (struct pciback_device * pdev,
++				    unsigned int domain, unsigned int bus);
++int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
++			int devid, publish_pci_dev_cb publish_cb);
++void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev);
++struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
++				    unsigned int domain, unsigned int bus,
++				    unsigned int devfn);
++int pciback_init_devices(struct pciback_device *pdev);
++int pciback_publish_pci_roots(struct pciback_device *pdev,
++			      publish_pci_root_cb cb);
++void pciback_release_devices(struct pciback_device *pdev);
 +
-+		i = netif->tx.req_cons;
-+		rmb(); /* Ensure that we see the request before we copy it. */
-+		memcpy(&txreq, RING_GET_REQUEST(&netif->tx, i), sizeof(txreq));
++/* Handles events from front-end */
++irqreturn_t pciback_handle_event(int irq, void *dev_id, struct pt_regs *regs);
++void pciback_do_op(void *data);
 +
-+		/* Credit-based scheduling. */
-+		if (txreq.size > netif->remaining_credit) {
-+			unsigned long now = jiffies;
-+			unsigned long next_credit = 
-+				netif->credit_timeout.expires +
-+				msecs_to_jiffies(netif->credit_usec / 1000);
++int pciback_xenbus_register(void);
++void pciback_xenbus_unregister(void);
 +
-+			/* Timer could already be pending in rare cases. */
-+			if (timer_pending(&netif->credit_timeout)) {
-+				netif_put(netif);
-+				continue;
-+			}
++#ifdef CONFIG_PCI_MSI
++int pciback_enable_msi(struct pciback_device *pdev,
++                       struct pci_dev *dev, struct xen_pci_op *op);
 +
-+			/* Passed the point where we can replenish credit? */
-+			if (time_after_eq(now, next_credit)) {
-+				netif->credit_timeout.expires = now;
-+				tx_add_credit(netif);
-+			}
++int pciback_disable_msi(struct pciback_device *pdev,
++                         struct pci_dev *dev, struct xen_pci_op *op);
 +
-+			/* Still too big to send right now? Set a callback. */
-+			if (txreq.size > netif->remaining_credit) {
-+				netif->credit_timeout.data     =
-+					(unsigned long)netif;
-+				netif->credit_timeout.function =
-+					tx_credit_callback;
-+				__mod_timer(&netif->credit_timeout,
-+					    next_credit);
-+				netif_put(netif);
-+				continue;
-+			}
-+		}
-+		netif->remaining_credit -= txreq.size;
 +
-+		work_to_do--;
-+		netif->tx.req_cons = ++i;
++int pciback_enable_msix(struct pciback_device *pdev,
++                        struct pci_dev *dev, struct xen_pci_op *op);
 +
-+		memset(extras, 0, sizeof(extras));
-+		if (txreq.flags & NETTXF_extra_info) {
-+			work_to_do = netbk_get_extras(netif, extras,
-+						      work_to_do);
-+			i = netif->tx.req_cons;
-+			if (unlikely(work_to_do < 0)) {
-+				netbk_tx_err(netif, &txreq, i);
-+				continue;
-+			}
-+		}
++int pciback_disable_msix(struct pciback_device *pdev,
++                        struct pci_dev *dev, struct xen_pci_op *op);
++#endif
++extern int verbose_request;
++#endif
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/pciback/pciback_ops.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/pciback/pciback_ops.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,117 @@
++/*
++ * PCI Backend Operations - respond to PCI requests from Frontend
++ *
++ *   Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <asm/bitops.h>
++#include <xen/evtchn.h>
++#include "pciback.h"
 +
-+		ret = netbk_count_requests(netif, &txreq, txfrags, work_to_do);
-+		if (unlikely(ret < 0)) {
-+			netbk_tx_err(netif, &txreq, i - ret);
-+			continue;
-+		}
-+		i += ret;
++int verbose_request = 0;
++module_param(verbose_request, int, 0644);
 +
-+		if (unlikely(txreq.size < ETH_HLEN)) {
-+			DPRINTK("Bad packet size: %d\n", txreq.size);
-+			netbk_tx_err(netif, &txreq, i);
-+			continue;
-+		}
++/* Ensure a device is "turned off" and ready to be exported.
++ * (Also see pciback_config_reset_dev to ensure virtual configuration space is
++ * ready to be re-exported)
++ */
++void pciback_reset_device(struct pci_dev *dev)
++{
++	u16 cmd;
 +
-+		/* No crossing a page as the payload mustn't fragment. */
-+		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
-+			DPRINTK("txreq.offset: %x, size: %u, end: %lu\n", 
-+				txreq.offset, txreq.size, 
-+				(txreq.offset &~PAGE_MASK) + txreq.size);
-+			netbk_tx_err(netif, &txreq, i);
-+			continue;
-+		}
++	/* Disable devices (but not bridges) */
++	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
++		pci_disable_device(dev);
 +
-+		pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
++		pci_write_config_word(dev, PCI_COMMAND, 0);
 +
-+		data_len = (txreq.size > PKT_PROT_LEN &&
-+			    ret < MAX_SKB_FRAGS) ?
-+			PKT_PROT_LEN : txreq.size;
++		dev->is_enabled = 0;
++		dev->is_busmaster = 0;
++	} else {
++		pci_read_config_word(dev, PCI_COMMAND, &cmd);
++		if (cmd & (PCI_COMMAND_INVALIDATE)) {
++			cmd &= ~(PCI_COMMAND_INVALIDATE);
++			pci_write_config_word(dev, PCI_COMMAND, cmd);
 +
-+		skb = alloc_skb(data_len + 16 + NET_IP_ALIGN,
-+				GFP_ATOMIC | __GFP_NOWARN);
-+		if (unlikely(skb == NULL)) {
-+			DPRINTK("Can't allocate a skb in start_xmit.\n");
-+			netbk_tx_err(netif, &txreq, i);
-+			break;
++			dev->is_busmaster = 0;
 +		}
++	}
++}
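
Note the asymmetry above: a normal device has its command register zeroed outright, while a bridge only loses PCI_COMMAND_INVALIDATE (memory-write-invalidate enable, bit 4), presumably because zeroing a bridge's command register would also cut off decode for everything behind it. The masking in isolation, with a hypothetical readback value:

#include <stdio.h>

#define PCI_COMMAND_INVALIDATE	0x10	/* MWI enable, bit 4 */

int main(void)
{
	unsigned short cmd = 0x0117;		/* hypothetical value */

	if (cmd & PCI_COMMAND_INVALIDATE)
		cmd &= ~PCI_COMMAND_INVALIDATE;	/* as in the hunk above */
	printf("command register now %#06x\n", cmd);
	return 0;
}
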
 +
-+		/* Packets passed to netif_rx() must have some headroom. */
-+		skb_reserve(skb, 16 + NET_IP_ALIGN);
++static inline void test_and_schedule_op(struct pciback_device *pdev)
++{
++	/* Check that frontend is requesting an operation and that we are not
++	 * already processing a request */
++	if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
++	    && !test_and_set_bit(_PDEVF_op_active, &pdev->flags))
++		schedule_work(&pdev->op_work);
++}
 +
-+		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
-+			struct netif_extra_info *gso;
-+			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
++/* Performing the configuration space reads/writes must not be done in atomic
++ * context because some of the pci_* functions can sleep (mostly due to ACPI
++ * use of semaphores). This function is intended to be called from a work
++ * queue in process context, taking a struct pciback_device as its parameter */
++void pciback_do_op(void *data)
++{
++	struct pciback_device *pdev = data;
++	struct pci_dev *dev;
++	struct xen_pci_op *op = &pdev->sh_info->op;
 +
-+			if (netbk_set_skb_gso(skb, gso)) {
-+				kfree_skb(skb);
-+				netbk_tx_err(netif, &txreq, i);
-+				continue;
-+			}
++	dev = pciback_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
++
++	if (dev == NULL)
++		op->err = XEN_PCI_ERR_dev_not_found;
++	else
++	{
++		switch (op->cmd)
++		{
++			case XEN_PCI_OP_conf_read:
++				op->err = pciback_config_read(dev,
++					  op->offset, op->size, &op->value);
++				break;
++			case XEN_PCI_OP_conf_write:
++				op->err = pciback_config_write(dev,
++					  op->offset, op->size,	op->value);
++				break;
++#ifdef CONFIG_PCI_MSI
++			case XEN_PCI_OP_enable_msi:
++				op->err = pciback_enable_msi(pdev, dev, op);
++				break;
++			case XEN_PCI_OP_disable_msi:
++				op->err = pciback_disable_msi(pdev, dev, op);
++				break;
++			case XEN_PCI_OP_enable_msix:
++				op->err = pciback_enable_msix(pdev, dev, op);
++				break;
++			case XEN_PCI_OP_disable_msix:
++				op->err = pciback_disable_msix(pdev, dev, op);
++				break;
++#endif
++			default:
++				op->err = XEN_PCI_ERR_not_implemented;
++				break;
 +		}
++	}
++	/* Tell the driver domain that we're done. */ 
++	wmb();
++	clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
++	notify_remote_via_irq(pdev->evtchn_irq);
 +
-+		gnttab_set_map_op(mop, idx_to_kaddr(pending_idx),
-+				  GNTMAP_host_map | GNTMAP_readonly,
-+				  txreq.gref, netif->domid);
-+		mop++;
++	/* Mark that we're done. */
++	smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */
++	clear_bit(_PDEVF_op_active, &pdev->flags);
++	smp_mb__after_clear_bit(); /* /before/ final check for work */
 +
-+		memcpy(&pending_tx_info[pending_idx].req,
-+		       &txreq, sizeof(txreq));
-+		pending_tx_info[pending_idx].netif = netif;
-+		*((u16 *)skb->data) = pending_idx;
++	/* Check to see if the driver domain tried to start another request in
++	 * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active. */
++	test_and_schedule_op(pdev);
++}
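
The barrier sequence ending pciback_do_op() pairs with test_and_schedule_op(): publish results, clear the frontend's "request pending" bit, clear our own "busy" bit, and only then re-check for work, so a request raised between the two clears is never lost. A minimal userspace analogue of the re-check pattern (C11 atomics stand in for the kernel's bitops and barriers; names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool request_pending;	/* ~ _XEN_PCIF_active */
static atomic_bool worker_busy;		/* ~ _PDEVF_op_active */

static void schedule_op(void);

static void do_op(void)
{
	/* ... perform the request and publish its results ... */
	atomic_store(&request_pending, false);	/* request consumed */
	atomic_store(&worker_busy, false);	/* drop the busy flag */
	schedule_op();	/* re-check: a request may have arrived
			 * between the two stores above */
}

/* Run the worker only if there is work and nobody is already on it. */
static void schedule_op(void)
{
	if (atomic_load(&request_pending) &&
	    !atomic_exchange(&worker_busy, true))
		do_op();
}

int main(void)
{
	atomic_store(&request_pending, true);	/* frontend raises work */
	schedule_op();				/* event-handler path */
	puts(atomic_load(&worker_busy) ? "busy" : "idle");
	return 0;
}
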
 +
-+		__skb_put(skb, data_len);
++irqreturn_t pciback_handle_event(int irq, void *dev_id, struct pt_regs *regs)
++{
++	struct pciback_device *pdev = dev_id;
 +
-+		skb_shinfo(skb)->nr_frags = ret;
-+		if (data_len < txreq.size) {
-+			skb_shinfo(skb)->nr_frags++;
-+			skb_shinfo(skb)->frags[0].page =
-+				(void *)(unsigned long)pending_idx;
-+		} else {
-+			/* Discriminate from any valid pending_idx value. */
-+			skb_shinfo(skb)->frags[0].page = (void *)~0UL;
-+		}
++	test_and_schedule_op(pdev);
 +
-+		__skb_queue_tail(&tx_queue, skb);
++	return IRQ_HANDLED;
++}
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/pciback/slot.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/pciback/slot.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,157 @@
++/*
++ * PCI Backend - Provides a Virtual PCI bus (with real devices)
++ *               to the frontend
++ *
++ *   Author: Ryan Wilson <hap9 at epoch.ncsc.mil> (vpci.c)
++ *   Author: Tristan Gingold <tristan.gingold at bull.net>, from vpci.c
++ */
 +
-+		pending_cons++;
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include "pciback.h"
 +
-+		mop = netbk_get_requests(netif, skb, txfrags, mop);
++/* There are at most 32 slots in a pci bus.  */
++#define PCI_SLOT_MAX 32
 +
-+		netif->tx.req_cons = i;
-+		netif_schedule_work(netif);
++#define PCI_BUS_NBR 2
 +
-+		if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
-+			break;
-+	}
++struct slot_dev_data {
++	/* Access to slots must be protected by lock */
++	struct pci_dev *slots[PCI_BUS_NBR][PCI_SLOT_MAX];
++	spinlock_t lock;
++};
 +
-+	if (mop == tx_map_ops)
-+		return;
++struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
++				    unsigned int domain, unsigned int bus,
++				    unsigned int devfn)
++{
++	struct pci_dev *dev = NULL;
++	struct slot_dev_data *slot_dev = pdev->pci_dev_data;
++	unsigned long flags;
 +
-+	ret = HYPERVISOR_grant_table_op(
-+		GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops);
-+	BUG_ON(ret);
++	if (domain != 0 || PCI_FUNC(devfn) != 0)
++		return NULL;
 +
-+	mop = tx_map_ops;
-+	while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
-+		netif_tx_request_t *txp;
++	if (PCI_SLOT(devfn) >= PCI_SLOT_MAX || bus >= PCI_BUS_NBR)
++		return NULL;
 +
-+		pending_idx = *((u16 *)skb->data);
-+		netif       = pending_tx_info[pending_idx].netif;
-+		txp         = &pending_tx_info[pending_idx].req;
++	spin_lock_irqsave(&slot_dev->lock, flags);
++	dev = slot_dev->slots[bus][PCI_SLOT(devfn)];
++	spin_unlock_irqrestore(&slot_dev->lock, flags);
 +
-+		/* Check the remap error code. */
-+		if (unlikely(netbk_tx_check_mop(skb, &mop))) {
-+			DPRINTK("netback grant failed.\n");
-+			skb_shinfo(skb)->nr_frags = 0;
-+			kfree_skb(skb);
-+			continue;
-+		}
++	return dev;
++}
 +
-+		data_len = skb->len;
-+		memcpy(skb->data,
-+		       (void *)(idx_to_kaddr(pending_idx)|txp->offset),
-+		       data_len);
-+		if (data_len < txp->size) {
-+			/* Append the packet payload as a fragment. */
-+			txp->offset += data_len;
-+			txp->size -= data_len;
-+		} else {
-+			/* Schedule a response immediately. */
-+			netif_idx_release(pending_idx);
-+		}
++int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
++			int devid, publish_pci_dev_cb publish_cb)
++{
++	int err = 0, slot, bus;
++	struct slot_dev_data *slot_dev = pdev->pci_dev_data;
++	unsigned long flags;
 +
-+		/*
-+		 * Old frontends do not assert data_validated but we
-+		 * can infer it from csum_blank so test both flags.
-+		 */
-+		if (txp->flags & (NETTXF_data_validated|NETTXF_csum_blank)) {
-+			skb->ip_summed = CHECKSUM_UNNECESSARY;
-+			skb->proto_data_valid = 1;
-+		} else {
-+			skb->ip_summed = CHECKSUM_NONE;
-+			skb->proto_data_valid = 0;
++	if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
++		err = -EFAULT;
++		xenbus_dev_fatal(pdev->xdev, err,
++				 "Can't export bridges on the virtual PCI bus");
++		goto out;
++	}
++
++	spin_lock_irqsave(&slot_dev->lock, flags);
++
++	/* Assign to a new slot on the virtual PCI bus */
++	for (bus = 0; bus < PCI_BUS_NBR; bus++)
++		for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++			if (slot_dev->slots[bus][slot] == NULL) {
++				printk(KERN_INFO
++				       "pciback: slot: %s: assign to virtual slot %d, bus %d\n",
++				       pci_name(dev), slot, bus);
++				slot_dev->slots[bus][slot] = dev;
++				goto unlock;
++			}
 +		}
-+		skb->proto_csum_blank = !!(txp->flags & NETTXF_csum_blank);
 +
-+		netbk_fill_frags(skb);
++	err = -ENOMEM;
++	xenbus_dev_fatal(pdev->xdev, err,
++			 "No more space on root virtual PCI bus");
 +
-+		skb->dev      = netif->dev;
-+		skb->protocol = eth_type_trans(skb, skb->dev);
++      unlock:
++	spin_unlock_irqrestore(&slot_dev->lock, flags);
 +
-+		netif->stats.rx_bytes += skb->len;
-+		netif->stats.rx_packets++;
++	/* Publish this device. */
++	if (!err)
++		err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, 0), devid);
 +
-+		netif_rx(skb);
-+		netif->dev->last_rx = jiffies;
-+	}
++      out:
++	return err;
 +}
 +
-+static void netif_idx_release(u16 pending_idx)
++void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
 +{
-+	static DEFINE_SPINLOCK(_lock);
++	int slot, bus;
++	struct slot_dev_data *slot_dev = pdev->pci_dev_data;
++	struct pci_dev *found_dev = NULL;
 +	unsigned long flags;
 +
-+	spin_lock_irqsave(&_lock, flags);
-+	dealloc_ring[MASK_PEND_IDX(dealloc_prod)] = pending_idx;
-+	/* Sync with net_tx_action_dealloc: insert idx /then/ incr producer. */
-+	smp_wmb();
-+	dealloc_prod++;
-+	spin_unlock_irqrestore(&_lock, flags);
++	spin_lock_irqsave(&slot_dev->lock, flags);
 +
-+	tasklet_schedule(&net_tx_tasklet);
-+}
++	for (bus = 0; bus < PCI_BUS_NBR; bus++)
++		for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++			if (slot_dev->slots[bus][slot] == dev) {
++				slot_dev->slots[bus][slot] = NULL;
++				found_dev = dev;
++				goto out;
++			}
++		}
 +
-+static void netif_page_release(struct page *page)
-+{
-+	/* Ready for next use. */
-+	init_page_count(page);
++      out:
++	spin_unlock_irqrestore(&slot_dev->lock, flags);
 +
-+	netif_idx_release(netif_page_index(page));
++	if (found_dev)
++		pcistub_put_pci_dev(found_dev);
 +}
 +
-+irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++int pciback_init_devices(struct pciback_device *pdev)
 +{
-+	netif_t *netif = dev_id;
++	int slot, bus;
++	struct slot_dev_data *slot_dev;
 +
-+	add_to_net_schedule_list_tail(netif);
-+	maybe_schedule_tx_action();
++	slot_dev = kmalloc(sizeof(*slot_dev), GFP_KERNEL);
++	if (!slot_dev)
++		return -ENOMEM;
 +
-+	if (netif_schedulable(netif) && !netbk_queue_full(netif))
-+		netif_wake_queue(netif->dev);
++	spin_lock_init(&slot_dev->lock);
 +
-+	return IRQ_HANDLED;
++	for (bus = 0; bus < PCI_BUS_NBR; bus++)
++		for (slot = 0; slot < PCI_SLOT_MAX; slot++)
++			slot_dev->slots[bus][slot] = NULL;
++
++	pdev->pci_dev_data = slot_dev;
++
++	return 0;
 +}
 +
-+static void make_tx_response(netif_t *netif, 
-+			     netif_tx_request_t *txp,
-+			     s8       st)
++int pciback_publish_pci_roots(struct pciback_device *pdev,
++			      publish_pci_root_cb publish_cb)
 +{
-+	RING_IDX i = netif->tx.rsp_prod_pvt;
-+	netif_tx_response_t *resp;
-+	int notify;
-+
-+	resp = RING_GET_RESPONSE(&netif->tx, i);
-+	resp->id     = txp->id;
-+	resp->status = st;
++	/* The Virtual PCI bus has only one root */
++	return publish_cb(pdev, 0, 0);
++}
 +
-+	if (txp->flags & NETTXF_extra_info)
-+		RING_GET_RESPONSE(&netif->tx, ++i)->status = NETIF_RSP_NULL;
++void pciback_release_devices(struct pciback_device *pdev)
++{
++	int slot, bus;
++	struct slot_dev_data *slot_dev = pdev->pci_dev_data;
++	struct pci_dev *dev;
 +
-+	netif->tx.rsp_prod_pvt = ++i;
-+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->tx, notify);
-+	if (notify)
-+		notify_remote_via_irq(netif->irq);
++	for (bus = 0; bus < PCI_BUS_NBR; bus++)
++		for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++			dev = slot_dev->slots[bus][slot];
++			if (dev != NULL)
++				pcistub_put_pci_dev(dev);
++		}
 +
-+#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
-+	if (i == netif->tx.req_cons) {
-+		int more_to_do;
-+		RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
-+		if (more_to_do)
-+			add_to_net_schedule_list_tail(netif);
-+	}
-+#endif
++	kfree(slot_dev);
++	pdev->pci_dev_data = NULL;
 +}
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/pciback/vpci.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/pciback/vpci.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,212 @@
++/*
++ * PCI Backend - Provides a Virtual PCI bus (with real devices)
++ *               to the frontend
++ *
++ *   Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
++ */
 +
-+static netif_rx_response_t *make_rx_response(netif_t *netif, 
-+					     u16      id, 
-+					     s8       st,
-+					     u16      offset,
-+					     u16      size,
-+					     u16      flags)
-+{
-+	RING_IDX i = netif->rx.rsp_prod_pvt;
-+	netif_rx_response_t *resp;
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include "pciback.h"
 +
-+	resp = RING_GET_RESPONSE(&netif->rx, i);
-+	resp->offset     = offset;
-+	resp->flags      = flags;
-+	resp->id         = id;
-+	resp->status     = (s16)size;
-+	if (st < 0)
-+		resp->status = (s16)st;
++#define PCI_SLOT_MAX 32
 +
-+	netif->rx.rsp_prod_pvt = ++i;
++struct vpci_dev_data {
++	/* Access to dev_list must be protected by lock */
++	struct list_head dev_list[PCI_SLOT_MAX];
++	spinlock_t lock;
++};
 +
-+	return resp;
++static inline struct list_head *list_first(struct list_head *head)
++{
++	return head->next;
 +}
 +
-+#ifdef NETBE_DEBUG_INTERRUPT
-+static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
++struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
++				    unsigned int domain, unsigned int bus,
++				    unsigned int devfn)
 +{
-+	struct list_head *ent;
-+	netif_t *netif;
-+	int i = 0;
++	struct pci_dev_entry *entry;
++	struct pci_dev *dev = NULL;
++	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
++	unsigned long flags;
 +
-+	printk(KERN_ALERT "netif_schedule_list:\n");
-+	spin_lock_irq(&net_schedule_list_lock);
++	if (domain != 0 || bus != 0)
++		return NULL;
 +
-+	list_for_each (ent, &net_schedule_list) {
-+		netif = list_entry(ent, netif_t, list);
-+		printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
-+		       "rx_resp_prod=%08x\n",
-+		       i, netif->rx.req_cons, netif->rx.rsp_prod_pvt);
-+		printk(KERN_ALERT "   tx_req_cons=%08x tx_resp_prod=%08x)\n",
-+		       netif->tx.req_cons, netif->tx.rsp_prod_pvt);
-+		printk(KERN_ALERT "   shared(rx_req_prod=%08x "
-+		       "rx_resp_prod=%08x\n",
-+		       netif->rx.sring->req_prod, netif->rx.sring->rsp_prod);
-+		printk(KERN_ALERT "   rx_event=%08x tx_req_prod=%08x\n",
-+		       netif->rx.sring->rsp_event, netif->tx.sring->req_prod);
-+		printk(KERN_ALERT "   tx_resp_prod=%08x, tx_event=%08x)\n",
-+		       netif->tx.sring->rsp_prod, netif->tx.sring->rsp_event);
-+		i++;
++	if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
++		spin_lock_irqsave(&vpci_dev->lock, flags);
++
++		list_for_each_entry(entry,
++				    &vpci_dev->dev_list[PCI_SLOT(devfn)],
++				    list) {
++			if (PCI_FUNC(entry->dev->devfn) == PCI_FUNC(devfn)) {
++				dev = entry->dev;
++				break;
++			}
++		}
++
++		spin_unlock_irqrestore(&vpci_dev->lock, flags);
 +	}
++	return dev;
++}
 +
-+	spin_unlock_irq(&net_schedule_list_lock);
-+	printk(KERN_ALERT " ** End of netif_schedule_list **\n");
++static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
++{
++	if (pci_domain_nr(l->bus) == pci_domain_nr(r->bus)
++	    && l->bus == r->bus && PCI_SLOT(l->devfn) == PCI_SLOT(r->devfn))
++		return 1;
 +
-+	return IRQ_HANDLED;
++	return 0;
 +}
-+#endif
 +
-+static int __init netback_init(void)
++int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
++			int devid, publish_pci_dev_cb publish_cb)
 +{
-+	int i;
-+	struct page *page;
++	int err = 0, slot, func;
++	struct pci_dev_entry *t, *dev_entry;
++	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
++	unsigned long flags;
 +
-+	if (!is_running_on_xen())
-+		return -ENODEV;
++	if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
++		err = -EFAULT;
++		xenbus_dev_fatal(pdev->xdev, err,
++				 "Can't export bridges on the virtual PCI bus");
++		goto out;
++	}
 +
-+	/* We can increase reservation by this much in net_rx_action(). */
-+	balloon_update_driver_allowance(NET_RX_RING_SIZE);
++	dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
++	if (!dev_entry) {
++		err = -ENOMEM;
++		xenbus_dev_fatal(pdev->xdev, err,
++				 "Error adding entry to virtual PCI bus");
++		goto out;
++	}
 +
-+	skb_queue_head_init(&rx_queue);
-+	skb_queue_head_init(&tx_queue);
++	dev_entry->dev = dev;
 +
-+	init_timer(&net_timer);
-+	net_timer.data = 0;
-+	net_timer.function = net_alarm;
++	spin_lock_irqsave(&vpci_dev->lock, flags);
 +
-+	mmap_pages = alloc_empty_pages_and_pagevec(MAX_PENDING_REQS);
-+	if (mmap_pages == NULL) {
-+		printk("%s: out of memory\n", __FUNCTION__);
-+		return -ENOMEM;
-+	}
++	/* Keep multi-function devices together on the virtual PCI bus */
++	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++		if (!list_empty(&vpci_dev->dev_list[slot])) {
++			t = list_entry(list_first(&vpci_dev->dev_list[slot]),
++				       struct pci_dev_entry, list);
 +
-+	for (i = 0; i < MAX_PENDING_REQS; i++) {
-+		page = mmap_pages[i];
-+		SetPageForeign(page, netif_page_release);
-+		netif_page_index(page) = i;
++			if (match_slot(dev, t->dev)) {
++				pr_info("pciback: vpci: %s: "
++					"assign to virtual slot %d func %d\n",
++					pci_name(dev), slot,
++					PCI_FUNC(dev->devfn));
++				list_add_tail(&dev_entry->list,
++					      &vpci_dev->dev_list[slot]);
++				func = PCI_FUNC(dev->devfn);
++				goto unlock;
++			}
++		}
 +	}
 +
-+	pending_cons = 0;
-+	pending_prod = MAX_PENDING_REQS;
-+	for (i = 0; i < MAX_PENDING_REQS; i++)
-+		pending_ring[i] = i;
++	/* Assign to a new slot on the virtual PCI bus */
++	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++		if (list_empty(&vpci_dev->dev_list[slot])) {
++			printk(KERN_INFO
++			       "pciback: vpci: %s: assign to virtual slot %d\n",
++			       pci_name(dev), slot);
++			list_add_tail(&dev_entry->list,
++				      &vpci_dev->dev_list[slot]);
++			func = PCI_FUNC(dev->devfn);
++			goto unlock;
++		}
++	}
 +
-+	spin_lock_init(&net_schedule_list_lock);
-+	INIT_LIST_HEAD(&net_schedule_list);
++	err = -ENOMEM;
++	xenbus_dev_fatal(pdev->xdev, err,
++			 "No more space on root virtual PCI bus");
 +
-+	netif_xenbus_init();
++      unlock:
++	spin_unlock_irqrestore(&vpci_dev->lock, flags);
 +
-+#ifdef NETBE_DEBUG_INTERRUPT
-+	(void)bind_virq_to_irqhandler(VIRQ_DEBUG,
-+				      0,
-+				      netif_be_dbg,
-+				      SA_SHIRQ, 
-+				      "net-be-dbg",
-+				      &netif_be_dbg);
-+#endif
++	/* Publish this device. */
++	if (!err)
++		err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, func), devid);
 +
-+	return 0;
++      out:
++	return err;
 +}
 +
-+module_init(netback_init);
++void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
++{
++	int slot;
++	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
++	struct pci_dev *found_dev = NULL;
++	unsigned long flags;
 +
-+MODULE_LICENSE("Dual BSD/GPL");
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/netback/xenbus.c tmp-linux-2.6-xen.patch/drivers/xen/netback/xenbus.c
---- pristine-linux-2.6.18.2/drivers/xen/netback/xenbus.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/netback/xenbus.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,448 @@
-+/*  Xenbus code for netif backend
-+    Copyright (C) 2005 Rusty Russell <rusty at rustcorp.com.au>
-+    Copyright (C) 2005 XenSource Ltd
++	spin_lock_irqsave(&vpci_dev->lock, flags);
 +
-+    This program is free software; you can redistribute it and/or modify
-+    it under the terms of the GNU General Public License as published by
-+    the Free Software Foundation; either version 2 of the License, or
-+    (at your option) any later version.
++	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++		struct pci_dev_entry *e, *tmp;
++		list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
++					 list) {
++			if (e->dev == dev) {
++				list_del(&e->list);
++				found_dev = e->dev;
++				kfree(e);
++				goto out;
++			}
++		}
++	}
 +
-+    This program is distributed in the hope that it will be useful,
-+    but WITHOUT ANY WARRANTY; without even the implied warranty of
-+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+    GNU General Public License for more details.
++      out:
++	spin_unlock_irqrestore(&vpci_dev->lock, flags);
 +
-+    You should have received a copy of the GNU General Public License
-+    along with this program; if not, write to the Free Software
-+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-+*/
++	if (found_dev)
++		pcistub_put_pci_dev(found_dev);
++}
 +
-+#include <stdarg.h>
-+#include <linux/module.h>
-+#include <xen/xenbus.h>
-+#include "common.h"
++int pciback_init_devices(struct pciback_device *pdev)
++{
++	int slot;
++	struct vpci_dev_data *vpci_dev;
 +
-+#if 0
-+#undef DPRINTK
-+#define DPRINTK(fmt, args...) \
-+    printk("netback/xenbus (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
-+#endif
++	vpci_dev = kmalloc(sizeof(*vpci_dev), GFP_KERNEL);
++	if (!vpci_dev)
++		return -ENOMEM;
 +
-+struct backend_info {
-+	struct xenbus_device *dev;
-+	netif_t *netif;
-+	enum xenbus_state frontend_state;
-+};
++	spin_lock_init(&vpci_dev->lock);
 +
-+static int connect_rings(struct backend_info *);
-+static void connect(struct backend_info *);
-+static void backend_create_netif(struct backend_info *be);
++	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++		INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);
++	}
 +
-+static int netback_remove(struct xenbus_device *dev)
-+{
-+	struct backend_info *be = dev->dev.driver_data;
++	pdev->pci_dev_data = vpci_dev;
 +
-+	if (be->netif) {
-+		netif_disconnect(be->netif);
-+		be->netif = NULL;
-+	}
-+	kfree(be);
-+	dev->dev.driver_data = NULL;
 +	return 0;
 +}
 +
++int pciback_publish_pci_roots(struct pciback_device *pdev,
++			      publish_pci_root_cb publish_cb)
++{
++	/* The Virtual PCI bus has only one root */
++	return publish_cb(pdev, 0, 0);
++}
 +
-+/**
-+ * Entry point to this code when a new device is created.  Allocate the basic
-+ * structures and switch to InitWait.
-+ */
-+static int netback_probe(struct xenbus_device *dev,
-+			 const struct xenbus_device_id *id)
++void pciback_release_devices(struct pciback_device *pdev)
 +{
-+	const char *message;
-+	struct xenbus_transaction xbt;
-+	int err;
-+	struct backend_info *be = kzalloc(sizeof(struct backend_info),
-+					  GFP_KERNEL);
-+	if (!be) {
-+		xenbus_dev_fatal(dev, -ENOMEM,
-+				 "allocating backend structure");
-+		return -ENOMEM;
++	int slot;
++	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
++
++	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++		struct pci_dev_entry *e, *tmp;
++		list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
++					 list) {
++			list_del(&e->list);
++			pcistub_put_pci_dev(e->dev);
++			kfree(e);
++		}
 +	}
 +
-+	be->dev = dev;
-+	dev->dev.driver_data = be;
++	kfree(vpci_dev);
++	pdev->pci_dev_data = NULL;
++}
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/pciback/xenbus.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/pciback/xenbus.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,704 @@
++/*
++ * PCI Backend Xenbus Setup - handles setup with frontend and xend
++ *
++ *   Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/vmalloc.h>
++#include <xen/xenbus.h>
++#include <xen/evtchn.h>
++#include "pciback.h"
 +
-+	do {
-+		err = xenbus_transaction_start(&xbt);
-+		if (err) {
-+			xenbus_dev_fatal(dev, err, "starting transaction");
-+			goto fail;
-+		}
++#define INVALID_EVTCHN_IRQ  (-1)
 +
-+		err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
-+		if (err) {
-+			message = "writing feature-sg";
-+			goto abort_transaction;
-+		}
++static struct pciback_device *alloc_pdev(struct xenbus_device *xdev)
++{
++	struct pciback_device *pdev;
 +
-+		err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
-+				    "%d", 1);
-+		if (err) {
-+			message = "writing feature-gso-tcpv4";
-+			goto abort_transaction;
-+		}
++	pdev = kzalloc(sizeof(struct pciback_device), GFP_KERNEL);
++	if (pdev == NULL)
++		goto out;
++	dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev);
 +
-+		/* We support rx-copy path. */
-+		err = xenbus_printf(xbt, dev->nodename,
-+				    "feature-rx-copy", "%d", 1);
-+		if (err) {
-+			message = "writing feature-rx-copy";
-+			goto abort_transaction;
-+		}
++	pdev->xdev = xdev;
++	xdev->dev.driver_data = pdev;
 +
-+		/*
-+		 * We don't support rx-flip path (except old guests who don't
-+		 * grok this feature flag).
-+		 */
-+		err = xenbus_printf(xbt, dev->nodename,
-+				    "feature-rx-flip", "%d", 0);
-+		if (err) {
-+			message = "writing feature-rx-flip";
-+			goto abort_transaction;
-+		}
++	spin_lock_init(&pdev->dev_lock);
 +
-+		err = xenbus_transaction_end(xbt, 0);
-+	} while (err == -EAGAIN);
++	pdev->sh_area = NULL;
++	pdev->sh_info = NULL;
++	pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
++	pdev->be_watching = 0;
 +
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "completing transaction");
-+		goto fail;
++	INIT_WORK(&pdev->op_work, pciback_do_op, pdev);
++
++	if (pciback_init_devices(pdev)) {
++		kfree(pdev);
++		pdev = NULL;
 +	}
++      out:
++	return pdev;
++}
 +
-+	err = xenbus_switch_state(dev, XenbusStateInitWait);
-+	if (err)
-+		goto fail;
++static void pciback_disconnect(struct pciback_device *pdev)
++{
++	spin_lock(&pdev->dev_lock);
 +
-+	/* This kicks hotplug scripts, so do it immediately. */
-+	backend_create_netif(be);
++	/* Ensure the guest can't trigger our handler before removing devices */
++	if (pdev->evtchn_irq != INVALID_EVTCHN_IRQ) {
++		unbind_from_irqhandler(pdev->evtchn_irq, pdev);
++		pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
++	}
 +
-+	return 0;
++	/* If the driver domain started an op, make sure we complete it or
++	 * delete it before releasing the shared memory */
++	cancel_delayed_work(&pdev->op_work);
++	flush_scheduled_work();
 +
-+abort_transaction:
-+	xenbus_transaction_end(xbt, 1);
-+	xenbus_dev_fatal(dev, err, "%s", message);
-+fail:
-+	DPRINTK("failed");
-+	netback_remove(dev);
-+	return err;
-+}
++	if (pdev->sh_info != NULL) {
++		xenbus_unmap_ring_vfree(pdev->xdev, pdev->sh_area);
++		pdev->sh_info = NULL;
++	}
 +
++	spin_unlock(&pdev->dev_lock);
++}
 +
-+/**
-+ * Handle the creation of the hotplug script environment.  We add the script
-+ * and vif variables to the environment, for the benefit of the vif-* hotplug
-+ * scripts.
-+ */
-+static int netback_uevent(struct xenbus_device *xdev, char **envp,
-+			  int num_envp, char *buffer, int buffer_size)
++static void free_pdev(struct pciback_device *pdev)
 +{
-+	struct backend_info *be = xdev->dev.driver_data;
-+	netif_t *netif = be->netif;
-+	int i = 0, length = 0;
-+	char *val;
-+
-+	DPRINTK("netback_uevent");
++	if (pdev->be_watching)
++		unregister_xenbus_watch(&pdev->be_watch);
 +
-+	val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
-+	if (IS_ERR(val)) {
-+		int err = PTR_ERR(val);
-+		xenbus_dev_fatal(xdev, err, "reading script");
-+		return err;
-+	}
-+	else {
-+		add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
-+			       &length, "script=%s", val);
-+		kfree(val);
-+	}
++	pciback_disconnect(pdev);
 +
-+	add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
-+		       "vif=%s", netif->dev->name);
++	pciback_release_devices(pdev);
 +
-+	envp[i] = NULL;
++	pdev->xdev->dev.driver_data = NULL;
++	pdev->xdev = NULL;
 +
-+	return 0;
++	kfree(pdev);
 +}
 +
-+
-+static void backend_create_netif(struct backend_info *be)
++static int pciback_do_attach(struct pciback_device *pdev, int gnt_ref,
++			     int remote_evtchn)
 +{
-+	int err;
-+	long handle;
-+	struct xenbus_device *dev = be->dev;
++	int err = 0;
++	struct vm_struct *area;
 +
-+	if (be->netif != NULL)
-+		return;
++	dev_dbg(&pdev->xdev->dev,
++		"Attaching to frontend resources - gnt_ref=%d evtchn=%d\n",
++		gnt_ref, remote_evtchn);
 +
-+	err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
-+	if (err != 1) {
-+		xenbus_dev_fatal(dev, err, "reading handle");
-+		return;
++	area = xenbus_map_ring_valloc(pdev->xdev, gnt_ref);
++	if (IS_ERR(area)) {
++		err = PTR_ERR(area);
++		goto out;
 +	}
++	pdev->sh_area = area;
++	pdev->sh_info = area->addr;
 +
-+	be->netif = netif_alloc(dev->otherend_id, handle);
-+	if (IS_ERR(be->netif)) {
-+		err = PTR_ERR(be->netif);
-+		be->netif = NULL;
-+		xenbus_dev_fatal(dev, err, "creating interface");
-+		return;
++	err = bind_interdomain_evtchn_to_irqhandler(
++		pdev->xdev->otherend_id, remote_evtchn, pciback_handle_event,
++		SA_SAMPLE_RANDOM, "pciback", pdev);
++	if (err < 0) {
++		xenbus_dev_fatal(pdev->xdev, err,
++				 "Error binding event channel to IRQ");
++		goto out;
 +	}
++	pdev->evtchn_irq = err;
++	err = 0;
 +
-+	kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
++	dev_dbg(&pdev->xdev->dev, "Attached!\n");
++      out:
++	return err;
 +}
 +
-+
-+/**
-+ * Callback received when the frontend's state changes.
-+ */
-+static void frontend_changed(struct xenbus_device *dev,
-+			     enum xenbus_state frontend_state)
++static int pciback_attach(struct pciback_device *pdev)
 +{
-+	struct backend_info *be = dev->dev.driver_data;
-+
-+	DPRINTK("%s", xenbus_strstate(frontend_state));
-+
-+	be->frontend_state = frontend_state;
++	int err = 0;
++	int gnt_ref, remote_evtchn;
++	char *magic = NULL;
 +
-+	switch (frontend_state) {
-+	case XenbusStateInitialising:
-+		if (dev->state == XenbusStateClosed) {
-+			printk(KERN_INFO "%s: %s: prepare for reconnect\n",
-+			       __FUNCTION__, dev->nodename);
-+			if (be->netif) {
-+				netif_disconnect(be->netif);
-+				be->netif = NULL;
-+			}
-+			xenbus_switch_state(dev, XenbusStateInitWait);
-+		}
-+		break;
++	spin_lock(&pdev->dev_lock);
 +
-+	case XenbusStateInitialised:
-+		break;
++	/* Make sure we only do this setup once */
++	if (xenbus_read_driver_state(pdev->xdev->nodename) !=
++	    XenbusStateInitialised)
++		goto out;
 +
-+	case XenbusStateConnected:
-+		backend_create_netif(be);
-+		if (be->netif)
-+			connect(be);
-+		break;
++	/* Wait for the frontend to announce that it has published
++	 * its configuration. */
++	if (xenbus_read_driver_state(pdev->xdev->otherend) !=
++	    XenbusStateInitialised)
++		goto out;
 +
-+	case XenbusStateClosing:
-+		xenbus_switch_state(dev, XenbusStateClosing);
-+		break;
++	dev_dbg(&pdev->xdev->dev, "Reading frontend config\n");
 +
-+	case XenbusStateClosed:
-+		xenbus_switch_state(dev, XenbusStateClosed);
-+		if (xenbus_dev_is_online(dev))
-+			break;
-+		/* fall through if not online */
-+	case XenbusStateUnknown:
-+		if (be->netif != NULL)
-+			kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
-+		device_unregister(&dev->dev);
-+		break;
++	err = xenbus_gather(XBT_NIL, pdev->xdev->otherend,
++			    "pci-op-ref", "%u", &gnt_ref,
++			    "event-channel", "%u", &remote_evtchn,
++			    "magic", NULL, &magic, NULL);
++	if (err) {
++		/* If configuration didn't get read correctly, wait longer */
++		xenbus_dev_fatal(pdev->xdev, err,
++				 "Error reading configuration from frontend");
++		goto out;
++	}
 +
-+	default:
-+		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
-+				 frontend_state);
-+		break;
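++	/* The magic string doubles as a protocol version check. */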
++	if (magic == NULL || strcmp(magic, XEN_PCI_MAGIC) != 0) {
++		err = -EFAULT;
++		xenbus_dev_fatal(pdev->xdev, err,
++				 "version mismatch (%s/%s) with pcifront - "
++				 "halting pciback",
++				 magic, XEN_PCI_MAGIC);
++		goto out;
++	}
-+}
 +
++	err = pciback_do_attach(pdev, gnt_ref, remote_evtchn);
++	if (err)
++		goto out;
 +
-+static void xen_net_read_rate(struct xenbus_device *dev,
-+			      unsigned long *bytes, unsigned long *usec)
-+{
-+	char *s, *e;
-+	unsigned long b, u;
-+	char *ratestr;
++	dev_dbg(&pdev->xdev->dev, "Connecting...\n");
 +
-+	/* Default to unlimited bandwidth. */
-+	*bytes = ~0UL;
-+	*usec = 0;
++	err = xenbus_switch_state(pdev->xdev, XenbusStateConnected);
++	if (err)
++		xenbus_dev_fatal(pdev->xdev, err,
++				 "Error switching to connected state!");
 +
-+	ratestr = xenbus_read(XBT_NIL, dev->nodename, "rate", NULL);
-+	if (IS_ERR(ratestr))
-+		return;
++	dev_dbg(&pdev->xdev->dev, "Connected? %d\n", err);
++      out:
++	spin_unlock(&pdev->dev_lock);
 +
-+	s = ratestr;
-+	b = simple_strtoul(s, &e, 10);
-+	if ((s == e) || (*e != ','))
-+		goto fail;
++	kfree(magic);
 +
-+	s = e + 1;
-+	u = simple_strtoul(s, &e, 10);
-+	if ((s == e) || (*e != '\0'))
-+		goto fail;
++	return err;
++}
 +
-+	*bytes = b;
-+	*usec = u;
++static int pciback_publish_pci_dev(struct pciback_device *pdev,
++				   unsigned int domain, unsigned int bus,
++				   unsigned int devfn, unsigned int devid)
++{
++	int err;
++	int len;
++	char str[64];
 +
-+	kfree(ratestr);
-+	return;
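++	/* Expose the device to the frontend as xenstore node vdev-<devid>. */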
++	len = snprintf(str, sizeof(str), "vdev-%d", devid);
++	if (unlikely(len >= (sizeof(str) - 1))) {
++		err = -ENOMEM;
++		goto out;
++	}
 +
-+ fail:
-+	WPRINTK("Failed to parse network rate limit. Traffic unlimited.\n");
-+	kfree(ratestr);
++	err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
++			    "%04x:%02x:%02x.%02x", domain, bus,
++			    PCI_SLOT(devfn), PCI_FUNC(devfn));
++
++      out:
++	return err;
 +}
 +
-+static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
++static int pciback_export_device(struct pciback_device *pdev,
++				 int domain, int bus, int slot, int func,
++				 int devid)
 +{
-+	char *s, *e, *macstr;
-+	int i;
++	struct pci_dev *dev;
++	int err = 0;
 +
-+	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
-+	if (IS_ERR(macstr))
-+		return PTR_ERR(macstr);
++	dev_dbg(&pdev->xdev->dev, "exporting dom %x bus %x slot %x func %x\n",
++		domain, bus, slot, func);
 +
-+	for (i = 0; i < ETH_ALEN; i++) {
-+		mac[i] = simple_strtoul(s, &e, 16);
-+		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
-+			kfree(macstr);
-+			return -ENOENT;
-+		}
-+		s = e+1;
++	dev = pcistub_get_pci_dev_by_slot(pdev, domain, bus, slot, func);
++	if (!dev) {
++		err = -EINVAL;
++		xenbus_dev_fatal(pdev->xdev, err,
++				 "Couldn't locate PCI device "
++				 "(%04x:%02x:%02x.%01x)! "
++				 "perhaps already in-use?",
++				 domain, bus, slot, func);
++		goto out;
 +	}
 +
-+	kfree(macstr);
-+	return 0;
++	err = pciback_add_pci_dev(pdev, dev, devid, pciback_publish_pci_dev);
++	if (err)
++		goto out;
++
++	/* TODO: It'd be nice to export a bridge and have all of its children
++	 * get exported with it. This may be best done in xend (which will
++	 * have to calculate resource usage anyway) but we probably want to
++	 * put something in here to ensure that if a bridge gets given to a
++	 * driver domain, that all devices under that bridge are not given
++	 * to other driver domains (as he who controls the bridge can disable
++	 * it and stop the other devices from working).
++	 */
++      out:
++	return err;
 +}
 +
-+static void connect(struct backend_info *be)
++static int pciback_remove_device(struct pciback_device *pdev,
++				 int domain, int bus, int slot, int func)
 +{
-+	int err;
-+	struct xenbus_device *dev = be->dev;
++	int err = 0;
++	struct pci_dev *dev;
 +
-+	err = connect_rings(be);
-+	if (err)
-+		return;
++	dev_dbg(&pdev->xdev->dev, "removing dom %x bus %x slot %x func %x\n",
++		domain, bus, slot, func);
 +
-+	err = xen_net_read_mac(dev, be->netif->fe_dev_addr);
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
-+		return;
++	dev = pciback_get_pci_dev(pdev, domain, bus, PCI_DEVFN(slot, func));
++	if (!dev) {
++		err = -EINVAL;
++		dev_dbg(&pdev->xdev->dev, "Couldn't locate PCI device "
++			"(%04x:%02x:%02x.%01x)! not owned by this domain\n",
++			domain, bus, slot, func);
++		goto out;
 +	}
 +
-+	xen_net_read_rate(dev, &be->netif->credit_bytes,
-+			  &be->netif->credit_usec);
-+	be->netif->remaining_credit = be->netif->credit_bytes;
-+
-+	xenbus_switch_state(dev, XenbusStateConnected);
-+
-+	netif_wake_queue(be->netif->dev);
++	pciback_release_pci_dev(pdev, dev);
++
++      out:
++	return err;
 +}
 +
-+
-+static int connect_rings(struct backend_info *be)
++static int pciback_publish_pci_root(struct pciback_device *pdev,
++				    unsigned int domain, unsigned int bus)
 +{
-+	struct xenbus_device *dev = be->dev;
-+	unsigned long tx_ring_ref, rx_ring_ref;
-+	unsigned int evtchn, rx_copy;
-+	int err;
-+	int val;
-+
-+	DPRINTK("");
++	unsigned int d, b;
++	int i, root_num, len, err;
++	char str[64];
 +
-+	err = xenbus_gather(XBT_NIL, dev->otherend,
-+			    "tx-ring-ref", "%lu", &tx_ring_ref,
-+			    "rx-ring-ref", "%lu", &rx_ring_ref,
-+			    "event-channel", "%u", &evtchn, NULL);
-+	if (err) {
-+		xenbus_dev_fatal(dev, err,
-+				 "reading %s/ring-ref and event-channel",
-+				 dev->otherend);
-+		return err;
-+	}
++	dev_dbg(&pdev->xdev->dev, "Publishing pci roots\n");
 +
-+	err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
-+			   &rx_copy);
-+	if (err == -ENOENT) {
-+		err = 0;
-+		rx_copy = 0;
-+	}
-+	if (err < 0) {
-+		xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
-+				 dev->otherend);
-+		return err;
-+	}
-+	be->netif->copying_receiver = !!rx_copy;
++	err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
++			   "root_num", "%d", &root_num);
++	if (err == 0 || err == -ENOENT)
++		root_num = 0;
++	else if (err < 0)
++		goto out;
 +
-+	if (be->netif->dev->tx_queue_len != 0) {
-+		if (xenbus_scanf(XBT_NIL, dev->otherend,
-+				 "feature-rx-notify", "%d", &val) < 0)
-+			val = 0;
-+		if (val)
-+			be->netif->can_queue = 1;
-+		else
-+			/* Must be non-zero for pfifo_fast to work. */
-+			be->netif->dev->tx_queue_len = 1;
-+	}
++	/* Verify that we haven't already published this pci root */
++	for (i = 0; i < root_num; i++) {
++		len = snprintf(str, sizeof(str), "root-%d", i);
++		if (unlikely(len >= (sizeof(str) - 1))) {
++			err = -ENOMEM;
++			goto out;
++		}
 +
-+	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", "%d", &val) < 0)
-+		val = 0;
-+	if (val) {
-+		be->netif->features |= NETIF_F_SG;
-+		be->netif->dev->features |= NETIF_F_SG;
-+	}
++		err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
++				   str, "%x:%x", &d, &b);
++		if (err < 0)
++			goto out;
++		if (err != 2) {
++			err = -EINVAL;
++			goto out;
++		}
 +
-+	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", "%d",
-+			 &val) < 0)
-+		val = 0;
-+	if (val) {
-+		be->netif->features |= NETIF_F_TSO;
-+		be->netif->dev->features |= NETIF_F_TSO;
++		if (d == domain && b == bus) {
++			err = 0;
++			goto out;
++		}
 +	}
 +
-+	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
-+			 "%d", &val) < 0)
-+		val = 0;
-+	if (val) {
-+		be->netif->features &= ~NETIF_F_IP_CSUM;
-+		be->netif->dev->features &= ~NETIF_F_IP_CSUM;
++	len = snprintf(str, sizeof(str), "root-%d", root_num);
++	if (unlikely(len >= (sizeof(str) - 1))) {
++		err = -ENOMEM;
++		goto out;
 +	}
 +
-+	/* Map the shared frame, irq etc. */
-+	err = netif_map(be->netif, tx_ring_ref, rx_ring_ref, evtchn);
-+	if (err) {
-+		xenbus_dev_fatal(dev, err,
-+				 "mapping shared-frames %lu/%lu port %u",
-+				 tx_ring_ref, rx_ring_ref, evtchn);
-+		return err;
-+	}
-+	return 0;
-+}
++	dev_dbg(&pdev->xdev->dev, "writing root %d at %04x:%02x\n",
++		root_num, domain, bus);
 +
++	err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
++			    "%04x:%02x", domain, bus);
++	if (err)
++		goto out;
 +
-+/* ** Driver Registration ** */
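++	/* Bump the advertised root count so the frontend sees the new bus. */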
++	err = xenbus_printf(XBT_NIL, pdev->xdev->nodename,
++			    "root_num", "%d", (root_num + 1));
 +
++      out:
++	return err;
++}
 +
-+static struct xenbus_device_id netback_ids[] = {
-+	{ "vif" },
-+	{ "" }
-+};
++static int pciback_reconfigure(struct pciback_device *pdev)
++{
++	int err = 0;
++	int num_devs;
++	int domain, bus, slot, func;
++	int substate;
++	int i, len;
++	char state_str[64];
++	char dev_str[64];
 +
++	spin_lock(&pdev->dev_lock);
 +
-+static struct xenbus_driver netback = {
-+	.name = "vif",
-+	.owner = THIS_MODULE,
-+	.ids = netback_ids,
-+	.probe = netback_probe,
-+	.remove = netback_remove,
-+	.uevent = netback_uevent,
-+	.otherend_changed = frontend_changed,
-+};
++	dev_dbg(&pdev->xdev->dev, "Reconfiguring device ...\n");
 +
++	/* Make sure we only reconfigure once */
++	if (xenbus_read_driver_state(pdev->xdev->nodename) !=
++	    XenbusStateReconfiguring)
++		goto out;
 +
-+void netif_xenbus_init(void)
-+{
-+	xenbus_register_backend(&netback);
-+}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/netfront/Makefile tmp-linux-2.6-xen.patch/drivers/xen/netfront/Makefile
---- pristine-linux-2.6.18.2/drivers/xen/netfront/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/netfront/Makefile	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,4 @@
++	err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
++			   &num_devs);
++	if (err != 1) {
++		if (err >= 0)
++			err = -EINVAL;
++		xenbus_dev_fatal(pdev->xdev, err,
++				 "Error reading number of devices");
++		goto out;
++	}
 +
-+obj-$(CONFIG_XEN_NETDEV_FRONTEND)	:= xennet.o
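++	/* Walk every dev-%d entry; its state-%d node says whether the
++	 * device should be attached or detached. */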
++	for (i = 0; i < num_devs; i++) {
++		len = snprintf(state_str, sizeof(state_str), "state-%d", i);
++		if (unlikely(len >= (sizeof(state_str) - 1))) {
++			err = -ENOMEM;
++			xenbus_dev_fatal(pdev->xdev, err,
++					 "String overflow while reading "
++					 "configuration");
++			goto out;
++		}
++		err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, state_str,
++				   "%d", &substate);
++		if (err != 1)
++			substate = XenbusStateUnknown;
++
++		switch (substate) {
++		case XenbusStateInitialising:
++			dev_dbg(&pdev->xdev->dev, "Attaching dev-%d ...\n", i);
 +
-+xennet-objs := netfront.o
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/netfront/netfront.c tmp-linux-2.6-xen.patch/drivers/xen/netfront/netfront.c
---- pristine-linux-2.6.18.2/drivers/xen/netfront/netfront.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/netfront/netfront.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,2172 @@
-+/******************************************************************************
-+ * Virtual network driver for conversing with remote driver backends.
-+ *
-+ * Copyright (c) 2002-2005, K A Fraser
-+ * Copyright (c) 2005, XenSource Ltd
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
++			len = snprintf(dev_str, sizeof(dev_str), "dev-%d", i);
++			if (unlikely(len >= (sizeof(dev_str) - 1))) {
++				err = -ENOMEM;
++				xenbus_dev_fatal(pdev->xdev, err,
++						 "String overflow while "
++						 "reading configuration");
++				goto out;
++			}
++			err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
++					   dev_str, "%x:%x:%x.%x",
++					   &domain, &bus, &slot, &func);
++			if (err < 0) {
++				xenbus_dev_fatal(pdev->xdev, err,
++						 "Error reading device "
++						 "configuration");
++				goto out;
++			}
++			if (err != 4) {
++				err = -EINVAL;
++				xenbus_dev_fatal(pdev->xdev, err,
++						 "Error parsing pci device "
++						 "configuration");
++				goto out;
++			}
++
++			err = pciback_export_device(pdev, domain, bus, slot,
++						    func, i);
++			if (err)
++				goto out;
 +
-+#include <linux/module.h>
-+#include <linux/version.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/netdevice.h>
-+#include <linux/inetdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/skbuff.h>
-+#include <linux/init.h>
-+#include <linux/bitops.h>
-+#include <linux/ethtool.h>
-+#include <linux/in.h>
-+#include <linux/if_ether.h>
-+#include <linux/io.h>
-+#include <linux/moduleparam.h>
-+#include <net/sock.h>
-+#include <net/pkt_sched.h>
-+#include <net/arp.h>
-+#include <net/route.h>
-+#include <asm/uaccess.h>
-+#include <xen/evtchn.h>
-+#include <xen/xenbus.h>
-+#include <xen/interface/io/netif.h>
-+#include <xen/interface/memory.h>
-+#include <xen/balloon.h>
-+#include <asm/page.h>
-+#include <asm/maddr.h>
-+#include <asm/uaccess.h>
-+#include <xen/interface/grant_table.h>
-+#include <xen/gnttab.h>
++			/* Publish pci roots. */
++			err = pciback_publish_pci_roots(pdev,
++					pciback_publish_pci_root);
++			if (err) {
++				xenbus_dev_fatal(pdev->xdev, err,
++						 "Error while publishing PCI "
++						 "root buses for frontend");
++				goto out;
++			}
 +
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
++			err = xenbus_printf(XBT_NIL, pdev->xdev->nodename,
++					    state_str, "%d",
++					    XenbusStateInitialised);
++			if (err) {
++				xenbus_dev_fatal(pdev->xdev, err,
++						 "Error switching substate of "
++						 "dev-%d\n", i);
++				goto out;
++			}
++			break;
 +
-+struct netfront_cb {
-+	struct page *page;
-+	unsigned offset;
-+};
++		case XenbusStateClosing:
++			dev_dbg(&pdev->xdev->dev, "Detaching dev-%d ...\n", i);
 +
-+#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))
++			len = snprintf(dev_str, sizeof(dev_str), "vdev-%d", i);
++			if (unlikely(len >= (sizeof(dev_str) - 1))) {
++				err = -ENOMEM;
++				xenbus_dev_fatal(pdev->xdev, err,
++						 "String overflow while "
++						 "reading configuration");
++				goto out;
++			}
++			err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
++					   dev_str, "%x:%x:%x.%x",
++					   &domain, &bus, &slot, &func);
++			if (err < 0) {
++				xenbus_dev_fatal(pdev->xdev, err,
++						 "Error reading device "
++						 "configuration");
++				goto out;
++			}
++			if (err != 4) {
++				err = -EINVAL;
++				xenbus_dev_fatal(pdev->xdev, err,
++						 "Error parsing pci device "
++						 "configuration");
++				goto out;
++			}
 +
-+/*
-+ * Mutually-exclusive module options to select receive data path:
-+ *  rx_copy : Packets are copied by network backend into local memory
-+ *  rx_flip : Page containing packet data is transferred to our ownership
-+ * For fully-virtualised guests there is no option - copying must be used.
-+ * For paravirtualised guests, flipping is the default.
-+ */
-+#ifdef CONFIG_XEN
-+static int MODPARM_rx_copy = 0;
-+module_param_named(rx_copy, MODPARM_rx_copy, bool, 0);
-+MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)");
-+static int MODPARM_rx_flip = 0;
-+module_param_named(rx_flip, MODPARM_rx_flip, bool, 0);
-+MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)");
-+#else
-+static const int MODPARM_rx_copy = 1;
-+static const int MODPARM_rx_flip = 0;
-+#endif
++			err = pciback_remove_device(pdev, domain, bus, slot,
++						    func);
++			if (err)
++				goto out;
 +
-+#define RX_COPY_THRESHOLD 256
++			/* TODO: If at some point we implement support for pci
++			 * root hot-remove on pcifront side, we'll need to
++			 * remove unnecessary xenstore nodes of pci roots here.
++			 */
 +
-+/* If we don't have GSO, fake things up so that we never try to use it. */
-+#if defined(NETIF_F_GSO)
-+#define HAVE_GSO			1
-+#define HAVE_TSO			1 /* TSO is a subset of GSO */
-+static inline void dev_disable_gso_features(struct net_device *dev)
-+{
-+	/* Turn off all GSO bits except ROBUST. */
-+	dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1;
-+	dev->features |= NETIF_F_GSO_ROBUST;
-+}
-+#elif defined(NETIF_F_TSO)
-+#define HAVE_TSO                       1
++			break;
 +
-+/* Some older kernels cannot cope with incorrect checksums,
-+ * particularly in netfilter. I'm not sure there is 100% correlation
-+ * with the presence of NETIF_F_TSO but it appears to be a good first
-+ * approximiation.
-+ */
-+#define HAVE_NO_CSUM_OFFLOAD           1
++		default:
++			break;
++		}
++	}
 +
-+#define gso_size tso_size
-+#define gso_segs tso_segs
-+static inline void dev_disable_gso_features(struct net_device *dev)
-+{
-+       /* Turn off all TSO bits. */
-+       dev->features &= ~NETIF_F_TSO;
-+}
-+static inline int skb_is_gso(const struct sk_buff *skb)
-+{
-+        return skb_shinfo(skb)->tso_size;
-+}
-+static inline int skb_gso_ok(struct sk_buff *skb, int features)
-+{
-+        return (features & NETIF_F_TSO);
++	err = xenbus_switch_state(pdev->xdev, XenbusStateReconfigured);
++	if (err) {
++		xenbus_dev_fatal(pdev->xdev, err,
++				 "Error switching to reconfigured state!");
++		goto out;
++	}
++
++      out:
++	spin_unlock(&pdev->dev_lock);
++
++	return 0;
 +}
 +
-+static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
++static void pciback_frontend_changed(struct xenbus_device *xdev,
++				     enum xenbus_state fe_state)
 +{
-+        return skb_is_gso(skb) &&
-+               (!skb_gso_ok(skb, dev->features) ||
-+                unlikely(skb->ip_summed != CHECKSUM_HW));
-+}
-+#else
-+#define netif_needs_gso(dev, skb)	0
-+#define dev_disable_gso_features(dev)	((void)0)
-+#endif
++	struct pciback_device *pdev = xdev->dev.driver_data;
 +
-+#define GRANT_INVALID_REF	0
++	dev_dbg(&xdev->dev, "fe state changed %d\n", fe_state);
 +
-+#define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE)
-+#define NET_RX_RING_SIZE __RING_SIZE((struct netif_rx_sring *)0, PAGE_SIZE)
++	switch (fe_state) {
++	case XenbusStateInitialised:
++		pciback_attach(pdev);
++		break;
 +
-+struct netfront_info {
-+	struct list_head list;
-+	struct net_device *netdev;
++	case XenbusStateReconfiguring:
++		pciback_reconfigure(pdev);
++		break;
 +
-+	struct net_device_stats stats;
++	case XenbusStateConnected:
++		/* pcifront has finished reconfiguring and is Connected
++		 * again; mirror that on the backend side.
++		 */
++		xenbus_switch_state(xdev, XenbusStateConnected);
++		break;
 +
-+	struct netif_tx_front_ring tx;
-+	struct netif_rx_front_ring rx;
++	case XenbusStateClosing:
++		pciback_disconnect(pdev);
++		xenbus_switch_state(xdev, XenbusStateClosing);
++		break;
 +
-+	spinlock_t   tx_lock;
-+	spinlock_t   rx_lock;
++	case XenbusStateClosed:
++		pciback_disconnect(pdev);
++		xenbus_switch_state(xdev, XenbusStateClosed);
++		if (xenbus_dev_is_online(xdev))
++			break;
++		/* fall through if not online */
++	case XenbusStateUnknown:
++		dev_dbg(&xdev->dev, "frontend is gone! unregister device\n");
++		device_unregister(&xdev->dev);
++		break;
 +
-+	unsigned int irq;
-+	unsigned int copying_receiver;
-+	unsigned int carrier;
++	default:
++		break;
++	}
++}
 +
-+	/* Receive-ring batched refills. */
-+#define RX_MIN_TARGET 8
-+#define RX_DFL_MIN_TARGET 64
-+#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
-+	unsigned rx_min_target, rx_max_target, rx_target;
-+	struct sk_buff_head rx_batch;
++static int pciback_setup_backend(struct pciback_device *pdev)
++{
++	/* Get configuration from xend (if available now) */
++	int domain, bus, slot, func;
++	int err = 0;
++	int i, num_devs;
++	char dev_str[64];
++	char state_str[64];
 +
-+	struct timer_list rx_refill_timer;
++	spin_lock(&pdev->dev_lock);
 +
-+	/*
-+	 * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs
-+	 * is an index into a chain of free entries.
++	/* It's possible we could get the call to setup twice, so make sure
++	 * we're not already connected.
 +	 */
-+	struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1];
-+	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
++	if (xenbus_read_driver_state(pdev->xdev->nodename) !=
++	    XenbusStateInitWait)
++		goto out;
 +
-+#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
-+	grant_ref_t gref_tx_head;
-+	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
-+	grant_ref_t gref_rx_head;
-+	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
++	dev_dbg(&pdev->xdev->dev, "getting be setup\n");
 +
-+	struct xenbus_device *xbdev;
-+	int tx_ring_ref;
-+	int rx_ring_ref;
-+	u8 mac[ETH_ALEN];
++	err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
++			   &num_devs);
++	if (err != 1) {
++		if (err >= 0)
++			err = -EINVAL;
++		xenbus_dev_fatal(pdev->xdev, err,
++				 "Error reading number of devices");
++		goto out;
++	}
 +
-+	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
-+	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
-+	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
-+};
++	for (i = 0; i < num_devs; i++) {
++		int l = snprintf(dev_str, sizeof(dev_str), "dev-%d", i);
++		if (unlikely(l >= (sizeof(dev_str) - 1))) {
++			err = -ENOMEM;
++			xenbus_dev_fatal(pdev->xdev, err,
++					 "String overflow while reading "
++					 "configuration");
++			goto out;
++		}
 +
-+struct netfront_rx_info {
-+	struct netif_rx_response rx;
-+	struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
-+};
++		err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, dev_str,
++				   "%x:%x:%x.%x", &domain, &bus, &slot, &func);
++		if (err < 0) {
++			xenbus_dev_fatal(pdev->xdev, err,
++					 "Error reading device configuration");
++			goto out;
++		}
++		if (err != 4) {
++			err = -EINVAL;
++			xenbus_dev_fatal(pdev->xdev, err,
++					 "Error parsing pci device "
++					 "configuration");
++			goto out;
++		}
 +
-+/*
-+ * Implement our own carrier flag: the network stack's version causes delays
-+ * when the carrier is re-enabled (in particular, dev_activate() may not
-+ * immediately be called, which can cause packet loss).
-+ */
-+#define netfront_carrier_on(netif)	((netif)->carrier = 1)
-+#define netfront_carrier_off(netif)	((netif)->carrier = 0)
-+#define netfront_carrier_ok(netif)	((netif)->carrier)
++		err = pciback_export_device(pdev, domain, bus, slot, func, i);
++		if (err)
++			goto out;
 +
-+/*
-+ * Access macros for acquiring freeing slots in tx_skbs[].
-+ */
++		/* Switch substate of this device. */
++		l = snprintf(state_str, sizeof(state_str), "state-%d", i);
++		if (unlikely(l >= (sizeof(state_str) - 1))) {
++			err = -ENOMEM;
++			xenbus_dev_fatal(pdev->xdev, err,
++					 "String overflow while reading "
++					 "configuration");
++			goto out;
++		}
++		err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, state_str,
++				    "%d", XenbusStateInitialised);
++		if (err) {
++			xenbus_dev_fatal(pdev->xdev, err, "Error switching "
++					 "substate of dev-%d\n", i);
++			goto out;
++		}
++	}
 +
-+static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id)
++	err = pciback_publish_pci_roots(pdev, pciback_publish_pci_root);
++	if (err) {
++		xenbus_dev_fatal(pdev->xdev, err,
++				 "Error while publishing PCI root buses "
++				 "for frontend");
++		goto out;
++	}
++
++	err = xenbus_switch_state(pdev->xdev, XenbusStateInitialised);
++	if (err)
++		xenbus_dev_fatal(pdev->xdev, err,
++				 "Error switching to initialised state!");
++
++      out:
++	spin_unlock(&pdev->dev_lock);
++
++	if (!err)
++		/* see if pcifront is already configured (if not, we'll wait) */
++		pciback_attach(pdev);
++
++	return err;
++}
++
++static void pciback_be_watch(struct xenbus_watch *watch,
++			     const char **vec, unsigned int len)
 +{
-+	list[id] = list[0];
-+	list[0]  = (void *)(unsigned long)id;
++	struct pciback_device *pdev =
++	    container_of(watch, struct pciback_device, be_watch);
++
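++	/* Our own node changed state; once xend moves us to InitWait
++	 * the device configuration is ready to be read. */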
++	switch (xenbus_read_driver_state(pdev->xdev->nodename)) {
++	case XenbusStateInitWait:
++		pciback_setup_backend(pdev);
++		break;
++
++	default:
++		break;
++	}
 +}
 +
-+static inline unsigned short get_id_from_freelist(struct sk_buff **list)
++static int pciback_xenbus_probe(struct xenbus_device *dev,
++				const struct xenbus_device_id *id)
 +{
-+	unsigned int id = (unsigned int)(unsigned long)list[0];
-+	list[0] = list[id];
-+	return id;
++	int err = 0;
++	struct pciback_device *pdev = alloc_pdev(dev);
++
++	if (pdev == NULL) {
++		err = -ENOMEM;
++		xenbus_dev_fatal(dev, err,
++				 "Error allocating pciback_device struct");
++		goto out;
++	}
++
++	/* wait for xend to configure us */
++	err = xenbus_switch_state(dev, XenbusStateInitWait);
++	if (err)
++		goto out;
++
++	/* watch the backend node for backend configuration information */
++	err = xenbus_watch_path(dev, dev->nodename, &pdev->be_watch,
++				pciback_be_watch);
++	if (err)
++		goto out;
++	pdev->be_watching = 1;
++
++	/* We need to force a call to our callback here in case
++	 * xend already configured us!
++	 */
++	pciback_be_watch(&pdev->be_watch, NULL, 0);
++
++      out:
++	return err;
 +}
 +
-+static inline int xennet_rxidx(RING_IDX idx)
++static int pciback_xenbus_remove(struct xenbus_device *dev)
 +{
-+	return idx & (NET_RX_RING_SIZE - 1);
++	struct pciback_device *pdev = dev->dev.driver_data;
++
++	if (pdev != NULL)
++		free_pdev(pdev);
++
++	return 0;
 +}
 +
-+static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
-+						RING_IDX ri)
++static const struct xenbus_device_id xenpci_ids[] = {
++	{"pci"},
++	{{0}},
++};
++
++static struct xenbus_driver xenbus_pciback_driver = {
++	.name 			= "pciback",
++	.owner 			= THIS_MODULE,
++	.ids 			= xenpci_ids,
++	.probe 			= pciback_xenbus_probe,
++	.remove 		= pciback_xenbus_remove,
++	.otherend_changed 	= pciback_frontend_changed,
++};
++
++int __init pciback_xenbus_register(void)
 +{
-+	int i = xennet_rxidx(ri);
-+	struct sk_buff *skb = np->rx_skbs[i];
-+	np->rx_skbs[i] = NULL;
-+	return skb;
++	if (!is_running_on_xen())
++		return -ENODEV;
++
++	return xenbus_register_backend(&xenbus_pciback_driver);
 +}
 +
-+static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
-+					    RING_IDX ri)
++void __exit pciback_xenbus_unregister(void)
 +{
-+	int i = xennet_rxidx(ri);
-+	grant_ref_t ref = np->grant_rx_ref[i];
-+	np->grant_rx_ref[i] = GRANT_INVALID_REF;
-+	return ref;
++	xenbus_unregister_driver(&xenbus_pciback_driver);
 +}
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/pcifront/Makefile
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/pcifront/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,7 @@
++obj-y += pcifront.o
 +
-+#define DPRINTK(fmt, args...)				\
-+	pr_debug("netfront (%s:%d) " fmt,		\
-+		 __FUNCTION__, __LINE__, ##args)
-+#define IPRINTK(fmt, args...)				\
-+	printk(KERN_INFO "netfront: " fmt, ##args)
-+#define WPRINTK(fmt, args...)				\
-+	printk(KERN_WARNING "netfront: " fmt, ##args)
++pcifront-y := pci_op.o xenbus.o pci.o
 +
-+static int setup_device(struct xenbus_device *, struct netfront_info *);
-+static struct net_device *create_netdev(struct xenbus_device *);
++ifeq ($(CONFIG_XEN_PCIDEV_FE_DEBUG),y)
++EXTRA_CFLAGS += -DDEBUG
++endif
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/pcifront/pci.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/pcifront/pci.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,46 @@
++/*
++ * PCI Frontend Operations - ensure only one PCI frontend runs at a time
++ *
++ *   Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include "pcifront.h"
 +
-+static void end_access(int, void *);
-+static void netif_disconnect_backend(struct netfront_info *);
++DEFINE_SPINLOCK(pcifront_dev_lock);
++static struct pcifront_device *pcifront_dev = NULL;
 +
-+static int network_connect(struct net_device *);
-+static void network_tx_buf_gc(struct net_device *);
-+static void network_alloc_rx_buffers(struct net_device *);
-+static int send_fake_arp(struct net_device *);
++int pcifront_connect(struct pcifront_device *pdev)
++{
++	int err = 0;
 +
-+static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs);
++	spin_lock(&pcifront_dev_lock);
 +
-+#ifdef CONFIG_SYSFS
-+static int xennet_sysfs_addif(struct net_device *netdev);
-+static void xennet_sysfs_delif(struct net_device *netdev);
-+#else /* !CONFIG_SYSFS */
-+#define xennet_sysfs_addif(dev) (0)
-+#define xennet_sysfs_delif(dev) do { } while(0)
-+#endif
++	if (!pcifront_dev) {
++		dev_info(&pdev->xdev->dev, "Installing PCI frontend\n");
++		pcifront_dev = pdev;
++	} else {
++		dev_err(&pdev->xdev->dev, "PCI frontend already installed!\n");
++		err = -EEXIST;
++	}
 +
-+static inline int xennet_can_sg(struct net_device *dev)
-+{
-+	return dev->features & NETIF_F_SG;
++	spin_unlock(&pcifront_dev_lock);
++
++	return err;
 +}
 +
-+/**
-+ * Entry point to this code when a new device is created.  Allocate the basic
-+ * structures and the ring buffers for communication with the backend, and
-+ * inform the backend of the appropriate details for those.
-+ */
-+static int __devinit netfront_probe(struct xenbus_device *dev,
-+				    const struct xenbus_device_id *id)
++void pcifront_disconnect(struct pcifront_device *pdev)
 +{
-+	int err;
-+	struct net_device *netdev;
-+	struct netfront_info *info;
++	spin_lock(&pcifront_dev_lock);
 +
-+	netdev = create_netdev(dev);
-+	if (IS_ERR(netdev)) {
-+		err = PTR_ERR(netdev);
-+		xenbus_dev_fatal(dev, err, "creating netdev");
-+		return err;
++	if (pdev == pcifront_dev) {
++		dev_info(&pdev->xdev->dev,
++			 "Disconnecting PCI Frontend Buses\n");
++		pcifront_dev = NULL;
 +	}
 +
-+	info = netdev_priv(netdev);
-+	dev->dev.driver_data = info;
++	spin_unlock(&pcifront_dev_lock);
++}
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/pcifront/pci_op.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/pcifront/pci_op.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,551 @@
++/*
++ * PCI Frontend Operations - Communicates with the PCI backend
++ *
++ *   Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/init.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include <linux/time.h>
++#include <xen/evtchn.h>
++#include "pcifront.h"
 +
-+	err = register_netdev(info->netdev);
-+	if (err) {
-+		printk(KERN_WARNING "%s: register_netdev err=%d\n",
-+		       __FUNCTION__, err);
-+		goto fail;
-+	}
++static int verbose_request = 0;
++module_param(verbose_request, int, 0644);
 +
-+	err = xennet_sysfs_addif(info->netdev);
-+	if (err) {
-+		unregister_netdev(info->netdev);
-+		printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
-+		       __FUNCTION__, err);
-+		goto fail;
++#ifdef __ia64__
++static void pcifront_init_sd(struct pcifront_sd *sd,
++			     unsigned int domain, unsigned int bus,
++			     struct pcifront_device *pdev)
++{
++	int err, i, j, k, len, root_num, res_count;
++	struct acpi_resource res;
++	unsigned int d, b, byte;
++	unsigned long magic;
++	char str[64], tmp[3];
++	unsigned char *buf, *bufp;
++	u8 *ptr;
++
++	memset(sd, 0, sizeof(*sd));
++
++	sd->segment = domain;
++	sd->node = -1;	/* Revisit for NUMA */
++	sd->platform_data = pdev;
++
++	/* Look for resources for this controller in xenbus. */
++	err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, "root_num",
++			   "%d", &root_num);
++	if (err != 1)
++		return;
++
++	for (i = 0; i < root_num; i++) {
++		len = snprintf(str, sizeof(str), "root-%d", i);
++		if (unlikely(len >= (sizeof(str) - 1)))
++			return;
++
++		err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
++				   str, "%x:%x", &d, &b);
++		if (err != 2)
++			return;
++
++		if (d == domain && b == bus)
++			break;
 +	}
 +
-+	return 0;
++	if (i == root_num)
++		return;
 +
-+ fail:
-+	free_netdev(netdev);
-+	dev->dev.driver_data = NULL;
-+	return err;
-+}
++	len = snprintf(str, sizeof(str), "root-resource-magic");
 +
-+static int __devexit netfront_remove(struct xenbus_device *dev)
-+{
-+	struct netfront_info *info = dev->dev.driver_data;
++	err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
++			   str, "%lx", &magic);
 +
-+	DPRINTK("%s\n", dev->nodename);
++	if (err != 1)
++		return; /* No resources, nothing to do */
 +
-+	netif_disconnect_backend(info);
++	if (magic != (sizeof(res) * 2) + 1) {
++		printk(KERN_WARNING "pcifront: resource magic mismatch\n");
++		return;
++	}
 +
-+	del_timer_sync(&info->rx_refill_timer);
++	len = snprintf(str, sizeof(str), "root-%d-resources", i);
++	if (unlikely(len >= (sizeof(str) - 1)))
++		return;
 +
-+	xennet_sysfs_delif(info->netdev);
++	err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
++			   str, "%d", &res_count);
 +
-+	unregister_netdev(info->netdev);
++	if (err != 1)
++		return; /* No resources, nothing to do */
 +
-+	free_netdev(info->netdev);
++	sd->window = kzalloc(sizeof(*sd->window) * res_count, GFP_KERNEL);
++	if (!sd->window)
++		return;
 +
-+	return 0;
-+}
++	/* magic is also the size of the byte stream in xenbus */
++	buf = kmalloc(magic, GFP_KERNEL);
++	if (!buf) {
++		kfree(sd->window);
++		sd->window = NULL;
++		return;
++	}
 +
-+/**
-+ * We are reconnecting to the backend, due to a suspend/resume, or a backend
-+ * driver restart.  We tear down our netif structure and recreate it, but
-+ * leave the device-layer structures intact so that this is transparent to the
-+ * rest of the kernel.
-+ */
-+static int netfront_resume(struct xenbus_device *dev)
-+{
-+	struct netfront_info *info = dev->dev.driver_data;
++	/* Read the resources out of xenbus */
++	for (j = 0; j < res_count; j++) {
++		memset(&res, 0, sizeof(res));
++		memset(buf, 0, magic);
 +
-+	DPRINTK("%s\n", dev->nodename);
++		len = snprintf(str, sizeof(str), "root-%d-resource-%d", i, j);
++		if (unlikely(len >= (sizeof(str) - 1))) {
++			kfree(buf);
++			return;
++		}
 +
-+	netif_disconnect_backend(info);
-+	return 0;
-+}
++		err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
++				   "%s", buf);
++		if (err != 1) {
++			printk(KERN_WARNING "pcifront: error reading "
++			       "resource %d on bus %04x:%02x\n",
++			       j, domain, bus);
++			continue;
++		}
 +
-+static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
-+{
-+	char *s, *e, *macstr;
-+	int i;
++		bufp = buf;
++		ptr = (u8 *)&res;
++		memset(tmp, 0, sizeof(tmp));
 +
-+	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
-+	if (IS_ERR(macstr))
-+		return PTR_ERR(macstr);
++		/* Copy ASCII byte stream into structure */
++		for (k = 0; k < magic - 1; k += 2) {
++			memcpy(tmp, bufp, 2);
++			bufp += 2;
 +
-+	for (i = 0; i < ETH_ALEN; i++) {
-+		mac[i] = simple_strtoul(s, &e, 16);
-+		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
-+			kfree(macstr);
-+			return -ENOENT;
++			sscanf(tmp, "%02x", &byte);
++			*ptr = byte;
++			ptr++;
 +		}
-+		s = e+1;
-+	}
 +
-+	kfree(macstr);
-+	return 0;
++		xen_add_resource(sd, domain, bus, &res);
++		sd->windows++;
++	}
++	kfree(buf);
 +}
++#endif
 +
-+/* Common code used when first setting up, and when resuming. */
-+static int talk_to_backend(struct xenbus_device *dev,
-+			   struct netfront_info *info)
++static int errno_to_pcibios_err(int errno)
 +{
-+	const char *message;
-+	struct xenbus_transaction xbt;
-+	int err;
++	switch (errno) {
++	case XEN_PCI_ERR_success:
++		return PCIBIOS_SUCCESSFUL;
 +
-+	err = xen_net_read_mac(dev, info->mac);
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
-+		goto out;
-+	}
++	case XEN_PCI_ERR_dev_not_found:
++		return PCIBIOS_DEVICE_NOT_FOUND;
 +
-+	/* Create shared ring, alloc event channel. */
-+	err = setup_device(dev, info);
-+	if (err)
-+		goto out;
++	case XEN_PCI_ERR_invalid_offset:
++	case XEN_PCI_ERR_op_failed:
++		return PCIBIOS_BAD_REGISTER_NUMBER;
 +
-+again:
-+	err = xenbus_transaction_start(&xbt);
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "starting transaction");
-+		goto destroy_ring;
-+	}
++	case XEN_PCI_ERR_not_implemented:
++		return PCIBIOS_FUNC_NOT_SUPPORTED;
 +
-+	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u",
-+			    info->tx_ring_ref);
-+	if (err) {
-+		message = "writing tx ring-ref";
-+		goto abort_transaction;
-+	}
-+	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u",
-+			    info->rx_ring_ref);
-+	if (err) {
-+		message = "writing rx ring-ref";
-+		goto abort_transaction;
-+	}
-+	err = xenbus_printf(xbt, dev->nodename,
-+			    "event-channel", "%u",
-+			    irq_to_evtchn_port(info->irq));
-+	if (err) {
-+		message = "writing event-channel";
-+		goto abort_transaction;
++	case XEN_PCI_ERR_access_denied:
++		return PCIBIOS_SET_FAILED;
 +	}
++	return errno;
++}
 +
-+	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
-+			    info->copying_receiver);
-+	if (err) {
-+		message = "writing request-rx-copy";
-+		goto abort_transaction;
-+	}
++static int do_pci_op(struct pcifront_device *pdev, struct xen_pci_op *op)
++{
++	int err = 0;
++	struct xen_pci_op *active_op = &pdev->sh_info->op;
++	unsigned long irq_flags;
++	evtchn_port_t port = pdev->evtchn;
++	s64 ns, ns_timeout;
++	struct timeval tv;
 +
-+	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
-+	if (err) {
-+		message = "writing feature-rx-notify";
-+		goto abort_transaction;
-+	}
++	spin_lock_irqsave(&pdev->sh_info_lock, irq_flags);
 +
-+#ifdef HAVE_NO_CSUM_OFFLOAD
-+	err = xenbus_printf(xbt, dev->nodename, "feature-no-csum-offload", "%d", 1);
-+	if (err) {
-+		message = "writing feature-no-csum-offload";
-+		goto abort_transaction;
-+	}
-+#endif
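++	/* Only one request fits in the shared page; sh_info_lock
++	 * serialises access to it. */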
++	memcpy(active_op, op, sizeof(struct xen_pci_op));
 +
-+	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
-+	if (err) {
-+		message = "writing feature-sg";
-+		goto abort_transaction;
-+	}
++	/* Go: make the op visible before raising the active flag,
++	 * then kick the backend. */
++	wmb();
++	set_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
++	notify_remote_via_evtchn(port);
 +
-+#ifdef HAVE_TSO
-+	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
-+	if (err) {
-+		message = "writing feature-gso-tcpv4";
-+		goto abort_transaction;
-+	}
-+#endif
++	/*
++	 * We set a poll timeout of 3 seconds but give up on return after
++	 * 2 seconds. It is better to time out too late rather than too early
++	 * (in the latter case we end up continually re-executing poll() with a
++	 * timeout in the past). 1s difference gives plenty of slack for error.
++	 */
++	do_gettimeofday(&tv);
++	ns_timeout = timeval_to_ns(&tv) + 2 * (s64)NSEC_PER_SEC;
 +
-+	err = xenbus_transaction_end(xbt, 0);
-+	if (err) {
-+		if (err == -EAGAIN)
-+			goto again;
-+		xenbus_dev_fatal(dev, err, "completing transaction");
-+		goto destroy_ring;
++	clear_evtchn(port);
++
++	while (test_bit(_XEN_PCIF_active,
++			(unsigned long *)&pdev->sh_info->flags)) {
++		if (HYPERVISOR_poll(&port, 1, jiffies + 3*HZ))
++			BUG();
++		clear_evtchn(port);
++		do_gettimeofday(&tv);
++		ns = timeval_to_ns(&tv);
++		if (ns > ns_timeout) {
++			dev_err(&pdev->xdev->dev,
++				"pciback not responding!!!\n");
++			clear_bit(_XEN_PCIF_active,
++				  (unsigned long *)&pdev->sh_info->flags);
++			err = XEN_PCI_ERR_dev_not_found;
++			goto out;
++		}
 +	}
 +
-+	return 0;
++	memcpy(op, active_op, sizeof(struct xen_pci_op));
 +
-+ abort_transaction:
-+	xenbus_transaction_end(xbt, 1);
-+	xenbus_dev_fatal(dev, err, "%s", message);
-+ destroy_ring:
-+	netif_disconnect_backend(info);
-+ out:
++	err = op->err;
++      out:
++	spin_unlock_irqrestore(&pdev->sh_info_lock, irq_flags);
 +	return err;
 +}
 +
-+static int setup_device(struct xenbus_device *dev, struct netfront_info *info)
++/* Access to this function is spinlocked in drivers/pci/access.c */
++static int pcifront_bus_read(struct pci_bus *bus, unsigned int devfn,
++			     int where, int size, u32 * val)
 +{
-+	struct netif_tx_sring *txs;
-+	struct netif_rx_sring *rxs;
-+	int err;
-+	struct net_device *netdev = info->netdev;
-+
-+	info->tx_ring_ref = GRANT_INVALID_REF;
-+	info->rx_ring_ref = GRANT_INVALID_REF;
-+	info->rx.sring = NULL;
-+	info->tx.sring = NULL;
-+	info->irq = 0;
++	int err = 0;
++	struct xen_pci_op op = {
++		.cmd    = XEN_PCI_OP_conf_read,
++		.domain = pci_domain_nr(bus),
++		.bus    = bus->number,
++		.devfn  = devfn,
++		.offset = where,
++		.size   = size,
++	};
++	struct pcifront_sd *sd = bus->sysdata;
++	struct pcifront_device *pdev = pcifront_get_pdev(sd);
 +
-+	txs = (struct netif_tx_sring *)get_zeroed_page(GFP_KERNEL);
-+	if (!txs) {
-+		err = -ENOMEM;
-+		xenbus_dev_fatal(dev, err, "allocating tx ring page");
-+		goto fail;
-+	}
-+	SHARED_RING_INIT(txs);
-+	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
++	if (verbose_request)
++		dev_info(&pdev->xdev->dev,
++			 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
++			 pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
++			 PCI_FUNC(devfn), where, size);
 +
-+	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
-+	if (err < 0) {
-+		free_page((unsigned long)txs);
-+		goto fail;
-+	}
-+	info->tx_ring_ref = err;
++	err = do_pci_op(pdev, &op);
 +
-+	rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_KERNEL);
-+	if (!rxs) {
-+		err = -ENOMEM;
-+		xenbus_dev_fatal(dev, err, "allocating rx ring page");
-+		goto fail;
-+	}
-+	SHARED_RING_INIT(rxs);
-+	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
++	if (likely(!err)) {
++		if (verbose_request)
++			dev_info(&pdev->xdev->dev, "read got back value %x\n",
++				 op.value);
 +
-+	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
-+	if (err < 0) {
-+		free_page((unsigned long)rxs);
-+		goto fail;
++		*val = op.value;
++	} else if (err == -ENODEV) {
++		/* No device here, pretend that it just returned 0 */
++		err = 0;
++		*val = 0;
 +	}
-+	info->rx_ring_ref = err;
 +
-+	memcpy(netdev->dev_addr, info->mac, ETH_ALEN);
++	return errno_to_pcibios_err(err);
++}
 +
-+	err = bind_listening_port_to_irqhandler(
-+		dev->otherend_id, netif_int, SA_SAMPLE_RANDOM, netdev->name,
-+		netdev);
-+	if (err < 0)
-+		goto fail;
-+	info->irq = err;
++/* Access to this function is spinlocked in drivers/pci/access.c */
++static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn,
++			      int where, int size, u32 val)
++{
++	struct xen_pci_op op = {
++		.cmd    = XEN_PCI_OP_conf_write,
++		.domain = pci_domain_nr(bus),
++		.bus    = bus->number,
++		.devfn  = devfn,
++		.offset = where,
++		.size   = size,
++		.value  = val,
++	};
++	struct pcifront_sd *sd = bus->sysdata;
++	struct pcifront_device *pdev = pcifront_get_pdev(sd);
 +
-+	return 0;
++	if (verbose_request)
++		dev_info(&pdev->xdev->dev,
++			 "write dev=%04x:%02x:%02x.%01x - "
++			 "offset %x size %d val %x\n",
++			 pci_domain_nr(bus), bus->number,
++			 PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
 +
-+ fail:
-+	return err;
++	return errno_to_pcibios_err(do_pci_op(pdev, &op));
 +}
 +
-+/**
-+ * Callback received when the backend's state changes.
-+ */
-+static void backend_changed(struct xenbus_device *dev,
-+			    enum xenbus_state backend_state)
++struct pci_ops pcifront_bus_ops = {
++	.read = pcifront_bus_read,
++	.write = pcifront_bus_write,
++};
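
Both accessors funnel through do_pci_op(), which serializes on sh_info_lock and round-trips the request through the shared page to pciback. The caller's view is just the ordinary PCI config API; a minimal sketch (dev is a hypothetical struct pci_dev on a bus using these ops):

	u32 id;

	/* Resolves to bus->ops->read(), i.e. pcifront_bus_read() here;
	 * drivers/pci/access.c takes its own lock around the call. */
	pci_read_config_dword(dev, PCI_VENDOR_ID, &id);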
++
++#ifdef CONFIG_PCI_MSI
++int pci_frontend_enable_msix(struct pci_dev *dev,
++		struct msix_entry *entries,
++		int nvec)
 +{
-+	struct netfront_info *np = dev->dev.driver_data;
-+	struct net_device *netdev = np->netdev;
++	int err;
++	int i;
++	struct xen_pci_op op = {
++		.cmd    = XEN_PCI_OP_enable_msix,
++		.domain = pci_domain_nr(dev->bus),
++		.bus = dev->bus->number,
++		.devfn = dev->devfn,
++		.value = nvec,
++	};
++	struct pcifront_sd *sd = dev->bus->sysdata;
++	struct pcifront_device *pdev = pcifront_get_pdev(sd);
 +
-+	DPRINTK("%s\n", xenbus_strstate(backend_state));
++	if (nvec > SH_INFO_MAX_VEC) {
++		printk("too much vector for pci frontend%x\n", nvec);
++		return -EINVAL;
++	}
 +
-+	switch (backend_state) {
-+	case XenbusStateInitialising:
-+	case XenbusStateInitialised:
-+	case XenbusStateConnected:
-+	case XenbusStateUnknown:
-+	case XenbusStateClosed:
-+		break;
++	for (i = 0; i < nvec; i++) {
++		op.msix_entries[i].entry = entries[i].entry;
++		op.msix_entries[i].vector = entries[i].vector;
++	}
 +
-+	case XenbusStateInitWait:
-+		if (dev->state != XenbusStateInitialising)
-+			break;
-+		if (network_connect(netdev) != 0)
-+			break;
-+		xenbus_switch_state(dev, XenbusStateConnected);
-+		(void)send_fake_arp(netdev);
-+		break;
++	err = do_pci_op(pdev, &op);
 +
-+	case XenbusStateClosing:
-+		xenbus_frontend_closed(dev);
-+		break;
++	if (!err) {
++		if (!op.value) {
++			/* we got the result */
++			for (i = 0; i < nvec; i++)
++				entries[i].vector = op.msix_entries[i].vector;
++			return 0;
++		} else {
++			printk("enable msix returned value %x\n", op.value);
++			return op.value;
++		}
++	} else {
++		printk("enable msix failed with error %x\n", err);
++		return err;
++	}
 +}
 +
-+/** Send a packet on a net device to encourage switches to learn the
-+ * MAC. We send a fake ARP request.
-+ *
-+ * @param dev device
-+ * @return 0 on success, error code otherwise
-+ */
-+static int send_fake_arp(struct net_device *dev)
++void pci_frontend_disable_msix(struct pci_dev* dev)
 +{
-+	struct sk_buff *skb;
-+	u32             src_ip, dst_ip;
-+
-+	dst_ip = INADDR_BROADCAST;
-+	src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
-+
-+	/* No IP? Then nothing to do. */
-+	if (src_ip == 0)
-+		return 0;
++	int err;
++	struct xen_pci_op op = {
++		.cmd    = XEN_PCI_OP_disable_msix,
++		.domain = pci_domain_nr(dev->bus),
++		.bus = dev->bus->number,
++		.devfn = dev->devfn,
++	};
++	struct pcifront_sd *sd = dev->bus->sysdata;
++	struct pcifront_device *pdev = pcifront_get_pdev(sd);
 +
-+	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
-+			 dst_ip, dev, src_ip,
-+			 /*dst_hw*/ NULL, /*src_hw*/ NULL,
-+			 /*target_hw*/ dev->dev_addr);
-+	if (skb == NULL)
-+		return -ENOMEM;
++	err = do_pci_op(pdev, &op);
 +
-+	return dev_queue_xmit(skb);
++	/* What should we do on error? */
++	if (err)
++		printk("pci_disable_msix returned error %x\n", err);
 +}
 +
-+static inline int netfront_tx_slot_available(struct netfront_info *np)
++int pci_frontend_enable_msi(struct pci_dev *dev)
 +{
-+	return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
-+		(TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
++	int err;
++	struct xen_pci_op op = {
++		.cmd    = XEN_PCI_OP_enable_msi,
++		.domain = pci_domain_nr(dev->bus),
++		.bus = dev->bus->number,
++		.devfn = dev->devfn,
++	};
++	struct pcifront_sd *sd = dev->bus->sysdata;
++	struct pcifront_device *pdev = pcifront_get_pdev(sd);
++
++	err = do_pci_op(pdev, &op);
++	if (likely(!err)) {
++		dev->irq = op.value;
++	}
++	else {
++		printk("pci frontend enable msi failed for dev %x:%x \n",
++				op.bus, op.devfn);
++		err = -EINVAL;
++	}
++	return err;
 +}
 +
-+static inline void network_maybe_wake_tx(struct net_device *dev)
++void pci_frontend_disable_msi(struct pci_dev* dev)
 +{
-+	struct netfront_info *np = netdev_priv(dev);
++	int err;
++	struct xen_pci_op op = {
++		.cmd    = XEN_PCI_OP_disable_msi,
++		.domain = pci_domain_nr(dev->bus),
++		.bus = dev->bus->number,
++		.devfn = dev->devfn,
++	};
++	struct pcifront_sd *sd = dev->bus->sysdata;
++	struct pcifront_device *pdev = pcifront_get_pdev(sd);
 +
-+	if (unlikely(netif_queue_stopped(dev)) &&
-+	    netfront_tx_slot_available(np) &&
-+	    likely(netif_running(dev)))
-+		netif_wake_queue(dev);
++	err = do_pci_op(pdev, &op);
++	if (err == XEN_PCI_ERR_dev_not_found) {
++		/* XXX No response from backend, what shall we do? */
++		printk("get no response from backend for disable MSI\n");
++		return;
++	}
++	if (likely(!err))
++		dev->irq = op.value;
++	else
++		/* how can pciback notify us fail? */
++		printk("get fake response frombackend \n");
 +}
++#endif /* CONFIG_PCI_MSI */
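
None of the pci_frontend_* helpers above are called by drivers directly; they are reached through the architecture's MSI setup hooks (that wiring is not part of this hunk). A hedged usage sketch with hypothetical names:

	/* Ordinary MSI setup in a guest driver; the helpers above do the
	 * actual negotiation with pciback behind pci_enable_msi(). */
	if (pci_enable_msi(pci_dev) == 0) {
		if (request_irq(pci_dev->irq, my_irq_handler, 0,
				"mydev", my_ctx))	/* hypothetical */
			pci_disable_msi(pci_dev);
	}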
 +
-+static int network_open(struct net_device *dev)
++/* Claim resources for the PCI frontend as-is, backend won't allow changes */
++static void pcifront_claim_resource(struct pci_dev *dev, void *data)
 +{
-+	struct netfront_info *np = netdev_priv(dev);
++	struct pcifront_device *pdev = data;
++	int i;
++	struct resource *r;
 +
-+	memset(&np->stats, 0, sizeof(np->stats));
++	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
++		r = &dev->resource[i];
 +
-+	spin_lock_bh(&np->rx_lock);
-+	if (netfront_carrier_ok(np)) {
-+		network_alloc_rx_buffers(dev);
-+		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
-+		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
-+			netif_rx_schedule(dev);
++		if (!r->parent && r->start && r->flags) {
++			dev_dbg(&pdev->xdev->dev, "claiming resource %s/%d\n",
++				pci_name(dev), i);
++			pci_claim_resource(dev, i);
++		}
 +	}
-+	spin_unlock_bh(&np->rx_lock);
-+
-+	network_maybe_wake_tx(dev);
-+
-+	return 0;
 +}
 +
-+static void network_tx_buf_gc(struct net_device *dev)
++int __devinit pcifront_scan_root(struct pcifront_device *pdev,
++				 unsigned int domain, unsigned int bus)
 +{
-+	RING_IDX cons, prod;
-+	unsigned short id;
-+	struct netfront_info *np = netdev_priv(dev);
-+	struct sk_buff *skb;
++	struct pci_bus *b;
++	struct pcifront_sd *sd = NULL;
++	struct pci_bus_entry *bus_entry = NULL;
++	int err = 0;
 +
-+	BUG_ON(!netfront_carrier_ok(np));
++#ifndef CONFIG_PCI_DOMAINS
++	if (domain != 0) {
++		dev_err(&pdev->xdev->dev,
++			"PCI Root in non-zero PCI Domain! domain=%d\n", domain);
++		dev_err(&pdev->xdev->dev,
++			"Please compile with CONFIG_PCI_DOMAINS\n");
++		err = -EINVAL;
++		goto err_out;
++	}
++#endif
 +
-+	do {
-+		prod = np->tx.sring->rsp_prod;
-+		rmb(); /* Ensure we see responses up to 'rp'. */
++	dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
++		 domain, bus);
 +
-+		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
-+			struct netif_tx_response *txrsp;
++	bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
++	sd = kmalloc(sizeof(*sd), GFP_KERNEL);
++	if (!bus_entry || !sd) {
++		err = -ENOMEM;
++		goto err_out;
++	}
++	pcifront_init_sd(sd, domain, bus, pdev);
 +
-+			txrsp = RING_GET_RESPONSE(&np->tx, cons);
-+			if (txrsp->status == NETIF_RSP_NULL)
-+				continue;
++	b = pci_scan_bus_parented(&pdev->xdev->dev, bus,
++				  &pcifront_bus_ops, sd);
++	if (!b) {
++		dev_err(&pdev->xdev->dev,
++			"Error creating PCI Frontend Bus!\n");
++		err = -ENOMEM;
++		goto err_out;
++	}
 +
-+			id  = txrsp->id;
-+			skb = np->tx_skbs[id];
-+			if (unlikely(gnttab_query_foreign_access(
-+				np->grant_tx_ref[id]) != 0)) {
-+				printk(KERN_ALERT "network_tx_buf_gc: warning "
-+				       "-- grant still in use by backend "
-+				       "domain.\n");
-+				BUG();
-+			}
-+			gnttab_end_foreign_access_ref(
-+				np->grant_tx_ref[id], GNTMAP_readonly);
-+			gnttab_release_grant_reference(
-+				&np->gref_tx_head, np->grant_tx_ref[id]);
-+			np->grant_tx_ref[id] = GRANT_INVALID_REF;
-+			add_id_to_freelist(np->tx_skbs, id);
-+			dev_kfree_skb_irq(skb);
-+		}
++	pcifront_setup_root_resources(b, sd);
++	bus_entry->bus = b;
 +
-+		np->tx.rsp_cons = prod;
++	list_add(&bus_entry->list, &pdev->root_buses);
 +
-+		/*
-+		 * Set a new event, then check for race with update of tx_cons.
-+		 * Note that it is essential to schedule a callback, no matter
-+		 * how few buffers are pending. Even if there is space in the
-+		 * transmit ring, higher layers may be blocked because too much
-+		 * data is outstanding: in such cases notification from Xen is
-+		 * likely to be the only kick that we'll get.
-+		 */
-+		np->tx.sring->rsp_event =
-+			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
-+		mb();
-+	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
++	/* Claim resources before going "live" with our devices */
++	pci_walk_bus(b, pcifront_claim_resource, pdev);
 +
-+	network_maybe_wake_tx(dev);
-+}
++	pci_bus_add_devices(b);
 +
-+static void rx_refill_timeout(unsigned long data)
-+{
-+	struct net_device *dev = (struct net_device *)data;
-+	netif_rx_schedule(dev);
++	return 0;
++
++      err_out:
++	kfree(bus_entry);
++	kfree(sd);
++
++	return err;
 +}
 +
-+static void network_alloc_rx_buffers(struct net_device *dev)
++int __devinit pcifront_rescan_root(struct pcifront_device *pdev,
++				   unsigned int domain, unsigned int bus)
 +{
-+	unsigned short id;
-+	struct netfront_info *np = netdev_priv(dev);
-+	struct sk_buff *skb;
-+	struct page *page;
-+	int i, batch_target, notify;
-+	RING_IDX req_prod = np->rx.req_prod_pvt;
-+	struct xen_memory_reservation reservation;
-+	grant_ref_t ref;
-+ 	unsigned long pfn;
-+ 	void *vaddr;
-+	int nr_flips;
-+	netif_rx_request_t *req;
++	struct pci_bus *b;
++	struct pci_dev *d;
++	unsigned int devfn;
 +
-+	if (unlikely(!netfront_carrier_ok(np)))
-+		return;
++#ifndef CONFIG_PCI_DOMAINS
++	if (domain != 0) {
++		dev_err(&pdev->xdev->dev,
++			"PCI Root in non-zero PCI Domain! domain=%d\n", domain);
++		dev_err(&pdev->xdev->dev,
++			"Please compile with CONFIG_PCI_DOMAINS\n");
++		return -EINVAL;
++	}
++#endif
 +
-+	/*
-+	 * Allocate skbuffs greedily, even though we batch updates to the
-+	 * receive ring. This creates a less bursty demand on the memory
-+	 * allocator, so should reduce the chance of failed allocation requests
-+	 * both for ourself and for other kernel subsystems.
-+	 */
-+	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
-+	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
-+		/*
-+		 * Allocate an skb and a page. Do not use __dev_alloc_skb as
-+		 * that will allocate page-sized buffers which is not
-+		 * necessary here.
-+		 * 16 bytes added as necessary headroom for netif_receive_skb.
-+		 */
-+		skb = alloc_skb(RX_COPY_THRESHOLD + 16 + NET_IP_ALIGN,
-+				GFP_ATOMIC | __GFP_NOWARN);
-+		if (unlikely(!skb))
-+			goto no_skb;
++	dev_info(&pdev->xdev->dev, "Rescanning PCI Frontend Bus %04x:%02x\n",
++		 domain, bus);
 +
-+		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
-+		if (!page) {
-+			kfree_skb(skb);
-+no_skb:
-+			/* Any skbuffs queued for refill? Force them out. */
-+			if (i != 0)
-+				goto refill;
-+			/* Could not allocate any skbuffs. Try again later. */
-+			mod_timer(&np->rx_refill_timer,
-+				  jiffies + (HZ/10));
-+			break;
++	b = pci_find_bus(domain, bus);
++	if (!b)
++		/* If the bus is unknown, create it. */
++		return pcifront_scan_root(pdev, domain, bus);
++
++	/* Rescan the bus for newly attached functions and add them.
++	 * We omit handling of PCI bridge attachment because pciback prevents
++	 * bridges from being exported.
++	 */
++	for (devfn = 0; devfn < 0x100; devfn++) {
++		d = pci_get_slot(b, devfn);
++		if (d) {
++			/* Device is already known. */
++			pci_dev_put(d);
++			continue;
 +		}
 +
-+		skb_reserve(skb, 16 + NET_IP_ALIGN); /* mimic dev_alloc_skb() */
-+		skb_shinfo(skb)->frags[0].page = page;
-+		skb_shinfo(skb)->nr_frags = 1;
-+		__skb_queue_tail(&np->rx_batch, skb);
++		d = pci_scan_single_device(b, devfn);
++		if (d) {
++			dev_info(&pdev->xdev->dev, "New device on "
++				 "%04x:%02x:%02x.%02x found.\n", domain, bus,
++				 PCI_SLOT(devfn), PCI_FUNC(devfn));
++			pci_bus_add_device(d);
++		}
 +	}
 +
-+	/* Is the batch large enough to be worthwhile? */
-+	if (i < (np->rx_target/2)) {
-+		if (req_prod > np->rx.sring->req_prod)
-+			goto push;
-+		return;
-+	}
++	return 0;
++}
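
The 0..0xff devfn walk above works because devfn packs slot and function into a single byte, so it covers every slot/function pair a bus can carry. The encoding in isolation:

	/* devfn = (slot << 3) | func; PCI_SLOT()/PCI_FUNC() invert it. */
	unsigned int devfn = PCI_DEVFN(3, 1);	/* slot 3, function 1 */
	BUG_ON(PCI_SLOT(devfn) != 3 || PCI_FUNC(devfn) != 1);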
 +
-+	/* Adjust our fill target if we risked running out of buffers. */
-+	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
-+	    ((np->rx_target *= 2) > np->rx_max_target))
-+		np->rx_target = np->rx_max_target;
++static void free_root_bus_devs(struct pci_bus *bus)
++{
++	struct pci_dev *dev;
 +
-+ refill:
-+	for (nr_flips = i = 0; ; i++) {
-+		if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
-+			break;
++	while (!list_empty(&bus->devices)) {
++		dev = container_of(bus->devices.next, struct pci_dev,
++				   bus_list);
++		dev_dbg(&dev->dev, "removing device\n");
++		pci_remove_bus_device(dev);
++	}
++}
 +
-+		skb->dev = dev;
++void pcifront_free_roots(struct pcifront_device *pdev)
++{
++	struct pci_bus_entry *bus_entry, *t;
 +
-+		id = xennet_rxidx(req_prod + i);
++	dev_dbg(&pdev->xdev->dev, "cleaning up root buses\n");
 +
-+		BUG_ON(np->rx_skbs[id]);
-+		np->rx_skbs[id] = skb;
++	list_for_each_entry_safe(bus_entry, t, &pdev->root_buses, list) {
++		list_del(&bus_entry->list);
 +
-+		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
-+		BUG_ON((signed short)ref < 0);
-+		np->grant_rx_ref[id] = ref;
++		free_root_bus_devs(bus_entry->bus);
 +
-+		pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
-+		vaddr = page_address(skb_shinfo(skb)->frags[0].page);
++		kfree(bus_entry->bus->sysdata);
 +
-+		req = RING_GET_REQUEST(&np->rx, req_prod + i);
-+		if (!np->copying_receiver) {
-+			gnttab_grant_foreign_transfer_ref(ref,
-+							  np->xbdev->otherend_id,
-+							  pfn);
-+			np->rx_pfn_array[nr_flips] = pfn_to_mfn(pfn);
-+			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+				/* Remove this page before passing
-+				 * back to Xen. */
-+				set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
-+				MULTI_update_va_mapping(np->rx_mcl+i,
-+							(unsigned long)vaddr,
-+							__pte(0), 0);
-+			}
-+			nr_flips++;
-+		} else {
-+			gnttab_grant_foreign_access_ref(ref,
-+							np->xbdev->otherend_id,
-+							pfn_to_mfn(pfn),
-+							0);
-+		}
++		device_unregister(bus_entry->bus->bridge);
++		pci_remove_bus(bus_entry->bus);
 +
-+		req->id = id;
-+		req->gref = ref;
++		kfree(bus_entry);
 +	}
++}
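
pcifront_free_roots() deletes entries while walking the list, which is why the _safe iterator is required: the plain variant would read the next pointer out of freed memory. The pattern reduced to its core:

	struct pci_bus_entry *e, *tmp;

	list_for_each_entry_safe(e, tmp, &pdev->root_buses, list) {
		list_del(&e->list);
		kfree(e);	/* safe: successor already cached in tmp */
	}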
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/pcifront/pcifront.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/pcifront/pcifront.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,42 @@
++/*
++ * PCI Frontend - Common data structures & function declarations
++ *
++ *   Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
++ */
++#ifndef __XEN_PCIFRONT_H__
++#define __XEN_PCIFRONT_H__
 +
-+	if ( nr_flips != 0 ) {
-+		/* Tell the ballon driver what is going on. */
-+		balloon_update_driver_allowance(i);
++#include <linux/spinlock.h>
++#include <linux/pci.h>
++#include <xen/xenbus.h>
++#include <xen/interface/io/pciif.h>
++#include <xen/pcifront.h>
 +
-+		set_xen_guest_handle(reservation.extent_start,
-+				     np->rx_pfn_array);
-+		reservation.nr_extents   = nr_flips;
-+		reservation.extent_order = 0;
-+		reservation.address_bits = 0;
-+		reservation.domid        = DOMID_SELF;
++struct pci_bus_entry {
++	struct list_head list;
++	struct pci_bus *bus;
++};
 +
-+		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+			/* After all PTEs have been zapped, flush the TLB. */
-+			np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
-+				UVMF_TLB_FLUSH|UVMF_ALL;
++struct pcifront_device {
++	struct xenbus_device *xdev;
++	struct list_head root_buses;
++	spinlock_t dev_lock;
 +
-+			/* Give away a batch of pages. */
-+			np->rx_mcl[i].op = __HYPERVISOR_memory_op;
-+			np->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
-+			np->rx_mcl[i].args[1] = (unsigned long)&reservation;
++	int evtchn;
++	int gnt_ref;
 +
-+			/* Zap PTEs and give away pages in one big
-+			 * multicall. */
-+			(void)HYPERVISOR_multicall(np->rx_mcl, i+1);
++	/* Lock this when doing any operations in sh_info */
++	spinlock_t sh_info_lock;
++	struct xen_pci_sharedinfo *sh_info;
++};
 +
-+			/* Check return status of HYPERVISOR_memory_op(). */
-+			if (unlikely(np->rx_mcl[i].result != i))
-+				panic("Unable to reduce memory reservation\n");
-+		} else {
-+			if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-+						 &reservation) != i)
-+				panic("Unable to reduce memory reservation\n");
-+		}
-+	} else {
-+		wmb();
-+	}
++int pcifront_connect(struct pcifront_device *pdev);
++void pcifront_disconnect(struct pcifront_device *pdev);
 +
-+	/* Above is a suitable barrier to ensure backend will see requests. */
-+	np->rx.req_prod_pvt = req_prod + i;
-+ push:
-+	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
-+	if (notify)
-+		notify_remote_via_irq(np->irq);
-+}
++int pcifront_scan_root(struct pcifront_device *pdev,
++		       unsigned int domain, unsigned int bus);
++int pcifront_rescan_root(struct pcifront_device *pdev,
++			 unsigned int domain, unsigned int bus);
++void pcifront_free_roots(struct pcifront_device *pdev);
 +
-+static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
-+			      struct netif_tx_request *tx)
-+{
-+	struct netfront_info *np = netdev_priv(dev);
-+	char *data = skb->data;
-+	unsigned long mfn;
-+	RING_IDX prod = np->tx.req_prod_pvt;
-+	int frags = skb_shinfo(skb)->nr_frags;
-+	unsigned int offset = offset_in_page(data);
-+	unsigned int len = skb_headlen(skb);
-+	unsigned int id;
-+	grant_ref_t ref;
-+	int i;
++#endif	/* __XEN_PCIFRONT_H__ */
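
The header deliberately declares two locks: dev_lock serializes whole connect/disconnect/rescan sequences, while sh_info_lock guards each individual operation on the shared page and is taken with interrupts disabled (see do_pci_op() earlier in this patch). A sketch of the inner pattern, eliding the actual request protocol:

	unsigned long flags;

	spin_lock_irqsave(&pdev->sh_info_lock, flags);
	/* ... fill pdev->sh_info->op, notify the event channel, wait ... */
	spin_unlock_irqrestore(&pdev->sh_info_lock, flags);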
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/pcifront/xenbus.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/pcifront/xenbus.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,455 @@
++/*
++ * PCI Frontend Xenbus Setup - handles setup with backend (imports page/evtchn)
++ *
++ *   Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/mm.h>
++#include <xen/xenbus.h>
++#include <xen/gnttab.h>
++#include "pcifront.h"
 +
-+	while (len > PAGE_SIZE - offset) {
-+		tx->size = PAGE_SIZE - offset;
-+		tx->flags |= NETTXF_more_data;
-+		len -= tx->size;
-+		data += tx->size;
-+		offset = 0;
++#ifndef __init_refok
++#define __init_refok
++#endif
 +
-+		id = get_id_from_freelist(np->tx_skbs);
-+		np->tx_skbs[id] = skb_get(skb);
-+		tx = RING_GET_REQUEST(&np->tx, prod++);
-+		tx->id = id;
-+		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
-+		BUG_ON((signed short)ref < 0);
++#define INVALID_GRANT_REF (0)
++#define INVALID_EVTCHN    (-1)
 +
-+		mfn = virt_to_mfn(data);
-+		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
-+						mfn, GNTMAP_readonly);
++static struct pcifront_device *alloc_pdev(struct xenbus_device *xdev)
++{
++	struct pcifront_device *pdev;
 +
-+		tx->gref = np->grant_tx_ref[id] = ref;
-+		tx->offset = offset;
-+		tx->size = len;
-+		tx->flags = 0;
-+	}
++	pdev = kzalloc(sizeof(struct pcifront_device), GFP_KERNEL);
++	if (pdev == NULL)
++		goto out;
 +
-+	for (i = 0; i < frags; i++) {
-+		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
++	pdev->sh_info =
++	    (struct xen_pci_sharedinfo *)__get_free_page(GFP_KERNEL);
++	if (pdev->sh_info == NULL) {
++		kfree(pdev);
++		pdev = NULL;
++		goto out;
++	}
++	pdev->sh_info->flags = 0;
 +
-+		tx->flags |= NETTXF_more_data;
++	xdev->dev.driver_data = pdev;
++	pdev->xdev = xdev;
 +
-+		id = get_id_from_freelist(np->tx_skbs);
-+		np->tx_skbs[id] = skb_get(skb);
-+		tx = RING_GET_REQUEST(&np->tx, prod++);
-+		tx->id = id;
-+		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
-+		BUG_ON((signed short)ref < 0);
++	INIT_LIST_HEAD(&pdev->root_buses);
 +
-+		mfn = pfn_to_mfn(page_to_pfn(frag->page));
-+		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
-+						mfn, GNTMAP_readonly);
++	spin_lock_init(&pdev->dev_lock);
++	spin_lock_init(&pdev->sh_info_lock);
 +
-+		tx->gref = np->grant_tx_ref[id] = ref;
-+		tx->offset = frag->page_offset;
-+		tx->size = frag->size;
-+		tx->flags = 0;
-+	}
++	pdev->evtchn = INVALID_EVTCHN;
++	pdev->gnt_ref = INVALID_GRANT_REF;
 +
-+	np->tx.req_prod_pvt = prod;
++	dev_dbg(&xdev->dev, "Allocated pdev @ 0x%p pdev->sh_info @ 0x%p\n",
++		pdev, pdev->sh_info);
++      out:
++	return pdev;
 +}
 +
-+static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static void free_pdev(struct pcifront_device *pdev)
 +{
-+	unsigned short id;
-+	struct netfront_info *np = netdev_priv(dev);
-+	struct netif_tx_request *tx;
-+	struct netif_extra_info *extra;
-+	char *data = skb->data;
-+	RING_IDX i;
-+	grant_ref_t ref;
-+	unsigned long mfn;
-+	int notify;
-+	int frags = skb_shinfo(skb)->nr_frags;
-+	unsigned int offset = offset_in_page(data);
-+	unsigned int len = skb_headlen(skb);
-+
-+	frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
-+	if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
-+		printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
-+		       frags);
-+		dump_stack();
-+		goto drop;
-+	}
++	dev_dbg(&pdev->xdev->dev, "freeing pdev @ 0x%p\n", pdev);
 +
-+	spin_lock_irq(&np->tx_lock);
++	pcifront_free_roots(pdev);
 +
-+	if (unlikely(!netfront_carrier_ok(np) ||
-+		     (frags > 1 && !xennet_can_sg(dev)) ||
-+		     netif_needs_gso(dev, skb))) {
-+		spin_unlock_irq(&np->tx_lock);
-+		goto drop;
-+	}
++	if (pdev->evtchn != INVALID_EVTCHN)
++		xenbus_free_evtchn(pdev->xdev, pdev->evtchn);
 +
-+	i = np->tx.req_prod_pvt;
++	if (pdev->gnt_ref != INVALID_GRANT_REF)
++		gnttab_end_foreign_access(pdev->gnt_ref,
++					  (unsigned long)pdev->sh_info);
 +
-+	id = get_id_from_freelist(np->tx_skbs);
-+	np->tx_skbs[id] = skb;
++	pdev->xdev->dev.driver_data = NULL;
 +
-+	tx = RING_GET_REQUEST(&np->tx, i);
++	kfree(pdev);
++}
 +
-+	tx->id   = id;
-+	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
-+	BUG_ON((signed short)ref < 0);
-+	mfn = virt_to_mfn(data);
-+	gnttab_grant_foreign_access_ref(
-+		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
-+	tx->gref = np->grant_tx_ref[id] = ref;
-+	tx->offset = offset;
-+	tx->size = len;
++static int pcifront_publish_info(struct pcifront_device *pdev)
++{
++	int err = 0;
++	struct xenbus_transaction trans;
 +
-+	tx->flags = 0;
-+	extra = NULL;
++	err = xenbus_grant_ring(pdev->xdev, virt_to_mfn(pdev->sh_info));
++	if (err < 0)
++		goto out;
 +
-+	if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
-+		tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
-+#ifdef CONFIG_XEN
-+	if (skb->proto_data_valid) /* remote but checksummed? */
-+		tx->flags |= NETTXF_data_validated;
-+#endif
++	pdev->gnt_ref = err;
 +
-+#ifdef HAVE_TSO
-+	if (skb_shinfo(skb)->gso_size) {
-+		struct netif_extra_info *gso = (struct netif_extra_info *)
-+			RING_GET_REQUEST(&np->tx, ++i);
++	err = xenbus_alloc_evtchn(pdev->xdev, &pdev->evtchn);
++	if (err)
++		goto out;
 +
-+		if (extra)
-+			extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
-+		else
-+			tx->flags |= NETTXF_extra_info;
++      do_publish:
++	err = xenbus_transaction_start(&trans);
++	if (err) {
++		xenbus_dev_fatal(pdev->xdev, err,
++				 "Error writing configuration for backend "
++				 "(start transaction)");
++		goto out;
++	}
 +
-+		gso->u.gso.size = skb_shinfo(skb)->gso_size;
-+		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
-+		gso->u.gso.pad = 0;
-+		gso->u.gso.features = 0;
++	err = xenbus_printf(trans, pdev->xdev->nodename,
++			    "pci-op-ref", "%u", pdev->gnt_ref);
++	if (!err)
++		err = xenbus_printf(trans, pdev->xdev->nodename,
++				    "event-channel", "%u", pdev->evtchn);
++	if (!err)
++		err = xenbus_printf(trans, pdev->xdev->nodename,
++				    "magic", XEN_PCI_MAGIC);
 +
-+		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
-+		gso->flags = 0;
-+		extra = gso;
++	if (err) {
++		xenbus_transaction_end(trans, 1);
++		xenbus_dev_fatal(pdev->xdev, err,
++				 "Error writing configuration for backend");
++		goto out;
++	} else {
++		err = xenbus_transaction_end(trans, 0);
++		if (err == -EAGAIN)
++			goto do_publish;
++		else if (err) {
++			xenbus_dev_fatal(pdev->xdev, err,
++					 "Error completing transaction "
++					 "for backend");
++			goto out;
++		}
 +	}
-+#endif
 +
-+	np->tx.req_prod_pvt = i + 1;
++	xenbus_switch_state(pdev->xdev, XenbusStateInitialised);
 +
-+	xennet_make_frags(skb, dev, tx);
-+	tx->size = skb->len;
++	dev_dbg(&pdev->xdev->dev, "publishing successful!\n");
 +
-+	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
-+	if (notify)
-+		notify_remote_via_irq(np->irq);
++      out:
++	return err;
++}
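
The do_publish label implements the standard xenbus transaction idiom: a commit that returns -EAGAIN raced with another xenstore writer and must be replayed from the start. The pattern in minimal form (node, key and value are placeholders):

again:
	err = xenbus_transaction_start(&trans);
	if (err)
		return err;
	err = xenbus_printf(trans, node, "key", "%u", value);
	if (err) {
		xenbus_transaction_end(trans, 1);	/* abort */
		return err;
	}
	err = xenbus_transaction_end(trans, 0);		/* commit */
	if (err == -EAGAIN)
		goto again;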
 +
-+	np->stats.tx_bytes += skb->len;
-+	np->stats.tx_packets++;
++static int __devinit pcifront_try_connect(struct pcifront_device *pdev)
++{
++	int err = -EFAULT;
++	int i, num_roots, len;
++	char str[64];
++	unsigned int domain, bus;
 +
-+	/* Note: It is not safe to access skb after network_tx_buf_gc()! */
-+	network_tx_buf_gc(dev);
++	spin_lock(&pdev->dev_lock);
 +
-+	if (!netfront_tx_slot_available(np))
-+		netif_stop_queue(dev);
++	/* Only connect once */
++	if (xenbus_read_driver_state(pdev->xdev->nodename) !=
++	    XenbusStateInitialised)
++		goto out;
 +
-+	spin_unlock_irq(&np->tx_lock);
++	err = pcifront_connect(pdev);
++	if (err) {
++		xenbus_dev_fatal(pdev->xdev, err,
++				 "Error connecting PCI Frontend");
++		goto out;
++	}
++
++	err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
++			   "root_num", "%d", &num_roots);
++	if (err == -ENOENT) {
++		xenbus_dev_error(pdev->xdev, err,
++				 "No PCI Roots found, trying 0000:00");
++		err = pcifront_scan_root(pdev, 0, 0);
++		num_roots = 0;
++	} else if (err != 1) {
++		if (err == 0)
++			err = -EINVAL;
++		xenbus_dev_fatal(pdev->xdev, err,
++				 "Error reading number of PCI roots");
++		goto out;
++	}
++
++	for (i = 0; i < num_roots; i++) {
++		len = snprintf(str, sizeof(str), "root-%d", i);
++		if (unlikely(len >= (sizeof(str) - 1))) {
++			err = -ENOMEM;
++			goto out;
++		}
++
++		err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
++				   "%x:%x", &domain, &bus);
++		if (err != 2) {
++			if (err >= 0)
++				err = -EINVAL;
++			xenbus_dev_fatal(pdev->xdev, err,
++					 "Error reading PCI root %d", i);
++			goto out;
++		}
++
++		err = pcifront_scan_root(pdev, domain, bus);
++		if (err) {
++			xenbus_dev_fatal(pdev->xdev, err,
++					 "Error scanning PCI root %04x:%02x",
++					 domain, bus);
++			goto out;
++		}
++	}
 +
-+	return 0;
++	err = xenbus_switch_state(pdev->xdev, XenbusStateConnected);
++	if (err)
++		goto out;
 +
-+ drop:
-+	np->stats.tx_dropped++;
-+	dev_kfree_skb(skb);
-+	return 0;
++      out:
++	spin_unlock(&pdev->dev_lock);
++	return err;
 +}
 +
-+static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
++static int pcifront_try_disconnect(struct pcifront_device *pdev)
 +{
-+	struct net_device *dev = dev_id;
-+	struct netfront_info *np = netdev_priv(dev);
-+	unsigned long flags;
++	int err = 0;
++	enum xenbus_state prev_state;
 +
-+	spin_lock_irqsave(&np->tx_lock, flags);
++	spin_lock(&pdev->dev_lock);
 +
-+	if (likely(netfront_carrier_ok(np))) {
-+		network_tx_buf_gc(dev);
-+		/* Under tx_lock: protects access to rx shared-ring indexes. */
-+		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
-+			netif_rx_schedule(dev);
++	prev_state = xenbus_read_driver_state(pdev->xdev->nodename);
++
++	if (prev_state >= XenbusStateClosing)
++		goto out;
++
++	if (prev_state == XenbusStateConnected) {
++		pcifront_free_roots(pdev);
++		pcifront_disconnect(pdev);
 +	}
 +
-+	spin_unlock_irqrestore(&np->tx_lock, flags);
++	err = xenbus_switch_state(pdev->xdev, XenbusStateClosed);
 +
-+	return IRQ_HANDLED;
++      out:
++	spin_unlock(&pdev->dev_lock);
++
++	return err;
 +}
 +
-+static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
-+				grant_ref_t ref)
++static int __devinit pcifront_attach_devices(struct pcifront_device *pdev)
 +{
-+	int new = xennet_rxidx(np->rx.req_prod_pvt);
-+
-+	BUG_ON(np->rx_skbs[new]);
-+	np->rx_skbs[new] = skb;
-+	np->grant_rx_ref[new] = ref;
-+	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
-+	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
-+	np->rx.req_prod_pvt++;
-+}
++	int err = -EFAULT;
++	int i, num_roots, len;
++	unsigned int domain, bus;
++	char str[64];
 +
-+int xennet_get_extras(struct netfront_info *np,
-+		      struct netif_extra_info *extras, RING_IDX rp)
++	spin_lock(&pdev->dev_lock);
 +
-+{
-+	struct netif_extra_info *extra;
-+	RING_IDX cons = np->rx.rsp_cons;
-+	int err = 0;
++	if (xenbus_read_driver_state(pdev->xdev->nodename) !=
++	    XenbusStateReconfiguring)
++		goto out;
 +
-+	do {
-+		struct sk_buff *skb;
-+		grant_ref_t ref;
++	err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
++			   "root_num", "%d", &num_roots);
++	if (err == -ENOENT) {
++		xenbus_dev_error(pdev->xdev, err,
++				 "No PCI Roots found, trying 0000:00");
++		err = pcifront_rescan_root(pdev, 0, 0);
++		num_roots = 0;
++	} else if (err != 1) {
++		if (err == 0)
++			err = -EINVAL;
++		xenbus_dev_fatal(pdev->xdev, err,
++				 "Error reading number of PCI roots");
++		goto out;
++	}
 +
-+		if (unlikely(cons + 1 == rp)) {
-+			if (net_ratelimit())
-+				WPRINTK("Missing extra info\n");
-+			err = -EBADR;
-+			break;
++	for (i = 0; i < num_roots; i++) {
++		len = snprintf(str, sizeof(str), "root-%d", i);
++		if (unlikely(len >= (sizeof(str) - 1))) {
++			err = -ENOMEM;
++			goto out;
 +		}
 +
-+		extra = (struct netif_extra_info *)
-+			RING_GET_RESPONSE(&np->rx, ++cons);
++		err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
++				   "%x:%x", &domain, &bus);
++		if (err != 2) {
++			if (err >= 0)
++				err = -EINVAL;
++			xenbus_dev_fatal(pdev->xdev, err,
++					 "Error reading PCI root %d", i);
++			goto out;
++		}
 +
-+		if (unlikely(!extra->type ||
-+			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
-+			if (net_ratelimit())
-+				WPRINTK("Invalid extra type: %d\n",
-+					extra->type);
-+			err = -EINVAL;
-+		} else {
-+			memcpy(&extras[extra->type - 1], extra,
-+			       sizeof(*extra));
++		err = pcifront_rescan_root(pdev, domain, bus);
++		if (err) {
++			xenbus_dev_fatal(pdev->xdev, err,
++					 "Error scanning PCI root %04x:%02x",
++					 domain, bus);
++			goto out;
 +		}
++	}
 +
-+		skb = xennet_get_rx_skb(np, cons);
-+		ref = xennet_get_rx_ref(np, cons);
-+		xennet_move_rx_slot(np, skb, ref);
-+	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
++	xenbus_switch_state(pdev->xdev, XenbusStateConnected);
 +
-+	np->rx.rsp_cons = cons;
++      out:
++	spin_unlock(&pdev->dev_lock);
 +	return err;
 +}
 +
-+static int xennet_get_responses(struct netfront_info *np,
-+				struct netfront_rx_info *rinfo, RING_IDX rp,
-+				struct sk_buff_head *list,
-+				int *pages_flipped_p)
++static int pcifront_detach_devices(struct pcifront_device *pdev)
 +{
-+	int pages_flipped = *pages_flipped_p;
-+	struct mmu_update *mmu;
-+	struct multicall_entry *mcl;
-+	struct netif_rx_response *rx = &rinfo->rx;
-+	struct netif_extra_info *extras = rinfo->extras;
-+	RING_IDX cons = np->rx.rsp_cons;
-+	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
-+	grant_ref_t ref = xennet_get_rx_ref(np, cons);
-+	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
-+	int frags = 1;
 +	int err = 0;
-+	unsigned long ret;
++	int i, num_devs;
++	unsigned int domain, bus, slot, func;
++	struct pci_bus *pci_bus;
++	struct pci_dev *pci_dev;
++	char str[64];
 +
-+	if (rx->flags & NETRXF_extra_info) {
-+		err = xennet_get_extras(np, extras, rp);
-+		cons = np->rx.rsp_cons;
-+	}
++	spin_lock(&pdev->dev_lock);
 +
-+	for (;;) {
-+		unsigned long mfn;
++	if (xenbus_read_driver_state(pdev->xdev->nodename) !=
++	    XenbusStateConnected)
++		goto out;
 +
-+		if (unlikely(rx->status < 0 ||
-+			     rx->offset + rx->status > PAGE_SIZE)) {
-+			if (net_ratelimit())
-+				WPRINTK("rx->offset: %x, size: %u\n",
-+					rx->offset, rx->status);
-+			xennet_move_rx_slot(np, skb, ref);
++	err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, "num_devs", "%d",
++			   &num_devs);
++	if (err != 1) {
++		if (err >= 0)
 +			err = -EINVAL;
-+			goto next;
++		xenbus_dev_fatal(pdev->xdev, err,
++				 "Error reading number of PCI devices");
++		goto out;
++	}
++
++	/* Find devices being detached and remove them. */
++	for (i = 0; i < num_devs; i++) {
++		int l, state;
++		l = snprintf(str, sizeof(str), "state-%d", i);
++		if (unlikely(l >= (sizeof(str) - 1))) {
++			err = -ENOMEM;
++			goto out;
 +		}
++		err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str, "%d",
++				   &state);
++		if (err != 1)
++			state = XenbusStateUnknown;
 +
-+		/*
-+		 * This definitely indicates a bug, either in this driver or in
-+		 * the backend driver. In future this should flag the bad
-+		 * situation to the system controller to reboot the backed.
-+		 */
-+		if (ref == GRANT_INVALID_REF) {
-+			if (net_ratelimit())
-+				WPRINTK("Bad rx response id %d.\n", rx->id);
-+			err = -EINVAL;
-+			goto next;
++		if (state != XenbusStateClosing)
++			continue;
++
++		/* Remove device. */
++		l = snprintf(str, sizeof(str), "vdev-%d", i);
++		if (unlikely(l >= (sizeof(str) - 1))) {
++			err = -ENOMEM;
++			goto out;
++		}
++		err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
++			   	   "%x:%x:%x.%x", &domain, &bus, &slot, &func);
++		if (err != 4) {
++			if (err >= 0)
++				err = -EINVAL;
++			xenbus_dev_fatal(pdev->xdev, err,
++				 	 "Error reading PCI device %d", i);
++			goto out;
 +		}
 +
-+		if (!np->copying_receiver) {
-+			/* Memory pressure, insufficient buffer
-+			 * headroom, ... */
-+			if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) {
-+				if (net_ratelimit())
-+					WPRINTK("Unfulfilled rx req "
-+						"(id=%d, st=%d).\n",
-+						rx->id, rx->status);
-+				xennet_move_rx_slot(np, skb, ref);
-+				err = -ENOMEM;
-+				goto next;
-+			}
++		pci_bus = pci_find_bus(domain, bus);
++		if (!pci_bus) {
++			dev_dbg(&pdev->xdev->dev, "Cannot get bus %04x:%02x\n",
++				domain, bus);
++			continue;
++		}
++		pci_dev = pci_get_slot(pci_bus, PCI_DEVFN(slot, func));
++		if (!pci_dev) {
++			dev_dbg(&pdev->xdev->dev,
++				"Cannot get PCI device %04x:%02x:%02x.%02x\n",
++				domain, bus, slot, func);
++			continue;
++		}
++		pci_remove_bus_device(pci_dev);
++		pci_dev_put(pci_dev);
 +
-+			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+				/* Remap the page. */
-+				struct page *page =
-+					skb_shinfo(skb)->frags[0].page;
-+				unsigned long pfn = page_to_pfn(page);
-+				void *vaddr = page_address(page);
++		dev_dbg(&pdev->xdev->dev,
++			"PCI device %04x:%02x:%02x.%02x removed.\n",
++			domain, bus, slot, func);
++	}
 +
-+				mcl = np->rx_mcl + pages_flipped;
-+				mmu = np->rx_mmu + pages_flipped;
++	err = xenbus_switch_state(pdev->xdev, XenbusStateReconfiguring);
 +
-+				MULTI_update_va_mapping(mcl,
-+							(unsigned long)vaddr,
-+							pfn_pte_ma(mfn,
-+								   PAGE_KERNEL),
-+							0);
-+				mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
-+					| MMU_MACHPHYS_UPDATE;
-+				mmu->val = pfn;
++      out:
++	spin_unlock(&pdev->dev_lock);
++	return err;
++}
 +
-+				set_phys_to_machine(pfn, mfn);
-+			}
-+			pages_flipped++;
-+		} else {
-+			ret = gnttab_end_foreign_access_ref(ref, 0);
-+			BUG_ON(!ret);
-+		}
++static void __init_refok pcifront_backend_changed(struct xenbus_device *xdev,
++						  enum xenbus_state be_state)
++{
++	struct pcifront_device *pdev = xdev->dev.driver_data;
 +
-+		gnttab_release_grant_reference(&np->gref_rx_head, ref);
++	switch (be_state) {
++	case XenbusStateUnknown:
++	case XenbusStateInitialising:
++	case XenbusStateInitWait:
++	case XenbusStateInitialised:
++	case XenbusStateClosed:
++		break;
 +
-+		__skb_queue_tail(list, skb);
++	case XenbusStateConnected:
++		pcifront_try_connect(pdev);
++		break;
 +
-+next:
-+		if (!(rx->flags & NETRXF_more_data))
-+			break;
++	case XenbusStateClosing:
++		dev_warn(&xdev->dev, "backend going away!\n");
++		pcifront_try_disconnect(pdev);
++		break;
 +
-+		if (cons + frags == rp) {
-+			if (net_ratelimit())
-+				WPRINTK("Need more frags\n");
-+			err = -ENOENT;
-+			break;
-+		}
++	case XenbusStateReconfiguring:
++		pcifront_detach_devices(pdev);
++		break;
 +
-+		rx = RING_GET_RESPONSE(&np->rx, cons + frags);
-+		skb = xennet_get_rx_skb(np, cons + frags);
-+		ref = xennet_get_rx_ref(np, cons + frags);
-+		frags++;
++	case XenbusStateReconfigured:
++		pcifront_attach_devices(pdev);
++		break;
 +	}
++}
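
pcifront_backend_changed() is the frontend half of the xenbus state machine: the backend writes its state into xenstore, the frontend reacts and publishes its own transitions with xenbus_switch_state(). One leg condensed into a sketch, assuming the helpers above:

	/* On backend Closing: tear down, then acknowledge with Closed. */
	if (xenbus_read_driver_state(pdev->xdev->otherend) ==
	    XenbusStateClosing)
		pcifront_try_disconnect(pdev);	/* switches to Closed */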
 +
-+	if (unlikely(frags > max)) {
-+		if (net_ratelimit())
-+			WPRINTK("Too many frags\n");
-+		err = -E2BIG;
-+	}
++static int pcifront_xenbus_probe(struct xenbus_device *xdev,
++				 const struct xenbus_device_id *id)
++{
++	int err = 0;
++	struct pcifront_device *pdev = alloc_pdev(xdev);
 +
-+	if (unlikely(err))
-+		np->rx.rsp_cons = cons + frags;
++	if (pdev == NULL) {
++		err = -ENOMEM;
++		xenbus_dev_fatal(xdev, err,
++				 "Error allocating pcifront_device struct");
++		goto out;
++	}
 +
-+	*pages_flipped_p = pages_flipped;
++	err = pcifront_publish_info(pdev);
 +
++      out:
 +	return err;
 +}
 +
-+static RING_IDX xennet_fill_frags(struct netfront_info *np,
-+				  struct sk_buff *skb,
-+				  struct sk_buff_head *list)
++static int pcifront_xenbus_remove(struct xenbus_device *xdev)
 +{
-+	struct skb_shared_info *shinfo = skb_shinfo(skb);
-+	int nr_frags = shinfo->nr_frags;
-+	RING_IDX cons = np->rx.rsp_cons;
-+	skb_frag_t *frag = shinfo->frags + nr_frags;
-+	struct sk_buff *nskb;
-+
-+	while ((nskb = __skb_dequeue(list))) {
-+		struct netif_rx_response *rx =
-+			RING_GET_RESPONSE(&np->rx, ++cons);
++	if (xdev->dev.driver_data)
++		free_pdev(xdev->dev.driver_data);
 +
-+		frag->page = skb_shinfo(nskb)->frags[0].page;
-+		frag->page_offset = rx->offset;
-+		frag->size = rx->status;
++	return 0;
++}
 +
-+		skb->data_len += rx->status;
++static const struct xenbus_device_id xenpci_ids[] = {
++	{"pci"},
++	{{0}},
++};
++MODULE_ALIAS("xen:pci");
 +
-+		skb_shinfo(nskb)->nr_frags = 0;
-+		kfree_skb(nskb);
++static struct xenbus_driver xenbus_pcifront_driver = {
++	.name 			= "pcifront",
++	.owner 			= THIS_MODULE,
++	.ids 			= xenpci_ids,
++	.probe 			= pcifront_xenbus_probe,
++	.remove 		= pcifront_xenbus_remove,
++	.otherend_changed 	= pcifront_backend_changed,
++};
 +
-+		frag++;
-+		nr_frags++;
-+	}
++static int __init pcifront_init(void)
++{
++	if (!is_running_on_xen())
++		return -ENODEV;
 +
-+	shinfo->nr_frags = nr_frags;
-+	return cons;
++	return xenbus_register_frontend(&xenbus_pcifront_driver);
 +}
 +
-+static int xennet_set_skb_gso(struct sk_buff *skb,
-+			      struct netif_extra_info *gso)
++/* Initialize after the Xen PCI Frontend Stub is initialized */
++subsys_initcall(pcifront_init);
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/privcmd/Makefile
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/privcmd/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,3 @@
++
++obj-y	+= privcmd.o
++obj-$(CONFIG_COMPAT)	+= compat_privcmd.o
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/privcmd/compat_privcmd.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/privcmd/compat_privcmd.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,73 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Jimi Xenidis <jimix at watson.ibm.com>
++ */
++
++#include <linux/config.h>
++#include <linux/compat.h>
++#include <linux/ioctl.h>
++#include <linux/syscalls.h>
++#include <asm/hypervisor.h>
++#include <asm/uaccess.h>
++#include <xen/public/privcmd.h>
++#include <xen/compat_ioctl.h>
++
++int privcmd_ioctl_32(int fd, unsigned int cmd, unsigned long arg)
 +{
-+	if (!gso->u.gso.size) {
-+		if (net_ratelimit())
-+			WPRINTK("GSO size must not be zero.\n");
-+		return -EINVAL;
-+	}
++	int ret;
 +
-+	/* Currently only TCPv4 S.O. is supported. */
-+	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
-+		if (net_ratelimit())
-+			WPRINTK("Bad GSO type %d.\n", gso->u.gso.type);
-+		return -EINVAL;
++	switch (cmd) {
++	case IOCTL_PRIVCMD_MMAP_32: {
++		struct privcmd_mmap *p;
++		struct privcmd_mmap_32 *p32;
++		struct privcmd_mmap_32 n32;
++
++		p32 = compat_ptr(arg);
++		p = compat_alloc_user_space(sizeof(*p));
++		if (copy_from_user(&n32, p32, sizeof(n32)) ||
++		    put_user(n32.num, &p->num) ||
++		    put_user(n32.dom, &p->dom) ||
++		    put_user(compat_ptr(n32.entry), &p->entry))
++			return -EFAULT;
++		
++		ret = sys_ioctl(fd, IOCTL_PRIVCMD_MMAP, (unsigned long)p);
++	}
++		break;
++	case IOCTL_PRIVCMD_MMAPBATCH_32: {
++		struct privcmd_mmapbatch *p;
++		struct privcmd_mmapbatch_32 *p32;
++		struct privcmd_mmapbatch_32 n32;
++
++		p32 = compat_ptr(arg);
++		p = compat_alloc_user_space(sizeof(*p));
++		if (copy_from_user(&n32, p32, sizeof(n32)) ||
++		    put_user(n32.num, &p->num) ||
++		    put_user(n32.dom, &p->dom) ||
++		    put_user(n32.addr, &p->addr) ||
++		    put_user(compat_ptr(n32.arr), &p->arr))
++			return -EFAULT;
++		
++		ret = sys_ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH, (unsigned long)p);
++	}
++		break;
++	default:
++		ret = -EINVAL;
++		break;
 +	}
++	return ret;
++}
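
privcmd_ioctl_32() follows the usual compat-ioctl recipe: copy the 32-bit layout in, widen it field by field into a native struct built on the user stack with compat_alloc_user_space() (sys_ioctl() must be handed a user pointer, not a kernel one), then re-issue the native ioctl. compat_ptr() does the 32-to-64-bit pointer widening. The skeleton of one case:

	p32 = compat_ptr(arg);			 /* 32-bit user pointer */
	p = compat_alloc_user_space(sizeof(*p)); /* native struct, user VA */
	if (copy_from_user(&n32, p32, sizeof(n32)) ||
	    put_user(compat_ptr(n32.entry), &p->entry))
		return -EFAULT;
	return sys_ioctl(fd, IOCTL_PRIVCMD_MMAP, (unsigned long)p);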
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/privcmd/privcmd.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/privcmd/privcmd.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,356 @@
++/******************************************************************************
++ * privcmd.c
++ * 
++ * Interface to privileged domain-0 commands.
++ * 
++ * Copyright (c) 2002-2004, K A Fraser, B Dragovic
++ */
 +
-+#ifdef HAVE_TSO
-+	skb_shinfo(skb)->gso_size = gso->u.gso.size;
-+#ifdef HAVE_GSO
-+	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/swap.h>
++#include <linux/smp_lock.h>
++#include <linux/highmem.h>
++#include <linux/pagemap.h>
++#include <linux/seq_file.h>
++#include <asm/hypervisor.h>
 +
-+	/* Header must be checked, and gso_segs computed. */
-+	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
-+#endif
-+	skb_shinfo(skb)->gso_segs = 0;
++#include <asm/pgalloc.h>
++#include <asm/pgtable.h>
++#include <asm/uaccess.h>
++#include <asm/tlb.h>
++#include <xen/public/privcmd.h>
++#include <xen/interface/xen.h>
++#include <xen/xen_proc.h>
++#include <xen/features.h>
 +
-+	return 0;
-+#else
-+	if (net_ratelimit())
-+		WPRINTK("GSO unsupported by this kernel.\n");
-+	return -EINVAL;
++static struct proc_dir_entry *privcmd_intf;
++static struct proc_dir_entry *capabilities_intf;
++
++#ifndef HAVE_ARCH_PRIVCMD_MMAP
++static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
 +#endif
-+}
 +
-+static int netif_poll(struct net_device *dev, int *pbudget)
++static long privcmd_ioctl(struct file *file,
++			  unsigned int cmd, unsigned long data)
 +{
-+	struct netfront_info *np = netdev_priv(dev);
-+	struct sk_buff *skb;
-+	struct netfront_rx_info rinfo;
-+	struct netif_rx_response *rx = &rinfo.rx;
-+	struct netif_extra_info *extras = rinfo.extras;
-+	RING_IDX i, rp;
-+	struct multicall_entry *mcl;
-+	int work_done, budget, more_to_do = 1;
-+	struct sk_buff_head rxq;
-+	struct sk_buff_head errq;
-+	struct sk_buff_head tmpq;
-+	unsigned long flags;
-+	unsigned int len;
-+	int pages_flipped = 0;
-+	int err;
++	int ret = -ENOSYS;
++	void __user *udata = (void __user *) data;
 +
-+	spin_lock(&np->rx_lock); /* no need for spin_lock_bh() in ->poll() */
++	switch (cmd) {
++	case IOCTL_PRIVCMD_HYPERCALL: {
++		privcmd_hypercall_t hypercall;
++  
++		if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
++			return -EFAULT;
 +
-+	if (unlikely(!netfront_carrier_ok(np))) {
-+		spin_unlock(&np->rx_lock);
-+		return 0;
++#if defined(__i386__)
++		if (hypercall.op >= (PAGE_SIZE >> 5))
++			break;
++		__asm__ __volatile__ (
++			"pushl %%ebx; pushl %%ecx; pushl %%edx; "
++			"pushl %%esi; pushl %%edi; "
++			"movl  8(%%eax),%%ebx ;"
++			"movl 16(%%eax),%%ecx ;"
++			"movl 24(%%eax),%%edx ;"
++			"movl 32(%%eax),%%esi ;"
++			"movl 40(%%eax),%%edi ;"
++			"movl   (%%eax),%%eax ;"
++			"shll $5,%%eax ;"
++			"addl $hypercall_page,%%eax ;"
++			"call *%%eax ;"
++			"popl %%edi; popl %%esi; popl %%edx; "
++			"popl %%ecx; popl %%ebx"
++			: "=a" (ret) : "0" (&hypercall) : "memory" );
++#elif defined (__x86_64__)
++		if (hypercall.op < (PAGE_SIZE >> 5)) {
++			long ign1, ign2, ign3;
++			__asm__ __volatile__ (
++				"movq %8,%%r10; movq %9,%%r8;"
++				"shll $5,%%eax ;"
++				"addq $hypercall_page,%%rax ;"
++				"call *%%rax"
++				: "=a" (ret), "=D" (ign1),
++				  "=S" (ign2), "=d" (ign3)
++				: "0" ((unsigned int)hypercall.op),
++				"1" (hypercall.arg[0]),
++				"2" (hypercall.arg[1]),
++				"3" (hypercall.arg[2]),
++				"g" (hypercall.arg[3]),
++				"g" (hypercall.arg[4])
++				: "r8", "r10", "memory" );
++		}
++#else
++		ret = privcmd_hypercall(&hypercall);
++#endif
 +	}
++	break;
 +
-+	skb_queue_head_init(&rxq);
-+	skb_queue_head_init(&errq);
-+	skb_queue_head_init(&tmpq);
-+
-+	if ((budget = *pbudget) > dev->quota)
-+		budget = dev->quota;
-+	rp = np->rx.sring->rsp_prod;
-+	rmb(); /* Ensure we see queued responses up to 'rp'. */
++	case IOCTL_PRIVCMD_MMAP: {
++#define MMAP_NR_PER_PAGE (int)((PAGE_SIZE-sizeof(struct list_head))/sizeof(privcmd_mmap_entry_t))
++		privcmd_mmap_t mmapcmd;
++		privcmd_mmap_entry_t *msg;
++		privcmd_mmap_entry_t __user *p;
++		struct mm_struct *mm = current->mm;
++		struct vm_area_struct *vma;
++		unsigned long va;
++		int i, rc;
++		LIST_HEAD(pagelist);
++		struct list_head *l,*l2;
 +
-+	i = np->rx.rsp_cons;
-+	work_done = 0;
-+	while ((i != rp) && (work_done < budget)) {
-+		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
-+		memset(extras, 0, sizeof(rinfo.extras));
++		if (!is_initial_xendomain())
++			return -EPERM;
 +
-+		err = xennet_get_responses(np, &rinfo, rp, &tmpq,
-+					   &pages_flipped);
++		if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
++			return -EFAULT;
 +
-+		if (unlikely(err)) {
-+err:	
-+			while ((skb = __skb_dequeue(&tmpq)))
-+				__skb_queue_tail(&errq, skb);
-+			np->stats.rx_errors++;
-+			i = np->rx.rsp_cons;
-+			continue;
-+		}
++		p = mmapcmd.entry;
++		for (i = 0; i < mmapcmd.num;) {
++			int nr = min(mmapcmd.num - i, MMAP_NR_PER_PAGE);
 +
-+		skb = __skb_dequeue(&tmpq);
++			rc = -ENOMEM;
++			l = (struct list_head *) __get_free_page(GFP_KERNEL);
++			if (l == NULL)
++				goto mmap_out;
 +
-+		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
-+			struct netif_extra_info *gso;
-+			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
++			INIT_LIST_HEAD(l);
++			list_add_tail(l, &pagelist);
++			msg = (privcmd_mmap_entry_t*)(l + 1);
 +
-+			if (unlikely(xennet_set_skb_gso(skb, gso))) {
-+				__skb_queue_head(&tmpq, skb);
-+				np->rx.rsp_cons += skb_queue_len(&tmpq);
-+				goto err;
-+			}
++			rc = -EFAULT;
++			if (copy_from_user(msg, p, nr*sizeof(*msg)))
++				goto mmap_out;
++			i += nr;
++			p += nr;
 +		}
 +
-+		NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page;
-+		NETFRONT_SKB_CB(skb)->offset = rx->offset;
++		l = pagelist.next;
++		msg = (privcmd_mmap_entry_t*)(l + 1);
 +
-+		len = rx->status;
-+		if (len > RX_COPY_THRESHOLD)
-+			len = RX_COPY_THRESHOLD;
-+		skb_put(skb, len);
++		down_write(&mm->mmap_sem);
 +
-+		if (rx->status > len) {
-+			skb_shinfo(skb)->frags[0].page_offset =
-+				rx->offset + len;
-+			skb_shinfo(skb)->frags[0].size = rx->status - len;
-+			skb->data_len = rx->status - len;
-+		} else {
-+			skb_shinfo(skb)->frags[0].page = NULL;
-+			skb_shinfo(skb)->nr_frags = 0;
-+		}
++		vma = find_vma(mm, msg->va);
++		rc = -EINVAL;
++		if (!vma || (msg->va != vma->vm_start) ||
++		    !privcmd_enforce_singleshot_mapping(vma))
++			goto mmap_out;
 +
-+		i = xennet_fill_frags(np, skb, &tmpq);
++		va = vma->vm_start;
 +
-+		/*
-+		 * Truesize must approximates the size of true data plus
-+		 * any supervisor overheads. Adding hypervisor overheads
-+		 * has been shown to significantly reduce achievable
-+		 * bandwidth with the default receive buffer size. It is
-+		 * therefore not wise to account for it here.
-+		 *
-+		 * After alloc_skb(RX_COPY_THRESHOLD), truesize is set to
-+		 * RX_COPY_THRESHOLD + the supervisor overheads. Here, we
-+		 * add the size of the data pulled in xennet_fill_frags().
-+		 *
-+		 * We also adjust for any unused space in the main data
-+		 * area by subtracting (RX_COPY_THRESHOLD - len). This is
-+		 * especially important with drivers which split incoming
-+		 * packets into header and data, using only 66 bytes of
-+		 * the main data area (see the e1000 driver for example.)
-+		 * On such systems, without this last adjustement, our
-+		 * achievable receive throughout using the standard receive
-+		 * buffer size was cut by 25%(!!!).
-+		 */
-+		skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
-+		skb->len += skb->data_len;
++		i = 0;
++		list_for_each(l, &pagelist) {
++			int nr = i + min(mmapcmd.num - i, MMAP_NR_PER_PAGE);
 +
-+		/*
-+		 * Old backends do not assert data_validated but we
-+		 * can infer it from csum_blank so test both flags.
-+		 */
-+		if (rx->flags & (NETRXF_data_validated|NETRXF_csum_blank))
-+			skb->ip_summed = CHECKSUM_UNNECESSARY;
-+		else
-+			skb->ip_summed = CHECKSUM_NONE;
-+#ifdef CONFIG_XEN
-+		skb->proto_data_valid = (skb->ip_summed != CHECKSUM_NONE);
-+		skb->proto_csum_blank = !!(rx->flags & NETRXF_csum_blank);
-+#endif
-+		np->stats.rx_packets++;
-+		np->stats.rx_bytes += skb->len;
++			msg = (privcmd_mmap_entry_t*)(l + 1);
++			while (i<nr) {
 +
-+		__skb_queue_tail(&rxq, skb);
++				/* Do not allow range to wrap the address space. */
++				rc = -EINVAL;
++				if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
++				    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -va))
++					goto mmap_out;
++
++				/* Range chunks must be contiguous in va space. */
++				if ((msg->va != va) ||
++				    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
++					goto mmap_out;
++
++				if ((rc = direct_remap_pfn_range(
++					     vma,
++					     msg->va & PAGE_MASK,
++					     msg->mfn,
++					     msg->npages << PAGE_SHIFT,
++					     vma->vm_page_prot,
++					     mmapcmd.dom)) < 0)
++					goto mmap_out;
 +
-+		np->rx.rsp_cons = ++i;
-+		work_done++;
-+	}
++				va += msg->npages << PAGE_SHIFT;
++				msg++;
++				i++;
++			}
++		}
 +
-+	if (pages_flipped) {
-+		/* Some pages are no longer absent... */
-+		balloon_update_driver_allowance(-pages_flipped);
++		rc = 0;
 +
-+		/* Do all the remapping work and M2P updates. */
-+		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+			mcl = np->rx_mcl + pages_flipped;
-+			mcl->op = __HYPERVISOR_mmu_update;
-+			mcl->args[0] = (unsigned long)np->rx_mmu;
-+			mcl->args[1] = pages_flipped;
-+			mcl->args[2] = 0;
-+			mcl->args[3] = DOMID_SELF;
-+			(void)HYPERVISOR_multicall(np->rx_mcl,
-+						   pages_flipped + 1);
-+		}
++	mmap_out:
++		up_write(&mm->mmap_sem);
++		list_for_each_safe(l,l2,&pagelist)
++			free_page((unsigned long)l);
++		ret = rc;
 +	}
++#undef MMAP_NR_PER_PAGE
++	break;
 +
-+	while ((skb = __skb_dequeue(&errq)))
-+		kfree_skb(skb);
++	case IOCTL_PRIVCMD_MMAPBATCH: {
++#define MMAPBATCH_NR_PER_PAGE (unsigned long)((PAGE_SIZE-sizeof(struct list_head))/sizeof(unsigned long))
++		privcmd_mmapbatch_t m;
++		struct mm_struct *mm = current->mm;
++		struct vm_area_struct *vma;
++		xen_pfn_t __user *p;
++		unsigned long addr, *mfn, nr_pages;
++		int i;
++		LIST_HEAD(pagelist);
++		struct list_head *l, *l2;
 +
-+	while ((skb = __skb_dequeue(&rxq)) != NULL) {
-+		struct page *page = NETFRONT_SKB_CB(skb)->page;
-+		void *vaddr = page_address(page);
-+		unsigned offset = NETFRONT_SKB_CB(skb)->offset;
++		if (!is_initial_xendomain())
++			return -EPERM;
 +
-+		memcpy(skb->data, vaddr + offset, skb_headlen(skb));
++		if (copy_from_user(&m, udata, sizeof(m)))
++			return -EFAULT;
 +
-+		if (page != skb_shinfo(skb)->frags[0].page)
-+			__free_page(page);
++		nr_pages = m.num;
++		if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
++			return -EINVAL;
 +
-+		/* Ethernet work: Delayed to here as it peeks the header. */
-+		skb->protocol = eth_type_trans(skb, dev);
++		p = m.arr;
++		for (i = 0; i < nr_pages; ) {
++			int nr = min(nr_pages - i, MMAPBATCH_NR_PER_PAGE);
 +
-+		/* Pass it up. */
-+		netif_receive_skb(skb);
-+		dev->last_rx = jiffies;
-+	}
++			ret = -ENOMEM;
++			l = (struct list_head *)__get_free_page(GFP_KERNEL);
++			if (l == NULL)
++				goto mmapbatch_out;
 +
-+	/* If we get a callback with very few responses, reduce fill target. */
-+	/* NB. Note exponential increase, linear decrease. */
-+	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
-+	     ((3*np->rx_target) / 4)) &&
-+	    (--np->rx_target < np->rx_min_target))
-+		np->rx_target = np->rx_min_target;
++			INIT_LIST_HEAD(l);
++			list_add_tail(l, &pagelist);
 +
-+	network_alloc_rx_buffers(dev);
++			mfn = (unsigned long*)(l + 1);
++			ret = -EFAULT;
++			if (copy_from_user(mfn, p, nr*sizeof(*mfn)))
++				goto mmapbatch_out;
 +
-+	*pbudget   -= work_done;
-+	dev->quota -= work_done;
++			i += nr; p += nr;
++		}
 +
-+	if (work_done < budget) {
-+		local_irq_save(flags);
++		down_write(&mm->mmap_sem);
 +
-+		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
-+		if (!more_to_do)
-+			__netif_rx_complete(dev);
++		vma = find_vma(mm, m.addr);
++		ret = -EINVAL;
++		if (!vma ||
++		    (m.addr != vma->vm_start) ||
++		    ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
++		    !privcmd_enforce_singleshot_mapping(vma)) {
++			up_write(&mm->mmap_sem);
++			goto mmapbatch_out;
++		}
 +
-+		local_irq_restore(flags);
++		p = m.arr;
++		addr = m.addr;
++		i = 0;
++		ret = 0;
++		list_for_each(l, &pagelist) {
++			int nr = i + min(nr_pages - i, MMAPBATCH_NR_PER_PAGE);
++			mfn = (unsigned long *)(l + 1);
++
++			while (i<nr) {
++				if (direct_remap_pfn_range(vma, addr & PAGE_MASK,
++							  *mfn, PAGE_SIZE,
++							  vma->vm_page_prot, m.dom) < 0) {
++					*mfn |= 0xf0000000U;
++					ret++;
++				}
++				mfn++; i++; addr += PAGE_SIZE;
++			}
++		}
++
++		up_write(&mm->mmap_sem);
++		if (ret > 0) {
++			p = m.arr;
++			i = 0;
++			ret = 0;
++			list_for_each(l, &pagelist) {
++				int nr = min(nr_pages - i, MMAPBATCH_NR_PER_PAGE);
++				mfn = (unsigned long *)(l + 1);
++				if (copy_to_user(p, mfn, nr*sizeof(*mfn)))
++					ret = -EFAULT;
++				i += nr; p += nr;
++			}
++		}
++	mmapbatch_out:
++		list_for_each_safe(l,l2,&pagelist)
++			free_page((unsigned long)l);
++#undef MMAPBATCH_NR_PER_PAGE
 +	}
++	break;
 +
-+	spin_unlock(&np->rx_lock);
++	default:
++		ret = -EINVAL;
++		break;
++	}
 +
-+	return more_to_do;
++	return ret;
 +}
 +
-+static void netif_release_tx_bufs(struct netfront_info *np)
++#ifndef HAVE_ARCH_PRIVCMD_MMAP
++static struct page *privcmd_nopage(struct vm_area_struct *vma,
++				   unsigned long address,
++				   int *type)
 +{
-+	struct sk_buff *skb;
-+	int i;
++	return NOPAGE_SIGBUS;
++}
 +
-+	for (i = 1; i <= NET_TX_RING_SIZE; i++) {
-+		if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET)
-+			continue;
++static struct vm_operations_struct privcmd_vm_ops = {
++	.nopage = privcmd_nopage
++};
 +
-+		skb = np->tx_skbs[i];
-+		gnttab_end_foreign_access_ref(
-+			np->grant_tx_ref[i], GNTMAP_readonly);
-+		gnttab_release_grant_reference(
-+			&np->gref_tx_head, np->grant_tx_ref[i]);
-+		np->grant_tx_ref[i] = GRANT_INVALID_REF;
-+		add_id_to_freelist(np->tx_skbs, i);
-+		dev_kfree_skb_irq(skb);
-+	}
++static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
++{
++	/* Unsupported for auto-translate guests. */
++	if (xen_feature(XENFEAT_auto_translated_physmap))
++		return -ENOSYS;
++
++	/* DONTCOPY is essential for Xen as copy_page_range is broken. */
++	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
++	vma->vm_ops = &privcmd_vm_ops;
++	vma->vm_private_data = NULL;
++
++	return 0;
 +}
 +
-+static void netif_release_rx_bufs_flip(struct netfront_info *np)
++static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
 +{
-+	struct mmu_update      *mmu = np->rx_mmu;
-+	struct multicall_entry *mcl = np->rx_mcl;
-+	struct sk_buff_head free_list;
-+	struct sk_buff *skb;
-+	unsigned long mfn;
-+	int xfer = 0, noxfer = 0, unused = 0;
-+	int id, ref, rc;
++	return (xchg(&vma->vm_private_data, (void *)1) == NULL);
++}
++#endif
 +
-+	skb_queue_head_init(&free_list);
++static const struct file_operations privcmd_file_ops = {
++	.unlocked_ioctl = privcmd_ioctl,
++	.mmap = privcmd_mmap,
++};
 +
-+	spin_lock_bh(&np->rx_lock);
++static int capabilities_read(char *page, char **start, off_t off,
++			     int count, int *eof, void *data)
++{
++	int len = 0;
++	*page = 0;
 +
-+	for (id = 0; id < NET_RX_RING_SIZE; id++) {
-+		if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) {
-+			unused++;
-+			continue;
-+		}
++	if (is_initial_xendomain())
++		len = sprintf(page, "control_d\n");
 +
-+		skb = np->rx_skbs[id];
-+		mfn = gnttab_end_foreign_transfer_ref(ref);
-+		gnttab_release_grant_reference(&np->gref_rx_head, ref);
-+		np->grant_rx_ref[id] = GRANT_INVALID_REF;
-+		add_id_to_freelist(np->rx_skbs, id);
++	*eof = 1;
++	return len;
++}
 +
-+		if (0 == mfn) {
-+			struct page *page = skb_shinfo(skb)->frags[0].page;
-+			balloon_release_driver_page(page);
-+			skb_shinfo(skb)->nr_frags = 0;
-+			dev_kfree_skb(skb);
-+			noxfer++;
-+			continue;
-+		}
++static int __init privcmd_init(void)
++{
++	if (!is_running_on_xen())
++		return -ENODEV;
 +
-+		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+			/* Remap the page. */
-+			struct page *page = skb_shinfo(skb)->frags[0].page;
-+			unsigned long pfn = page_to_pfn(page);
-+			void *vaddr = page_address(page);
++	privcmd_intf = create_xen_proc_entry("privcmd", 0400);
++	if (privcmd_intf != NULL)
++		privcmd_intf->proc_fops = &privcmd_file_ops;
 +
-+			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
-+						pfn_pte_ma(mfn, PAGE_KERNEL),
-+						0);
-+			mcl++;
-+			mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
-+				| MMU_MACHPHYS_UPDATE;
-+			mmu->val = pfn;
-+			mmu++;
++	capabilities_intf = create_xen_proc_entry("capabilities", 0400);
++	if (capabilities_intf != NULL)
++		capabilities_intf->read_proc = capabilities_read;
 +
-+			set_phys_to_machine(pfn, mfn);
-+		}
-+		__skb_queue_tail(&free_list, skb);
-+		xfer++;
-+	}
++	return 0;
++}
 +
-+	DPRINTK("%s: %d xfer, %d noxfer, %d unused\n",
-+		__FUNCTION__, xfer, noxfer, unused);
++__initcall(privcmd_init);
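
For reference, user space drives the MMAPBATCH path above roughly as
follows. This is a minimal sketch, not part of the patch: the header
path is an assumption (libxc ships its own copy of the privcmd ioctl
definitions), and error handling is abbreviated.

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <xen/public/privcmd.h>	/* assumed header location */

    /* Map nr foreign frames of domain dom through /proc/xen/privcmd;
     * mfns[] holds the machine frame numbers to map. */
    static void *map_foreign(int fd, int nr, domid_t dom, xen_pfn_t *mfns)
    {
            void *addr = mmap(NULL, nr * getpagesize(),
                              PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            privcmd_mmapbatch_t m = {
                    .num  = nr,
                    .dom  = dom,
                    .addr = (unsigned long)addr,
                    .arr  = mfns,
            };

            if (addr == MAP_FAILED)
                    return NULL;
            /* On partial failure the handler above marks bad entries by
             * OR-ing 0xf0000000 into the corresponding mfn. */
            if (ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH, &m) < 0)
                    return NULL;
            return addr;
    }
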
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/scsiback/Makefile
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/scsiback/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,4 @@
++obj-$(CONFIG_XEN_SCSI_BACKEND) := xen-scsibk.o
 +
-+	if (xfer) {
-+		/* Some pages are no longer absent... */
-+		balloon_update_driver_allowance(-xfer);
++xen-scsibk-y	:= interface.o scsiback.o xenbus.o translate.o emulate.o
 +
-+		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+			/* Do all the remapping work and M2P updates. */
-+			mcl->op = __HYPERVISOR_mmu_update;
-+			mcl->args[0] = (unsigned long)np->rx_mmu;
-+			mcl->args[1] = mmu - np->rx_mmu;
-+			mcl->args[2] = 0;
-+			mcl->args[3] = DOMID_SELF;
-+			mcl++;
-+			rc = HYPERVISOR_multicall_check(
-+				np->rx_mcl, mcl - np->rx_mcl, NULL);
-+			BUG_ON(rc);
-+		}
-+	}
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/scsiback/common.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/scsiback/common.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,181 @@
++/*
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * Based on the blkback driver code.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
 +
-+	while ((skb = __skb_dequeue(&free_list)) != NULL)
-+		dev_kfree_skb(skb);
++#ifndef __SCSIIF__BACKEND__COMMON_H__
++#define __SCSIIF__BACKEND__COMMON_H__
 +
-+	spin_unlock_bh(&np->rx_lock);
-+}
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/wait.h>
++#include <linux/sched.h>
++#include <linux/kthread.h>
++#include <linux/blkdev.h>
++#include <linux/list.h>
++#include <linux/kthread.h>
++#include <scsi/scsi.h>
++#include <scsi/scsi_cmnd.h>
++#include <scsi/scsi_host.h>
++#include <scsi/scsi_device.h>
++#include <scsi/scsi_dbg.h>
++#include <scsi/scsi_eh.h>
++#include <asm/io.h>
++#include <asm/setup.h>
++#include <asm/pgalloc.h>
++#include <asm/delay.h>
++#include <xen/evtchn.h>
++#include <asm/hypervisor.h>
++#include <xen/gnttab.h>
++#include <xen/driver_util.h>
++#include <xen/xenbus.h>
++#include <xen/interface/io/ring.h>
++#include <xen/interface/grant_table.h>
++#include <xen/interface/io/vscsiif.h>
 +
-+static void netif_release_rx_bufs_copy(struct netfront_info *np)
-+{
-+	struct sk_buff *skb;
-+	int i, ref;
-+	int busy = 0, inuse = 0;
 +
-+	spin_lock_bh(&np->rx_lock);
++#define DPRINTK(_f, _a...)			\
++	pr_debug("(file=%s, line=%d) " _f,	\
++		 __FILE__ , __LINE__ , ## _a )
 +
-+	for (i = 0; i < NET_RX_RING_SIZE; i++) {
-+		ref = np->grant_rx_ref[i];
++struct ids_tuple {
++	unsigned int hst;		/* host    */
++	unsigned int chn;		/* channel */
++	unsigned int tgt;		/* target  */
++	unsigned int lun;		/* LUN     */
++};
 +
-+		if (ref == GRANT_INVALID_REF)
-+			continue;
++struct v2p_entry {
++	struct ids_tuple v;		/* translate from */
++	struct scsi_device *sdev;	/* translate to   */
++	struct list_head l;
++};
 +
-+		inuse++;
++struct vscsibk_info {
++	struct xenbus_device *dev;
 +
-+		skb = np->rx_skbs[i];
++	domid_t domid;
++	unsigned int evtchn;
++	unsigned int irq;
 +
-+		if (!gnttab_end_foreign_access_ref(ref, 0))
-+		{
-+			busy++;
-+			continue;
-+		}
++	struct vscsiif_back_ring  ring;
++	struct vm_struct *ring_area;
++	grant_handle_t shmem_handle;
++	grant_ref_t shmem_ref;
 +
-+		gnttab_release_grant_reference(&np->gref_rx_head, ref);
-+		np->grant_rx_ref[i] = GRANT_INVALID_REF;
-+		add_id_to_freelist(np->rx_skbs, i);
++	spinlock_t ring_lock;
++	atomic_t nr_unreplied_reqs;
 +
-+		skb_shinfo(skb)->nr_frags = 0;
-+		dev_kfree_skb(skb);
-+	}
++	spinlock_t v2p_lock;
++	struct list_head v2p_entry_lists;
 +
-+	if (busy)
-+		DPRINTK("%s: Unable to release %d of %d inuse grant references out of %ld total.\n",
-+			__FUNCTION__, busy, inuse, NET_RX_RING_SIZE);
++	struct task_struct *kthread;
++	wait_queue_head_t waiting_to_free;
++	wait_queue_head_t wq;
++	unsigned int waiting_reqs;
++	struct page **mmap_pages;
 +
-+	spin_unlock_bh(&np->rx_lock);
-+}
++};
 +
-+static int network_close(struct net_device *dev)
-+{
-+	struct netfront_info *np = netdev_priv(dev);
-+	netif_stop_queue(np->netdev);
-+	return 0;
-+}
++typedef struct {
++	unsigned char act;
++	struct vscsibk_info *info;
++	struct scsi_device *sdev;
 +
++	uint16_t rqid;
++	
++	uint8_t nr_segments;
++	uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE];
++	uint8_t cmd_len;
 +
-+static struct net_device_stats *network_get_stats(struct net_device *dev)
-+{
-+	struct netfront_info *np = netdev_priv(dev);
-+	return &np->stats;
-+}
++	uint8_t sc_data_direction;
++	uint16_t timeout_per_command;
++	
++	uint32_t request_bufflen;
++	struct scatterlist *sgl;
++	grant_ref_t gref[VSCSIIF_SG_TABLESIZE];
++
++	int32_t rslt;
++	uint32_t resid;
++	uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
 +
-+static int xennet_change_mtu(struct net_device *dev, int mtu)
-+{
-+	int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
++	struct list_head free_list;
++} pending_req_t;
 +
-+	if (mtu > max)
-+		return -EINVAL;
-+	dev->mtu = mtu;
-+	return 0;
-+}
 +
-+static int xennet_set_sg(struct net_device *dev, u32 data)
-+{
-+	if (data) {
-+		struct netfront_info *np = netdev_priv(dev);
-+		int val;
 +
-+		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
-+				 "%d", &val) < 0)
-+			val = 0;
-+		if (!val)
-+			return -ENOSYS;
-+	} else if (dev->mtu > ETH_DATA_LEN)
-+		dev->mtu = ETH_DATA_LEN;
++#define scsiback_get(_b) (atomic_inc(&(_b)->nr_unreplied_reqs))
++#define scsiback_put(_b)				\
++	do {						\
++		if (atomic_dec_and_test(&(_b)->nr_unreplied_reqs))	\
++			wake_up(&(_b)->waiting_to_free);\
++	} while (0)
 +
-+	return ethtool_op_set_sg(dev, data);
-+}
++#define VSCSIIF_TIMEOUT		(900*HZ)
 +
-+static int xennet_set_tso(struct net_device *dev, u32 data)
-+{
-+#ifdef HAVE_TSO
-+	if (data) {
-+		struct netfront_info *np = netdev_priv(dev);
-+		int val;
 +
-+		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
-+				 "feature-gso-tcpv4", "%d", &val) < 0)
-+			val = 0;
-+		if (!val)
-+			return -ENOSYS;
-+	}
++irqreturn_t scsiback_intr(int, void *, struct pt_regs *);
++int scsiback_init_sring(struct vscsibk_info *info,
++		unsigned long ring_ref, unsigned int evtchn);
++int scsiback_schedule(void *data);
 +
-+	return ethtool_op_set_tso(dev, data);
-+#else
-+	return -ENOSYS;
-+#endif
-+}
 +
-+static void xennet_set_features(struct net_device *dev)
-+{
-+	dev_disable_gso_features(dev);
-+	xennet_set_sg(dev, 0);
++struct vscsibk_info *vscsibk_info_alloc(domid_t domid);
++void scsiback_free(struct vscsibk_info *info);
++void scsiback_disconnect(struct vscsibk_info *info);
++int __init scsiback_interface_init(void);
++void scsiback_interface_exit(void);
++int scsiback_xenbus_init(void);
++void scsiback_xenbus_unregister(void);
 +
-+	/* We need checksum offload to enable scatter/gather and TSO. */
-+	if (!(dev->features & NETIF_F_IP_CSUM))
-+		return;
++void scsiback_init_translation_table(struct vscsibk_info *info);
 +
-+	if (xennet_set_sg(dev, 1))
-+		return;
++int scsiback_add_translation_entry(struct vscsibk_info *info,
++			struct scsi_device *sdev, struct ids_tuple *v);
 +
-+	/* Before 2.6.9 TSO seems to be unreliable so do not enable it
-+	 * on older kernels.
-+	 */
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
-+	xennet_set_tso(dev, 1);
-+#endif
++int scsiback_del_translation_entry(struct vscsibk_info *info,
++				struct ids_tuple *v);
++struct scsi_device *scsiback_do_translation(struct vscsibk_info *info,
++			struct ids_tuple *v);
++void scsiback_release_translation_entry(struct vscsibk_info *info);
 +
-+}
 +
-+static int network_connect(struct net_device *dev)
-+{
-+	struct netfront_info *np = netdev_priv(dev);
-+	int i, requeue_idx, err;
-+	struct sk_buff *skb;
-+	grant_ref_t ref;
-+	netif_rx_request_t *req;
-+	unsigned int feature_rx_copy, feature_rx_flip;
++void scsiback_cmd_exec(pending_req_t *pending_req);
++void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
++			uint32_t resid, pending_req_t *pending_req);
++void scsiback_fast_flush_area(pending_req_t *req);
 +
-+	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
-+			   "feature-rx-copy", "%u", &feature_rx_copy);
-+	if (err != 1)
-+		feature_rx_copy = 0;
-+	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
-+			   "feature-rx-flip", "%u", &feature_rx_flip);
-+	if (err != 1)
-+		feature_rx_flip = 1;
++void scsiback_rsp_emulation(pending_req_t *pending_req);
++void scsiback_req_emulation_or_cmdexec(pending_req_t *pending_req);
++void scsiback_emulation_init(void);
 +
-+	/*
-+	 * Copy packets on receive path if:
-+	 *  (a) This was requested by user, and the backend supports it; or
-+	 *  (b) Flipping was requested, but this is unsupported by the backend.
-+	 */
-+	np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) ||
-+				(MODPARM_rx_flip && !feature_rx_flip));
 +
-+	err = talk_to_backend(np->xbdev, np);
-+	if (err)
-+		return err;
++#endif /* __SCSIIF__BACKEND__COMMON_H__ */
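
The scsiback_get()/scsiback_put() pair above keeps a count of in-flight
requests: a reference is taken when a request is handed to the SCSI layer
and dropped when its response goes out, so teardown can wait on
waiting_to_free until the ring has drained. A minimal sketch of the
intended usage (handle_request() is a hypothetical caller, not part of
this patch):

    static void handle_request(struct vscsibk_info *info,
                               pending_req_t *pending_req)
    {
            scsiback_get(info);	/* one reference per in-flight request */
            scsiback_req_emulation_or_cmdexec(pending_req);
            /* The completion path ends with scsiback_put(info), which
             * wakes info->waiting_to_free once nr_unreplied_reqs drops
             * to zero. */
    }
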
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/scsiback/emulate.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/scsiback/emulate.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,454 @@
++/*
++ * Xen SCSI backend driver
++ *
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
 +
-+	xennet_set_features(dev);
++#include <scsi/scsi.h>
++#include <scsi/scsi_cmnd.h>
++#include <scsi/scsi_device.h>
++#include "common.h"
 +
-+	DPRINTK("device %s has %sing receive path.\n",
-+		dev->name, np->copying_receiver ? "copy" : "flipp");
++/* Following SCSI commands are not defined in scsi/scsi.h */
++#define EXTENDED_COPY		0x83	/* EXTENDED COPY command        */
++#define REPORT_ALIASES		0xa3	/* REPORT ALIASES command       */
++#define CHANGE_ALIASES		0xa4	/* CHANGE ALIASES command       */
++#define SET_PRIORITY		0xa4	/* SET PRIORITY command         */
 +
-+	spin_lock_bh(&np->rx_lock);
-+	spin_lock_irq(&np->tx_lock);
 +
-+	/*
-+	 * Recovery procedure:
-+	 *  NB. Freelist index entries are always going to be less than
-+	 *  PAGE_OFFSET, whereas pointers to skbs will always be equal or
-+	 *  greater than PAGE_OFFSET: we use this property to distinguish
-+	 *  them.
-+	 */
++/*
++  Bitmap controlling emulation on a per-op_code basis.
++  (Bits 3 to 7 are reserved for future use.)
++*/
++#define VSCSIIF_NEED_CMD_EXEC		0x01	/* If this bit is set, cmd exec	*/
++						/* is required.			*/
++#define VSCSIIF_NEED_EMULATE_REQBUF	0x02	/* If this bit is set, the	*/
++						/* request buffer needs		*/
++						/* emulation before cmd exec.	*/
++#define VSCSIIF_NEED_EMULATE_RSPBUF	0x04	/* If this bit is set, the	*/
++						/* response buffer needs	*/
++						/* emulation after cmd exec.	*/
++
++/* Additional Sense Code (ASC) used */
++#define NO_ADDITIONAL_SENSE		0x0
++#define LOGICAL_UNIT_NOT_READY		0x4
++#define UNRECOVERED_READ_ERR		0x11
++#define PARAMETER_LIST_LENGTH_ERR	0x1a
++#define INVALID_OPCODE			0x20
++#define ADDR_OUT_OF_RANGE		0x21
++#define INVALID_FIELD_IN_CDB		0x24
++#define INVALID_FIELD_IN_PARAM_LIST	0x26
++#define POWERON_RESET			0x29
++#define SAVING_PARAMS_UNSUP		0x39
++#define THRESHOLD_EXCEEDED		0x5d
++#define LOW_POWER_COND_ON		0x5e
 +
-+	/* Step 1: Discard all pending TX packet fragments. */
-+	netif_release_tx_bufs(np);
 +
-+	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
-+	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
-+		if (!np->rx_skbs[i])
-+			continue;
 +
-+		skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
-+		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
-+		req = RING_GET_REQUEST(&np->rx, requeue_idx);
++/* Number of SCSI op_codes	*/
++#define VSCSI_MAX_SCSI_OP_CODE		256
++static unsigned char bitmap[VSCSI_MAX_SCSI_OP_CODE];
 +
-+		if (!np->copying_receiver) {
-+			gnttab_grant_foreign_transfer_ref(
-+				ref, np->xbdev->otherend_id,
-+				page_to_pfn(skb_shinfo(skb)->frags->page));
-+		} else {
-+			gnttab_grant_foreign_access_ref(
-+				ref, np->xbdev->otherend_id,
-+				pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->
-+						       frags->page)),
-+				0);
-+		}
-+		req->gref = ref;
-+		req->id   = requeue_idx;
 +
-+		requeue_idx++;
-+	}
 +
-+	np->rx.req_prod_pvt = requeue_idx;
++/*
++  Emulation routines for each SCSI op_code.
++*/
++static void (*pre_function[VSCSI_MAX_SCSI_OP_CODE])(pending_req_t *, void *);
++static void (*post_function[VSCSI_MAX_SCSI_OP_CODE])(pending_req_t *, void *);
 +
-+	/*
-+	 * Step 3: All public and private state should now be sane.  Get
-+	 * ready to start sending and receiving packets and give the driver
-+	 * domain a kick because we've probably just requeued some
-+	 * packets.
-+	 */
-+	netfront_carrier_on(np);
-+	notify_remote_via_irq(np->irq);
-+	network_tx_buf_gc(dev);
-+	network_alloc_rx_buffers(dev);
 +
-+	spin_unlock_irq(&np->tx_lock);
-+	spin_unlock_bh(&np->rx_lock);
++static const int check_condition_result =
++		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 +
-+	return 0;
++static void scsiback_mk_sense_buffer(uint8_t *data, uint8_t key,
++			uint8_t asc, uint8_t asq)
++{
++	data[0] = 0x70;  /* fixed, current */
++	data[2] = key;
++	data[7] = 0xa;	  /* implies 18 byte sense buffer */
++	data[12] = asc;
++	data[13] = asq;
 +}
 +
-+static void netif_uninit(struct net_device *dev)
++static void resp_not_supported_cmd(pending_req_t *pending_req, void *data)
 +{
-+	struct netfront_info *np = netdev_priv(dev);
-+	netif_release_tx_bufs(np);
-+	if (np->copying_receiver)
-+		netif_release_rx_bufs_copy(np);
-+	else
-+		netif_release_rx_bufs_flip(np);
-+	gnttab_free_grant_references(np->gref_tx_head);
-+	gnttab_free_grant_references(np->gref_rx_head);
++	scsiback_mk_sense_buffer(pending_req->sense_buffer, ILLEGAL_REQUEST,
++		INVALID_OPCODE, 0);
++	pending_req->resid = 0;
++	pending_req->rslt  = check_condition_result;
 +}
 +
-+static struct ethtool_ops network_ethtool_ops =
-+{
-+	.get_tx_csum = ethtool_op_get_tx_csum,
-+	.set_tx_csum = ethtool_op_set_tx_csum,
-+	.get_sg = ethtool_op_get_sg,
-+	.set_sg = xennet_set_sg,
-+	.get_tso = ethtool_op_get_tso,
-+	.set_tso = xennet_set_tso,
-+	.get_link = ethtool_op_get_link,
-+};
 +
-+#ifdef CONFIG_SYSFS
-+static ssize_t show_rxbuf_min(struct class_device *cd, char *buf)
++static int __copy_to_sg(struct scatterlist *sg, unsigned int nr_sg,
++	       void *buf, unsigned int buflen)
 +{
-+	struct net_device *netdev = container_of(cd, struct net_device,
-+						 class_dev);
-+	struct netfront_info *info = netdev_priv(netdev);
++	void *from = buf;
++	void *to;
++	unsigned int from_rest = buflen;
++	unsigned int to_capa;
++	unsigned int copy_size = 0;
++	unsigned int i;
++	unsigned long pfn;
 +
-+	return sprintf(buf, "%u\n", info->rx_min_target);
++	for (i = 0; i < nr_sg; i++) {
++		if (sg->page == NULL) {
++			printk(KERN_WARNING "%s: NULL page in "
++			       "scatterlist\n", __FUNCTION__);
++			return -ENOMEM;
++		}
++
++		to_capa  = sg->length;
++		copy_size = min_t(unsigned int, to_capa, from_rest);
++
++		pfn = page_to_pfn(sg->page);
++		to = pfn_to_kaddr(pfn) + (sg->offset);
++		memcpy(to, from, copy_size);
++
++		from_rest  -= copy_size;
++		if (from_rest == 0) {
++			return 0;
++		}
++		
++		sg++;
++		from += copy_size;
++	}
++
++	printk(KERN_WARNING "%s: no space in scatterlist\n",
++	       __FUNCTION__);
++	return -ENOMEM;
 +}
 +
-+static ssize_t store_rxbuf_min(struct class_device *cd,
-+			       const char *buf, size_t len)
++static int __copy_from_sg(struct scatterlist *sg, unsigned int nr_sg,
++		 void *buf, unsigned int buflen)
 +{
-+	struct net_device *netdev = container_of(cd, struct net_device,
-+						 class_dev);
-+	struct netfront_info *np = netdev_priv(netdev);
-+	char *endp;
-+	unsigned long target;
-+
-+	if (!capable(CAP_NET_ADMIN))
-+		return -EPERM;
++	void *from;
++	void *to = buf;
++	unsigned int from_rest;
++	unsigned int to_capa = buflen;
++	unsigned int copy_size;
++	unsigned int i;
++	unsigned long pfn;
 +
-+	target = simple_strtoul(buf, &endp, 0);
-+	if (endp == buf)
-+		return -EBADMSG;
++	for (i = 0; i < nr_sg; i++) {
++		if (sg->page == NULL) {
++			printk(KERN_WARNING "%s: NULL page in "
++			       "scatterlist\n", __FUNCTION__);
++			return -ENOMEM;
++		}
 +
-+	if (target < RX_MIN_TARGET)
-+		target = RX_MIN_TARGET;
-+	if (target > RX_MAX_TARGET)
-+		target = RX_MAX_TARGET;
++		from_rest = sg->length;
++		if ((from_rest > 0) && (to_capa < from_rest)) {
++			printk(KERN_WARNING
++			       "%s: no space in destination buffer\n",
++			       __FUNCTION__);
++			return -ENOMEM;
++		}
++		copy_size = from_rest;
 +
-+	spin_lock_bh(&np->rx_lock);
-+	if (target > np->rx_max_target)
-+		np->rx_max_target = target;
-+	np->rx_min_target = target;
-+	if (target > np->rx_target)
-+		np->rx_target = target;
++		pfn = page_to_pfn(sg->page);
++		from = pfn_to_kaddr(pfn) + (sg->offset);
++		memcpy(to, from, copy_size);
 +
-+	network_alloc_rx_buffers(netdev);
++		to_capa  -= copy_size;
++		
++		sg++;
++		to += copy_size;
++	}
 +
-+	spin_unlock_bh(&np->rx_lock);
-+	return len;
++	return 0;
 +}
 +
-+static ssize_t show_rxbuf_max(struct class_device *cd, char *buf)
++static int __nr_luns_under_host(struct vscsibk_info *info)
 +{
-+	struct net_device *netdev = container_of(cd, struct net_device,
-+						 class_dev);
-+	struct netfront_info *info = netdev_priv(netdev);
++	struct v2p_entry *entry;
++	struct list_head *head = &(info->v2p_entry_lists);
++	unsigned long flags;
++	int lun_cnt = 0;
 +
-+	return sprintf(buf, "%u\n", info->rx_max_target);
++	spin_lock_irqsave(&info->v2p_lock, flags);
++	list_for_each_entry(entry, head, l) {
++		lun_cnt++;
++	}
++	spin_unlock_irqrestore(&info->v2p_lock, flags);
++
++	return lun_cnt;
 +}
 +
-+static ssize_t store_rxbuf_max(struct class_device *cd,
-+			       const char *buf, size_t len)
++
++/* REPORT LUNS defines */
++#define VSCSI_REPORT_LUNS_HEADER	8
++#define VSCSI_REPORT_LUNS_RETRY		3
++
++/* adapted from scsi_debug.c:resp_report_luns() */
++static void __report_luns(pending_req_t *pending_req, void *data)
 +{
-+	struct net_device *netdev = container_of(cd, struct net_device,
-+						 class_dev);
-+	struct netfront_info *np = netdev_priv(netdev);
-+	char *endp;
-+	unsigned long target;
++	struct vscsibk_info *info   = pending_req->info;
++	unsigned int        channel = pending_req->sdev->channel;
++	unsigned int        target  = pending_req->sdev->id;
++	unsigned int        nr_seg  = pending_req->nr_segments;
++	unsigned char *cmd = (unsigned char *)pending_req->cmnd;
++	
++	unsigned char *buff = NULL;
++	unsigned int alloc_len;
++	unsigned int alloc_luns = 0;
++	unsigned int req_bufflen = 0;
++	unsigned int actual_len = 0;
++	unsigned int retry_cnt = 0;
++	int select_report = (int)cmd[2];
++	int i, lun_cnt = 0, lun, upper, err = 0;
++	
++	struct v2p_entry *entry;
++	struct list_head *head = &(info->v2p_entry_lists);
++	unsigned long flags;
++	
++	struct scsi_lun *one_lun;
 +
-+	if (!capable(CAP_NET_ADMIN))
-+		return -EPERM;
++	req_bufflen = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
++	if ((req_bufflen < 4) || (select_report != 0))
++		goto fail;
 +
-+	target = simple_strtoul(buf, &endp, 0);
-+	if (endp == buf)
-+		return -EBADMSG;
++	alloc_luns = __nr_luns_under_host(info);
++	alloc_len  = sizeof(struct scsi_lun) * alloc_luns
++				+ VSCSI_REPORT_LUNS_HEADER;
++retry:
++	if ((buff = kmalloc(alloc_len, GFP_KERNEL)) == NULL) {
++		printk(KERN_ERR "scsiback:%s kmalloc err\n", __FUNCTION__);
++		goto fail;
++	}
 +
-+	if (target < RX_MIN_TARGET)
-+		target = RX_MIN_TARGET;
-+	if (target > RX_MAX_TARGET)
-+		target = RX_MAX_TARGET;
++	memset(buff, 0, alloc_len);
 +
-+	spin_lock_bh(&np->rx_lock);
-+	if (target < np->rx_min_target)
-+		np->rx_min_target = target;
-+	np->rx_max_target = target;
-+	if (target < np->rx_target)
-+		np->rx_target = target;
++	one_lun = (struct scsi_lun *) &buff[8];
++	spin_lock_irqsave(&info->v2p_lock, flags);
++	list_for_each_entry(entry, head, l) {
++		if ((entry->v.chn == channel) &&
++		    (entry->v.tgt == target)) {
++			
++			/* check overflow */
++			if (lun_cnt >= alloc_luns) {
++				spin_unlock_irqrestore(&info->v2p_lock,
++							flags);
++
++				if (retry_cnt < VSCSI_REPORT_LUNS_RETRY) {
++					retry_cnt++;
++					kfree(buff);
++					goto retry;
++				}
 +
-+	network_alloc_rx_buffers(netdev);
++				goto fail;
++			}
 +
-+	spin_unlock_bh(&np->rx_lock);
-+	return len;
-+}
++			lun = entry->v.lun;
++			upper = (lun >> 8) & 0x3f;
++			if (upper)
++				one_lun[lun_cnt].scsi_lun[0] = upper;
++			one_lun[lun_cnt].scsi_lun[1] = lun & 0xff;
++			lun_cnt++;
++		}
++	}
 +
-+static ssize_t show_rxbuf_cur(struct class_device *cd, char *buf)
-+{
-+	struct net_device *netdev = container_of(cd, struct net_device,
-+						 class_dev);
-+	struct netfront_info *info = netdev_priv(netdev);
++	spin_unlock_irqrestore(&info->v2p_lock, flags);
 +
-+	return sprintf(buf, "%u\n", info->rx_target);
++	buff[2] = ((sizeof(struct scsi_lun) * lun_cnt) >> 8) & 0xff;
++	buff[3] = (sizeof(struct scsi_lun) * lun_cnt) & 0xff;
++
++	actual_len = lun_cnt * sizeof(struct scsi_lun) 
++				+ VSCSI_REPORT_LUNS_HEADER;
++	req_bufflen = 0;
++	for (i = 0; i < nr_seg; i++)
++		req_bufflen += pending_req->sgl[i].length;
++
++	err = __copy_to_sg(pending_req->sgl, nr_seg, buff, 
++				min(req_bufflen, actual_len));
++	if (err)
++		goto fail;
++
++	memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
++	pending_req->rslt = 0x00;
++	pending_req->resid = req_bufflen - min(req_bufflen, actual_len);
++
++	kfree(buff);
++	return;
++
++fail:
++	scsiback_mk_sense_buffer(pending_req->sense_buffer, ILLEGAL_REQUEST,
++		INVALID_FIELD_IN_CDB, 0);
++	pending_req->rslt  = check_condition_result;
++	pending_req->resid = 0;
++	kfree(buff);
++	return;
 +}
 +
-+static const struct class_device_attribute xennet_attrs[] = {
-+	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
-+	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
-+	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
-+};
 +
-+static int xennet_sysfs_addif(struct net_device *netdev)
++
++int __pre_do_emulation(pending_req_t *pending_req, void *data)
 +{
-+	int i;
-+	int error = 0;
++	uint8_t op_code = pending_req->cmnd[0];
 +
-+	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
-+		error = class_device_create_file(&netdev->class_dev, 
-+						 &xennet_attrs[i]);
-+		if (error)
-+			goto fail;
++	if ((bitmap[op_code] & VSCSIIF_NEED_EMULATE_REQBUF) &&
++	    pre_function[op_code] != NULL) {
++		pre_function[op_code](pending_req, data);
 +	}
-+	return 0;
 +
-+ fail:
-+	while (--i >= 0)
-+		class_device_remove_file(&netdev->class_dev,
-+					 &xennet_attrs[i]);
-+	return error;
++	/*
++	 * Returns 0 if no native driver call is needed (the caller should
++	 * respond immediately), or 1 if the command is not emulated or the
++	 * native driver must be called after the request buffer has been
++	 * modified.
++	 */
++	return !!(bitmap[op_code] & VSCSIIF_NEED_CMD_EXEC);
 +}
 +
-+static void xennet_sysfs_delif(struct net_device *netdev)
++void scsiback_rsp_emulation(pending_req_t *pending_req)
 +{
-+	int i;
++	uint8_t op_code = pending_req->cmnd[0];
 +
-+	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
-+		class_device_remove_file(&netdev->class_dev,
-+					 &xennet_attrs[i]);
++	if ((bitmap[op_code] & VSCSIIF_NEED_EMULATE_RSPBUF) &&
++	    post_function[op_code] != NULL) {
++		post_function[op_code](pending_req, NULL);
 +	}
-+}
 +
-+#endif /* CONFIG_SYSFS */
++	return;
++}
 +
 +
-+/*
-+ * Nothing to do here. Virtual interface is point-to-point and the
-+ * physical interface is probably promiscuous anyway.
-+ */
-+static void network_set_multicast_list(struct net_device *dev)
++void scsiback_req_emulation_or_cmdexec(pending_req_t *pending_req)
 +{
++	if (__pre_do_emulation(pending_req, NULL)) {
++		scsiback_cmd_exec(pending_req);
++	}
++	else {
++		scsiback_fast_flush_area(pending_req);
++		scsiback_do_resp_with_sense(pending_req->sense_buffer,
++		  pending_req->rslt, pending_req->resid, pending_req);
++	}
 +}
 +
-+static struct net_device * __devinit create_netdev(struct xenbus_device *dev)
++
++/*
++  The functions below are not meant to be customized.
++*/
++void scsiback_emulation_init(void)
 +{
-+	int i, err = 0;
-+	struct net_device *netdev = NULL;
-+	struct netfront_info *np = NULL;
++	int i;
 +
-+	netdev = alloc_etherdev(sizeof(struct netfront_info));
-+	if (!netdev) {
-+		printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
-+		       __FUNCTION__);
-+		return ERR_PTR(-ENOMEM);
++	/* Initialize to default state */
++	for (i = 0; i < VSCSI_MAX_SCSI_OP_CODE; i++) {
++		bitmap[i]        = (VSCSIIF_NEED_EMULATE_REQBUF | 
++					VSCSIIF_NEED_EMULATE_RSPBUF);
++		pre_function[i]  = resp_not_supported_cmd;
++		post_function[i] = NULL;
++		/* By default every op_code is rejected: the request is
++		   emulated by resp_not_supported_cmd() and the native
++		   driver is never called. */
 +	}
 +
-+	np                   = netdev_priv(netdev);
-+	np->xbdev            = dev;
++	/*
++	  Register the appropriate functions below as needed.
++	  (See scsi/scsi.h for the SCSI op_code definitions.)
++	*/
 +
-+	spin_lock_init(&np->tx_lock);
-+	spin_lock_init(&np->rx_lock);
++	/*
++	  The following commands are not emulated; they are passed
++	  straight to the native driver.
++	*/
++	bitmap[TEST_UNIT_READY] = VSCSIIF_NEED_CMD_EXEC;
++	pre_function[TEST_UNIT_READY] = NULL;
++	post_function[TEST_UNIT_READY] = NULL;
 +
-+	skb_queue_head_init(&np->rx_batch);
-+	np->rx_target     = RX_DFL_MIN_TARGET;
-+	np->rx_min_target = RX_DFL_MIN_TARGET;
-+	np->rx_max_target = RX_MAX_TARGET;
++	bitmap[REZERO_UNIT] = VSCSIIF_NEED_CMD_EXEC;
++	pre_function[REZERO_UNIT] = NULL;
++	post_function[REZERO_UNIT] = NULL;
 +
-+	init_timer(&np->rx_refill_timer);
-+	np->rx_refill_timer.data = (unsigned long)netdev;
-+	np->rx_refill_timer.function = rx_refill_timeout;
++	bitmap[REQUEST_SENSE] = VSCSIIF_NEED_CMD_EXEC;
++	pre_function[REQUEST_SENSE] = NULL;
++	post_function[REQUEST_SENSE] = NULL;
 +
-+	/* Initialise {tx,rx}_skbs as a free chain containing every entry. */
-+	for (i = 0; i <= NET_TX_RING_SIZE; i++) {
-+		np->tx_skbs[i] = (void *)((unsigned long) i+1);
-+		np->grant_tx_ref[i] = GRANT_INVALID_REF;
-+	}
++	bitmap[FORMAT_UNIT] = VSCSIIF_NEED_CMD_EXEC;
++	pre_function[FORMAT_UNIT] = NULL;
++	post_function[FORMAT_UNIT] = NULL;
 +
-+	for (i = 0; i < NET_RX_RING_SIZE; i++) {
-+		np->rx_skbs[i] = NULL;
-+		np->grant_rx_ref[i] = GRANT_INVALID_REF;
-+	}
++	bitmap[READ_BLOCK_LIMITS] = VSCSIIF_NEED_CMD_EXEC;
++	pre_function[READ_BLOCK_LIMITS] = NULL;
++	post_function[READ_BLOCK_LIMITS] = NULL;
 +
-+	/* A grant for every tx ring slot */
-+	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
-+					  &np->gref_tx_head) < 0) {
-+		printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
-+		err = -ENOMEM;
-+		goto exit;
-+	}
-+	/* A grant for every rx ring slot */
-+	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
-+					  &np->gref_rx_head) < 0) {
-+		printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
-+		err = -ENOMEM;
-+		goto exit_free_tx;
-+	}
++	bitmap[READ_6] = VSCSIIF_NEED_CMD_EXEC;
++	pre_function[READ_6] = NULL;
++	post_function[READ_6] = NULL;
 +
-+	netdev->open            = network_open;
-+	netdev->hard_start_xmit = network_start_xmit;
-+	netdev->stop            = network_close;
-+	netdev->get_stats       = network_get_stats;
-+	netdev->poll            = netif_poll;
-+	netdev->set_multicast_list = network_set_multicast_list;
-+	netdev->uninit          = netif_uninit;
-+	netdev->change_mtu	= xennet_change_mtu;
-+	netdev->weight          = 64;
-+	netdev->features        = NETIF_F_IP_CSUM;
++	bitmap[WRITE_6] = VSCSIIF_NEED_CMD_EXEC;
++	pre_function[WRITE_6] = NULL;
++	post_function[WRITE_6] = NULL;
 +
-+	SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
-+	SET_MODULE_OWNER(netdev);
-+	SET_NETDEV_DEV(netdev, &dev->dev);
++	bitmap[WRITE_FILEMARKS] = VSCSIIF_NEED_CMD_EXEC;
++	pre_function[WRITE_FILEMARKS] = NULL;
++	post_function[WRITE_FILEMARKS] = NULL;
 +
-+	np->netdev = netdev;
++	bitmap[SPACE] = VSCSIIF_NEED_CMD_EXEC;
++	pre_function[SPACE] = NULL;
++	post_function[SPACE] = NULL;
 +
-+	netfront_carrier_off(np);
++	bitmap[INQUIRY] = VSCSIIF_NEED_CMD_EXEC;
++	pre_function[INQUIRY] = NULL;
++	post_function[INQUIRY] = NULL;
 +
-+	return netdev;
++	bitmap[ERASE] = VSCSIIF_NEED_CMD_EXEC;
++	pre_function[ERASE] = NULL;
++	post_function[ERASE] = NULL;
 +
-+ exit_free_tx:
-+	gnttab_free_grant_references(np->gref_tx_head);
-+ exit:
-+	free_netdev(netdev);
-+	return ERR_PTR(err);
-+}
++	bitmap[MODE_SENSE] = VSCSIIF_NEED_CMD_EXEC;
++	pre_function[MODE_SENSE] = NULL;
++	post_function[MODE_SENSE] = NULL;
 +
++	bitmap[SEND_DIAGNOSTIC] = VSCSIIF_NEED_CMD_EXEC;
++	pre_function[SEND_DIAGNOSTIC] = NULL;
++	post_function[SEND_DIAGNOSTIC] = NULL;
++
++	bitmap[READ_CAPACITY] = VSCSIIF_NEED_CMD_EXEC;
++	pre_function[READ_CAPACITY] = NULL;
++	post_function[READ_CAPACITY] = NULL;
++
++	bitmap[READ_10] = VSCSIIF_NEED_CMD_EXEC;
++	pre_function[READ_10] = NULL;
++	post_function[READ_10] = NULL;
++
++	bitmap[WRITE_10] = VSCSIIF_NEED_CMD_EXEC;
++	pre_function[WRITE_10] = NULL;
++	post_function[WRITE_10] = NULL;
++
++	/*
++	  This command is fully emulated; the native driver is never called.
++	*/
++	pre_function[REPORT_LUNS] = __report_luns;
++	bitmap[REPORT_LUNS] = (VSCSIIF_NEED_EMULATE_REQBUF | 
++					VSCSIIF_NEED_EMULATE_RSPBUF);
++
++	return;
++}
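
Emulating a further command means hooking it into the dispatch tables
above. A hypothetical example (my_pre_mode_select() is illustrative only,
and would have to live in this file since the tables are static) that
rewrites MODE_SELECT requests before passing them to the native driver:

    static void my_pre_mode_select(pending_req_t *pending_req, void *data)
    {
            /* rewrite pending_req->sgl / pending_req->cmnd here ... */
    }

    static void register_mode_select(void)
    {
            pre_function[MODE_SELECT]  = my_pre_mode_select;
            post_function[MODE_SELECT] = NULL;
            /* emulate the request buffer, then hand the modified
             * command to the native driver */
            bitmap[MODE_SELECT] = VSCSIIF_NEED_EMULATE_REQBUF |
                                  VSCSIIF_NEED_CMD_EXEC;
    }
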
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/scsiback/interface.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/scsiback/interface.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,182 @@
 +/*
-+ * We use this notifier to send out a fake ARP reply to reset switches and
-+ * router ARP caches when an IP interface is brought up on a VIF.
++ * interface management.
++ *
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * Based on the blkback driver code.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
 + */
-+static int
-+inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
-+{
-+	struct in_ifaddr  *ifa = (struct in_ifaddr *)ptr;
-+	struct net_device *dev = ifa->ifa_dev->dev;
 +
-+	/* UP event and is it one of our devices? */
-+	if (event == NETDEV_UP && dev->open == network_open)
-+		(void)send_fake_arp(dev);
++#include <scsi/scsi.h>
++#include <scsi/scsi_host.h>
++#include <scsi/scsi_device.h>
++#include "common.h"
 +
-+	return NOTIFY_DONE;
-+}
++#include <xen/evtchn.h>
++#include <linux/kthread.h>
 +
 +
-+static void netif_disconnect_backend(struct netfront_info *info)
++static kmem_cache_t *scsiback_cachep;
++
++struct vscsibk_info *vscsibk_info_alloc(domid_t domid)
 +{
-+	/* Stop old i/f to prevent errors whilst we rebuild the state. */
-+	spin_lock_bh(&info->rx_lock);
-+	spin_lock_irq(&info->tx_lock);
-+	netfront_carrier_off(info);
-+	spin_unlock_irq(&info->tx_lock);
-+	spin_unlock_bh(&info->rx_lock);
++	struct vscsibk_info *info;
 +
-+	if (info->irq)
-+		unbind_from_irqhandler(info->irq, info->netdev);
-+	info->irq = 0;
++	info = kmem_cache_alloc(scsiback_cachep, GFP_KERNEL);
++	if (!info)
++		return ERR_PTR(-ENOMEM);
 +
-+	end_access(info->tx_ring_ref, info->tx.sring);
-+	end_access(info->rx_ring_ref, info->rx.sring);
-+	info->tx_ring_ref = GRANT_INVALID_REF;
-+	info->rx_ring_ref = GRANT_INVALID_REF;
-+	info->tx.sring = NULL;
-+	info->rx.sring = NULL;
++	memset(info, 0, sizeof(*info));
++	info->domid = domid;
++	spin_lock_init(&info->ring_lock);
++	atomic_set(&info->nr_unreplied_reqs, 0);
++	init_waitqueue_head(&info->wq);
++	init_waitqueue_head(&info->waiting_to_free);
++
++	return info;
 +}
 +
++static int map_frontend_page(struct vscsibk_info *info,
++				unsigned long ring_ref)
++{
++	struct gnttab_map_grant_ref op;
++	int err;
++
++	gnttab_set_map_op(&op, (unsigned long)info->ring_area->addr,
++				GNTMAP_host_map, ring_ref,
++				info->domid);
 +
-+static void end_access(int ref, void *page)
++	err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
++	BUG_ON(err);
++
++	if (op.status) {
++		printk(KERN_ERR "scsiback: grant table operation failed\n");
++		return op.status;
++	}
++
++	info->shmem_ref    = ring_ref;
++	info->shmem_handle = op.handle;
++
++	return GNTST_okay;
++}
++
++static void unmap_frontend_page(struct vscsibk_info *info)
 +{
-+	if (ref != GRANT_INVALID_REF)
-+		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
++	struct gnttab_unmap_grant_ref op;
++	int err;
++
++	gnttab_set_unmap_op(&op, (unsigned long)info->ring_area->addr,
++				GNTMAP_host_map, info->shmem_handle);
++
++	err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
++	BUG_ON(err);
++
 +}
 +
++int scsiback_init_sring(struct vscsibk_info *info,
++		unsigned long ring_ref, unsigned int evtchn)
++{
++	struct vscsiif_sring *sring;
++	int err;
 +
-+/* ** Driver registration ** */
++	if (info->irq) {
++		printk(KERN_ERR "scsiback: ring already connected\n");
++		return -EBUSY;
++	}
 +
++	info->ring_area = alloc_vm_area(PAGE_SIZE);
++	if (!info->ring_area)
++		return -ENOMEM;
 +
-+static struct xenbus_device_id netfront_ids[] = {
-+	{ "vif" },
-+	{ "" }
-+};
-+MODULE_ALIAS("xen:vif");
++	err = map_frontend_page(info, ring_ref);
++	if (err)
++		goto free_vm;
 +
++	sring = (struct vscsiif_sring *) info->ring_area->addr;
++	BACK_RING_INIT(&info->ring, sring, PAGE_SIZE);
 +
-+static struct xenbus_driver netfront = {
-+	.name = "vif",
-+	.owner = THIS_MODULE,
-+	.ids = netfront_ids,
-+	.probe = netfront_probe,
-+	.remove = __devexit_p(netfront_remove),
-+	.resume = netfront_resume,
-+	.otherend_changed = backend_changed,
-+};
++	err = bind_interdomain_evtchn_to_irqhandler(
++			info->domid, evtchn,
++			scsiback_intr, 0, "vscsiif-backend", info);
 +
++	if (err < 0)
++		goto unmap_page;
++		
++	info->irq = err;
 +
-+static struct notifier_block notifier_inetdev = {
-+	.notifier_call  = inetdev_notify,
-+	.next           = NULL,
-+	.priority       = 0
-+};
++	return 0;
 +
-+static int __init netif_init(void)
++unmap_page:
++	unmap_frontend_page(info);
++free_vm:
++	free_vm_area(info->ring_area);
++
++	return err;
++}
++
++void scsiback_disconnect(struct vscsibk_info *info)
 +{
-+	if (!is_running_on_xen())
-+		return -ENODEV;
++	if (info->kthread) {
++		kthread_stop(info->kthread);
++		info->kthread = NULL;
++	}
 +
-+#ifdef CONFIG_XEN
-+	if (MODPARM_rx_flip && MODPARM_rx_copy) {
-+		WPRINTK("Cannot specify both rx_copy and rx_flip.\n");
-+		return -EINVAL;
++	wait_event(info->waiting_to_free, 
++		atomic_read(&info->nr_unreplied_reqs) == 0);
++
++	if (info->irq) {
++		unbind_from_irqhandler(info->irq, info);
++		info->irq = 0;
 +	}
 +
-+	if (!MODPARM_rx_flip && !MODPARM_rx_copy)
-+		MODPARM_rx_flip = 1; /* Default is to flip. */
-+#endif
++	if (info->ring.sring) {
++		unmap_frontend_page(info);
++		free_vm_area(info->ring_area);
++		info->ring.sring = NULL;
++	}
++}
++
++void scsiback_free(struct vscsibk_info *info)
++{
++	kmem_cache_free(scsiback_cachep, info);
++}
++
++int __init scsiback_interface_init(void)
++{
++	scsiback_cachep = kmem_cache_create("vscsiif_cache",
++		sizeof(struct vscsibk_info), 0, 0, NULL, NULL);
++	if (!scsiback_cachep) {
++		printk(KERN_ERR "scsiback: can't init scsi cache\n");
++		return -ENOMEM;
++	}
++	
++	return 0;
++}
++
++void scsiback_interface_exit(void)
++{
++	kmem_cache_destroy(scsiback_cachep);
++}
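
Taken together, the functions above define the per-frontend lifecycle. A
rough sketch of the sequencing (connect_frontend() is hypothetical; in the
driver this ordering lives in the xenbus code, and error handling is
abbreviated):

    static int connect_frontend(domid_t domid, unsigned long ring_ref,
                                unsigned int evtchn)
    {
            struct vscsibk_info *info = vscsibk_info_alloc(domid);
            int err;

            if (IS_ERR(info))
                    return PTR_ERR(info);

            /* map the shared ring and bind the event channel */
            err = scsiback_init_sring(info, ring_ref, evtchn);
            if (err) {
                    scsiback_free(info);
                    return err;
            }

            /* ... start the scsiback_schedule() kthread and serve
             * requests until the frontend goes away ... */

            scsiback_disconnect(info);	/* stop kthread, drain, unmap */
            scsiback_free(info);
            return 0;
    }
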
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/scsiback/scsiback.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/scsiback/scsiback.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,717 @@
++/*
++ * Xen SCSI backend driver
++ *
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * Based on the blkback driver code.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
 +
-+	if (is_initial_xendomain())
-+		return 0;
++#include <linux/spinlock.h>
++#include <linux/kthread.h>
++#include <linux/list.h>
++#include <linux/delay.h>
++#include <xen/balloon.h>
++#include <asm/hypervisor.h>
++#include <scsi/scsi.h>
++#include <scsi/scsi_cmnd.h>
++#include <scsi/scsi_host.h>
++#include <scsi/scsi_device.h>
++#include <scsi/scsi_dbg.h>
++#include <scsi/scsi_eh.h>
 +
-+	IPRINTK("Initialising virtual ethernet driver.\n");
++#include "common.h"
 +
-+	(void)register_inetaddr_notifier(&notifier_inetdev);
 +
-+	return xenbus_register_frontend(&netfront);
-+}
-+module_init(netif_init);
++struct list_head pending_free;
++DEFINE_SPINLOCK(pending_free_lock);
++DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
++
++int vscsiif_reqs = VSCSIIF_BACK_MAX_PENDING_REQS;
++module_param_named(reqs, vscsiif_reqs, int, 0);
++MODULE_PARM_DESC(reqs, "Number of scsiback requests to allocate");
 +
++static unsigned int log_print_stat = 0;
++module_param(log_print_stat, int, 0644);
 +
-+static void __exit netif_exit(void)
-+{
-+	if (is_initial_xendomain())
-+		return;
++#define SCSIBACK_INVALID_HANDLE (~0)
 +
-+	unregister_inetaddr_notifier(&notifier_inetdev);
++static pending_req_t *pending_reqs;
++static struct page **pending_pages;
++static grant_handle_t *pending_grant_handles;
 +
-+	return xenbus_unregister_driver(&netfront);
++static int vaddr_pagenr(pending_req_t *req, int seg)
++{
++	return (req - pending_reqs) * VSCSIIF_SG_TABLESIZE + seg;
 +}
-+module_exit(netif_exit);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/pciback/conf_space.c tmp-linux-2.6-xen.patch/drivers/xen/pciback/conf_space.c
---- pristine-linux-2.6.18.2/drivers/xen/pciback/conf_space.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/pciback/conf_space.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,426 @@
-+/*
-+ * PCI Backend - Functions for creating a virtual configuration space for
-+ *               exported PCI Devices.
-+ *               It's dangerous to allow PCI Driver Domains to change their
-+ *               device's resources (memory, i/o ports, interrupts). We need to
-+ *               restrict changes to certain PCI Configuration registers:
-+ *               BARs, INTERRUPT_PIN, most registers in the header...
-+ *
-+ * Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/pci.h>
-+#include "pciback.h"
-+#include "conf_space.h"
-+#include "conf_space_quirks.h"
 +
-+#define DEFINE_PCI_CONFIG(op,size,type) 			\
-+int pciback_##op##_config_##size 				\
-+(struct pci_dev *dev, int offset, type value, void *data)	\
-+{								\
-+	return pci_##op##_config_##size (dev, offset, value);	\
++static unsigned long vaddr(pending_req_t *req, int seg)
++{
++	unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
++	return (unsigned long)pfn_to_kaddr(pfn);
 +}
 +
-+DEFINE_PCI_CONFIG(read, byte, u8 *)
-+DEFINE_PCI_CONFIG(read, word, u16 *)
-+DEFINE_PCI_CONFIG(read, dword, u32 *)
++#define pending_handle(_req, _seg) \
++	(pending_grant_handles[vaddr_pagenr(_req, _seg)])
 +
-+DEFINE_PCI_CONFIG(write, byte, u8)
-+DEFINE_PCI_CONFIG(write, word, u16)
-+DEFINE_PCI_CONFIG(write, dword, u32)
 +
-+static int conf_space_read(struct pci_dev *dev,
-+			   struct config_field_entry *entry, int offset,
-+			   u32 * value)
++void scsiback_fast_flush_area(pending_req_t *req)
 +{
-+	int ret = 0;
-+	struct config_field *field = entry->field;
++	struct gnttab_unmap_grant_ref unmap[VSCSIIF_SG_TABLESIZE];
++	unsigned int i, invcount = 0;
++	grant_handle_t handle;
++	int err;
 +
-+	*value = 0;
++	if (req->nr_segments) {
++		for (i = 0; i < req->nr_segments; i++) {
++			handle = pending_handle(req, i);
++			if (handle == SCSIBACK_INVALID_HANDLE)
++				continue;
++			gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
++						GNTMAP_host_map, handle);
++			pending_handle(req, i) = SCSIBACK_INVALID_HANDLE;
++			invcount++;
++		}
 +
-+	switch (field->size) {
-+	case 1:
-+		if (field->u.b.read)
-+			ret = field->u.b.read(dev, offset, (u8 *) value,
-+					      entry->data);
-+		break;
-+	case 2:
-+		if (field->u.w.read)
-+			ret = field->u.w.read(dev, offset, (u16 *) value,
-+					      entry->data);
-+		break;
-+	case 4:
-+		if (field->u.dw.read)
-+			ret = field->u.dw.read(dev, offset, value, entry->data);
-+		break;
++		err = HYPERVISOR_grant_table_op(
++			GNTTABOP_unmap_grant_ref, unmap, invcount);
++		BUG_ON(err);
++		kfree(req->sgl);
 +	}
-+	return ret;
++
++	return;
 +}
 +
-+static int conf_space_write(struct pci_dev *dev,
-+			    struct config_field_entry *entry, int offset,
-+			    u32 value)
++
++static pending_req_t * alloc_req(struct vscsibk_info *info)
 +{
-+	int ret = 0;
-+	struct config_field *field = entry->field;
++	pending_req_t *req = NULL;
++	unsigned long flags;
 +
-+	switch (field->size) {
-+	case 1:
-+		if (field->u.b.write)
-+			ret = field->u.b.write(dev, offset, (u8) value,
-+					       entry->data);
-+		break;
-+	case 2:
-+		if (field->u.w.write)
-+			ret = field->u.w.write(dev, offset, (u16) value,
-+					       entry->data);
-+		break;
-+	case 4:
-+		if (field->u.dw.write)
-+			ret = field->u.dw.write(dev, offset, value,
-+						entry->data);
-+		break;
++	spin_lock_irqsave(&pending_free_lock, flags);
++	if (!list_empty(&pending_free)) {
++		req = list_entry(pending_free.next, pending_req_t, free_list);
++		list_del(&req->free_list);
 +	}
-+	return ret;
-+}
-+
-+static inline u32 get_mask(int size)
-+{
-+	if (size == 1)
-+		return 0xff;
-+	else if (size == 2)
-+		return 0xffff;
-+	else
-+		return 0xffffffff;
++	spin_unlock_irqrestore(&pending_free_lock, flags);
++	return req;
 +}
 +
-+static inline int valid_request(int offset, int size)
-+{
-+	/* Validate request (no un-aligned requests) */
-+	if ((size == 1 || size == 2 || size == 4) && (offset % size) == 0)
-+		return 1;
-+	return 0;
-+}
 +
-+static inline u32 merge_value(u32 val, u32 new_val, u32 new_val_mask,
-+			      int offset)
++static void free_req(pending_req_t *req)
 +{
-+	if (offset >= 0) {
-+		new_val_mask <<= (offset * 8);
-+		new_val <<= (offset * 8);
-+	} else {
-+		new_val_mask >>= (offset * -8);
-+		new_val >>= (offset * -8);
-+	}
-+	val = (val & ~new_val_mask) | (new_val & new_val_mask);
++	unsigned long flags;
++	int was_empty;
 +
-+	return val;
++	spin_lock_irqsave(&pending_free_lock, flags);
++	was_empty = list_empty(&pending_free);
++	list_add(&req->free_list, &pending_free);
++	spin_unlock_irqrestore(&pending_free_lock, flags);
++	if (was_empty)
++		wake_up(&pending_free_wq);
 +}
 +
-+static int pcibios_err_to_errno(int err)
++
++static void scsiback_notify_work(struct vscsibk_info *info)
 +{
-+	switch (err) {
-+	case PCIBIOS_SUCCESSFUL:
-+		return XEN_PCI_ERR_success;
-+	case PCIBIOS_DEVICE_NOT_FOUND:
-+		return XEN_PCI_ERR_dev_not_found;
-+	case PCIBIOS_BAD_REGISTER_NUMBER:
-+		return XEN_PCI_ERR_invalid_offset;
-+	case PCIBIOS_FUNC_NOT_SUPPORTED:
-+		return XEN_PCI_ERR_not_implemented;
-+	case PCIBIOS_SET_FAILED:
-+		return XEN_PCI_ERR_access_denied;
-+	}
-+	return err;
++	info->waiting_reqs = 1;
++	wake_up(&info->wq);
 +}
 +
-+int pciback_config_read(struct pci_dev *dev, int offset, int size,
-+			u32 * ret_val)
++void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
++			uint32_t resid, pending_req_t *pending_req)
 +{
-+	int err = 0;
-+	struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+	struct config_field_entry *cfg_entry;
-+	struct config_field *field;
-+	int req_start, req_end, field_start, field_end;
-+	/* if read fails for any reason, return 0 (as if device didn't respond) */
-+	u32 value = 0, tmp_val;
++	vscsiif_response_t *ring_res;
++	struct vscsibk_info *info = pending_req->info;
++	int notify;
++	int more_to_do = 1;
++	unsigned long flags;
 +
-+	if (unlikely(verbose_request))
-+		printk(KERN_DEBUG "pciback: %s: read %d bytes at 0x%x\n",
-+		       pci_name(dev), size, offset);
++	DPRINTK("%s\n",__FUNCTION__);
 +
-+	if (!valid_request(offset, size)) {
-+		err = XEN_PCI_ERR_invalid_offset;
-+		goto out;
-+	}
++	spin_lock_irqsave(&info->ring_lock, flags);
 +
-+	/* Get the real value first, then modify as appropriate */
-+	switch (size) {
-+	case 1:
-+		err = pci_read_config_byte(dev, offset, (u8 *) & value);
-+		break;
-+	case 2:
-+		err = pci_read_config_word(dev, offset, (u16 *) & value);
-+		break;
-+	case 4:
-+		err = pci_read_config_dword(dev, offset, &value);
-+		break;
-+	}
++	ring_res = RING_GET_RESPONSE(&info->ring, info->ring.rsp_prod_pvt);
++	info->ring.rsp_prod_pvt++;
 +
-+	list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
-+		field = cfg_entry->field;
++	ring_res->rslt   = result;
++	ring_res->rqid   = pending_req->rqid;
 +
-+		req_start = offset;
-+		req_end = offset + size;
-+		field_start = OFFSET(cfg_entry);
-+		field_end = OFFSET(cfg_entry) + field->size;
++	if (sense_buffer != NULL) {
++		memcpy(ring_res->sense_buffer, sense_buffer,
++				VSCSIIF_SENSE_BUFFERSIZE);
++		ring_res->sense_len = VSCSIIF_SENSE_BUFFERSIZE;
++	} else {
++		ring_res->sense_len = 0;
++	}
 +
-+		if ((req_start >= field_start && req_start < field_end)
-+		    || (req_end > field_start && req_end <= field_end)) {
-+			err = conf_space_read(dev, cfg_entry, field_start,
-+					      &tmp_val);
-+			if (err)
-+				goto out;
++	ring_res->residual_len = resid;
 +
-+			value = merge_value(value, tmp_val,
-+					    get_mask(field->size),
-+					    field_start - req_start);
-+		}
++	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info->ring, notify);
++	if (info->ring.rsp_prod_pvt == info->ring.req_cons) {
++		RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do);
++	} else if (RING_HAS_UNCONSUMED_REQUESTS(&info->ring)) {
++		more_to_do = 1;
 +	}
++	
++	spin_unlock_irqrestore(&info->ring_lock, flags);
 +
-+      out:
-+	if (unlikely(verbose_request))
-+		printk(KERN_DEBUG "pciback: %s: read %d bytes at 0x%x = %x\n",
-+		       pci_name(dev), size, offset, value);
++	if (more_to_do)
++		scsiback_notify_work(info);
 +
-+	*ret_val = value;
-+	return pcibios_err_to_errno(err);
++	if (notify)
++		notify_remote_via_irq(info->irq);
++
++	free_req(pending_req);
 +}
 +
-+int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value)
++static void scsiback_print_status(char *sense_buffer, int errors,
++					pending_req_t *pending_req)
 +{
-+	int err = 0, handled = 0;
-+	struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+	struct config_field_entry *cfg_entry;
-+	struct config_field *field;
-+	u32 tmp_val;
-+	int req_start, req_end, field_start, field_end;
++	struct scsi_device *sdev = pending_req->sdev;
++	
++	printk(KERN_ERR "scsiback: %d:%d:%d:%d ",sdev->host->host_no,
++			sdev->channel, sdev->id, sdev->lun);
++	printk(KERN_ERR "status = 0x%02x, message = 0x%02x, host = 0x%02x, driver = 0x%02x\n",
++			status_byte(errors), msg_byte(errors),
++			host_byte(errors), driver_byte(errors));
 +
-+	if (unlikely(verbose_request))
-+		printk(KERN_DEBUG
-+		       "pciback: %s: write request %d bytes at 0x%x = %x\n",
-+		       pci_name(dev), size, offset, value);
++	printk(KERN_ERR "scsiback: cmnd[0]=0x%02X\n",
++			pending_req->cmnd[0]);
 +
-+	if (!valid_request(offset, size))
-+		return XEN_PCI_ERR_invalid_offset;
++	if (CHECK_CONDITION & status_byte(errors))
++		__scsi_print_sense("scsiback", sense_buffer, SCSI_SENSE_BUFFERSIZE);
++}
 +
-+	list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
-+		field = cfg_entry->field;
 +
-+		req_start = offset;
-+		req_end = offset + size;
-+		field_start = OFFSET(cfg_entry);
-+		field_end = OFFSET(cfg_entry) + field->size;
++static void scsiback_cmd_done(struct request *req, int errors)
++{
++	pending_req_t *pending_req = req->end_io_data;
++	unsigned char *sense_buffer;
++	unsigned int resid;
 +
-+		if ((req_start >= field_start && req_start < field_end)
-+		    || (req_end > field_start && req_end <= field_end)) {
-+			tmp_val = 0;
++	sense_buffer = req->sense;
++	resid        = req->data_len;
 +
-+			err = pciback_config_read(dev, field_start,
-+						  field->size, &tmp_val);
-+			if (err)
-+				break;
++	if (errors != 0) {
++		if (log_print_stat)
++			scsiback_print_status(sense_buffer, errors, pending_req);
++	}
 +
-+			tmp_val = merge_value(tmp_val, value, get_mask(size),
-+					      req_start - field_start);
++	scsiback_rsp_emulation(pending_req);
 +
-+			err = conf_space_write(dev, cfg_entry, field_start,
-+					       tmp_val);
++	scsiback_fast_flush_area(pending_req);
++	scsiback_do_resp_with_sense(sense_buffer, errors, resid, pending_req);
++	scsiback_put(pending_req->info);
 +
-+			/* handled is set true here, but not every byte
-+			 * may have been written! Properly detecting if
-+			 * every byte is handled is unnecessary as the
-+			 * flag is used to detect devices that need
-+			 * special helpers to work correctly.
-+			 */
-+			handled = 1;
++	__blk_put_request(req->q, req);
++}
++
++
++static int scsiback_gnttab_data_map(vscsiif_request_t *ring_req,
++					pending_req_t *pending_req)
++{
++	u32 flags;
++	int write;
++	int i, err = 0;
++	unsigned int data_len = 0;
++	struct gnttab_map_grant_ref map[VSCSIIF_SG_TABLESIZE];
++	struct vscsibk_info *info   = pending_req->info;
++
++	int data_dir = (int)pending_req->sc_data_direction;
++	unsigned int nr_segments = (unsigned int)pending_req->nr_segments;
++
++	write = (data_dir == DMA_TO_DEVICE);
++
++	if (nr_segments) {
++		/* sgl is freed in scsiback_fast_flush_area() */
++		pending_req->sgl = kmalloc(sizeof(struct scatterlist) * nr_segments,
++						GFP_KERNEL);
++		if (!pending_req->sgl) {
++			printk(KERN_ERR "scsiback: %s: kmalloc() error.\n", __FUNCTION__);
++			return -ENOMEM;
 +		}
-+	}
 +
-+	if (!handled && !err) {
-+		/* By default, anything not specificially handled above is
-+		 * read-only. The permissive flag changes this behavior so
-+		 * that anything not specifically handled above is writable.
-+		 * This means that some fields may still be read-only because
-+		 * they have entries in the config_field list that intercept
-+		 * the write and do nothing. */
-+		if (dev_data->permissive) {
-+			switch (size) {
-+			case 1:
-+				err = pci_write_config_byte(dev, offset,
-+							    (u8) value);
-+				break;
-+			case 2:
-+				err = pci_write_config_word(dev, offset,
-+							    (u16) value);
-+				break;
-+			case 4:
-+				err = pci_write_config_dword(dev, offset,
-+							     (u32) value);
-+				break;
-+			}
-+		} else if (!dev_data->warned_on_write) {
-+			dev_data->warned_on_write = 1;
-+			dev_warn(&dev->dev, "Driver tried to write to a "
-+				 "read-only configuration space field at offset "
-+				 "0x%x, size %d. This may be harmless, but if "
-+				 "you have problems with your device:\n"
-+				 "1) see permissive attribute in sysfs\n"
-+				 "2) report problems to the xen-devel "
-+				 "mailing list along with details of your "
-+				 "device obtained from lspci.\n", offset, size);
++		for (i = 0; i < nr_segments; i++) {
++			flags = GNTMAP_host_map;
++			if (write)
++				flags |= GNTMAP_readonly;
++			gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
++						ring_req->seg[i].gref,
++						info->domid);
 +		}
-+	}
 +
-+	return pcibios_err_to_errno(err);
-+}
++		err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nr_segments);
++		BUG_ON(err);
 +
-+void pciback_config_free_dyn_fields(struct pci_dev *dev)
-+{
-+	struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+	struct config_field_entry *cfg_entry, *t;
-+	struct config_field *field;
++		for (i = 0; i < nr_segments; i++) {
++			if (unlikely(map[i].status != 0)) {
++				printk(KERN_ERR "scsiback: invalid buffer -- could not remap it\n");
++				map[i].handle = SCSIBACK_INVALID_HANDLE;
++				err |= 1;
++			}
 +
-+	dev_dbg(&dev->dev,
-+		"free-ing dynamically allocated virtual configuration space fields\n");
++			pending_handle(pending_req, i) = map[i].handle;
 +
-+	list_for_each_entry_safe(cfg_entry, t, &dev_data->config_fields, list) {
-+		field = cfg_entry->field;
++			if (err)
++				continue;
 +
-+		if (field->clean) {
-+			field->clean(field);
++			set_phys_to_machine(__pa(vaddr(
++				pending_req, i)) >> PAGE_SHIFT,
++				FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
++
++			pending_req->sgl[i].page   = virt_to_page(vaddr(pending_req, i));
++			pending_req->sgl[i].offset = ring_req->seg[i].offset;
++			pending_req->sgl[i].length = ring_req->seg[i].length;
++			data_len += pending_req->sgl[i].length;
 +
-+			if (cfg_entry->data)
-+				kfree(cfg_entry->data);
++			barrier();
++			if (pending_req->sgl[i].offset >= PAGE_SIZE ||
++			    pending_req->sgl[i].length > PAGE_SIZE ||
++			    pending_req->sgl[i].offset + pending_req->sgl[i].length > PAGE_SIZE)
++				err |= 1;
 +
-+			list_del(&cfg_entry->list);
-+			kfree(cfg_entry);
 +		}
 +
++		if (err)
++			goto fail_flush;
 +	}
++	
++	pending_req->request_bufflen = data_len;
++	
++	return 0;
++	
++fail_flush:
++	scsiback_fast_flush_area(pending_req);
++	return -ENOMEM;
 +}
 +
-+void pciback_config_reset_dev(struct pci_dev *dev)
++/* adapted from scsi_merge_bio() in scsi_lib.c */
++static int scsiback_merge_bio(struct request *rq, struct bio *bio)
 +{
-+	struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+	struct config_field_entry *cfg_entry;
-+	struct config_field *field;
++	struct request_queue *q = rq->q;
 +
-+	dev_dbg(&dev->dev, "resetting virtual configuration space\n");
++	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
++	if (rq_data_dir(rq) == WRITE)
++		bio->bi_rw |= (1 << BIO_RW);
 +
-+	list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
-+		field = cfg_entry->field;
++	blk_queue_bounce(q, &bio);
 +
-+		if (field->reset)
-+			field->reset(dev, OFFSET(cfg_entry), cfg_entry->data);
++	if (!rq->bio)
++		blk_rq_bio_prep(q, rq, bio);
++	else if (!q->back_merge_fn(q, rq, bio))
++		return -EINVAL;
++	else {
++		rq->biotail->bi_next = bio;
++		rq->biotail          = bio;
++		rq->hard_nr_sectors += bio_sectors(bio);
++		rq->nr_sectors       = rq->hard_nr_sectors;
 +	}
-+}
 +
-+void pciback_config_free_dev(struct pci_dev *dev)
-+{
-+	struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+	struct config_field_entry *cfg_entry, *t;
-+	struct config_field *field;
++	return 0;
++}
 +
-+	dev_dbg(&dev->dev, "free-ing virtual configuration space fields\n");
 +
-+	list_for_each_entry_safe(cfg_entry, t, &dev_data->config_fields, list) {
-+		list_del(&cfg_entry->list);
++/* adapted from scsi_bi_endio() in scsi_lib.c */
++static int scsiback_bi_endio(struct bio *bio, unsigned int bytes_done, int error)
++{
++	if (bio->bi_size)
++		return 1;
 +
-+		field = cfg_entry->field;
++	bio_put(bio);
++	return 0;
++}
 +
-+		if (field->release)
-+			field->release(dev, OFFSET(cfg_entry), cfg_entry->data);
 +
-+		kfree(cfg_entry);
-+	}
-+}
 +
-+int pciback_config_add_field_offset(struct pci_dev *dev,
-+				    struct config_field *field,
-+				    unsigned int base_offset)
++/* adapted from scsi_req_map_sg() in scsi_lib.c */
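++/*
++ * Build bios from pending_req->sgl and merge them into the block-layer
++ * request, allocating a fresh bio whenever the current one is full.
++ */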
++static int request_map_sg(struct request *rq, pending_req_t *pending_req, unsigned int count)
 +{
-+	int err = 0;
-+	struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+	struct config_field_entry *cfg_entry;
-+	void *tmp;
++	struct request_queue *q = rq->q;
++	int nr_pages;
++	unsigned int nsegs = count;
 +
-+	cfg_entry = kmalloc(sizeof(*cfg_entry), GFP_KERNEL);
-+	if (!cfg_entry) {
-+		err = -ENOMEM;
-+		goto out;
-+	}
++	unsigned int data_len = 0, len, bytes, off;
++	struct page *page;
++	struct bio *bio = NULL;
++	int i, err, nr_vecs = 0;
 +
-+	cfg_entry->data = NULL;
-+	cfg_entry->field = field;
-+	cfg_entry->base_offset = base_offset;
++	for (i = 0; i < nsegs; i++) {
++		page = pending_req->sgl[i].page;
++		off = (unsigned int)pending_req->sgl[i].offset;
++		len = (unsigned int)pending_req->sgl[i].length;
++		data_len += len;
++
++		nr_pages = (len + off + PAGE_SIZE - 1) >> PAGE_SHIFT;
++		while (len > 0) {
++			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
++
++			if (!bio) {
++				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
++				nr_pages -= nr_vecs;
++				bio = bio_alloc(GFP_KERNEL, nr_vecs);
++				if (!bio) {
++					err = -ENOMEM;
++					goto free_bios;
++				}
++				bio->bi_end_io = scsiback_bi_endio;
++			}
 +
-+	/* silently ignore duplicate fields */
-+	err = pciback_field_is_dup(dev,OFFSET(cfg_entry));
-+	if (err)
-+		goto out;
++			if (bio_add_pc_page(q, bio, page, bytes, off) !=
++						bytes) {
++				bio_put(bio);
++				err = -EINVAL;
++				goto free_bios;
++			}
 +
-+	if (field->init) {
-+		tmp = field->init(dev, OFFSET(cfg_entry));
++			if (bio->bi_vcnt >= nr_vecs) {
++				err = scsiback_merge_bio(rq, bio);
++				if (err) {
++					bio_endio(bio, bio->bi_size, 0);
++					goto free_bios;
++				}
++				bio = NULL;
++			}
 +
-+		if (IS_ERR(tmp)) {
-+			err = PTR_ERR(tmp);
-+			goto out;
++			page++;
++			len -= bytes;
++			off = 0;
 +		}
-+
-+		cfg_entry->data = tmp;
 +	}
 +
-+	dev_dbg(&dev->dev, "added config field at offset 0x%02x\n",
-+		OFFSET(cfg_entry));
-+	list_add_tail(&cfg_entry->list, &dev_data->config_fields);
++	rq->buffer   = rq->data = NULL;
++	rq->data_len = data_len;
 +
-+      out:
-+	if (err)
-+		kfree(cfg_entry);
++	return 0;
++
++free_bios:
++	while ((bio = rq->bio) != NULL) {
++		rq->bio = bio->bi_next;
++		/*
++		 * Call bio_endio() instead of bio_put() in case the bio was bounced.
++		 */
++		bio_endio(bio, bio->bi_size, 0);
++	}
 +
 +	return err;
 +}
 +
-+/* This sets up the device's virtual configuration space to keep track of 
-+ * certain registers (like the base address registers (BARs) so that we can
-+ * keep the client from manipulating them directly.
-+ */
-+int pciback_config_init_dev(struct pci_dev *dev)
++
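++/*
++ * Translate a validated vscsiif request into a block-layer request and
++ * submit it asynchronously; scsiback_cmd_done() runs on completion.
++ */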
++void scsiback_cmd_exec(pending_req_t *pending_req)
 +{
-+	int err = 0;
-+	struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++	int cmd_len  = (int)pending_req->cmd_len;
++	int data_dir = (int)pending_req->sc_data_direction;
++	unsigned int nr_segments = (unsigned int)pending_req->nr_segments;
++	unsigned int timeout;
++	struct request *rq;
++	int write;
 +
-+	dev_dbg(&dev->dev, "initializing virtual configuration space\n");
++	DPRINTK("%s\n",__FUNCTION__);
 +
-+	INIT_LIST_HEAD(&dev_data->config_fields);
++	/* Ensure the backend does not time out earlier than the frontend. */
++	if (pending_req->timeout_per_command)
++		timeout = pending_req->timeout_per_command * HZ;
++	else
++		timeout = VSCSIIF_TIMEOUT;
 +
-+	err = pciback_config_header_add_fields(dev);
-+	if (err)
-+		goto out;
++	write = (data_dir == DMA_TO_DEVICE);
++	rq = blk_get_request(pending_req->sdev->request_queue, write, GFP_KERNEL);
 +
-+	err = pciback_config_capability_add_fields(dev);
-+	if (err)
-+		goto out;
++	rq->flags  |= REQ_BLOCK_PC;
++	rq->cmd_len = cmd_len;
++	memcpy(rq->cmd, pending_req->cmnd, cmd_len);
 +
-+	err = pciback_config_quirks_init(dev);
++	memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
++	rq->sense       = pending_req->sense_buffer;
++	rq->sense_len = 0;
 +
-+      out:
-+	return err;
++	/* Retries are not allowed in the backend. */
++	rq->retries   = 0;
++	rq->timeout   = timeout;
++	rq->end_io_data = pending_req;
++
++	if (nr_segments) {
++
++		if (request_map_sg(rq, pending_req, nr_segments)) {
++			printk(KERN_ERR "scsiback: SG Request Map Error\n");
++			return;
++		}
++	}
++
++	scsiback_get(pending_req->info);
++	blk_execute_rq_nowait(rq->q, NULL, rq, 1, scsiback_cmd_done);
++
++	return;
 +}
 +
-+int pciback_config_init(void)
++
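++/* Perform a device reset requested by the frontend and return the result. */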
++static void scsiback_device_reset_exec(pending_req_t *pending_req)
 +{
-+	return pciback_config_capability_init();
-+}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/pciback/conf_space_capability.c tmp-linux-2.6-xen.patch/drivers/xen/pciback/conf_space_capability.c
---- pristine-linux-2.6.18.2/drivers/xen/pciback/conf_space_capability.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/pciback/conf_space_capability.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,71 @@
-+/*
-+ * PCI Backend - Handles the virtual fields found on the capability lists
-+ *               in the configuration space.
-+ *
-+ * Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
-+ */
++	struct vscsibk_info *info = pending_req->info;
++	int err;
++	struct scsi_device *sdev = pending_req->sdev;
 +
-+#include <linux/kernel.h>
-+#include <linux/pci.h>
-+#include "pciback.h"
-+#include "conf_space.h"
-+#include "conf_space_capability.h"
++	scsiback_get(info);
++	err = scsi_reset_provider(sdev, SCSI_TRY_RESET_DEVICE);
 +
-+static LIST_HEAD(capabilities);
++	scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
++	scsiback_put(info);
 +
-+static struct config_field caplist_header[] = {
-+	{
-+	 .offset    = PCI_CAP_LIST_ID,
-+	 .size      = 2, /* encompass PCI_CAP_LIST_ID & PCI_CAP_LIST_NEXT */
-+	 .u.w.read  = pciback_read_config_word,
-+	 .u.w.write = NULL,
-+	},
-+	{
-+	 .size = 0,
-+	},
-+};
++	return;
++}
 +
-+static inline void register_capability(struct pciback_config_capability *cap)
++
++irqreturn_t scsiback_intr(int irq, void *dev_id, struct pt_regs *regs)
 +{
-+	list_add_tail(&cap->cap_list, &capabilities);
++	scsiback_notify_work((struct vscsibk_info *)dev_id);
++	return IRQ_HANDLED;
 +}
 +
-+int pciback_config_capability_add_fields(struct pci_dev *dev)
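++/*
++ * Copy a request off the shared ring and validate it. Each field is
++ * re-checked after a barrier() so a misbehaving frontend cannot change it
++ * between check and use.
++ */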
++static int prepare_pending_reqs(struct vscsibk_info *info,
++		vscsiif_request_t *ring_req, pending_req_t *pending_req)
 +{
-+	int err = 0;
-+	struct pciback_config_capability *cap;
-+	int cap_offset;
++	struct scsi_device *sdev;
++	struct ids_tuple vir;
++	int err = -EINVAL;
 +
-+	list_for_each_entry(cap, &capabilities, cap_list) {
-+		cap_offset = pci_find_capability(dev, cap->capability);
-+		if (cap_offset) {
-+			dev_dbg(&dev->dev, "Found capability 0x%x at 0x%x\n",
-+				cap->capability, cap_offset);
++	DPRINTK("%s\n",__FUNCTION__);
 +
-+			err = pciback_config_add_fields_offset(dev,
-+							       caplist_header,
-+							       cap_offset);
-+			if (err)
-+				goto out;
-+			err = pciback_config_add_fields_offset(dev,
-+							       cap->fields,
-+							       cap_offset);
-+			if (err)
-+				goto out;
-+		}
++	pending_req->rqid       = ring_req->rqid;
++	pending_req->act        = ring_req->act;
++
++	pending_req->info       = info;
++
++	vir.chn = ring_req->channel;
++	vir.tgt = ring_req->id;
++	vir.lun = ring_req->lun;
++
++	rmb();
++	sdev = scsiback_do_translation(info, &vir);
++	if (!sdev) {
++		pending_req->sdev = NULL;
++		DPRINTK("scsiback: doesn't exist.\n");
++		err = -ENODEV;
++		goto invalid_value;
 +	}
++	pending_req->sdev = sdev;
 +
-+      out:
-+	return err;
-+}
++	/* Validate the request parameters passed by the frontend */
++	pending_req->sc_data_direction = ring_req->sc_data_direction;
++	barrier();
++	if ((pending_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
++		(pending_req->sc_data_direction != DMA_TO_DEVICE) &&
++		(pending_req->sc_data_direction != DMA_FROM_DEVICE) &&
++		(pending_req->sc_data_direction != DMA_NONE)) {
++		DPRINTK("scsiback: invalid parameter data_dir = %d\n",
++			pending_req->sc_data_direction);
++		err = -EINVAL;
++		goto invalid_value;
++	}
 +
-+extern struct pciback_config_capability pciback_config_capability_vpd;
-+extern struct pciback_config_capability pciback_config_capability_pm;
++	pending_req->nr_segments = ring_req->nr_segments;
++	barrier();
++	if (pending_req->nr_segments > VSCSIIF_SG_TABLESIZE) {
++		DPRINTK("scsiback: invalid parameter nr_seg = %d\n",
++			pending_req->nr_segments);
++		err = -EINVAL;
++		goto invalid_value;
++	}
 +
-+int pciback_config_capability_init(void)
-+{
-+	register_capability(&pciback_config_capability_vpd);
-+	register_capability(&pciback_config_capability_pm);
++	pending_req->cmd_len = ring_req->cmd_len;
++	barrier();
++	if (pending_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
++		DPRINTK("scsiback: invalid parameter cmd_len = %d\n",
++			pending_req->cmd_len);
++		err = -EINVAL;
++		goto invalid_value;
++	}
++	memcpy(pending_req->cmnd, ring_req->cmnd, pending_req->cmd_len);
++	
++	pending_req->timeout_per_command = ring_req->timeout_per_command;
++
++	if (scsiback_gnttab_data_map(ring_req, pending_req)) {
++		DPRINTK("scsiback: invalid buffer\n");
++		err = -EINVAL;
++		goto invalid_value;
++	}
 +
 +	return 0;
++
++invalid_value:
++	return err;
 +}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/pciback/conf_space_capability.h tmp-linux-2.6-xen.patch/drivers/xen/pciback/conf_space_capability.h
---- pristine-linux-2.6.18.2/drivers/xen/pciback/conf_space_capability.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/pciback/conf_space_capability.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,23 @@
-+/*
-+ * PCI Backend - Data structures for special overlays for structures on
-+ *               the capability list.
-+ *
-+ * Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
-+ */
 +
-+#ifndef __PCIBACK_CONFIG_CAPABILITY_H__
-+#define __PCIBACK_CONFIG_CAPABILITY_H__
 +
-+#include <linux/pci.h>
-+#include <linux/list.h>
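++/*
++ * Drain the request ring: allocate a pending_req for each entry, validate
++ * it, and dispatch CDBs or device resets; returns nonzero if work remains.
++ */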
++static int scsiback_do_cmd_fn(struct vscsibk_info *info)
++{
++	struct vscsiif_back_ring *ring = &info->ring;
++	vscsiif_request_t  *ring_req;
 +
-+struct pciback_config_capability {
-+	struct list_head cap_list;
++	pending_req_t *pending_req;
++	RING_IDX rc, rp;
++	int err, more_to_do = 0;
 +
-+	int capability;
++	DPRINTK("%s\n",__FUNCTION__);
 +
-+	/* If the device has the capability found above, add these fields */
-+	struct config_field *fields;
-+};
++	rc = ring->req_cons;
++	rp = ring->sring->req_prod;
++	rmb();
 +
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/pciback/conf_space_capability_pm.c tmp-linux-2.6-xen.patch/drivers/xen/pciback/conf_space_capability_pm.c
---- pristine-linux-2.6.18.2/drivers/xen/pciback/conf_space_capability_pm.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/pciback/conf_space_capability_pm.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,128 @@
-+/*
-+ * PCI Backend - Configuration space overlay for power management
-+ *
-+ * Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
-+ */
++	while (rc != rp) {
++		if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
++			break;
++		pending_req = alloc_req(info);
++		if (NULL == pending_req) {
++			more_to_do = 1;
++			break;
++		}
 +
-+#include <linux/pci.h>
-+#include "conf_space.h"
-+#include "conf_space_capability.h"
++		ring_req = RING_GET_REQUEST(ring, rc);
++		ring->req_cons = ++rc;
 +
-+static int pm_caps_read(struct pci_dev *dev, int offset, u16 *value,
-+			void *data)
-+{
-+	int err;
-+	u16 real_value;
++		err = prepare_pending_reqs(info, ring_req,
++						pending_req);
++		if (err == -EINVAL) {
++			scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
++				0, pending_req);
++			continue;
++		} else if (err == -ENODEV) {
++			scsiback_do_resp_with_sense(NULL, (DID_NO_CONNECT << 16),
++				0, pending_req);
++			continue;
++		}
 +
-+	err = pci_read_config_word(dev, offset, &real_value);
-+	if (err)
-+		goto out;
++		if (pending_req->act == VSCSIIF_ACT_SCSI_CDB) {
++			scsiback_req_emulation_or_cmdexec(pending_req);
++		} else if (pending_req->act == VSCSIIF_ACT_SCSI_RESET) {
++			scsiback_device_reset_exec(pending_req);
++		} else {
++			printk(KERN_ERR "scsiback: invalid parameter for request\n");
++			scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
++				0, pending_req);
++			continue;
++		}
++	}
 +
-+	*value = real_value & ~PCI_PM_CAP_PME_MASK;
++	if (RING_HAS_UNCONSUMED_REQUESTS(ring))
++		more_to_do = 1;
 +
-+      out:
-+	return err;
++	/* Yield point for this unbounded loop. */
++	cond_resched();
++
++	return more_to_do;
 +}
 +
-+/* PM_OK_BITS specifies the bits that the driver domain is allowed to change.
-+ * Can't allow driver domain to enable PMEs - they're shared */
-+#define PM_OK_BITS (PCI_PM_CTRL_PME_STATUS|PCI_PM_CTRL_DATA_SEL_MASK)
 +
-+static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
-+			 void *data)
++int scsiback_schedule(void *data)
 +{
-+	int err;
-+	u16 old_value;
-+	pci_power_t new_state, old_state;
++	struct vscsibk_info *info = (struct vscsibk_info *)data;
 +
-+	err = pci_read_config_word(dev, offset, &old_value);
-+	if (err)
-+		goto out;
++	DPRINTK("%s\n",__FUNCTION__);
 +
-+	old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
-+	new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
++	while (!kthread_should_stop()) {
++		wait_event_interruptible(
++			info->wq,
++			info->waiting_reqs || kthread_should_stop());
++		wait_event_interruptible(
++			pending_free_wq,
++			!list_empty(&pending_free) || kthread_should_stop());
 +
-+	new_value &= PM_OK_BITS;
-+	if ((old_value & PM_OK_BITS) != new_value) {
-+		new_value = (old_value & ~PM_OK_BITS) | new_value;
-+		err = pci_write_config_word(dev, offset, new_value);
-+		if (err)
-+			goto out;
-+	}
++		info->waiting_reqs = 0;
++		smp_mb();
 +
-+	/* Let pci core handle the power management change */
-+	dev_dbg(&dev->dev, "set power state to %x\n", new_state);
-+	err = pci_set_power_state(dev, new_state);
-+	if (err) {
-+		err = PCIBIOS_SET_FAILED;
-+		goto out;
++		if (scsiback_do_cmd_fn(info))
++			info->waiting_reqs = 1;
 +	}
 +
-+	/*
-+	 * Device may lose PCI config info on D3->D0 transition. This
-+	 * is a problem for some guests which will not reset BARs. Even
-+	 * those that have a go will be foiled by our BAR-write handler
-+	 * which will discard the write! Since Linux won't re-init
-+	 * the config space automatically in all cases, we do it here.
-+	 * Future: Should we re-initialise all first 64 bytes of config space?
-+	 */
-+	if (new_state == PCI_D0 &&
-+	    (old_state == PCI_D3hot || old_state == PCI_D3cold) &&
-+	    !(old_value & PCI_PM_CTRL_NO_SOFT_RESET))
-+		pci_restore_bars(dev);
-+
-+ out:
-+	return err;
++	return 0;
 +}
 +
-+/* Ensure PMEs are disabled */
-+static void *pm_ctrl_init(struct pci_dev *dev, int offset)
++
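++/*
++ * Allocate the global pools: one pending_req per ring slot, plus grant
++ * handles and pre-allocated pages for up to VSCSIIF_SG_TABLESIZE segments
++ * per request.
++ */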
++static int __init scsiback_init(void)
 +{
-+	int err;
-+	u16 value;
++	int i, mmap_pages;
 +
-+	err = pci_read_config_word(dev, offset, &value);
-+	if (err)
-+		goto out;
++	if (!is_running_on_xen())
++		return -ENODEV;
 +
-+	if (value & PCI_PM_CTRL_PME_ENABLE) {
-+		value &= ~PCI_PM_CTRL_PME_ENABLE;
-+		err = pci_write_config_word(dev, offset, value);
-+	}
++	mmap_pages = vscsiif_reqs * VSCSIIF_SG_TABLESIZE;
 +
-+      out:
-+	return ERR_PTR(err);
-+}
++	pending_reqs          = kmalloc(sizeof(pending_reqs[0]) *
++					vscsiif_reqs, GFP_KERNEL);
++	pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
++					mmap_pages, GFP_KERNEL);
++	pending_pages         = alloc_empty_pages_and_pagevec(mmap_pages);
 +
-+static struct config_field caplist_pm[] = {
-+	{
-+		.offset     = PCI_PM_PMC,
-+		.size       = 2,
-+		.u.w.read   = pm_caps_read,
-+	},
-+	{
-+		.offset     = PCI_PM_CTRL,
-+		.size       = 2,
-+		.init       = pm_ctrl_init,
-+		.u.w.read   = pciback_read_config_word,
-+		.u.w.write  = pm_ctrl_write,
-+	},
-+	{
-+		.offset     = PCI_PM_PPB_EXTENSIONS,
-+		.size       = 1,
-+		.u.b.read   = pciback_read_config_byte,
-+	},
-+	{
-+		.offset     = PCI_PM_DATA_REGISTER,
-+		.size       = 1,
-+		.u.b.read   = pciback_read_config_byte,
-+	},
-+	{
-+		.size = 0,
-+	},
-+};
++	if (!pending_reqs || !pending_grant_handles || !pending_pages)
++		goto out_of_memory;
 +
-+struct pciback_config_capability pciback_config_capability_pm = {
-+	.capability = PCI_CAP_ID_PM,
-+	.fields = caplist_pm,
-+};
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/pciback/conf_space_capability_vpd.c tmp-linux-2.6-xen.patch/drivers/xen/pciback/conf_space_capability_vpd.c
---- pristine-linux-2.6.18.2/drivers/xen/pciback/conf_space_capability_vpd.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/pciback/conf_space_capability_vpd.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,42 @@
-+/*
-+ * PCI Backend - Configuration space overlay for Vital Product Data
-+ *
-+ * Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
-+ */
++	for (i = 0; i < mmap_pages; i++)
++		pending_grant_handles[i] = SCSIBACK_INVALID_HANDLE;
 +
-+#include <linux/pci.h>
-+#include "conf_space.h"
-+#include "conf_space_capability.h"
++	if (scsiback_interface_init() < 0)
++		goto out_of_kmem;
 +
-+static int vpd_address_write(struct pci_dev *dev, int offset, u16 value,
-+			     void *data)
++	memset(pending_reqs, 0, sizeof(pending_reqs[0]) * vscsiif_reqs);
++	INIT_LIST_HEAD(&pending_free);
++
++	for (i = 0; i < vscsiif_reqs; i++)
++		list_add_tail(&pending_reqs[i].free_list, &pending_free);
++
++	if (scsiback_xenbus_init())
++		goto out_of_xenbus;
++
++	scsiback_emulation_init();
++
++	return 0;
++
++out_of_xenbus:
++	scsiback_xenbus_unregister();
++out_of_kmem:
++	scsiback_interface_exit();
++out_of_memory:
++	kfree(pending_reqs);
++	kfree(pending_grant_handles);
++	free_empty_pages_and_pagevec(pending_pages, mmap_pages);
++	printk(KERN_ERR "scsiback: %s: out of memory\n", __FUNCTION__);
++	return -ENOMEM;
++}
++
++static void __exit scsiback_exit(void)
 +{
-+	/* Disallow writes to the vital product data */
-+	if (value & PCI_VPD_ADDR_F)
-+		return PCIBIOS_SET_FAILED;
-+	else
-+		return pci_write_config_word(dev, offset, value);
++	scsiback_xenbus_unregister();
++	scsiback_interface_exit();
++	kfree(pending_reqs);
++	kfree(pending_grant_handles);
++	free_empty_pages_and_pagevec(pending_pages, (vscsiif_reqs * VSCSIIF_SG_TABLESIZE));
++
 +}
 +
-+static struct config_field caplist_vpd[] = {
-+	{
-+	 .offset    = PCI_VPD_ADDR,
-+	 .size      = 2,
-+	 .u.w.read  = pciback_read_config_word,
-+	 .u.w.write = vpd_address_write,
-+	 },
-+	{
-+	 .offset     = PCI_VPD_DATA,
-+	 .size       = 4,
-+	 .u.dw.read  = pciback_read_config_dword,
-+	 .u.dw.write = NULL,
-+	 },
-+	{
-+	 .size = 0,
-+	 },
-+};
-+ 
-+struct pciback_config_capability pciback_config_capability_vpd = {
-+	.capability = PCI_CAP_ID_VPD,
-+	.fields = caplist_vpd,
-+};
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/pciback/conf_space.h tmp-linux-2.6-xen.patch/drivers/xen/pciback/conf_space.h
---- pristine-linux-2.6.18.2/drivers/xen/pciback/conf_space.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/pciback/conf_space.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,126 @@
++module_init(scsiback_init);
++module_exit(scsiback_exit);
++
++MODULE_DESCRIPTION("Xen SCSI backend driver");
++MODULE_LICENSE("Dual BSD/GPL");
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/scsiback/translate.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/scsiback/translate.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,168 @@
 +/*
-+ * PCI Backend - Common data structures for overriding the configuration space
++ * Xen SCSI backend driver
 + *
-+ * Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
 + */
 +
-+#ifndef __XEN_PCIBACK_CONF_SPACE_H__
-+#define __XEN_PCIBACK_CONF_SPACE_H__
-+
 +#include <linux/list.h>
-+#include <linux/err.h>
++#include <linux/gfp.h>
 +
-+/* conf_field_init can return an errno in a ptr with ERR_PTR() */
-+typedef void *(*conf_field_init) (struct pci_dev * dev, int offset);
-+typedef void (*conf_field_reset) (struct pci_dev * dev, int offset, void *data);
-+typedef void (*conf_field_free) (struct pci_dev * dev, int offset, void *data);
++#include "common.h"
 +
-+typedef int (*conf_dword_write) (struct pci_dev * dev, int offset, u32 value,
-+				 void *data);
-+typedef int (*conf_word_write) (struct pci_dev * dev, int offset, u16 value,
-+				void *data);
-+typedef int (*conf_byte_write) (struct pci_dev * dev, int offset, u8 value,
-+				void *data);
-+typedef int (*conf_dword_read) (struct pci_dev * dev, int offset, u32 * value,
-+				void *data);
-+typedef int (*conf_word_read) (struct pci_dev * dev, int offset, u16 * value,
-+			       void *data);
-+typedef int (*conf_byte_read) (struct pci_dev * dev, int offset, u8 * value,
-+			       void *data);
++/*
++  Initialize the translation entry list
++*/
++void scsiback_init_translation_table(struct vscsibk_info *info)
++{
++	INIT_LIST_HEAD(&info->v2p_entry_lists);
++	spin_lock_init(&info->v2p_lock);
++}
 +
-+/* These are the fields within the configuration space which we
-+ * are interested in intercepting reads/writes to and changing their
-+ * values.
-+ */
-+struct config_field {
-+	unsigned int offset;
-+	unsigned int size;
-+	unsigned int mask;
-+	conf_field_init init;
-+	conf_field_reset reset;
-+	conf_field_free release;
-+	void (*clean) (struct config_field * field);
-+	union {
-+		struct {
-+			conf_dword_write write;
-+			conf_dword_read read;
-+		} dw;
-+		struct {
-+			conf_word_write write;
-+			conf_word_read read;
-+		} w;
-+		struct {
-+			conf_byte_write write;
-+			conf_byte_read read;
-+		} b;
-+	} u;
-+	struct list_head list;
-+};
 +
-+struct config_field_entry {
-+	struct list_head list;
-+	struct config_field *field;
-+	unsigned int base_offset;
-+	void *data;
-+};
++/*
++  Add a new translation entry
++*/
++int scsiback_add_translation_entry(struct vscsibk_info *info,
++			struct scsi_device *sdev, struct ids_tuple *v)
++{
++	int err = 0;
++	struct v2p_entry *entry;
++	struct v2p_entry *new;
++	struct list_head *head = &(info->v2p_entry_lists);
++	unsigned long flags;
++	
++	spin_lock_irqsave(&info->v2p_lock, flags);
 +
-+#define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
++	/* Check for a duplicate assignment to the same virtual ID */
++	list_for_each_entry(entry, head, l) {
++		if ((entry->v.chn == v->chn) &&
++		    (entry->v.tgt == v->tgt) &&
++		    (entry->v.lun == v->lun)) {
++			printk(KERN_WARNING "scsiback: Virtual ID is already used. "
++			       "Assignment was not performed.\n");
++			err = -EEXIST;
++			goto out;
++		}
 +
-+/* Add fields to a device - the add_fields macro expects to get a pointer to
-+ * the first entry in an array (of which the ending is marked by size==0)
-+ */
-+int pciback_config_add_field_offset(struct pci_dev *dev,
-+				    struct config_field *field,
-+				    unsigned int offset);
++	}
 +
-+static inline int pciback_config_add_field(struct pci_dev *dev,
-+					   struct config_field *field)
-+{
-+	return pciback_config_add_field_offset(dev, field, 0);
++	/* Create a new translation entry and add to the list */
++	if ((new = kmalloc(sizeof(struct v2p_entry), GFP_ATOMIC)) == NULL) {
++		printk(KERN_ERR "scsiback: %s: kmalloc() error.\n", __FUNCTION__);
++		err = -ENOMEM;
++		goto out;
++	}
++	new->v = *v;
++	new->sdev = sdev;
++	list_add_tail(&new->l, head);
++
++out:	
++	spin_unlock_irqrestore(&info->v2p_lock, flags);
++	return err;
 +}
 +
-+static inline int pciback_config_add_fields(struct pci_dev *dev,
-+					    struct config_field *field)
++
++/*
++  Delete the specified translation entry
++*/
++int scsiback_del_translation_entry(struct vscsibk_info *info,
++				struct ids_tuple *v)
 +{
-+	int i, err = 0;
-+	for (i = 0; field[i].size != 0; i++) {
-+		err = pciback_config_add_field(dev, &field[i]);
-+		if (err)
-+			break;
++	struct v2p_entry *entry;
++	struct list_head *head = &(info->v2p_entry_lists);
++	unsigned long flags;
++
++	spin_lock_irqsave(&info->v2p_lock, flags);
++	/* Find the specified translation entry */
++	list_for_each_entry(entry, head, l) {
++		if ((entry->v.chn == v->chn) &&
++		    (entry->v.tgt == v->tgt) &&
++		    (entry->v.lun == v->lun)) {
++			goto found;
++		}
 +	}
-+	return err;
++
++	spin_unlock_irqrestore(&info->v2p_lock, flags);
++	return 1;
++
++found:
++	/* Delete the specified translation entry */
++	scsi_device_put(entry->sdev);
++	list_del(&entry->l);
++	kfree(entry);
++
++	spin_unlock_irqrestore(&info->v2p_lock, flags);
++	return 0;
 +}
 +
-+static inline int pciback_config_add_fields_offset(struct pci_dev *dev,
-+						   struct config_field *field,
-+						   unsigned int offset)
++
++/*
++  Perform virtual to physical translation
++*/
++struct scsi_device *scsiback_do_translation(struct vscsibk_info *info,
++			struct ids_tuple *v)
 +{
-+	int i, err = 0;
-+	for (i = 0; field[i].size != 0; i++) {
-+		err = pciback_config_add_field_offset(dev, &field[i], offset);
-+		if (err)
-+			break;
++	struct v2p_entry *entry;
++	struct list_head *head = &(info->v2p_entry_lists);
++	struct scsi_device *sdev = NULL;
++	unsigned long flags;
++
++	spin_lock_irqsave(&info->v2p_lock, flags);
++	list_for_each_entry(entry, head, l) {
++		if ((entry->v.chn == v->chn) &&
++		    (entry->v.tgt == v->tgt) &&
++		    (entry->v.lun == v->lun)) {
++			sdev = entry->sdev;
++			goto out;
++		}
 +	}
-+	return err;
++out:
++	spin_unlock_irqrestore(&info->v2p_lock, flags);
++	return sdev;
 +}
 +
-+/* Read/Write the real configuration space */
-+int pciback_read_config_byte(struct pci_dev *dev, int offset, u8 * value,
-+			     void *data);
-+int pciback_read_config_word(struct pci_dev *dev, int offset, u16 * value,
-+			     void *data);
-+int pciback_read_config_dword(struct pci_dev *dev, int offset, u32 * value,
-+			      void *data);
-+int pciback_write_config_byte(struct pci_dev *dev, int offset, u8 value,
-+			      void *data);
-+int pciback_write_config_word(struct pci_dev *dev, int offset, u16 value,
-+			      void *data);
-+int pciback_write_config_dword(struct pci_dev *dev, int offset, u32 value,
-+			       void *data);
 +
-+int pciback_config_capability_init(void);
++/*
++  Release all remaining translation entries
++*/
++void scsiback_release_translation_entry(struct vscsibk_info *info)
++{
++	struct v2p_entry *entry, *tmp;
++	struct list_head *head = &(info->v2p_entry_lists);
++	unsigned long flags;
 +
-+int pciback_config_header_add_fields(struct pci_dev *dev);
-+int pciback_config_capability_add_fields(struct pci_dev *dev);
++	spin_lock_irqsave(&info->v2p_lock, flags);
++	list_for_each_entry_safe(entry, tmp, head, l) {
++		scsi_device_put(entry->sdev);
++		list_del(&entry->l);
++		kfree(entry);
++	}
 +
-+#endif				/* __XEN_PCIBACK_CONF_SPACE_H__ */
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/pciback/conf_space_header.c tmp-linux-2.6-xen.patch/drivers/xen/pciback/conf_space_header.c
---- pristine-linux-2.6.18.2/drivers/xen/pciback/conf_space_header.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/pciback/conf_space_header.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,323 @@
++	spin_unlock_irqrestore(&info->v2p_lock, flags);
++	return;
++
++}
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/scsiback/xenbus.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/scsiback/xenbus.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,368 @@
 +/*
-+ * PCI Backend - Handles the virtual fields in the configuration space headers.
++ * Xen SCSI backend driver
 + *
-+ * Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * Based on the blkback driver code.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
 + */
 +
-+#include <linux/kernel.h>
-+#include <linux/pci.h>
-+#include "pciback.h"
-+#include "conf_space.h"
-+
-+struct pci_bar_info {
-+	u32 val;
-+	u32 len_val;
-+	int which;
-+};
++#include <stdarg.h>
++#include <linux/module.h>
++#include <linux/kthread.h>
++#include <scsi/scsi.h>
++#include <scsi/scsi_host.h>
++#include <scsi/scsi_device.h>
 +
-+#define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
-+#define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
++#include "common.h"
 +
-+static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
++struct backend_info
 +{
-+	int err;
++	struct xenbus_device *dev;
++	struct vscsibk_info *info;
++};
 +
-+	if (!dev->is_enabled && is_enable_cmd(value)) {
-+		if (unlikely(verbose_request))
-+			printk(KERN_DEBUG "pciback: %s: enable\n",
-+			       pci_name(dev));
-+		err = pci_enable_device(dev);
-+		if (err)
-+			return err;
-+	} else if (dev->is_enabled && !is_enable_cmd(value)) {
-+		if (unlikely(verbose_request))
-+			printk(KERN_DEBUG "pciback: %s: disable\n",
-+			       pci_name(dev));
-+		pci_disable_device(dev);
-+	}
 +
-+	if (!dev->is_busmaster && is_master_cmd(value)) {
-+		if (unlikely(verbose_request))
-+			printk(KERN_DEBUG "pciback: %s: set bus master\n",
-+			       pci_name(dev));
-+		pci_set_master(dev);
-+	}
++static int __vscsiif_name(struct backend_info *be, char *buf)
++{
++	struct xenbus_device *dev = be->dev;
++	unsigned int domid, id;
 +
-+	if (value & PCI_COMMAND_INVALIDATE) {
-+		if (unlikely(verbose_request))
-+			printk(KERN_DEBUG
-+			       "pciback: %s: enable memory-write-invalidate\n",
-+			       pci_name(dev));
-+		err = pci_set_mwi(dev);
-+		if (err) {
-+			printk(KERN_WARNING
-+			       "pciback: %s: cannot enable memory-write-invalidate (%d)\n",
-+			       pci_name(dev), err);
-+			value &= ~PCI_COMMAND_INVALIDATE;
-+		}
-+	}
++	sscanf(dev->nodename, "backend/vscsi/%u/%u", &domid, &id);
++	snprintf(buf, TASK_COMM_LEN, "vscsi.%u.%u", be->info->domid, id);
 +
-+	return pci_write_config_word(dev, offset, value);
++	return 0;
 +}
 +
-+static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
++static int scsiback_map(struct backend_info *be)
 +{
-+	struct pci_bar_info *bar = data;
++	struct xenbus_device *dev = be->dev;
++	unsigned long ring_ref;
++	unsigned int evtchn;
++	int err;
++	char name[TASK_COMM_LEN];
 +
-+	if (unlikely(!bar)) {
-+		printk(KERN_WARNING "pciback: driver data not found for %s\n",
-+		       pci_name(dev));
-+		return XEN_PCI_ERR_op_failed;
++	err = xenbus_gather(XBT_NIL, dev->otherend,
++			"ring-ref", "%lu", &ring_ref,
++			"event-channel", "%u", &evtchn, NULL);
++	if (err) {
++		xenbus_dev_fatal(dev, err, "reading %s ring", dev->otherend);
++		return err;
 +	}
 +
-+	/* A write to obtain the length must happen as a 32-bit write.
-+	 * This does not (yet) support writing individual bytes
-+	 */
-+	if (value == ~PCI_ROM_ADDRESS_ENABLE)
-+		bar->which = 1;
-+	else {
-+		u32 tmpval;
-+		pci_read_config_dword(dev, offset, &tmpval);
-+		if (tmpval != bar->val && value == bar->val) {
-+			/* Allow restoration of bar value. */
-+			pci_write_config_dword(dev, offset, bar->val);
-+		}
-+		bar->which = 0;
++	err = scsiback_init_sring(be->info, ring_ref, evtchn);
++	if (err)
++		return err;
++
++	err = __vscsiif_name(be, name);
++	if (err) {
++		xenbus_dev_error(dev, err, "get scsiback dev name");
++		return err;
 +	}
 +
-+	/* Do we need to support enabling/disabling the rom address here? */
++	be->info->kthread = kthread_run(scsiback_schedule, be->info, name);
++	if (IS_ERR(be->info->kthread)) {
++		err = PTR_ERR(be->info->kthread);
++		be->info->kthread = NULL;
++		xenbus_dev_error(be->dev, err, "start vscsiif");
++		return err;
++	}
 +
 +	return 0;
 +}
 +
-+/* For the BARs, only allow writes which write ~0 or
-+ * the correct resource information
-+ * (Needed for when the driver probes the resource usage)
-+ */
-+static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data)
++
++struct scsi_device *scsiback_get_scsi_device(struct ids_tuple *phy)
 +{
-+	struct pci_bar_info *bar = data;
++	struct Scsi_Host *shost;
++	struct scsi_device *sdev = NULL;
 +
-+	if (unlikely(!bar)) {
-+		printk(KERN_WARNING "pciback: driver data not found for %s\n",
-+		       pci_name(dev));
-+		return XEN_PCI_ERR_op_failed;
++	shost = scsi_host_lookup(phy->hst);
++	if (IS_ERR(shost)) {
++		printk(KERN_ERR "scsiback: host%d doesn't exist.\n",
++			phy->hst);
++		return NULL;
 +	}
-+
-+	/* A write to obtain the length must happen as a 32-bit write.
-+	 * This does not (yet) support writing individual bytes
-+	 */
-+	if (value == ~0)
-+		bar->which = 1;
-+	else {
-+		u32 tmpval;
-+		pci_read_config_dword(dev, offset, &tmpval);
-+		if (tmpval != bar->val && value == bar->val) {
-+			/* Allow restoration of bar value. */
-+			pci_write_config_dword(dev, offset, bar->val);
-+		}
-+		bar->which = 0;
++	sdev   = scsi_device_lookup(shost, phy->chn, phy->tgt, phy->lun);
++	if (!sdev) {
++		printk(KERN_ERR "scsiback: %d:%d:%d:%d doesn't exist.\n",
++			phy->hst, phy->chn, phy->tgt, phy->lun);
++		scsi_host_put(shost);
++		return NULL;
 +	}
 +
-+	return 0;
++	scsi_host_put(shost);
++	return (sdev);
 +}
 +
-+static int bar_read(struct pci_dev *dev, int offset, u32 * value, void *data)
++#define VSCSIBACK_OP_ADD_OR_DEL_LUN	1
++#define VSCSIBACK_OP_UPDATEDEV_STATE	2
++
++
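++/*
++ * Scan the vscsi-devs/ subtree in xenstore and, depending on op, attach or
++ * detach LUNs or push their state towards Connected.
++ */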
++static void scsiback_do_lun_hotplug(struct backend_info *be, int op)
 +{
-+	struct pci_bar_info *bar = data;
++	int i, err = 0;
++	struct ids_tuple phy, vir;
++	int device_state;
++	char str[64], state_str[64];
++	char **dir;
++	unsigned int dir_n = 0;
++	struct xenbus_device *dev = be->dev;
++	struct scsi_device *sdev;
 +
-+	if (unlikely(!bar)) {
-+		printk(KERN_WARNING "pciback: driver data not found for %s\n",
-+		       pci_name(dev));
-+		return XEN_PCI_ERR_op_failed;
-+	}
++	dir = xenbus_directory(XBT_NIL, dev->nodename, "vscsi-devs", &dir_n);
++	if (IS_ERR(dir))
++		return;
 +
-+	*value = bar->which ? bar->len_val : bar->val;
++	for (i = 0; i < dir_n; i++) {
++		
++		/* read status */
++		snprintf(state_str, sizeof(state_str), "vscsi-devs/%s/state", dir[i]);
++		err = xenbus_scanf(XBT_NIL, dev->nodename, state_str, "%u",
++			&device_state);
++		if (XENBUS_EXIST_ERR(err))
++			continue;
 +
-+	return 0;
-+}
++		/* physical SCSI device */
++		snprintf(str, sizeof(str), "vscsi-devs/%s/p-dev", dir[i]);
++		err = xenbus_scanf(XBT_NIL, dev->nodename, str,
++			"%u:%u:%u:%u", &phy.hst, &phy.chn, &phy.tgt, &phy.lun);
++		if (XENBUS_EXIST_ERR(err)) {
++			xenbus_printf(XBT_NIL, dev->nodename, state_str,
++					"%d", XenbusStateClosed);
++			continue;
++		}
 +
-+static inline void read_dev_bar(struct pci_dev *dev,
-+				struct pci_bar_info *bar_info, int offset,
-+				u32 len_mask)
-+{
-+	pci_read_config_dword(dev, offset, &bar_info->val);
-+	pci_write_config_dword(dev, offset, len_mask);
-+	pci_read_config_dword(dev, offset, &bar_info->len_val);
-+	pci_write_config_dword(dev, offset, bar_info->val);
-+}
++		/* virtual SCSI device */
++		snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]);
++		err = xenbus_scanf(XBT_NIL, dev->nodename, str,
++			"%u:%u:%u:%u", &vir.hst, &vir.chn, &vir.tgt, &vir.lun);
++		if (XENBUS_EXIST_ERR(err)) {
++			xenbus_printf(XBT_NIL, dev->nodename, state_str,
++					"%d", XenbusStateClosed);
++			continue;
++		}
 +
-+static void *bar_init(struct pci_dev *dev, int offset)
-+{
-+	struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
++		switch (op) {
++		case VSCSIBACK_OP_ADD_OR_DEL_LUN:
++			if (device_state == XenbusStateInitialising) {
++				sdev = scsiback_get_scsi_device(&phy);
++				if (!sdev)
++					xenbus_printf(XBT_NIL, dev->nodename, state_str, 
++							    "%d", XenbusStateClosed);
++				else {
++					err = scsiback_add_translation_entry(be->info, sdev, &vir);
++					if (!err) {
++						if (xenbus_printf(XBT_NIL, dev->nodename, state_str, 
++								    "%d", XenbusStateInitialised)) {
++							printk(KERN_ERR "scsiback: xenbus_printf error %s\n", state_str);
++							scsiback_del_translation_entry(be->info, &vir);
++						}
++					} else {
++						scsi_device_put(sdev);
++						xenbus_printf(XBT_NIL, dev->nodename, state_str, 
++								    "%d", XenbusStateClosed);
++					}
++				}
++			}
 +
-+	if (!bar)
-+		return ERR_PTR(-ENOMEM);
++			if (device_state == XenbusStateClosing) {
++				if (!scsiback_del_translation_entry(be->info, &vir)) {
++					if (xenbus_printf(XBT_NIL, dev->nodename, state_str, 
++							    "%d", XenbusStateClosed))
++						printk(KERN_ERR "scsiback: xenbus_printf error %s\n", state_str);
++				}
++			}
++			break;
 +
-+	read_dev_bar(dev, bar, offset, ~0);
-+	bar->which = 0;
++		case VSCSIBACK_OP_UPDATEDEV_STATE:
++			if (device_state == XenbusStateInitialised) {
++				/* modify vscsi-devs/dev-x/state */
++				if (xenbus_printf(XBT_NIL, dev->nodename, state_str, 
++						    "%d", XenbusStateConnected)) {
++					printk(KERN_ERR "scsiback: xenbus_printf error %s\n", state_str);
++					scsiback_del_translation_entry(be->info, &vir);
++					xenbus_printf(XBT_NIL, dev->nodename, state_str, 
++							    "%d", XenbusStateClosed);
++				}
++			}
++			break;
++		/* Additional operations can be handled here when necessary. */
++		default:
++			break;
++		}
++	}
 +
-+	return bar;
++	kfree(dir);
++	return ;
 +}
 +
-+static void *rom_init(struct pci_dev *dev, int offset)
++
++static void scsiback_frontend_changed(struct xenbus_device *dev,
++					enum xenbus_state frontend_state)
 +{
-+	struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
++	struct backend_info *be = dev->dev.driver_data;
++	int err;
 +
-+	if (!bar)
-+		return ERR_PTR(-ENOMEM);
++	switch (frontend_state) {
++	case XenbusStateInitialising:
++		break;
++	case XenbusStateInitialised:
++		err = scsiback_map(be);
++		if (err)
++			break;
 +
-+	read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE);
-+	bar->which = 0;
++		scsiback_do_lun_hotplug(be, VSCSIBACK_OP_ADD_OR_DEL_LUN);
++		xenbus_switch_state(dev, XenbusStateConnected);
 +
-+	return bar;
-+}
++		break;
++	case XenbusStateConnected:
 +
-+static void bar_reset(struct pci_dev *dev, int offset, void *data)
-+{
-+	struct pci_bar_info *bar = data;
++		scsiback_do_lun_hotplug(be, VSCSIBACK_OP_UPDATEDEV_STATE);
 +
-+	bar->which = 0;
-+}
++		if (dev->state == XenbusStateConnected)
++			break;
 +
-+static void bar_release(struct pci_dev *dev, int offset, void *data)
-+{
-+	kfree(data);
++		xenbus_switch_state(dev, XenbusStateConnected);
++
++		break;
++
++	case XenbusStateClosing:
++		scsiback_disconnect(be->info);
++		xenbus_switch_state(dev, XenbusStateClosing);
++		break;
++
++	case XenbusStateClosed:
++		xenbus_switch_state(dev, XenbusStateClosed);
++		if (xenbus_dev_is_online(dev))
++			break;
++		/* fall through if not online */
++	case XenbusStateUnknown:
++		device_unregister(&dev->dev);
++		break;
++
++	case XenbusStateReconfiguring:
++		scsiback_do_lun_hotplug(be, VSCSIBACK_OP_ADD_OR_DEL_LUN);
++
++		xenbus_switch_state(dev, XenbusStateReconfigured);
++
++		break;
++
++	default:
++		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
++					frontend_state);
++		break;
++	}
 +}
 +
-+static int interrupt_read(struct pci_dev *dev, int offset, u8 * value,
-+			  void *data)
++
++static int scsiback_remove(struct xenbus_device *dev)
 +{
-+	*value = (u8) dev->irq;
++	struct backend_info *be = dev->dev.driver_data;
++
++	if (be->info) {
++		scsiback_disconnect(be->info);
++		scsiback_release_translation_entry(be->info);
++		scsiback_free(be->info);
++		be->info = NULL;
++	}
++
++	kfree(be);
++	dev->dev.driver_data = NULL;
 +
 +	return 0;
 +}
 +
-+static int bist_write(struct pci_dev *dev, int offset, u8 value, void *data)
++
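++/* xenbus probe: allocate the backend structure and enter InitWait. */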
++static int scsiback_probe(struct xenbus_device *dev,
++			   const struct xenbus_device_id *id)
 +{
-+	u8 cur_value;
 +	int err;
 +
-+	err = pci_read_config_byte(dev, offset, &cur_value);
++	struct backend_info *be = kzalloc(sizeof(struct backend_info),
++					  GFP_KERNEL);
++
++	DPRINTK("%p %d\n", dev, dev->otherend_id);
++
++	if (!be) {
++		xenbus_dev_fatal(dev, -ENOMEM,
++				 "allocating backend structure");
++		return -ENOMEM;
++	}
++	be->dev = dev;
++	dev->dev.driver_data = be;
++
++	be->info = vscsibk_info_alloc(dev->otherend_id);
++	if (IS_ERR(be->info)) {
++		err = PTR_ERR(be->info);
++		be->info = NULL;
++		xenbus_dev_fatal(dev, err, "creating scsihost interface");
++		goto fail;
++	}
++
++	be->info->dev = dev;
++	be->info->irq = 0;
++
++	scsiback_init_translation_table(be->info);
++
++	err = xenbus_switch_state(dev, XenbusStateInitWait);
 +	if (err)
-+		goto out;
++		goto fail;
 +
-+	if ((cur_value & ~PCI_BIST_START) == (value & ~PCI_BIST_START)
-+	    || value == PCI_BIST_START)
-+		err = pci_write_config_byte(dev, offset, value);
++	return 0;
++
++
++fail:
++	printk(KERN_WARNING "scsiback: %s failed\n",__FUNCTION__);
++	scsiback_remove(dev);
 +
-+      out:
 +	return err;
 +}
 +
-+static struct config_field header_common[] = {
-+	{
-+	 .offset    = PCI_COMMAND,
-+	 .size      = 2,
-+	 .u.w.read  = pciback_read_config_word,
-+	 .u.w.write = command_write,
-+	},
-+	{
-+	 .offset    = PCI_INTERRUPT_LINE,
-+	 .size      = 1,
-+	 .u.b.read  = interrupt_read,
-+	},
-+	{
-+	 .offset    = PCI_INTERRUPT_PIN,
-+	 .size      = 1,
-+	 .u.b.read  = pciback_read_config_byte,
-+	},
-+	{
-+	 /* Any side effects of letting driver domain control cache line? */
-+	 .offset    = PCI_CACHE_LINE_SIZE,
-+	 .size      = 1,
-+	 .u.b.read  = pciback_read_config_byte,
-+	 .u.b.write = pciback_write_config_byte,
-+	},
-+	{
-+	 .offset    = PCI_LATENCY_TIMER,
-+	 .size      = 1,
-+	 .u.b.read  = pciback_read_config_byte,
-+	},
-+	{
-+	 .offset    = PCI_BIST,
-+	 .size      = 1,
-+	 .u.b.read  = pciback_read_config_byte,
-+	 .u.b.write = bist_write,
-+	},
-+	{
-+	 .size = 0,
-+	},
++
++static struct xenbus_device_id scsiback_ids[] = {
++	{ "vscsi" },
++	{ "" }
 +};
 +
-+#define CFG_FIELD_BAR(reg_offset) 			\
-+	{ 						\
-+	 .offset     = reg_offset, 			\
-+	 .size       = 4, 				\
-+	 .init       = bar_init, 			\
-+	 .reset      = bar_reset, 			\
-+	 .release    = bar_release, 			\
-+	 .u.dw.read  = bar_read, 			\
-+	 .u.dw.write = bar_write, 			\
-+	 }
++static struct xenbus_driver scsiback = {
++	.name			= "vscsi",
++	.owner			= THIS_MODULE,
++	.ids			= scsiback_ids,
++	.probe			= scsiback_probe,
++	.remove			= scsiback_remove,
++	.otherend_changed	= scsiback_frontend_changed
++};
 +
-+#define CFG_FIELD_ROM(reg_offset) 			\
-+	{ 						\
-+	 .offset     = reg_offset, 			\
-+	 .size       = 4, 				\
-+	 .init       = rom_init, 			\
-+	 .reset      = bar_reset, 			\
-+	 .release    = bar_release, 			\
-+	 .u.dw.read  = bar_read, 			\
-+	 .u.dw.write = rom_write, 			\
-+	 }
++int scsiback_xenbus_init(void)
++{
++	return xenbus_register_backend(&scsiback);
++}
 +
-+static struct config_field header_0[] = {
-+	CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
-+	CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
-+	CFG_FIELD_BAR(PCI_BASE_ADDRESS_2),
-+	CFG_FIELD_BAR(PCI_BASE_ADDRESS_3),
-+	CFG_FIELD_BAR(PCI_BASE_ADDRESS_4),
-+	CFG_FIELD_BAR(PCI_BASE_ADDRESS_5),
-+	CFG_FIELD_ROM(PCI_ROM_ADDRESS),
-+	{
-+	 .size = 0,
-+	},
-+};
++void scsiback_xenbus_unregister(void)
++{
++	xenbus_unregister_driver(&scsiback);
++}
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/scsifront/Makefile
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/scsifront/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,3 @@
 +
-+static struct config_field header_1[] = {
-+	CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
-+	CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
-+	CFG_FIELD_ROM(PCI_ROM_ADDRESS1),
-+	{
-+	 .size = 0,
-+	},
++obj-$(CONFIG_XEN_SCSI_FRONTEND)	:= xenscsi.o
++xenscsi-objs := scsifront.o xenbus.o
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/scsifront/common.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/scsifront/common.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,129 @@
++/*
++ * Xen SCSI frontend driver
++ *
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_DRIVERS_SCSIFRONT_H__
++#define __XEN_DRIVERS_SCSIFRONT_H__
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/device.h>
++#include <linux/kthread.h>
++#include <linux/wait.h>
++#include <linux/interrupt.h>
++#include <linux/spinlock.h>
++#include <linux/sched.h>
++#include <linux/blkdev.h>
++#include <scsi/scsi_cmnd.h>
++#include <scsi/scsi_device.h>
++#include <scsi/scsi.h>
++#include <scsi/scsi_host.h>
++#include <xen/xenbus.h>
++#include <xen/gnttab.h>
++#include <xen/evtchn.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/io/ring.h>
++#include <xen/interface/io/vscsiif.h>
++#include <asm/delay.h>
++
++
++#define GRANT_INVALID_REF	0
++#define VSCSI_IN_ABORT		1
++#define VSCSI_IN_RESET		2
++
++/* tuning points */
++#define VSCSIIF_DEFAULT_CMD_PER_LUN 10
++#define VSCSIIF_MAX_TARGET          64
++#define VSCSIIF_MAX_LUN             255
++
++#define VSCSIIF_RING_SIZE	\
++    __RING_SIZE((struct vscsiif_sring *)0, PAGE_SIZE)
++#define VSCSIIF_MAX_REQS	VSCSIIF_RING_SIZE
++
++struct vscsifrnt_shadow {
++	uint16_t next_free;
++	
++	/* command between backend and frontend
++	 * VSCSIIF_ACT_SCSI_CDB or VSCSIIF_ACT_SCSI_RESET */
++	unsigned char act;
++	
++	/* do reset function */
++	wait_queue_head_t wq_reset;	/* reset work queue           */
++	int wait_reset;			/* reset work queue condition */
++	int32_t rslt_reset;		/* reset response status      */
++					/* (SUCCESS or FAILED)        */
++
++	/* for DMA_TO_DEVICE(1), DMA_FROM_DEVICE(2), DMA_NONE(3) 
++	   requests */
++	unsigned int sc_data_direction;
++	
++	/* Number of pieces of scatter-gather */
++	unsigned int nr_segments;
++
++	/* requested struct scsi_cmnd is stored from kernel */
++	unsigned long req_scsi_cmnd;
++	int gref[VSCSIIF_SG_TABLESIZE];
 +};
 +
-+int pciback_config_header_add_fields(struct pci_dev *dev)
-+{
-+	int err;
++struct vscsifrnt_info {
++	struct xenbus_device *dev;
 +
-+	err = pciback_config_add_fields(dev, header_common);
-+	if (err)
-+		goto out;
++	struct Scsi_Host *host;
 +
-+	switch (dev->hdr_type) {
-+	case PCI_HEADER_TYPE_NORMAL:
-+		err = pciback_config_add_fields(dev, header_0);
-+		break;
++	spinlock_t io_lock;
++	spinlock_t shadow_lock;
++	unsigned int evtchn;
++	unsigned int irq;
 +
-+	case PCI_HEADER_TYPE_BRIDGE:
-+		err = pciback_config_add_fields(dev, header_1);
-+		break;
++	grant_ref_t ring_ref;
++	struct vscsiif_front_ring ring;
++	struct vscsiif_response	ring_res;
 +
-+	default:
-+		err = -EINVAL;
-+		printk(KERN_ERR "pciback: %s: Unsupported header type %d!\n",
-+		       pci_name(dev), dev->hdr_type);
-+		break;
-+	}
++	struct vscsifrnt_shadow shadow[VSCSIIF_MAX_REQS];
++	uint32_t shadow_free;
 +
-+      out:
-+	return err;
-+}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/pciback/conf_space_quirks.c tmp-linux-2.6-xen.patch/drivers/xen/pciback/conf_space_quirks.c
---- pristine-linux-2.6.18.2/drivers/xen/pciback/conf_space_quirks.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/pciback/conf_space_quirks.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,126 @@
++	struct task_struct *kthread;
++	wait_queue_head_t wq;
++	unsigned int waiting_resp;
++
++};
++
++#define DPRINTK(_f, _a...)				\
++	pr_debug("(file=%s, line=%d) " _f,	\
++		 __FILE__ , __LINE__ , ## _a )
++
++int scsifront_xenbus_init(void);
++void scsifront_xenbus_unregister(void);
++int scsifront_schedule(void *data);
++irqreturn_t scsifront_intr(int irq, void *dev_id, struct pt_regs *ptregs);
++int scsifront_cmd_done(struct vscsifrnt_info *info);
++
++
++#endif /* __XEN_DRIVERS_SCSIFRONT_H__  */
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/scsifront/scsifront.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/scsifront/scsifront.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,511 @@
 +/*
-+ * PCI Backend - Handle special overlays for broken devices.
++ * Xen SCSI frontend driver
 + *
-+ * Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
-+ * Author: Chris Bookholt <hap10 at epoch.ncsc.mil>
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
 + */
++ 
 +
-+#include <linux/kernel.h>
-+#include <linux/pci.h>
-+#include "pciback.h"
-+#include "conf_space.h"
-+#include "conf_space_quirks.h"
-+
-+LIST_HEAD(pciback_quirks);
++#include <linux/version.h>
++#include "common.h"
 +
-+struct pciback_config_quirk *pciback_find_quirk(struct pci_dev *dev)
++static int get_id_from_freelist(struct vscsifrnt_info *info)
 +{
-+	struct pciback_config_quirk *tmp_quirk;
++	unsigned long flags;
++	uint32_t free;
 +
-+	list_for_each_entry(tmp_quirk, &pciback_quirks, quirks_list)
-+	    if (pci_match_id(&tmp_quirk->devid, dev))
-+		goto out;
-+	tmp_quirk = NULL;
-+	printk(KERN_DEBUG
-+	       "quirk didn't match any device pciback knows about\n");
-+      out:
-+	return tmp_quirk;
-+}
++	spin_lock_irqsave(&info->shadow_lock, flags);
 +
-+static inline void register_quirk(struct pciback_config_quirk *quirk)
-+{
-+	list_add_tail(&quirk->quirks_list, &pciback_quirks);
++	free = info->shadow_free;
++	BUG_ON(free > VSCSIIF_MAX_REQS);
++	info->shadow_free = info->shadow[free].next_free;
++	info->shadow[free].next_free = 0x0fff;
++
++	info->shadow[free].wait_reset = 0;
++
++	spin_unlock_irqrestore(&info->shadow_lock, flags);
++
++	return free;
 +}
 +
-+int pciback_field_is_dup(struct pci_dev *dev, unsigned int reg)
++static void add_id_to_freelist(struct vscsifrnt_info *info, uint32_t id)
 +{
-+	int ret = 0;
-+	struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+	struct config_field_entry *cfg_entry;
++	unsigned long flags;
 +
-+	list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
-+		if ( OFFSET(cfg_entry) == reg) {
-+			ret = 1;
-+			break;
-+		}
-+	}
-+	return ret;
++	spin_lock_irqsave(&info->shadow_lock, flags);
++
++	info->shadow[id].next_free  = info->shadow_free;
++	info->shadow[id].req_scsi_cmnd = 0;
++	info->shadow_free = id;
++
++	spin_unlock_irqrestore(&info->shadow_lock, flags);
 +}
 +
-+int pciback_config_quirks_add_field(struct pci_dev *dev, struct config_field
-+				    *field)
++
++struct vscsiif_request * scsifront_pre_request(struct vscsifrnt_info *info)
 +{
-+	int err = 0;
++	struct vscsiif_front_ring *ring = &(info->ring);
++	vscsiif_request_t *ring_req;
++	uint32_t id;
 +
-+	switch (field->size) {
-+	case 1:
-+		field->u.b.read = pciback_read_config_byte;
-+		field->u.b.write = pciback_write_config_byte;
-+		break;
-+	case 2:
-+		field->u.w.read = pciback_read_config_word;
-+		field->u.w.write = pciback_write_config_word;
-+		break;
-+	case 4:
-+		field->u.dw.read = pciback_read_config_dword;
-+		field->u.dw.write = pciback_write_config_dword;
-+		break;
-+	default:
-+		err = -EINVAL;
-+		goto out;
-+	}
++	ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
 +
-+	pciback_config_add_field(dev, field);
++	ring->req_prod_pvt++;
++	
++	id = get_id_from_freelist(info);	/* id is echoed back in the response */
++	ring_req->rqid = (uint16_t)id;
 +
-+      out:
-+	return err;
++	return ring_req;
 +}
 +
-+int pciback_config_quirks_init(struct pci_dev *dev)
-+{
-+	struct pciback_config_quirk *quirk;
-+	int ret = 0;
 +
-+	quirk = kzalloc(sizeof(*quirk), GFP_ATOMIC);
-+	if (!quirk) {
-+		ret = -ENOMEM;
-+		goto out;
-+	}
++static void scsifront_notify_work(struct vscsifrnt_info *info)
++{
++	info->waiting_resp = 1;
++	wake_up(&info->wq);
++}
 +
-+	quirk->devid.vendor = dev->vendor;
-+	quirk->devid.device = dev->device;
-+	quirk->devid.subvendor = dev->subsystem_vendor;
-+	quirk->devid.subdevice = dev->subsystem_device;
-+	quirk->devid.class = 0;
-+	quirk->devid.class_mask = 0;
-+	quirk->devid.driver_data = 0UL;
 +
-+	quirk->pdev = dev;
++static void scsifront_do_request(struct vscsifrnt_info *info)
++{
++	struct vscsiif_front_ring *ring = &(info->ring);
++	unsigned int irq = info->irq;
++	int notify;
 +
-+	register_quirk(quirk);
-+      out:
-+	return ret;
++	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
++	if (notify)
++		notify_remote_via_irq(irq);
 +}
 +
-+void pciback_config_field_free(struct config_field *field)
++irqreturn_t scsifront_intr(int irq, void *dev_id, struct pt_regs *ptregs)
 +{
-+	kfree(field);
++	scsifront_notify_work((struct vscsifrnt_info *)dev_id);
++	return IRQ_HANDLED;
 +}
 +
-+int pciback_config_quirk_release(struct pci_dev *dev)
++
++static void scsifront_gnttab_done(struct vscsifrnt_shadow *s, uint32_t id)
 +{
-+	struct pciback_config_quirk *quirk;
-+	int ret = 0;
++	int i;
 +
-+	quirk = pciback_find_quirk(dev);
-+	if (!quirk) {
-+		ret = -ENXIO;
-+		goto out;
-+	}
++	if (s->sc_data_direction == DMA_NONE)
++		return;
 +
-+	list_del(&quirk->quirks_list);
-+	kfree(quirk);
++	if (s->nr_segments) {
++		for (i = 0; i < s->nr_segments; i++) {
++			if (unlikely(gnttab_query_foreign_access(
++				s->gref[i]) != 0)) {
++				printk(KERN_ALERT "scsifront: "
++					"grant still in use by backend.\n");
++				BUG();
++			}
++			gnttab_end_foreign_access(s->gref[i], 0UL);
++		}
++	}
 +
-+      out:
-+	return ret;
++	return;
 +}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/pciback/conf_space_quirks.h tmp-linux-2.6-xen.patch/drivers/xen/pciback/conf_space_quirks.h
---- pristine-linux-2.6.18.2/drivers/xen/pciback/conf_space_quirks.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/pciback/conf_space_quirks.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,35 @@
-+/*
-+ * PCI Backend - Data structures for special overlays for broken devices.
-+ *
-+ * Ryan Wilson <hap9 at epoch.ncsc.mil>
-+ * Chris Bookholt <hap10 at epoch.ncsc.mil>
-+ */
 +
-+#ifndef __XEN_PCIBACK_CONF_SPACE_QUIRKS_H__
-+#define __XEN_PCIBACK_CONF_SPACE_QUIRKS_H__
 +
-+#include <linux/pci.h>
-+#include <linux/list.h>
++static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
++		       vscsiif_response_t *ring_res)
++{
++	struct scsi_cmnd *sc;
++	uint32_t id;
++	uint8_t sense_len;
 +
-+struct pciback_config_quirk {
-+	struct list_head quirks_list;
-+	struct pci_device_id devid;
-+	struct pci_dev *pdev;
-+};
++	id = ring_res->rqid;
++	sc = (struct scsi_cmnd *)info->shadow[id].req_scsi_cmnd;
 +
-+struct pciback_config_quirk *pciback_find_quirk(struct pci_dev *dev);
++	if (sc == NULL)
++		BUG();
 +
-+int pciback_config_quirks_add_field(struct pci_dev *dev, struct config_field
-+				    *field);
++	scsifront_gnttab_done(&info->shadow[id], id);
++	add_id_to_freelist(info, id);
 +
-+int pciback_config_quirks_remove_field(struct pci_dev *dev, int reg);
++	sc->result = ring_res->rslt;
++	sc->resid  = ring_res->residual_len;
 +
-+int pciback_config_quirks_init(struct pci_dev *dev);
++	if (ring_res->sense_len > VSCSIIF_SENSE_BUFFERSIZE)
++		sense_len = VSCSIIF_SENSE_BUFFERSIZE;
++	else
++		sense_len = ring_res->sense_len;
 +
-+void pciback_config_field_free(struct config_field *field);
++	if (sense_len)
++		memcpy(sc->sense_buffer, ring_res->sense_buffer, sense_len);
 +
-+int pciback_config_quirk_release(struct pci_dev *dev);
++	sc->scsi_done(sc);
 +
-+int pciback_field_is_dup(struct pci_dev *dev, unsigned int reg);
++	return;
++}
 +
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/pciback/controller.c tmp-linux-2.6-xen.patch/drivers/xen/pciback/controller.c
---- pristine-linux-2.6.18.2/drivers/xen/pciback/controller.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/pciback/controller.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,404 @@
-+/*
-+ * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
-+ *      Alex Williamson <alex.williamson at hp.com>
-+ *
-+ * PCI "Controller" Backend - virtualize PCI bus topology based on PCI
-+ * controllers.  Devices under the same PCI controller are exposed on the
-+ * same virtual domain:bus.  Within a bus, device slots are virtualized
-+ * to compact the bus.
-+ *
-+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-+ */
 +
-+#include <linux/acpi.h>
-+#include <linux/list.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include "pciback.h"
++static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
++				vscsiif_response_t *ring_res)
++{
++	uint16_t id = ring_res->rqid;
++	unsigned long flags;
++	
++	spin_lock_irqsave(&info->shadow_lock, flags);
++	info->shadow[id].wait_reset = 1;
++	info->shadow[id].rslt_reset = ring_res->rslt;
++	spin_unlock_irqrestore(&info->shadow_lock, flags);
 +
-+#define PCI_MAX_BUSSES	255
-+#define PCI_MAX_SLOTS	32
++	wake_up(&(info->shadow[id].wq_reset));
++}
 +
-+struct controller_dev_entry {
-+	struct list_head list;
-+	struct pci_dev *dev;
-+	unsigned int devfn;
-+};
 +
-+struct controller_list_entry {
-+	struct list_head list;
-+	struct pci_controller *controller;
-+	unsigned int domain;
-+	unsigned int bus;
-+	unsigned int next_devfn;
-+	struct list_head dev_list;
-+};
++int scsifront_cmd_done(struct vscsifrnt_info *info)
++{
++	vscsiif_response_t *ring_res;
 +
-+struct controller_dev_data {
-+	struct list_head list;
-+	unsigned int next_domain;
-+	unsigned int next_bus;
-+	spinlock_t lock;
-+};
++	RING_IDX i, rp;
++	int more_to_do = 0;
++	unsigned long flags;
 +
-+struct walk_info {
-+	struct pciback_device *pdev;
-+	int resource_count;
-+	int root_num;
-+};
++	spin_lock_irqsave(&info->io_lock, flags);
 +
-+struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
-+				    unsigned int domain, unsigned int bus,
-+				    unsigned int devfn)
-+{
-+	struct controller_dev_data *dev_data = pdev->pci_dev_data;
-+	struct controller_dev_entry *dev_entry;
-+	struct controller_list_entry *cntrl_entry;
-+	struct pci_dev *dev = NULL;
-+	unsigned long flags;
++	rp = info->ring.sring->rsp_prod;
++	rmb();
++	for (i = info->ring.rsp_cons; i != rp; i++) {
++		
++		ring_res = RING_GET_RESPONSE(&info->ring, i);
 +
-+	spin_lock_irqsave(&dev_data->lock, flags);
++		if (info->shadow[ring_res->rqid].act == VSCSIIF_ACT_SCSI_CDB)
++			scsifront_cdb_cmd_done(info, ring_res);
++		else
++			scsifront_sync_cmd_done(info, ring_res);
++	}
 +
-+	list_for_each_entry(cntrl_entry, &dev_data->list, list) {
-+		if (cntrl_entry->domain != domain ||
-+		    cntrl_entry->bus != bus)
-+			continue;
++	info->ring.rsp_cons = i;
 +
-+		list_for_each_entry(dev_entry, &cntrl_entry->dev_list, list) {
-+			if (devfn == dev_entry->devfn) {
-+				dev = dev_entry->dev;
-+				goto found;
-+			}
-+		}
++	if (i != info->ring.req_prod_pvt) {
++		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
++	} else {
++		info->ring.sring->rsp_event = i + 1;
 +	}
-+found:
-+	spin_unlock_irqrestore(&dev_data->lock, flags);
 +
-+	return dev;
-+}
++	spin_unlock_irqrestore(&info->io_lock, flags);
 +
-+int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
-+{
-+	struct controller_dev_data *dev_data = pdev->pci_dev_data;
-+	struct controller_dev_entry *dev_entry;
-+	struct controller_list_entry *cntrl_entry;
-+	struct pci_controller *dev_controller = PCI_CONTROLLER(dev);
-+	unsigned long flags;
-+	int ret = 0, found = 0;
 +
-+	spin_lock_irqsave(&dev_data->lock, flags);
++	/* Yield point for this unbounded loop. */
++	cond_resched();
 +
-+	/* Look to see if we already have a domain:bus for this controller */
-+	list_for_each_entry(cntrl_entry, &dev_data->list, list) {
-+		if (cntrl_entry->controller == dev_controller) {
-+			found = 1;
-+			break;
-+		}
-+	}
++	return more_to_do;
++}
 +
-+	if (!found) {
-+		cntrl_entry = kmalloc(sizeof(*cntrl_entry), GFP_ATOMIC);
-+		if (!cntrl_entry) {
-+			ret =  -ENOMEM;
-+			goto out;
-+		}
 +
-+		cntrl_entry->controller = dev_controller;
-+		cntrl_entry->next_devfn = PCI_DEVFN(0, 0);
 +
-+		cntrl_entry->domain = dev_data->next_domain;
-+		cntrl_entry->bus = dev_data->next_bus++;
-+		if (dev_data->next_bus > PCI_MAX_BUSSES) {
-+			dev_data->next_domain++;
-+			dev_data->next_bus = 0;
-+		}
 +
-+		INIT_LIST_HEAD(&cntrl_entry->dev_list);
++int scsifront_schedule(void *data)
++{
++	struct vscsifrnt_info *info = (struct vscsifrnt_info *)data;
 +
-+		list_add_tail(&cntrl_entry->list, &dev_data->list);
-+	}
++	while (!kthread_should_stop()) {
++		wait_event_interruptible(
++			info->wq,
++			info->waiting_resp || kthread_should_stop());
 +
-+	if (PCI_SLOT(cntrl_entry->next_devfn) > PCI_MAX_SLOTS) {
-+		/*
-+		 * While it seems unlikely, this can actually happen if
-+		 * a controller has P2P bridges under it.
-+		 */
-+		xenbus_dev_fatal(pdev->xdev, -ENOSPC, "Virtual bus %04x:%02x "
-+				 "is full, no room to export %04x:%02x:%02x.%x",
-+				 cntrl_entry->domain, cntrl_entry->bus,
-+				 pci_domain_nr(dev->bus), dev->bus->number,
-+				 PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
-+		ret = -ENOSPC;
-+		goto out;
-+	}
++		info->waiting_resp = 0;
++		smp_mb();
 +
-+	dev_entry = kmalloc(sizeof(*dev_entry), GFP_ATOMIC);
-+	if (!dev_entry) {
-+		if (list_empty(&cntrl_entry->dev_list)) {
-+			list_del(&cntrl_entry->list);
-+			kfree(cntrl_entry);
-+		}
-+		ret = -ENOMEM;
-+		goto out;
++		if (scsifront_cmd_done(info))
++			info->waiting_resp = 1;
 +	}
 +
-+	dev_entry->dev = dev;
-+	dev_entry->devfn = cntrl_entry->next_devfn;
-+
-+	list_add_tail(&dev_entry->list, &cntrl_entry->dev_list);
++	return 0;
++}
 +
-+	cntrl_entry->next_devfn += PCI_DEVFN(1, 0);
 +
-+out:
-+	spin_unlock_irqrestore(&dev_data->lock, flags);
-+	return ret;
-+}
 +
-+void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
++static int map_data_for_request(struct vscsifrnt_info *info,
++		struct scsi_cmnd *sc, vscsiif_request_t *ring_req, uint32_t id)
 +{
-+	struct controller_dev_data *dev_data = pdev->pci_dev_data;
-+	struct controller_list_entry *cntrl_entry;
-+	struct controller_dev_entry *dev_entry = NULL;
-+	struct pci_dev *found_dev = NULL;
-+	unsigned long flags;
++	grant_ref_t gref_head;
++	struct page *page;
++	int err, i, ref, ref_cnt = 0;
++	int write = (sc->sc_data_direction == DMA_TO_DEVICE);
++	int nr_pages, off, len, bytes;
++	unsigned long buffer_pfn;
++	unsigned int data_len = 0;
 +
-+	spin_lock_irqsave(&dev_data->lock, flags);
++	if (sc->sc_data_direction == DMA_NONE)
++		return 0;
 +
-+	list_for_each_entry(cntrl_entry, &dev_data->list, list) {
-+		if (cntrl_entry->controller != PCI_CONTROLLER(dev))
-+			continue;
++	err = gnttab_alloc_grant_references(VSCSIIF_SG_TABLESIZE, &gref_head);
++	if (err) {
++		printk(KERN_ERR "scsifront: gnttab_alloc_grant_references() error\n");
++		return -ENOMEM;
++	}
 +
-+		list_for_each_entry(dev_entry, &cntrl_entry->dev_list, list) {
-+			if (dev_entry->dev == dev) {
-+				found_dev = dev_entry->dev;
-+				break;
++	if (sc->use_sg) {
++		/* adapted from scsi_req_map_sg() in scsi_lib.c */
++		struct scatterlist *sg = (struct scatterlist *)sc->request_buffer;
++		nr_pages = (sc->request_bufflen + sg[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
++
++		if (nr_pages > VSCSIIF_SG_TABLESIZE) {
++			printk(KERN_ERR "scsifront: Unable to map request_buffer for command!\n");
++			ref_cnt = (-E2BIG);
++			goto big_to_sg;
++		}
++
++		for (i = 0; i < sc->use_sg; i++) {
++			page = sg[i].page;
++			off = sg[i].offset;
++			len = sg[i].length;
++			data_len += len;
++
++			buffer_pfn = page_to_phys(page) >> PAGE_SHIFT;
++
++			while (len > 0) {
++				bytes = min_t(unsigned int, len, PAGE_SIZE - off);
++				
++				ref = gnttab_claim_grant_reference(&gref_head);
++				BUG_ON(ref == -ENOSPC);
++
++				gnttab_grant_foreign_access_ref(ref, info->dev->otherend_id,
++					buffer_pfn, write);
++
++				info->shadow[id].gref[ref_cnt]  = ref;
++				ring_req->seg[ref_cnt].gref     = ref;
++				ring_req->seg[ref_cnt].offset   = (uint16_t)off;
++				ring_req->seg[ref_cnt].length   = (uint16_t)bytes;
++
++				buffer_pfn++;
++				len -= bytes;
++				off = 0;
++				ref_cnt++;
 +			}
 +		}
-+	}
++	} else if (sc->request_bufflen) {
++		unsigned long end   = ((unsigned long)sc->request_buffer
++					+ sc->request_bufflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
++		unsigned long start = (unsigned long)sc->request_buffer >> PAGE_SHIFT;
 +
-+	if (!found_dev) {
-+		spin_unlock_irqrestore(&dev_data->lock, flags);
-+		return;
-+	}
++		page = virt_to_page(sc->request_buffer);
++		nr_pages = end - start;
++		len = sc->request_bufflen;
 +
-+	list_del(&dev_entry->list);
-+	kfree(dev_entry);
++		if (nr_pages > VSCSIIF_SG_TABLESIZE) {
++			ref_cnt = (-E2BIG);
++			goto big_to_sg;
++		}
 +
-+	if (list_empty(&cntrl_entry->dev_list)) {
-+		list_del(&cntrl_entry->list);
-+		kfree(cntrl_entry);
-+	}
++		buffer_pfn = page_to_phys(page) >> PAGE_SHIFT;
 +
-+	spin_unlock_irqrestore(&dev_data->lock, flags);
-+	pcistub_put_pci_dev(found_dev);
-+}
++		off = offset_in_page((unsigned long)sc->request_buffer);
++		for (i = 0; i < nr_pages; i++) {
++			bytes = PAGE_SIZE - off;
 +
-+int pciback_init_devices(struct pciback_device *pdev)
-+{
-+	struct controller_dev_data *dev_data;
++			if (bytes > len)
++				bytes = len;
 +
-+	dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
-+	if (!dev_data)
-+		return -ENOMEM;
++			ref = gnttab_claim_grant_reference(&gref_head);
++			BUG_ON(ref == -ENOSPC);
 +
-+	spin_lock_init(&dev_data->lock);
++			gnttab_grant_foreign_access_ref(ref, info->dev->otherend_id,
++				buffer_pfn, write);
 +
-+	INIT_LIST_HEAD(&dev_data->list);
++			info->shadow[id].gref[i] = ref;
++			ring_req->seg[i].gref     = ref;
++			ring_req->seg[i].offset   = (uint16_t)off;
++			ring_req->seg[i].length   = (uint16_t)bytes;
 +
-+	/* Starting domain:bus numbers */
-+	dev_data->next_domain = 0;
-+	dev_data->next_bus = 0;
++			buffer_pfn++;
++			len -= bytes;
++			off = 0;
++			ref_cnt++;
++		}
++	}
 +
-+	pdev->pci_dev_data = dev_data;
++big_to_sg:
 +
-+	return 0;
++	gnttab_free_grant_references(gref_head);
++
++	return ref_cnt;
 +}
 +
-+static acpi_status write_xenbus_resource(struct acpi_resource *res, void *data)
++static int scsifront_queuecommand(struct scsi_cmnd *sc,
++				  void (*done)(struct scsi_cmnd *))
 +{
-+	struct walk_info *info = data;
-+	struct acpi_resource_address64 addr;
-+	acpi_status status;
-+	int i, len, err;
-+	char str[32], tmp[3];
-+	unsigned char *ptr, *buf;
++	struct vscsifrnt_info *info =
++		(struct vscsifrnt_info *) sc->device->host->hostdata;
++	vscsiif_request_t *ring_req;
++	int ref_cnt;
++	uint16_t rqid;
 +
-+	status = acpi_resource_to_address64(res, &addr);
++	if (RING_FULL(&info->ring)) {
++		goto out_host_busy;
++	}
 +
-+	/* Do we care about this range?  Let's check. */
-+	if (!ACPI_SUCCESS(status) ||
-+	    !(addr.resource_type == ACPI_MEMORY_RANGE ||
-+	      addr.resource_type == ACPI_IO_RANGE) ||
-+	    !addr.address_length || addr.producer_consumer != ACPI_PRODUCER)
-+		return AE_OK;
++	sc->scsi_done = done;
++	sc->result    = 0;
 +
-+	/*
-+	 * Furthermore, we really only care to tell the guest about
-+	 * address ranges that require address translation of some sort.
-+	 */
-+	if (!(addr.resource_type == ACPI_MEMORY_RANGE &&
-+	      addr.info.mem.translation) &&
-+	    !(addr.resource_type == ACPI_IO_RANGE &&
-+	      addr.info.io.translation))
-+		return AE_OK;
-+	   
-+	/* Store the resource in xenbus for the guest */
-+	len = snprintf(str, sizeof(str), "root-%d-resource-%d",
-+		       info->root_num, info->resource_count);
-+	if (unlikely(len >= (sizeof(str) - 1)))
-+		return AE_OK;
++	ring_req          = scsifront_pre_request(info);
++	rqid              = ring_req->rqid;
++	ring_req->act     = VSCSIIF_ACT_SCSI_CDB;
 +
-+	buf = kzalloc((sizeof(*res) * 2) + 1, GFP_KERNEL);
-+	if (!buf)
-+		return AE_OK;
++	ring_req->id      = sc->device->id;
++	ring_req->lun     = sc->device->lun;
++	ring_req->channel = sc->device->channel;
++	ring_req->cmd_len = sc->cmd_len;
 +
-+	/* Clean out resource_source */
-+	res->data.address64.resource_source.index = 0xFF;
-+	res->data.address64.resource_source.string_length = 0;
-+	res->data.address64.resource_source.string_ptr = NULL;
++	BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
 +
-+	ptr = (unsigned char *)res;
++	if ( sc->cmd_len )
++		memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
++	else
++		memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE);
 +
-+	/* Turn the acpi_resource into an ASCII byte stream */
-+	for (i = 0; i < sizeof(*res); i++) {
-+		snprintf(tmp, sizeof(tmp), "%02x", ptr[i]);
-+		strncat(buf, tmp, 2);
++	ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
++	ring_req->timeout_per_command = (sc->timeout_per_command / HZ);
++
++	info->shadow[rqid].req_scsi_cmnd     = (unsigned long)sc;
++	info->shadow[rqid].sc_data_direction = sc->sc_data_direction;
++	info->shadow[rqid].act               = ring_req->act;
++
++	ref_cnt = map_data_for_request(info, sc, ring_req, rqid);
++	if (ref_cnt < 0) {
++		add_id_to_freelist(info, rqid);
++		if (ref_cnt == (-ENOMEM))
++			goto out_host_busy;
++		else {
++			sc->result = (DID_ERROR << 16);
++			goto out_fail_command;
++		}
 +	}
 +
-+	err = xenbus_printf(XBT_NIL, info->pdev->xdev->nodename,
-+			    str, "%s", buf);
++	ring_req->nr_segments          = (uint8_t)ref_cnt;
++	info->shadow[rqid].nr_segments = ref_cnt;
 +
-+	if (!err)
-+		info->resource_count++;
++	scsifront_do_request(info);
 +
-+	kfree(buf);
++	return 0;
 +
-+	return AE_OK;
++out_host_busy:
++	return SCSI_MLQUEUE_HOST_BUSY;
++
++out_fail_command:
++	done(sc);
++	return 0;
 +}
 +
-+int pciback_publish_pci_roots(struct pciback_device *pdev,
-+			      publish_pci_root_cb publish_root_cb)
++
++static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)
 +{
-+	struct controller_dev_data *dev_data = pdev->pci_dev_data;
-+	struct controller_list_entry *cntrl_entry;
-+	int i, root_num, len, err = 0;
-+	unsigned int domain, bus;
-+	char str[64];
-+	struct walk_info info;
++	return (FAILED);
++}
 +
-+	spin_lock(&dev_data->lock);
++/* vscsi supports only device_reset, because resets apply per LUN */
++static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
++{
++	struct Scsi_Host *host = sc->device->host;
++	struct vscsifrnt_info *info =
++		(struct vscsifrnt_info *) sc->device->host->hostdata;
 +
-+	list_for_each_entry(cntrl_entry, &dev_data->list, list) {
-+		/* First publish all the domain:bus info */
-+		err = publish_root_cb(pdev, cntrl_entry->domain,
-+				      cntrl_entry->bus);
-+		if (err)
-+			goto out;
++	vscsiif_request_t *ring_req;
++	uint16_t rqid;
++	int err;
 +
-+		/*
-+ 		 * Now figure out which root-%d this belongs to
-+		 * so we can associate resources with it.
-+		 */
-+		err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
-+				   "root_num", "%d", &root_num);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
++	spin_lock_irq(host->host_lock);
++#endif
 +
-+		if (err != 1)
-+			goto out;
++	ring_req      = scsifront_pre_request(info);
++	ring_req->act = VSCSIIF_ACT_SCSI_RESET;
 +
-+		for (i = 0; i < root_num; i++) {
-+			len = snprintf(str, sizeof(str), "root-%d", i);
-+			if (unlikely(len >= (sizeof(str) - 1))) {
-+				err = -ENOMEM;
-+				goto out;
-+			}
++	rqid          = ring_req->rqid;
++	info->shadow[rqid].act = VSCSIIF_ACT_SCSI_RESET;
 +
-+			err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
-+					   str, "%x:%x", &domain, &bus);
-+			if (err != 2)
-+				goto out;
++	ring_req->channel = sc->device->channel;
++	ring_req->id      = sc->device->id;
++	ring_req->lun     = sc->device->lun;
++	ring_req->cmd_len = sc->cmd_len;
 +
-+			/* Is this the one we just published? */
-+			if (domain == cntrl_entry->domain &&
-+			    bus == cntrl_entry->bus)
-+				break;
-+		}
++	if ( sc->cmd_len )
++		memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
++	else
++		memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE);
 +
-+		if (i == root_num)
-+			goto out;
++	ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
++	ring_req->timeout_per_command = (sc->timeout_per_command / HZ);
++	ring_req->nr_segments         = 0;
 +
-+		info.pdev = pdev;
-+		info.resource_count = 0;
-+		info.root_num = i;
++	scsifront_do_request(info);	
 +
-+		/* Let ACPI do the heavy lifting on decoding resources */
-+		acpi_walk_resources(cntrl_entry->controller->acpi_handle,
-+				    METHOD_NAME__CRS, write_xenbus_resource,
-+				    &info);
++	spin_unlock_irq(host->host_lock);
++	wait_event_interruptible(info->shadow[rqid].wq_reset,
++			 info->shadow[rqid].wait_reset);
++	spin_lock_irq(host->host_lock);
 +
-+		/* No resouces.  OK.  On to the next one */
-+		if (!info.resource_count)
-+			continue;
++	err = info->shadow[rqid].rslt_reset;
 +
-+		/* Store the number of resources we wrote for this root-%d */
-+		len = snprintf(str, sizeof(str), "root-%d-resources", i);
-+		if (unlikely(len >= (sizeof(str) - 1))) {
-+			err = -ENOMEM;
-+			goto out;
-+		}
++	add_id_to_freelist(info, rqid);
 +
-+		err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
-+				    "%d", info.resource_count);
-+		if (err)
-+			goto out;
-+	}
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
++	spin_unlock_irq(host->host_lock);
++#endif
++	return (err);
++}
 +
-+	/* Finally, write some magic to synchronize with the guest. */
-+	len = snprintf(str, sizeof(str), "root-resource-magic");
-+	if (unlikely(len >= (sizeof(str) - 1))) {
-+		err = -ENOMEM;
-+		goto out;
-+	}
 +
-+	err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
-+			    "%lx", (sizeof(struct acpi_resource) * 2) + 1);
++struct scsi_host_template scsifront_sht = {
++	.module			= THIS_MODULE,
++	.name			= "Xen SCSI frontend driver",
++	.queuecommand		= scsifront_queuecommand,
++	.eh_abort_handler	= scsifront_eh_abort_handler,
++	.eh_device_reset_handler = scsifront_dev_reset_handler,
++	.cmd_per_lun		= VSCSIIF_DEFAULT_CMD_PER_LUN,
++	.can_queue		= VSCSIIF_MAX_REQS,
++	.this_id 		= -1,
++	.sg_tablesize		= VSCSIIF_SG_TABLESIZE,
++	.use_clustering		= DISABLE_CLUSTERING,
++	.proc_name		= "scsifront",
++};
 +
-+out:
-+	spin_unlock(&dev_data->lock);
++
++static int __init scsifront_init(void)
++{
++	int err;
++
++	if (!is_running_on_xen())
++		return -ENODEV;
++
++	err = scsifront_xenbus_init();
 +
 +	return err;
 +}
 +
-+void pciback_release_devices(struct pciback_device *pdev)
++static void __exit scsifront_exit(void)
 +{
-+	struct controller_dev_data *dev_data = pdev->pci_dev_data;
-+	struct controller_list_entry *cntrl_entry, *c;
-+	struct controller_dev_entry *dev_entry, *d;
-+
-+	list_for_each_entry_safe(cntrl_entry, c, &dev_data->list, list) {
-+		list_for_each_entry_safe(dev_entry, d,
-+					 &cntrl_entry->dev_list, list) {
-+			list_del(&dev_entry->list);
-+			pcistub_put_pci_dev(dev_entry->dev);
-+			kfree(dev_entry);
-+		}
-+		list_del(&cntrl_entry->list);
-+		kfree(cntrl_entry);
-+	}
-+
-+	kfree(dev_data);
-+	pdev->pci_dev_data = NULL;
++	scsifront_xenbus_unregister();
 +}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/pciback/Makefile tmp-linux-2.6-xen.patch/drivers/xen/pciback/Makefile
---- pristine-linux-2.6.18.2/drivers/xen/pciback/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/pciback/Makefile	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,16 @@
-+obj-$(CONFIG_XEN_PCIDEV_BACKEND) += pciback.o
 +
-+pciback-y := pci_stub.o pciback_ops.o xenbus.o
-+pciback-y += conf_space.o conf_space_header.o \
-+	     conf_space_capability.o \
-+	     conf_space_capability_vpd.o \
-+	     conf_space_capability_pm.o \
-+             conf_space_quirks.o
-+pciback-$(CONFIG_XEN_PCIDEV_BACKEND_VPCI) += vpci.o
-+pciback-$(CONFIG_XEN_PCIDEV_BACKEND_SLOT) += slot.o
-+pciback-$(CONFIG_XEN_PCIDEV_BACKEND_PASS) += passthrough.o
-+pciback-$(CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER) += controller.o
++module_init(scsifront_init);
++module_exit(scsifront_exit);
 +
-+ifeq ($(CONFIG_XEN_PCIDEV_BE_DEBUG),y)
-+EXTRA_CFLAGS += -DDEBUG
-+endif
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/pciback/passthrough.c tmp-linux-2.6-xen.patch/drivers/xen/pciback/passthrough.c
---- pristine-linux-2.6.18.2/drivers/xen/pciback/passthrough.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/pciback/passthrough.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,157 @@
++MODULE_DESCRIPTION("Xen SCSI frontend driver");
++MODULE_LICENSE("GPL");
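
scsifront.c drives a single producer/consumer ring: scsifront_pre_request()
claims a slot and an id, scsifront_do_request() publishes the request
producer index and notifies the backend, and scsifront_cmd_done() walks the
response ring.  The toy userspace model below shows only the index
arithmetic; the real shared ring keeps requests and responses in one slot
array, inserts memory barriers, and signals over an event channel, so the
split arrays and the direct backend_service() call are simplifications made
for the sketch.

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 8  /* power of two; stands in for VSCSIIF_RING_SIZE */

    struct ring {
        uint32_t req_prod, req_cons;    /* request producer/consumer */
        uint32_t rsp_prod, rsp_cons;    /* response producer/consumer */
        uint16_t req[RING_SIZE];
        uint16_t rsp[RING_SIZE];
    };

    static int ring_full(const struct ring *r)
    {
        /* every slot holds an in-flight request, cf. RING_FULL() */
        return r->req_prod - r->rsp_cons == RING_SIZE;
    }

    static void frontend_post(struct ring *r, uint16_t rqid)
    {
        r->req[r->req_prod % RING_SIZE] = rqid;
        r->req_prod++;   /* the driver follows this with a notify */
    }

    static void backend_service(struct ring *r)
    {
        while (r->req_cons != r->req_prod) {
            uint16_t rqid = r->req[r->req_cons++ % RING_SIZE];
            r->rsp[r->rsp_prod++ % RING_SIZE] = rqid;  /* echo the rqid */
        }
    }

    static void frontend_complete(struct ring *r)
    {
        while (r->rsp_cons != r->rsp_prod)
            printf("rqid %u completed\n",
                   r->rsp[r->rsp_cons++ % RING_SIZE]);
    }

    int main(void)
    {
        struct ring r = { 0 };

        if (!ring_full(&r))
            frontend_post(&r, 1);
        frontend_post(&r, 2);
        backend_service(&r);
        frontend_complete(&r);
        return 0;
    }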
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/scsifront/xenbus.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/scsifront/xenbus.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,421 @@
 +/*
-+ * PCI Backend - Provides restricted access to the real PCI bus topology
-+ *               to the frontend
++ * Xen SCSI frontend driver
 + *
-+ *   Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
 + */
++ 
 +
-+#include <linux/list.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include "pciback.h"
++#include <linux/version.h>
++#include "common.h"
 +
-+struct passthrough_dev_data {
-+	/* Access to dev_list must be protected by lock */
-+	struct list_head dev_list;
-+	spinlock_t lock;
-+};
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
++  #define DEFAULT_TASK_COMM_LEN	16
++#else
++  #define DEFAULT_TASK_COMM_LEN	TASK_COMM_LEN
++#endif
 +
-+struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
-+				    unsigned int domain, unsigned int bus,
-+				    unsigned int devfn)
++extern struct scsi_host_template scsifront_sht;
++
++static void scsifront_free(struct vscsifrnt_info *info)
 +{
-+	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
-+	struct pci_dev_entry *dev_entry;
-+	struct pci_dev *dev = NULL;
-+	unsigned long flags;
++	struct Scsi_Host *host = info->host;
 +
-+	spin_lock_irqsave(&dev_data->lock, flags);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
++	if (host->shost_state != SHOST_DEL) {
++#else
++	if (!test_bit(SHOST_DEL, &host->shost_state)) {
++#endif
++		scsi_remove_host(info->host);
++	}
 +
-+	list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
-+		if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
-+		    && bus == (unsigned int)dev_entry->dev->bus->number
-+		    && devfn == dev_entry->dev->devfn) {
-+			dev = dev_entry->dev;
-+			break;
-+		}
++	if (info->ring_ref != GRANT_INVALID_REF) {
++		gnttab_end_foreign_access(info->ring_ref,
++					(unsigned long)info->ring.sring);
++		info->ring_ref = GRANT_INVALID_REF;
++		info->ring.sring = NULL;
 +	}
 +
-+	spin_unlock_irqrestore(&dev_data->lock, flags);
++	if (info->irq)
++		unbind_from_irqhandler(info->irq, info);
++	info->irq = 0;
 +
-+	return dev;
++	scsi_host_put(info->host);
 +}
 +
-+int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
++
++static int scsifront_alloc_ring(struct vscsifrnt_info *info)
 +{
-+	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
-+	struct pci_dev_entry *dev_entry;
-+	unsigned long flags;
++	struct xenbus_device *dev = info->dev;
++	struct vscsiif_sring *sring;
++	int err = -ENOMEM;
 +
-+	dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
-+	if (!dev_entry)
-+		return -ENOMEM;
-+	dev_entry->dev = dev;
 +
-+	spin_lock_irqsave(&dev_data->lock, flags);
-+	list_add_tail(&dev_entry->list, &dev_data->dev_list);
-+	spin_unlock_irqrestore(&dev_data->lock, flags);
++	info->ring_ref = GRANT_INVALID_REF;
 +
-+	return 0;
-+}
++	/***** Frontend to Backend ring start *****/
++	sring = (struct vscsiif_sring *) __get_free_page(GFP_KERNEL);
++	if (!sring) {
++		xenbus_dev_fatal(dev, err, "fail to allocate shared ring (Front to Back)");
++		return err;
++	}
++	SHARED_RING_INIT(sring);
++	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
 +
-+void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
-+{
-+	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
-+	struct pci_dev_entry *dev_entry, *t;
-+	struct pci_dev *found_dev = NULL;
-+	unsigned long flags;
++	err = xenbus_grant_ring(dev, virt_to_mfn(sring));
++	if (err < 0) {
++		free_page((unsigned long) sring);
++		info->ring.sring = NULL;
++		xenbus_dev_fatal(dev, err, "fail to grant shared ring (Front to Back)");
++		goto free_sring;
++	}
++	info->ring_ref = err;
 +
-+	spin_lock_irqsave(&dev_data->lock, flags);
++	err = bind_listening_port_to_irqhandler(
++			dev->otherend_id, scsifront_intr,
++			SA_SAMPLE_RANDOM, "scsifront", info);
 +
-+	list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
-+		if (dev_entry->dev == dev) {
-+			list_del(&dev_entry->list);
-+			found_dev = dev_entry->dev;
-+			kfree(dev_entry);
-+		}
++	if (err <= 0) {
++		xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler");
++		goto free_sring;
 +	}
++	info->irq = err;
 +
-+	spin_unlock_irqrestore(&dev_data->lock, flags);
++	return 0;
 +
-+	if (found_dev)
-+		pcistub_put_pci_dev(found_dev);
++/* free resource */
++free_sring:
++	scsifront_free(info);
++
++	return err;
 +}
 +
-+int pciback_init_devices(struct pciback_device *pdev)
++
++static int scsifront_init_ring(struct vscsifrnt_info *info)
 +{
-+	struct passthrough_dev_data *dev_data;
++	struct xenbus_device *dev = info->dev;
++	struct xenbus_transaction xbt;
++	int err;
 +
-+	dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
-+	if (!dev_data)
-+		return -ENOMEM;
++	DPRINTK("%s\n",__FUNCTION__);
 +
-+	spin_lock_init(&dev_data->lock);
++	err = scsifront_alloc_ring(info);
++	if (err)
++		return err;
++	DPRINTK("%u %u\n", info->ring_ref, info->evtchn);
 +
-+	INIT_LIST_HEAD(&dev_data->dev_list);
++again:
++	err = xenbus_transaction_start(&xbt);
++	if (err) {
++		xenbus_dev_fatal(dev, err, "starting transaction");
++	}
 +
-+	pdev->pci_dev_data = dev_data;
++	err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u",
++				info->ring_ref);
++	if (err) {
++		xenbus_dev_fatal(dev, err, "%s", "writing ring-ref");
++		goto fail;
++	}
++
++	err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
++				irq_to_evtchn_port(info->irq));
++
++	if (err) {
++		xenbus_dev_fatal(dev, err, "%s", "writing event-channel");
++		goto fail;
++	}
++
++	err = xenbus_transaction_end(xbt, 0);
++	if (err) {
++		if (err == -EAGAIN)
++			goto again;
++		xenbus_dev_fatal(dev, err, "completing transaction");
++		goto free_sring;
++	}
 +
 +	return 0;
++
++fail:
++	xenbus_transaction_end(xbt, 1);
++free_sring:
++	/* free resource */
++	scsifront_free(info);
++	
++	return err;
 +}
 +
-+int pciback_publish_pci_roots(struct pciback_device *pdev,
-+			      publish_pci_root_cb publish_root_cb)
++
++static int scsifront_probe(struct xenbus_device *dev,
++				const struct xenbus_device_id *id)
 +{
-+	int err = 0;
-+	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
-+	struct pci_dev_entry *dev_entry, *e;
-+	struct pci_dev *dev;
-+	int found;
-+	unsigned int domain, bus;
++	struct vscsifrnt_info *info;
++	struct Scsi_Host *host;
++	int i, err = -ENOMEM;
++	char name[DEFAULT_TASK_COMM_LEN];
++
++	host = scsi_host_alloc(&scsifront_sht, sizeof(*info));
++	if (!host) {
++		xenbus_dev_fatal(dev, err, "fail to allocate scsi host");
++		return err;
++	}
++	info = (struct vscsifrnt_info *) host->hostdata;
++	info->host = host;
 +
-+	spin_lock(&dev_data->lock);
 +
-+	list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
-+		/* Only publish this device as a root if none of its
-+		 * parent bridges are exported
-+		 */
-+		found = 0;
-+		dev = dev_entry->dev->bus->self;
-+		for (; !found && dev != NULL; dev = dev->bus->self) {
-+			list_for_each_entry(e, &dev_data->dev_list, list) {
-+				if (dev == e->dev) {
-+					found = 1;
-+					break;
-+				}
-+			}
-+		}
++	dev->dev.driver_data = info;
++	info->dev  = dev;
 +
-+		domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus);
-+		bus = (unsigned int)dev_entry->dev->bus->number;
++	for (i = 0; i < VSCSIIF_MAX_REQS; i++) {
++		info->shadow[i].next_free = i + 1;
++		init_waitqueue_head(&(info->shadow[i].wq_reset));
++		info->shadow[i].wait_reset = 0;
++	}
++	info->shadow[VSCSIIF_MAX_REQS - 1].next_free = 0x0fff;
 +
-+		if (!found) {
-+			err = publish_root_cb(pdev, domain, bus);
-+			if (err)
-+				break;
-+		}
++	err = scsifront_init_ring(info);
++	if (err) {
++		scsi_host_put(host);
++		return err;
 +	}
 +
-+	spin_unlock(&dev_data->lock);
++	init_waitqueue_head(&info->wq);
++	spin_lock_init(&info->io_lock);
++	spin_lock_init(&info->shadow_lock);
 +
-+	return err;
-+}
++	snprintf(name, DEFAULT_TASK_COMM_LEN, "vscsiif.%d", info->host->host_no);
 +
-+void pciback_release_devices(struct pciback_device *pdev)
-+{
-+	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
-+	struct pci_dev_entry *dev_entry, *t;
++	info->kthread = kthread_run(scsifront_schedule, info, name);
++	if (IS_ERR(info->kthread)) {
++		err = PTR_ERR(info->kthread);
++		info->kthread = NULL;
++		printk(KERN_ERR "scsifront: kthread start err %d\n", err);
++		goto free_sring;
++	}
 +
-+	list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
-+		list_del(&dev_entry->list);
-+		pcistub_put_pci_dev(dev_entry->dev);
-+		kfree(dev_entry);
++	host->max_id      = VSCSIIF_MAX_TARGET;
++	host->max_channel = 0;
++	host->max_lun     = VSCSIIF_MAX_LUN;
++	host->max_sectors = (VSCSIIF_SG_TABLESIZE - 1) * PAGE_SIZE / 512;
++
++	err = scsi_add_host(host, &dev->dev);
++	if (err) {
++		printk(KERN_ERR "scsifront: fail to add scsi host %d\n", err);
++		goto free_sring;
 +	}
 +
-+	kfree(dev_data);
-+	pdev->pci_dev_data = NULL;
++	xenbus_switch_state(dev, XenbusStateInitialised);
++
++	return 0;
++
++free_sring:
++	/* free resource */
++	scsifront_free(info);
++	return err;
 +}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/pciback/pciback.h tmp-linux-2.6-xen.patch/drivers/xen/pciback/pciback.h
---- pristine-linux-2.6.18.2/drivers/xen/pciback/pciback.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/pciback/pciback.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,93 @@
-+/*
-+ * PCI Backend Common Data Structures & Function Declarations
-+ *
-+ *   Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
-+ */
-+#ifndef __XEN_PCIBACK_H__
-+#define __XEN_PCIBACK_H__
 +
-+#include <linux/pci.h>
-+#include <linux/interrupt.h>
-+#include <xen/xenbus.h>
-+#include <linux/list.h>
-+#include <linux/spinlock.h>
-+#include <linux/workqueue.h>
-+#include <asm/atomic.h>
-+#include <xen/interface/io/pciif.h>
++static int scsifront_remove(struct xenbus_device *dev)
++{
++	struct vscsifrnt_info *info = dev->dev.driver_data;
 +
-+struct pci_dev_entry {
-+	struct list_head list;
-+	struct pci_dev *dev;
-+};
++	DPRINTK("%s: %s removed\n",__FUNCTION__ ,dev->nodename);
 +
-+#define _PDEVF_op_active 	(0)
-+#define PDEVF_op_active 	(1<<(_PDEVF_op_active))
++	if (info->kthread) {
++		kthread_stop(info->kthread);
++		info->kthread = NULL;
++	}
 +
-+struct pciback_device {
-+	void *pci_dev_data;
-+	spinlock_t dev_lock;
++	scsifront_free(info);
++	
++	return 0;
++}
 +
-+	struct xenbus_device *xdev;
 +
-+	struct xenbus_watch be_watch;
-+	u8 be_watching;
++static int scsifront_disconnect(struct vscsifrnt_info *info)
++{
++	struct xenbus_device *dev = info->dev;
++	struct Scsi_Host *host = info->host;
 +
-+	int evtchn_irq;
++	DPRINTK("%s: %s disconnect\n",__FUNCTION__ ,dev->nodename);
 +
-+	struct vm_struct *sh_area;
-+	struct xen_pci_sharedinfo *sh_info;
++	/*
++	 * By the time this function runs, all of the frontend's
++	 * devices have already been deleted, so there is no need
++	 * to block I/O before removing the host.
++	 */
 +
-+	unsigned long flags;
++	scsi_remove_host(host);
++	xenbus_frontend_closed(dev);
 +
-+	struct work_struct op_work;
-+};
++	return 0;
++}
 +
-+struct pciback_dev_data {
-+	struct list_head config_fields;
-+	int permissive;
-+	int warned_on_write;
-+};
++#define VSCSIFRONT_OP_ADD_LUN	1
++#define VSCSIFRONT_OP_DEL_LUN	2
 +
-+/* Get/Put PCI Devices that are hidden from the PCI Backend Domain */
-+struct pci_dev *pcistub_get_pci_dev_by_slot(struct pciback_device *pdev,
-+					    int domain, int bus,
-+					    int slot, int func);
-+struct pci_dev *pcistub_get_pci_dev(struct pciback_device *pdev,
-+				    struct pci_dev *dev);
-+void pcistub_put_pci_dev(struct pci_dev *dev);
++static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
++{
++	struct xenbus_device *dev = info->dev;
++	int i, err = 0;
++	char str[64], state_str[64];
++	char **dir;
++	unsigned int dir_n = 0;
++	unsigned int device_state;
++	unsigned int hst, chn, tgt, lun;
++	struct scsi_device *sdev;
 +
-+/* Ensure a device is turned off or reset */
-+void pciback_reset_device(struct pci_dev *pdev);
++	dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n);
++	if (IS_ERR(dir))
++		return;
 +
-+/* Access a virtual configuration space for a PCI device */
-+int pciback_config_init(void);
-+int pciback_config_init_dev(struct pci_dev *dev);
-+void pciback_config_free_dyn_fields(struct pci_dev *dev);
-+void pciback_config_reset_dev(struct pci_dev *dev);
-+void pciback_config_free_dev(struct pci_dev *dev);
-+int pciback_config_read(struct pci_dev *dev, int offset, int size,
-+			u32 * ret_val);
-+int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value);
++	for (i = 0; i < dir_n; i++) {
++		/* read status */
++		snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]);
++		err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u",
++			&device_state);
++		if (XENBUS_EXIST_ERR(err))
++			continue;
++		
++		/* virtual SCSI device */
++		snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]);
++		err = xenbus_scanf(XBT_NIL, dev->otherend, str,
++			"%u:%u:%u:%u", &hst, &chn, &tgt, &lun);
++		if (XENBUS_EXIST_ERR(err))
++			continue;
 +
-+/* Handle requests for specific devices from the frontend */
-+typedef int (*publish_pci_root_cb) (struct pciback_device * pdev,
-+				    unsigned int domain, unsigned int bus);
-+int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev);
-+void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev);
-+struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
-+				    unsigned int domain, unsigned int bus,
-+				    unsigned int devfn);
-+int pciback_init_devices(struct pciback_device *pdev);
-+int pciback_publish_pci_roots(struct pciback_device *pdev,
-+			      publish_pci_root_cb cb);
-+void pciback_release_devices(struct pciback_device *pdev);
++		/* front device state path */
++		snprintf(state_str, sizeof(state_str), "vscsi-devs/%s/state", dir[i]);
 +
-+/* Handles events from front-end */
-+irqreturn_t pciback_handle_event(int irq, void *dev_id, struct pt_regs *regs);
-+void pciback_do_op(void *data);
++		switch (op) {
++		case VSCSIFRONT_OP_ADD_LUN:
++			if (device_state == XenbusStateInitialised) {
++				sdev = scsi_device_lookup(info->host, chn, tgt, lun);
++				if (sdev) {
++					printk(KERN_ERR "scsifront: Device already in use.\n");
++					scsi_device_put(sdev);
++					xenbus_printf(XBT_NIL, dev->nodename,
++						state_str, "%d", XenbusStateClosed);
++				} else {
++					scsi_add_device(info->host, chn, tgt, lun);
++					xenbus_printf(XBT_NIL, dev->nodename,
++						state_str, "%d", XenbusStateConnected);
++				}
++			}
++			break;
++		case VSCSIFRONT_OP_DEL_LUN:
++			if (device_state == XenbusStateClosing) {
++				sdev = scsi_device_lookup(info->host, chn, tgt, lun);
++				if (sdev) {
++					scsi_remove_device(sdev);
++					scsi_device_put(sdev);
++					xenbus_printf(XBT_NIL, dev->nodename,
++						state_str, "%d", XenbusStateClosed);
++				}
++			}
++			break;
++		default:
++			break;
++		}
++	}
++	
++	kfree(dir);
++	return;
++}
 +
-+int pciback_xenbus_register(void);
-+void pciback_xenbus_unregister(void);
 +
-+extern int verbose_request;
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/pciback/pciback_ops.c tmp-linux-2.6-xen.patch/drivers/xen/pciback/pciback_ops.c
---- pristine-linux-2.6.18.2/drivers/xen/pciback/pciback_ops.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/pciback/pciback_ops.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,95 @@
-+/*
-+ * PCI Backend Operations - respond to PCI requests from Frontend
-+ *
-+ *   Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
-+ */
-+#include <linux/module.h>
-+#include <asm/bitops.h>
-+#include <xen/evtchn.h>
-+#include "pciback.h"
 +
-+int verbose_request = 0;
-+module_param(verbose_request, int, 0644);
 +
-+/* Ensure a device is "turned off" and ready to be exported.
-+ * (Also see pciback_config_reset to ensure virtual configuration space is
-+ * ready to be re-exported)
-+ */
-+void pciback_reset_device(struct pci_dev *dev)
++static void scsifront_backend_changed(struct xenbus_device *dev,
++				enum xenbus_state backend_state)
 +{
-+	u16 cmd;
++	struct vscsifrnt_info *info = dev->dev.driver_data;
 +
-+	/* Disable devices (but not bridges) */
-+	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
-+		pci_disable_device(dev);
++	DPRINTK("%p %u %u\n", dev, dev->state, backend_state);
 +
-+		pci_write_config_word(dev, PCI_COMMAND, 0);
++	switch (backend_state) {
++	case XenbusStateUnknown:
++	case XenbusStateInitialising:
++	case XenbusStateInitWait:
++	case XenbusStateClosed:
++		break;
 +
-+		dev->is_enabled = 0;
-+		dev->is_busmaster = 0;
-+	} else {
-+		pci_read_config_word(dev, PCI_COMMAND, &cmd);
-+		if (cmd & (PCI_COMMAND_INVALIDATE)) {
-+			cmd &= ~(PCI_COMMAND_INVALIDATE);
-+			pci_write_config_word(dev, PCI_COMMAND, cmd);
++	case XenbusStateInitialised:
++		break;
 +
-+			dev->is_busmaster = 0;
++	case XenbusStateConnected:
++		if (xenbus_read_driver_state(dev->nodename) ==
++			XenbusStateInitialised) {
++			scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
 +		}
++		
++		if (dev->state == XenbusStateConnected)
++			break;
++			
++		xenbus_switch_state(dev, XenbusStateConnected);
++		break;
++
++	case XenbusStateClosing:
++		scsifront_disconnect(info);
++		break;
++
++	case XenbusStateReconfiguring:
++		scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN);
++		xenbus_switch_state(dev, XenbusStateReconfiguring);
++		break;
++
++	case XenbusStateReconfigured:
++		scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
++		xenbus_switch_state(dev, XenbusStateConnected);
++		break;
 +	}
 +}
 +
-+static inline void test_and_schedule_op(struct pciback_device *pdev)
++
++static struct xenbus_device_id scsifront_ids[] = {
++	{ "vscsi" },
++	{ "" }
++};
++
++
++static struct xenbus_driver scsifront_driver = {
++	.name			= "vscsi",
++	.owner			= THIS_MODULE,
++	.ids			= scsifront_ids,
++	.probe			= scsifront_probe,
++	.remove			= scsifront_remove,
++/* 	.resume			= scsifront_resume, */
++	.otherend_changed	= scsifront_backend_changed,
++};
++
++int scsifront_xenbus_init(void)
 +{
-+	/* Check that frontend is requesting an operation and that we are not
-+	 * already processing a request */
-+	if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
-+	    && !test_and_set_bit(_PDEVF_op_active, &pdev->flags))
-+		schedule_work(&pdev->op_work);
++	return xenbus_register_frontend(&scsifront_driver);
 +}
 +
-+/* Performing the configuration space reads/writes must not be done in atomic
-+ * context because some of the pci_* functions can sleep (mostly due to ACPI
-+ * use of semaphores). This function is intended to be called from a work
-+ * queue in process context taking a struct pciback_device as a parameter */
-+void pciback_do_op(void *data)
++void scsifront_xenbus_unregister(void)
 +{
-+	struct pciback_device *pdev = data;
-+	struct pci_dev *dev;
-+	struct xen_pci_op *op = &pdev->sh_info->op;
++	xenbus_unregister_driver(&scsifront_driver);
++}
 +
-+	dev = pciback_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/tpmback/Makefile
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/tpmback/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,4 @@
 +
-+	if (dev == NULL)
-+		op->err = XEN_PCI_ERR_dev_not_found;
-+	else if (op->cmd == XEN_PCI_OP_conf_read)
-+		op->err = pciback_config_read(dev, op->offset, op->size,
-+					      &op->value);
-+	else if (op->cmd == XEN_PCI_OP_conf_write)
-+		op->err = pciback_config_write(dev, op->offset, op->size,
-+					       op->value);
-+	else
-+		op->err = XEN_PCI_ERR_not_implemented;
++obj-$(CONFIG_XEN_TPMDEV_BACKEND)	+= tpmbk.o
 +
-+	/* Tell the driver domain that we're done. */ 
-+	wmb();
-+	clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
-+	notify_remote_via_irq(pdev->evtchn_irq);
++tpmbk-y += tpmback.o interface.o xenbus.o
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/tpmback/common.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/tpmback/common.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,85 @@
++/******************************************************************************
++ * drivers/xen/tpmback/common.h
++ */
 +
-+	/* Mark that we're done. */
-+	smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */
-+	clear_bit(_PDEVF_op_active, &pdev->flags);
-+	smp_mb__after_clear_bit(); /* /before/ final check for work */
++#ifndef __TPM__BACKEND__COMMON_H__
++#define __TPM__BACKEND__COMMON_H__
 +
-+	/* Check to see if the driver domain tried to start another request in
-+	 * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active. */
-+	test_and_schedule_op(pdev);
-+}
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <xen/evtchn.h>
++#include <xen/driver_util.h>
++#include <xen/interface/grant_table.h>
++#include <xen/interface/io/tpmif.h>
++#include <asm/io.h>
++#include <asm/pgalloc.h>
 +
-+irqreturn_t pciback_handle_event(int irq, void *dev_id, struct pt_regs *regs)
-+{
-+	struct pciback_device *pdev = dev_id;
++#define DPRINTK(_f, _a...)			\
++	pr_debug("(file=%s, line=%d) " _f,	\
++		 __FILE__ , __LINE__ , ## _a )
 +
-+	test_and_schedule_op(pdev);
++struct backend_info;
 +
-+	return IRQ_HANDLED;
-+}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/pciback/pci_stub.c tmp-linux-2.6-xen.patch/drivers/xen/pciback/pci_stub.c
---- pristine-linux-2.6.18.2/drivers/xen/pciback/pci_stub.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/pciback/pci_stub.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,929 @@
-+/*
-+ * PCI Stub Driver - Grabs devices in backend to be exported later
-+ *
-+ * Ryan Wilson <hap9 at epoch.ncsc.mil>
-+ * Chris Bookholt <hap10 at epoch.ncsc.mil>
-+ */
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/list.h>
-+#include <linux/spinlock.h>
-+#include <linux/kref.h>
-+#include <asm/atomic.h>
-+#include "pciback.h"
-+#include "conf_space.h"
-+#include "conf_space_quirks.h"
++typedef struct tpmif_st {
++	struct list_head tpmif_list;
++	/* Unique identifier for this interface. */
++	domid_t domid;
++	unsigned int handle;
 +
-+static char *pci_devs_to_hide = NULL;
-+module_param_named(hide, pci_devs_to_hide, charp, 0444);
++	/* Physical parameters of the comms window. */
++	unsigned int irq;
 +
-+struct pcistub_device_id {
-+	struct list_head slot_list;
-+	int domain;
-+	unsigned char bus;
-+	unsigned int devfn;
-+};
-+static LIST_HEAD(pcistub_device_ids);
-+static DEFINE_SPINLOCK(device_ids_lock);
++	/* The shared rings and indexes. */
++	tpmif_tx_interface_t *tx;
++	struct vm_struct *tx_area;
 +
-+struct pcistub_device {
-+	struct kref kref;
-+	struct list_head dev_list;
-+	spinlock_t lock;
++	/* Miscellaneous private stuff. */
++	enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
++	int active;
 +
-+	struct pci_dev *dev;
-+	struct pciback_device *pdev;	/* non-NULL if struct pci_dev is in use */
-+};
++	struct tpmif_st *hash_next;
++	struct list_head list;	/* scheduling list */
++	atomic_t refcnt;
 +
-+/* Access to pcistub_devices & seized_devices lists and the initialize_devices
-+ * flag must be locked with pcistub_devices_lock
-+ */
-+static DEFINE_SPINLOCK(pcistub_devices_lock);
-+static LIST_HEAD(pcistub_devices);
++	struct backend_info *bi;
 +
-+/* wait for device_initcall before initializing our devices
-+ * (see pcistub_init_devices_late)
-+ */
-+static int initialize_devices = 0;
-+static LIST_HEAD(seized_devices);
++	grant_handle_t shmem_handle;
++	grant_ref_t shmem_ref;
++	struct page **mmap_pages;
 +
-+static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
-+{
-+	struct pcistub_device *psdev;
++	char devname[20];
++} tpmif_t;
 +
-+	dev_dbg(&dev->dev, "pcistub_device_alloc\n");
++void tpmif_disconnect_complete(tpmif_t * tpmif);
++tpmif_t *tpmif_find(domid_t domid, struct backend_info *bi);
++void tpmif_interface_init(void);
++void tpmif_interface_exit(void);
++void tpmif_schedule_work(tpmif_t * tpmif);
++void tpmif_deschedule_work(tpmif_t * tpmif);
++void tpmif_xenbus_init(void);
++void tpmif_xenbus_exit(void);
++int tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn);
++irqreturn_t tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs);
 +
-+	psdev = kzalloc(sizeof(*psdev), GFP_ATOMIC);
-+	if (!psdev)
-+		return NULL;
++long int tpmback_get_instance(struct backend_info *bi);
 +
-+	psdev->dev = pci_dev_get(dev);
-+	if (!psdev->dev) {
-+		kfree(psdev);
-+		return NULL;
-+	}
++int vtpm_release_packets(tpmif_t * tpmif, int send_msgs);
++
++
++#define tpmif_get(_b) (atomic_inc(&(_b)->refcnt))
++#define tpmif_put(_b)					\
++	do {						\
++		if (atomic_dec_and_test(&(_b)->refcnt))	\
++			tpmif_disconnect_complete(_b);	\
++	} while (0)
++
++extern int num_frontends;
++
++static inline unsigned long idx_to_kaddr(tpmif_t *t, unsigned int idx)
++{
++	return (unsigned long)pfn_to_kaddr(page_to_pfn(t->mmap_pages[idx]));
++}
++
++#endif /* __TPMIF__BACKEND__COMMON_H__ */
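
The tpmif_get()/tpmif_put() macros above implement the usual
last-reference-tears-down pattern: whichever caller drops the count to zero
runs tpmif_disconnect_complete().  Below is a small userspace model of the
same pattern, using C11 atomics in place of the kernel's atomic_t and a
free() in place of the real teardown (both assumptions made for the
sketch).

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct tpmif {
        atomic_int refcnt;
    };

    static void disconnect_complete(struct tpmif *t)
    {
        printf("last reference dropped, tearing interface down\n");
        free(t);
    }

    static void tpmif_get(struct tpmif *t)
    {
        atomic_fetch_add(&t->refcnt, 1);
    }

    static void tpmif_put(struct tpmif *t)
    {
        /* fetch_sub returning 1 means we dropped the last reference */
        if (atomic_fetch_sub(&t->refcnt, 1) == 1)
            disconnect_complete(t);
    }

    int main(void)
    {
        struct tpmif *t = malloc(sizeof(*t));

        if (!t)
            return 1;
        atomic_init(&t->refcnt, 1);  /* creator's reference */
        tpmif_get(t);                /* a second user */
        tpmif_put(t);
        tpmif_put(t);                /* final put frees */
        return 0;
    }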
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/tpmback/interface.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/tpmback/interface.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,168 @@
++/*****************************************************************************
++ * drivers/xen/tpmback/interface.c
++ *
++ * Virtual TPM interface management.
++ *
++ * Copyright (c) 2005, IBM Corporation
++ *
++ * Author: Stefan Berger, stefanb at us.ibm.com
++ *
++ * This code has been derived from drivers/xen/netback/interface.c
++ * Copyright (c) 2004, Keir Fraser
++ */
 +
-+	kref_init(&psdev->kref);
-+	spin_lock_init(&psdev->lock);
++#include "common.h"
++#include <xen/balloon.h>
++#include <xen/gnttab.h>
 +
-+	return psdev;
-+}
++static kmem_cache_t *tpmif_cachep;
++int num_frontends = 0;
 +
-+/* Don't call this directly as it's called by pcistub_device_put */
-+static void pcistub_device_release(struct kref *kref)
++LIST_HEAD(tpmif_list);
++
++static tpmif_t *alloc_tpmif(domid_t domid, struct backend_info *bi)
 +{
-+	struct pcistub_device *psdev;
++	tpmif_t *tpmif;
 +
-+	psdev = container_of(kref, struct pcistub_device, kref);
++	tpmif = kmem_cache_alloc(tpmif_cachep, GFP_KERNEL);
++	if (tpmif == NULL)
++		goto out_of_memory;
 +
-+	dev_dbg(&psdev->dev->dev, "pcistub_device_release\n");
++	memset(tpmif, 0, sizeof (*tpmif));
++	tpmif->domid = domid;
++	tpmif->status = DISCONNECTED;
++	tpmif->bi = bi;
++	snprintf(tpmif->devname, sizeof(tpmif->devname), "tpmif%d", domid);
++	atomic_set(&tpmif->refcnt, 1);
 +
-+	/* Clean-up the device */
-+	pciback_reset_device(psdev->dev);
-+	pciback_config_free_dyn_fields(psdev->dev);
-+	pciback_config_free_dev(psdev->dev);
-+	kfree(pci_get_drvdata(psdev->dev));
-+	pci_set_drvdata(psdev->dev, NULL);
++	tpmif->mmap_pages = alloc_empty_pages_and_pagevec(TPMIF_TX_RING_SIZE);
++	if (tpmif->mmap_pages == NULL)
++		goto out_of_memory;
 +
-+	pci_dev_put(psdev->dev);
++	list_add(&tpmif->tpmif_list, &tpmif_list);
++	num_frontends++;
 +
-+	kfree(psdev);
-+}
++	return tpmif;
 +
-+static inline void pcistub_device_get(struct pcistub_device *psdev)
-+{
-+	kref_get(&psdev->kref);
++ out_of_memory:
++	if (tpmif != NULL)
++		kmem_cache_free(tpmif_cachep, tpmif);
++	printk("%s: out of memory\n", __FUNCTION__);
++	return ERR_PTR(-ENOMEM);
 +}
 +
-+static inline void pcistub_device_put(struct pcistub_device *psdev)
++static void free_tpmif(tpmif_t * tpmif)
 +{
-+	kref_put(&psdev->kref, pcistub_device_release);
++	num_frontends--;
++	list_del(&tpmif->tpmif_list);
++	free_empty_pages_and_pagevec(tpmif->mmap_pages, TPMIF_TX_RING_SIZE);
++	kmem_cache_free(tpmif_cachep, tpmif);
 +}
 +
-+static struct pcistub_device *pcistub_device_find(int domain, int bus,
-+						  int slot, int func)
++tpmif_t *tpmif_find(domid_t domid, struct backend_info *bi)
 +{
-+	struct pcistub_device *psdev = NULL;
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&pcistub_devices_lock, flags);
++	tpmif_t *tpmif;
 +
-+	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
-+		if (psdev->dev != NULL
-+		    && domain == pci_domain_nr(psdev->dev->bus)
-+		    && bus == psdev->dev->bus->number
-+		    && PCI_DEVFN(slot, func) == psdev->dev->devfn) {
-+			pcistub_device_get(psdev);
-+			goto out;
++	list_for_each_entry(tpmif, &tpmif_list, tpmif_list) {
++		if (tpmif->bi == bi) {
++			if (tpmif->domid == domid) {
++				tpmif_get(tpmif);
++				return tpmif;
++			} else {
++				return ERR_PTR(-EEXIST);
++			}
 +		}
 +	}
 +
-+	/* didn't find it */
-+	psdev = NULL;
-+
-+      out:
-+	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+	return psdev;
++	return alloc_tpmif(domid, bi);
 +}
 +
-+static struct pci_dev *pcistub_device_get_pci_dev(struct pciback_device *pdev,
-+						  struct pcistub_device *psdev)
++static int map_frontend_page(tpmif_t *tpmif, unsigned long shared_page)
 +{
-+	struct pci_dev *pci_dev = NULL;
-+	unsigned long flags;
++	struct gnttab_map_grant_ref op;
 +
-+	pcistub_device_get(psdev);
++	gnttab_set_map_op(&op, (unsigned long)tpmif->tx_area->addr,
++			  GNTMAP_host_map, shared_page, tpmif->domid);
 +
-+	spin_lock_irqsave(&psdev->lock, flags);
-+	if (!psdev->pdev) {
-+		psdev->pdev = pdev;
-+		pci_dev = psdev->dev;
++	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++		BUG();
++
++	if (op.status) {
++		DPRINTK(" Grant table operation failure !\n");
++		return op.status;
 +	}
-+	spin_unlock_irqrestore(&psdev->lock, flags);
 +
-+	if (!pci_dev)
-+		pcistub_device_put(psdev);
++	tpmif->shmem_ref = shared_page;
++	tpmif->shmem_handle = op.handle;
 +
-+	return pci_dev;
++	return 0;
 +}
 +
-+struct pci_dev *pcistub_get_pci_dev_by_slot(struct pciback_device *pdev,
-+					    int domain, int bus,
-+					    int slot, int func)
++static void unmap_frontend_page(tpmif_t *tpmif)
 +{
-+	struct pcistub_device *psdev;
-+	struct pci_dev *found_dev = NULL;
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&pcistub_devices_lock, flags);
++	struct gnttab_unmap_grant_ref op;
 +
-+	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
-+		if (psdev->dev != NULL
-+		    && domain == pci_domain_nr(psdev->dev->bus)
-+		    && bus == psdev->dev->bus->number
-+		    && PCI_DEVFN(slot, func) == psdev->dev->devfn) {
-+			found_dev = pcistub_device_get_pci_dev(pdev, psdev);
-+			break;
-+		}
-+	}
++	gnttab_set_unmap_op(&op, (unsigned long)tpmif->tx_area->addr,
++			    GNTMAP_host_map, tpmif->shmem_handle);
 +
-+	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+	return found_dev;
++	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++		BUG();
 +}
 +
-+struct pci_dev *pcistub_get_pci_dev(struct pciback_device *pdev,
-+				    struct pci_dev *dev)
++int tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn)
 +{
-+	struct pcistub_device *psdev;
-+	struct pci_dev *found_dev = NULL;
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&pcistub_devices_lock, flags);
-+
-+	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
-+		if (psdev->dev == dev) {
-+			found_dev = pcistub_device_get_pci_dev(pdev, psdev);
-+			break;
-+		}
-+	}
-+
-+	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+	return found_dev;
-+}
++	int err;
 +
-+void pcistub_put_pci_dev(struct pci_dev *dev)
-+{
-+	struct pcistub_device *psdev, *found_psdev = NULL;
-+	unsigned long flags;
++	if (tpmif->irq)
++		return 0;
 +
-+	spin_lock_irqsave(&pcistub_devices_lock, flags);
++	if ((tpmif->tx_area = alloc_vm_area(PAGE_SIZE)) == NULL)
++		return -ENOMEM;
 +
-+	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
-+		if (psdev->dev == dev) {
-+			found_psdev = psdev;
-+			break;
-+		}
++	err = map_frontend_page(tpmif, shared_page);
++	if (err) {
++		free_vm_area(tpmif->tx_area);
++		return err;
 +	}
 +
-+	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++	tpmif->tx = (tpmif_tx_interface_t *)tpmif->tx_area->addr;
++	memset(tpmif->tx, 0, PAGE_SIZE);
 +
-+	/* Cleanup our device
-+	 * (so it's ready for the next domain)
-+	 */
-+	pciback_reset_device(found_psdev->dev);
-+	pciback_config_free_dyn_fields(found_psdev->dev);
-+	pciback_config_reset_dev(found_psdev->dev);
++	err = bind_interdomain_evtchn_to_irqhandler(
++		tpmif->domid, evtchn, tpmif_be_int, 0, tpmif->devname, tpmif);
++	if (err < 0) {
++		unmap_frontend_page(tpmif);
++		free_vm_area(tpmif->tx_area);
++		return err;
++	}
++	tpmif->irq = err;
 +
-+	spin_lock_irqsave(&found_psdev->lock, flags);
-+	found_psdev->pdev = NULL;
-+	spin_unlock_irqrestore(&found_psdev->lock, flags);
++	tpmif->shmem_ref = shared_page;
++	tpmif->active = 1;
 +
-+	pcistub_device_put(found_psdev);
++	return 0;
 +}
 +
-+static int __devinit pcistub_match_one(struct pci_dev *dev,
-+				       struct pcistub_device_id *pdev_id)
++void tpmif_disconnect_complete(tpmif_t *tpmif)
 +{
-+	/* Match the specified device by domain, bus, slot, func and also if
-+	 * any of the device's parent bridges match.
-+	 */
-+	for (; dev != NULL; dev = dev->bus->self) {
-+		if (pci_domain_nr(dev->bus) == pdev_id->domain
-+		    && dev->bus->number == pdev_id->bus
-+		    && dev->devfn == pdev_id->devfn)
-+			return 1;
++	if (tpmif->irq)
++		unbind_from_irqhandler(tpmif->irq, tpmif);
 +
-+		/* Sometimes topmost bridge links to itself. */
-+		if (dev == dev->bus->self)
-+			break;
++	if (tpmif->tx) {
++		unmap_frontend_page(tpmif);
++		free_vm_area(tpmif->tx_area);
 +	}
 +
-+	return 0;
++	free_tpmif(tpmif);
 +}
 +
-+static int __devinit pcistub_match(struct pci_dev *dev)
++void __init tpmif_interface_init(void)
 +{
-+	struct pcistub_device_id *pdev_id;
-+	unsigned long flags;
-+	int found = 0;
-+
-+	spin_lock_irqsave(&device_ids_lock, flags);
-+	list_for_each_entry(pdev_id, &pcistub_device_ids, slot_list) {
-+		if (pcistub_match_one(dev, pdev_id)) {
-+			found = 1;
-+			break;
-+		}
-+	}
-+	spin_unlock_irqrestore(&device_ids_lock, flags);
-+
-+	return found;
++	tpmif_cachep = kmem_cache_create("tpmif_cache", sizeof (tpmif_t),
++					 0, 0, NULL, NULL);
 +}
 +
-+static int __devinit pcistub_init_device(struct pci_dev *dev)
++void __exit tpmif_interface_exit(void)
 +{
-+	struct pciback_dev_data *dev_data;
-+	int err = 0;
++	kmem_cache_destroy(tpmif_cachep);
++}
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/tpmback/tpmback.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/tpmback/tpmback.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,944 @@
++/******************************************************************************
++ * drivers/xen/tpmback/tpmback.c
++ *
++ * Copyright (c) 2005, IBM Corporation
++ *
++ * Author: Stefan Berger, stefanb at us.ibm.com
++ * Grant table support: Mahadevan Gomathisankaran
++ *
++ * This code has been derived from drivers/xen/netback/netback.c
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ */
 +
-+	dev_dbg(&dev->dev, "initializing...\n");
++#include "common.h"
++#include <xen/evtchn.h>
 +
-+	/* The PCI backend is not intended to be a module (or to work with
-+	 * removable PCI devices (yet). If it were, pciback_config_free()
-+	 * would need to be called somewhere to free the memory allocated
-+	 * here and then to call kfree(pci_get_drvdata(psdev->dev)).
-+	 */
-+	dev_data = kzalloc(sizeof(*dev_data), GFP_ATOMIC);
-+	if (!dev_data) {
-+		err = -ENOMEM;
-+		goto out;
-+	}
-+	pci_set_drvdata(dev, dev_data);
++#include <linux/types.h>
++#include <linux/list.h>
++#include <linux/miscdevice.h>
++#include <linux/poll.h>
++#include <asm/uaccess.h>
++#include <xen/xenbus.h>
++#include <xen/interface/grant_table.h>
++#include <xen/gnttab.h>
 +
-+	dev_dbg(&dev->dev, "initializing config\n");
-+	err = pciback_config_init_dev(dev);
-+	if (err)
-+		goto out;
++/* local data structures */
++struct data_exchange {
++	struct list_head pending_pak;
++	struct list_head current_pak;
++	unsigned int copied_so_far;
++	u8 has_opener:1;
++	u8 aborted:1;
++	rwlock_t pak_lock;	// protects all of the previous fields
++	wait_queue_head_t wait_queue;
++};
 +
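++/*
++ * Queue flow (descriptive sketch): a packet received from a frontend
++ * sits on pending_pak until the userspace manager has read all of it;
++ * it then moves to current_pak, where it waits for the manager to
++ * write back a response or for its processing_timer to expire.
++ */
++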
-+	/* HACK: Force device (& ACPI) to determine what IRQ it's on - we
-+	 * must do this here because pcibios_enable_device may specify
-+	 * the pci device's true irq (and possibly its other resources)
-+	 * if they differ from what's in the configuration space.
-+	 * This makes the assumption that the device's resources won't
-+	 * change after this point (otherwise this code may break!)
-+	 */
-+	dev_dbg(&dev->dev, "enabling device\n");
-+	err = pci_enable_device(dev);
-+	if (err)
-+		goto config_release;
++struct vtpm_resp_hdr {
++	uint32_t instance_no;
++	uint16_t tag_no;
++	uint32_t len_no;
++	uint32_t ordinal_no;
++} __attribute__ ((packed));
 +
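++/*
++ * Wire-format sketch of a response header as parsed by vtpm_op_write()
++ * (all fields big-endian; the example values are hypothetical):
++ *
++ *	00 00 00 01   instance_no = 1
++ *	00 c4         tag_no      = TPM_TAG_RSP_COMMAND
++ *	00 00 00 0a   len_no      = 10 (minimal response)
++ *	00 00 00 00   ordinal_no  = 0 (TPM_SUCCESS)
++ */
++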
-+	/* Now disable the device (this also ensures some private device
-+	 * data is setup before we export)
-+	 */
-+	dev_dbg(&dev->dev, "reset device\n");
-+	pciback_reset_device(dev);
++struct packet {
++	struct list_head next;
++	unsigned int data_len;
++	u8 *data_buffer;
++	tpmif_t *tpmif;
++	u32 tpm_instance;
++	u8 req_tag;
++	u32 last_read;
++	u8 flags;
++	struct timer_list processing_timer;
++};
 +
-+	return 0;
++enum {
++	PACKET_FLAG_DISCARD_RESPONSE = 1,
++};
 +
-+      config_release:
-+	pciback_config_free_dev(dev);
++/* local variables */
++static struct data_exchange dataex;
 +
-+      out:
-+	pci_set_drvdata(dev, NULL);
-+	kfree(dev_data);
-+	return err;
-+}
++/* local function prototypes */
++static int _packet_write(struct packet *pak,
++			 const char *data, size_t size, int userbuffer);
++static void processing_timeout(unsigned long ptr);
++static int packet_read_shmem(struct packet *pak,
++			     tpmif_t * tpmif,
++			     u32 offset,
++			     char *buffer, int isuserbuffer, u32 left);
++static int vtpm_queue_packet(struct packet *pak);
 +
-+/*
-+ * Because some initialization still happens on
-+ * devices during fs_initcall, we need to defer
-+ * full initialization of our devices until
-+ * device_initcall.
-+ */
-+static int __init pcistub_init_devices_late(void)
++/***************************************************************
++ Buffer copying for user and kernel space buffers.
++***************************************************************/
++static inline int copy_from_buffer(void *to,
++				   const void *from, unsigned long size,
++				   int isuserbuffer)
 +{
-+	struct pcistub_device *psdev;
-+	unsigned long flags;
-+	int err = 0;
++	if (isuserbuffer) {
++		if (copy_from_user(to, (void __user *)from, size))
++			return -EFAULT;
++	} else {
++		memcpy(to, from, size);
++	}
++	return 0;
++}
 +
-+	pr_debug("pciback: pcistub_init_devices_late\n");
++static inline int copy_to_buffer(void *to,
++				 const void *from, unsigned long size,
++				 int isuserbuffer)
++{
++	if (isuserbuffer) {
++		if (copy_to_user((void __user *)to, from, size))
++			return -EFAULT;
++	} else {
++		memcpy(to, from, size);
++	}
++	return 0;
++}
 +
-+	spin_lock_irqsave(&pcistub_devices_lock, flags);
 +
-+	while (!list_empty(&seized_devices)) {
-+		psdev = container_of(seized_devices.next,
-+				     struct pcistub_device, dev_list);
-+		list_del(&psdev->dev_list);
++static void dataex_init(struct data_exchange *dataex)
++{
++	INIT_LIST_HEAD(&dataex->pending_pak);
++	INIT_LIST_HEAD(&dataex->current_pak);
++	dataex->has_opener = 0;
++	rwlock_init(&dataex->pak_lock);
++	init_waitqueue_head(&dataex->wait_queue);
++}
 +
-+		spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++/***************************************************************
++ Packet-related functions
++***************************************************************/
 +
-+		err = pcistub_init_device(psdev->dev);
-+		if (err) {
-+			dev_err(&psdev->dev->dev,
-+				"error %d initializing device\n", err);
-+			kfree(psdev);
-+			psdev = NULL;
-+		}
++static struct packet *packet_find_instance(struct list_head *head,
++					   u32 tpm_instance)
++{
++	struct packet *pak;
++	struct list_head *p;
 +
-+		spin_lock_irqsave(&pcistub_devices_lock, flags);
++	/*
++	 * traverse the list of packets and return the first
++	 * one with the given instance number
++	 */
++	list_for_each(p, head) {
++		pak = list_entry(p, struct packet, next);
 +
-+		if (psdev)
-+			list_add_tail(&psdev->dev_list, &pcistub_devices);
++		if (pak->tpm_instance == tpm_instance) {
++			return pak;
++		}
 +	}
++	return NULL;
++}
 +
-+	initialize_devices = 1;
++static struct packet *packet_find_packet(struct list_head *head, void *packet)
++{
++	struct packet *pak;
++	struct list_head *p;
 +
-+	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++	/*
++	 * traverse the list of packets and return the given
++	 * packet if it is still on the list
++	 */
++	list_for_each(p, head) {
++		pak = list_entry(p, struct packet, next);
 +
-+	return 0;
++		if (pak == packet) {
++			return pak;
++		}
++	}
++	return NULL;
 +}
 +
-+static int __devinit pcistub_seize(struct pci_dev *dev)
++static struct packet *packet_alloc(tpmif_t * tpmif,
++				   u32 size, u8 req_tag, u8 flags)
 +{
-+	struct pcistub_device *psdev;
-+	unsigned long flags;
-+	int err = 0;
++	struct packet *pak = NULL;
++	pak = kzalloc(sizeof (struct packet), GFP_ATOMIC);
++	if (NULL != pak) {
++		if (tpmif) {
++			pak->tpmif = tpmif;
++			pak->tpm_instance = tpmback_get_instance(tpmif->bi);
++			tpmif_get(tpmif);
++		}
++		pak->data_len = size;
++		pak->req_tag = req_tag;
++		pak->last_read = 0;
++		pak->flags = flags;
 +
-+	psdev = pcistub_device_alloc(dev);
-+	if (!psdev)
-+		return -ENOMEM;
++		/*
++		 * do not take a second reference here; the single
++		 * tpmif_get() above is dropped again in packet_free()
++		 */
++		init_timer(&pak->processing_timer);
++		pak->processing_timer.function = processing_timeout;
++		pak->processing_timer.data = (unsigned long)pak;
++	}
++	return pak;
++}
 +
-+	spin_lock_irqsave(&pcistub_devices_lock, flags);
++static inline void packet_reset(struct packet *pak)
++{
++	pak->last_read = 0;
++}
 +
-+	if (initialize_devices) {
-+		spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++static void packet_free(struct packet *pak)
++{
++	if (timer_pending(&pak->processing_timer)) {
++		BUG();
++	}
 +
-+		/* don't want irqs disabled when calling pcistub_init_device */
-+		err = pcistub_init_device(psdev->dev);
++	if (pak->tpmif)
++		tpmif_put(pak->tpmif);
++	kfree(pak->data_buffer);
++	/*
++	 * do not put the tpmif again here; the reference taken in
++	 * packet_alloc() was already dropped above
++	 */
++	kfree(pak);
++}
 +
-+		spin_lock_irqsave(&pcistub_devices_lock, flags);
 +
-+		if (!err)
-+			list_add(&psdev->dev_list, &pcistub_devices);
++/*
++ * Write data to the shared memory and send it to the FE.
++ */
++static int packet_write(struct packet *pak,
++			const char *data, size_t size, int isuserbuffer)
++{
++	int rc = 0;
++
++	if (0 != (pak->flags & PACKET_FLAG_DISCARD_RESPONSE)) {
++		/* Don't send a response to this packet. Just acknowledge it. */
++		rc = size;
 +	} else {
-+		dev_dbg(&dev->dev, "deferring initialization\n");
-+		list_add(&psdev->dev_list, &seized_devices);
++		rc = _packet_write(pak, data, size, isuserbuffer);
 +	}
 +
-+	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+
-+	if (err)
-+		pcistub_device_put(psdev);
-+
-+	return err;
++	return rc;
 +}
 +
-+static int __devinit pcistub_probe(struct pci_dev *dev,
-+				   const struct pci_device_id *id)
++int _packet_write(struct packet *pak,
++		  const char *data, size_t size, int isuserbuffer)
 +{
-+	int err = 0;
-+
-+	dev_dbg(&dev->dev, "probing...\n");
++	/*
++	 * Write into the shared memory pages directly
++	 * and send it to the front end.
++	 */
++	tpmif_t *tpmif = pak->tpmif;
++	grant_handle_t handle;
++	int rc = 0;
++	unsigned int i = 0;
++	unsigned int offset = 0;
 +
-+	if (pcistub_match(dev)) {
++	if (tpmif == NULL) {
++		return -EFAULT;
++	}
 +
-+		if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL
-+		    && dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
-+			dev_err(&dev->dev, "can't export pci devices that "
-+				"don't have a normal (0) or bridge (1) "
-+				"header type!\n");
-+			err = -ENODEV;
-+			goto out;
-+		}
++	if (tpmif->status == DISCONNECTED) {
++		return size;
++	}
 +
-+		dev_info(&dev->dev, "seizing device\n");
-+		err = pcistub_seize(dev);
-+	} else
-+		/* Didn't find the device */
-+		err = -ENODEV;
++	while (offset < size && i < TPMIF_TX_RING_SIZE) {
++		unsigned int tocopy;
++		struct gnttab_map_grant_ref map_op;
++		struct gnttab_unmap_grant_ref unmap_op;
++		tpmif_tx_request_t *tx;
 +
-+      out:
-+	return err;
-+}
++		tx = &tpmif->tx->ring[i].req;
 +
-+static void pcistub_remove(struct pci_dev *dev)
-+{
-+	struct pcistub_device *psdev, *found_psdev = NULL;
-+	unsigned long flags;
++		if (0 == tx->addr) {
++			DPRINTK("ERROR: Buffer for outgoing packet NULL?! i=%d\n", i);
++			return 0;
++		}
 +
-+	dev_dbg(&dev->dev, "removing\n");
++		gnttab_set_map_op(&map_op, idx_to_kaddr(tpmif, i),
++				  GNTMAP_host_map, tx->ref, tpmif->domid);
 +
-+	spin_lock_irqsave(&pcistub_devices_lock, flags);
++		if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
++						       &map_op, 1))) {
++			BUG();
++		}
 +
-+	pciback_config_quirk_release(dev);
++		handle = map_op.handle;
 +
-+	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
-+		if (psdev->dev == dev) {
-+			found_psdev = psdev;
-+			break;
++		if (map_op.status) {
++			DPRINTK(" Grant table operation failure !\n");
++			return 0;
 +		}
-+	}
 +
-+	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++		tocopy = min_t(size_t, size - offset, PAGE_SIZE);
 +
-+	if (found_psdev) {
-+		dev_dbg(&dev->dev, "found device to remove - in use? %p\n",
-+			found_psdev->pdev);
++		if (copy_from_buffer((void *)(idx_to_kaddr(tpmif, i) |
++					      (tx->addr & ~PAGE_MASK)),
++				     &data[offset], tocopy, isuserbuffer)) {
++			tpmif_put(tpmif);
++			return -EFAULT;
++		}
++		tx->size = tocopy;
 +
-+		if (found_psdev->pdev) {
-+			printk(KERN_WARNING "pciback: ****** removing device "
-+			       "%s while still in-use! ******\n",
-+			       pci_name(found_psdev->dev));
-+			printk(KERN_WARNING "pciback: ****** driver domain may "
-+			       "still access this device's i/o resources!\n");
-+			printk(KERN_WARNING "pciback: ****** shutdown driver "
-+			       "domain before binding device\n");
-+			printk(KERN_WARNING "pciback: ****** to other drivers "
-+			       "or domains\n");
++		gnttab_set_unmap_op(&unmap_op, idx_to_kaddr(tpmif, i),
++				    GNTMAP_host_map, handle);
 +
-+			pciback_release_pci_dev(found_psdev->pdev,
-+						found_psdev->dev);
++		if (unlikely
++		    (HYPERVISOR_grant_table_op
++		     (GNTTABOP_unmap_grant_ref, &unmap_op, 1))) {
++			BUG();
 +		}
 +
-+		spin_lock_irqsave(&pcistub_devices_lock, flags);
-+		list_del(&found_psdev->dev_list);
-+		spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+
-+		/* the final put for releasing from the list */
-+		pcistub_device_put(found_psdev);
++		offset += tocopy;
++		i++;
 +	}
-+}
 +
-+static struct pci_device_id pcistub_ids[] = {
-+	{
-+	 .vendor = PCI_ANY_ID,
-+	 .device = PCI_ANY_ID,
-+	 .subvendor = PCI_ANY_ID,
-+	 .subdevice = PCI_ANY_ID,
-+	 },
-+	{0,},
-+};
++	rc = offset;
++	DPRINTK("Notifying frontend via irq %d\n", tpmif->irq);
++	notify_remote_via_irq(tpmif->irq);
++
++	return rc;
++}
 +
 +/*
-+ * Note: There is no MODULE_DEVICE_TABLE entry here because this isn't
-+ * for a normal device. I don't want it to be loaded automatically.
++ * Read data from the shared memory and copy it directly into the
++ * provided buffer. Advance the last_read indicator, which tells
++ * how many bytes have already been read.
 + */
++static int packet_read(struct packet *pak, size_t numbytes,
++		       char *buffer, size_t buffersize, int isuserbuffer)
++{
++	tpmif_t *tpmif = pak->tpmif;
 +
-+static struct pci_driver pciback_pci_driver = {
-+	.name = "pciback",
-+	.id_table = pcistub_ids,
-+	.probe = pcistub_probe,
-+	.remove = pcistub_remove,
-+};
++	/*
++	 * Read 'numbytes' of data from the buffer. The first 4
++	 * bytes are the instance number in network byte order,
++	 * after that comes the data from the shared memory buffer.
++	 */
++	u32 to_copy;
++	u32 offset = 0;
++	u32 room_left = buffersize;
 +
-+static inline int str_to_slot(const char *buf, int *domain, int *bus,
-+			      int *slot, int *func)
-+{
-+	int err;
++	if (pak->last_read < 4) {
++		/*
++		 * copy the instance number into the buffer
++		 */
++		u32 instance_no = htonl(pak->tpm_instance);
++		u32 last_read = pak->last_read;
 +
-+	err = sscanf(buf, " %x:%x:%x.%x", domain, bus, slot, func);
-+	if (err == 4)
-+		return 0;
-+	else if (err < 0)
-+		return -EINVAL;
++		to_copy = min_t(size_t, 4 - last_read, numbytes);
 +
-+	/* try again without domain */
-+	*domain = 0;
-+	err = sscanf(buf, " %x:%x.%x", bus, slot, func);
-+	if (err == 3)
-+		return 0;
++		if (copy_to_buffer(&buffer[0],
++				   &(((u8 *) & instance_no)[last_read]),
++				   to_copy, isuserbuffer)) {
++			return -EFAULT;
++		}
 +
-+	return -EINVAL;
-+}
++		pak->last_read += to_copy;
++		offset += to_copy;
++		room_left -= to_copy;
++	}
 +
-+static inline int str_to_quirk(const char *buf, int *domain, int *bus, int
-+			       *slot, int *func, int *reg, int *size, int *mask)
-+{
-+	int err;
++	/*
++	 * If the packet has a data buffer appended, read from it...
++	 */
 +
-+	err =
-+	    sscanf(buf, " %04x:%02x:%02x.%1x-%08x:%1x:%08x", domain, bus, slot,
-+		   func, reg, size, mask);
-+	if (err == 7)
-+		return 0;
-+	return -EINVAL;
++	if (room_left > 0) {
++		if (pak->data_buffer) {
++			u32 to_copy = min_t(u32, pak->data_len - offset, room_left);
++			u32 last_read = pak->last_read - 4;
++
++			if (copy_to_buffer(&buffer[offset],
++					   &pak->data_buffer[last_read],
++					   to_copy, isuserbuffer)) {
++				return -EFAULT;
++			}
++			pak->last_read += to_copy;
++			offset += to_copy;
++		} else {
++			offset = packet_read_shmem(pak,
++						   tpmif,
++						   offset,
++						   buffer,
++						   isuserbuffer, room_left);
++		}
++	}
++	return offset;
 +}
 +
-+static int pcistub_device_id_add(int domain, int bus, int slot, int func)
++static int packet_read_shmem(struct packet *pak,
++			     tpmif_t * tpmif,
++			     u32 offset, char *buffer, int isuserbuffer,
++			     u32 room_left)
 +{
-+	struct pcistub_device_id *pci_dev_id;
-+	unsigned long flags;
++	u32 last_read = pak->last_read - 4;
++	u32 i = (last_read / PAGE_SIZE);
++	u32 pg_offset = last_read & (PAGE_SIZE - 1);
++	u32 to_copy;
++	grant_handle_t handle;
 +
-+	pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
-+	if (!pci_dev_id)
-+		return -ENOMEM;
++	tpmif_tx_request_t *tx;
 +
-+	pci_dev_id->domain = domain;
-+	pci_dev_id->bus = bus;
-+	pci_dev_id->devfn = PCI_DEVFN(slot, func);
++	tx = &tpmif->tx->ring[0].req;
++	/*
++	 * Start copying data at the page with index 'i'
++	 * and within that page at offset 'pg_offset'.
++	 * Copy a maximum of 'room_left' bytes.
++	 */
++	to_copy = min_t(u32, PAGE_SIZE - pg_offset, room_left);
++	while (to_copy > 0) {
++		void *src;
++		struct gnttab_map_grant_ref map_op;
++		struct gnttab_unmap_grant_ref unmap_op;
 +
-+	pr_debug("pciback: wants to seize %04x:%02x:%02x.%01x\n",
-+		 domain, bus, slot, func);
++		tx = &tpmif->tx->ring[i].req;
 +
-+	spin_lock_irqsave(&device_ids_lock, flags);
-+	list_add_tail(&pci_dev_id->slot_list, &pcistub_device_ids);
-+	spin_unlock_irqrestore(&device_ids_lock, flags);
++		gnttab_set_map_op(&map_op, idx_to_kaddr(tpmif, i),
++				  GNTMAP_host_map, tx->ref, tpmif->domid);
 +
-+	return 0;
-+}
++		if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
++						       &map_op, 1))) {
++			BUG();
++		}
 +
-+static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
-+{
-+	struct pcistub_device_id *pci_dev_id, *t;
-+	int devfn = PCI_DEVFN(slot, func);
-+	int err = -ENOENT;
-+	unsigned long flags;
++		if (map_op.status) {
++			DPRINTK(" Grant table operation failure !\n");
++			return -EFAULT;
++		}
 +
-+	spin_lock_irqsave(&device_ids_lock, flags);
-+	list_for_each_entry_safe(pci_dev_id, t, &pcistub_device_ids, slot_list) {
++		handle = map_op.handle;
 +
-+		if (pci_dev_id->domain == domain
-+		    && pci_dev_id->bus == bus && pci_dev_id->devfn == devfn) {
-+			/* Don't break; here because it's possible the same
-+			 * slot could be in the list more than once
++		if (to_copy > tx->size) {
++			/*
++			 * User requests more than what's available
 +			 */
-+			list_del(&pci_dev_id->slot_list);
-+			kfree(pci_dev_id);
++			to_copy = min_t(u32, tx->size, to_copy);
++		}
 +
-+			err = 0;
++		DPRINTK("Copying from mapped memory at %08lx\n",
++			(unsigned long)(idx_to_kaddr(tpmif, i) |
++					(tx->addr & ~PAGE_MASK)));
 +
-+			pr_debug("pciback: removed %04x:%02x:%02x.%01x from "
-+				 "seize list\n", domain, bus, slot, func);
++		src = (void *)(idx_to_kaddr(tpmif, i) |
++			       ((tx->addr & ~PAGE_MASK) + pg_offset));
++		if (copy_to_buffer(&buffer[offset],
++				   src, to_copy, isuserbuffer)) {
++			return -EFAULT;
 +		}
-+	}
-+	spin_unlock_irqrestore(&device_ids_lock, flags);
 +
-+	return err;
-+}
-+
-+static int pcistub_reg_add(int domain, int bus, int slot, int func, int reg,
-+			   int size, int mask)
-+{
-+	int err = 0;
-+	struct pcistub_device *psdev;
-+	struct pci_dev *dev;
-+	struct config_field *field;
++		DPRINTK("Data from TPM-FE of domain %d are %d %d %d %d\n",
++			tpmif->domid, buffer[offset], buffer[offset + 1],
++			buffer[offset + 2], buffer[offset + 3]);
 +
-+	psdev = pcistub_device_find(domain, bus, slot, func);
-+	if (!psdev || !psdev->dev) {
-+		err = -ENODEV;
-+		goto out;
-+	}
-+	dev = psdev->dev;
++		gnttab_set_unmap_op(&unmap_op, idx_to_kaddr(tpmif, i),
++				    GNTMAP_host_map, handle);
 +
-+	field = kzalloc(sizeof(*field), GFP_ATOMIC);
-+	if (!field) {
-+		err = -ENOMEM;
-+		goto out;
-+	}
++		if (unlikely
++		    (HYPERVISOR_grant_table_op
++		     (GNTTABOP_unmap_grant_ref, &unmap_op, 1))) {
++			BUG();
++		}
 +
-+	field->offset = reg;
-+	field->size = size;
-+	field->mask = mask;
-+	field->init = NULL;
-+	field->reset = NULL;
-+	field->release = NULL;
-+	field->clean = pciback_config_field_free;
++		offset += to_copy;
++		pg_offset = 0;
++		last_read += to_copy;
++		room_left -= to_copy;
 +
-+	err = pciback_config_quirks_add_field(dev, field);
-+	if (err)
-+		kfree(field);
-+      out:
-+	return err;
++		to_copy = min_t(u32, PAGE_SIZE, room_left);
++		i++;
++	}			/* while (to_copy > 0) */
++	/*
++	 * Adjust the last_read pointer
++	 */
++	pak->last_read = last_read + 4;
++	return offset;
 +}
 +
-+static ssize_t pcistub_slot_add(struct device_driver *drv, const char *buf,
-+				size_t count)
++/* ============================================================
++ * The file layer for reading data from this device
++ * ============================================================
++ */
++static int vtpm_op_open(struct inode *inode, struct file *f)
 +{
-+	int domain, bus, slot, func;
-+	int err;
-+
-+	err = str_to_slot(buf, &domain, &bus, &slot, &func);
-+	if (err)
-+		goto out;
-+
-+	err = pcistub_device_id_add(domain, bus, slot, func);
++	int rc = 0;
++	unsigned long flags;
 +
-+      out:
-+	if (!err)
-+		err = count;
-+	return err;
++	write_lock_irqsave(&dataex.pak_lock, flags);
++	if (dataex.has_opener == 0) {
++		dataex.has_opener = 1;
++	} else {
++		rc = -EPERM;
++	}
++	write_unlock_irqrestore(&dataex.pak_lock, flags);
++	return rc;
 +}
 +
-+DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add);
-+
-+static ssize_t pcistub_slot_remove(struct device_driver *drv, const char *buf,
-+				   size_t count)
++static ssize_t vtpm_op_read(struct file *file,
++			    char __user * data, size_t size, loff_t * offset)
 +{
-+	int domain, bus, slot, func;
-+	int err;
++	int ret_size = -ENODATA;
++	struct packet *pak = NULL;
++	unsigned long flags;
 +
-+	err = str_to_slot(buf, &domain, &bus, &slot, &func);
-+	if (err)
-+		goto out;
++	write_lock_irqsave(&dataex.pak_lock, flags);
++	if (dataex.aborted) {
++		dataex.aborted = 0;
++		dataex.copied_so_far = 0;
++		write_unlock_irqrestore(&dataex.pak_lock, flags);
++		return -EIO;
++	}
 +
-+	err = pcistub_device_id_remove(domain, bus, slot, func);
++	if (list_empty(&dataex.pending_pak)) {
++		write_unlock_irqrestore(&dataex.pak_lock, flags);
++		wait_event_interruptible(dataex.wait_queue,
++					 !list_empty(&dataex.pending_pak));
++		write_lock_irqsave(&dataex.pak_lock, flags);
++		dataex.copied_so_far = 0;
++	}
 +
-+      out:
-+	if (!err)
-+		err = count;
-+	return err;
-+}
++	if (!list_empty(&dataex.pending_pak)) {
++		unsigned int left;
 +
-+DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove);
++		pak = list_entry(dataex.pending_pak.next, struct packet, next);
++		left = pak->data_len - dataex.copied_so_far;
++		list_del(&pak->next);
++		write_unlock_irqrestore(&dataex.pak_lock, flags);
 +
-+static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)
-+{
-+	struct pcistub_device_id *pci_dev_id;
-+	size_t count = 0;
-+	unsigned long flags;
++		DPRINTK("size given by app: %d, available: %d\n", size, left);
 +
-+	spin_lock_irqsave(&device_ids_lock, flags);
-+	list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
-+		if (count >= PAGE_SIZE)
-+			break;
++		ret_size = min_t(size_t, size, left);
 +
-+		count += scnprintf(buf + count, PAGE_SIZE - count,
-+				   "%04x:%02x:%02x.%01x\n",
-+				   pci_dev_id->domain, pci_dev_id->bus,
-+				   PCI_SLOT(pci_dev_id->devfn),
-+				   PCI_FUNC(pci_dev_id->devfn));
-+	}
-+	spin_unlock_irqrestore(&device_ids_lock, flags);
++		ret_size = packet_read(pak, ret_size, data, size, 1);
 +
-+	return count;
-+}
++		write_lock_irqsave(&dataex.pak_lock, flags);
 +
-+DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL);
++		if (ret_size < 0) {
++			del_singleshot_timer_sync(&pak->processing_timer);
++			packet_free(pak);
++			dataex.copied_so_far = 0;
++		} else {
++			DPRINTK("Copied %d bytes to user buffer\n", ret_size);
 +
-+static ssize_t pcistub_quirk_add(struct device_driver *drv, const char *buf,
-+				 size_t count)
-+{
-+	int domain, bus, slot, func, reg, size, mask;
-+	int err;
++			dataex.copied_so_far += ret_size;
++			if (dataex.copied_so_far >= pak->data_len + 4) {
++				DPRINTK("All data from this packet given to app.\n");
++				/* All data given to app */
 +
-+	err = str_to_quirk(buf, &domain, &bus, &slot, &func, &reg, &size,
-+			   &mask);
-+	if (err)
-+		goto out;
++				del_singleshot_timer_sync(&pak->processing_timer);
++				list_add_tail(&pak->next, &dataex.current_pak);
++				/*
++				 * The more frontends that are handled at the same time,
++				 * the more time we give the TPM to process the request.
++				 */
++				mod_timer(&pak->processing_timer,
++					  jiffies + (num_frontends * 60 * HZ));
++				dataex.copied_so_far = 0;
++			} else {
++				list_add(&pak->next, &dataex.pending_pak);
++			}
++		}
++	}
++	write_unlock_irqrestore(&dataex.pak_lock, flags);
 +
-+	err = pcistub_reg_add(domain, bus, slot, func, reg, size, mask);
++	DPRINTK("Returning result from read to app: %d\n", ret_size);
 +
-+      out:
-+	if (!err)
-+		err = count;
-+	return err;
++	return ret_size;
 +}
 +
-+static ssize_t pcistub_quirk_show(struct device_driver *drv, char *buf)
++/*
++ * Write operation - only works after a previous read operation!
++ */
++static ssize_t vtpm_op_write(struct file *file,
++			     const char __user * data, size_t size,
++			     loff_t * offset)
 +{
-+	int count = 0;
-+	unsigned long flags;
-+	extern struct list_head pciback_quirks;
-+	struct pciback_config_quirk *quirk;
-+	struct pciback_dev_data *dev_data;
-+	struct config_field *field;
-+	struct config_field_entry *cfg_entry;
++	struct packet *pak;
++	int rc = 0;
++	unsigned int off = 4;
++	unsigned long flags;
++	struct vtpm_resp_hdr vrh;
 +
-+	spin_lock_irqsave(&device_ids_lock, flags);
-+	list_for_each_entry(quirk, &pciback_quirks, quirks_list) {
-+		if (count >= PAGE_SIZE)
-+			goto out;
++	/*
++	 * Minimum required packet size is:
++	 * 4 bytes for instance number
++	 * 2 bytes for tag
++	 * 4 bytes for paramSize
++	 * 4 bytes for the ordinal
++	 * sum: 14 bytes
++	 */
++	if (size < sizeof (vrh))
++		return -EFAULT;
 +
-+		count += scnprintf(buf + count, PAGE_SIZE - count,
-+				   "%02x:%02x.%01x\n\t%04x:%04x:%04x:%04x\n",
-+				   quirk->pdev->bus->number,
-+				   PCI_SLOT(quirk->pdev->devfn),
-+				   PCI_FUNC(quirk->pdev->devfn),
-+				   quirk->devid.vendor, quirk->devid.device,
-+				   quirk->devid.subvendor,
-+				   quirk->devid.subdevice);
++	if (copy_from_user(&vrh, data, sizeof (vrh)))
++		return -EFAULT;
 +
-+		dev_data = pci_get_drvdata(quirk->pdev);
++	/* malformed packet? */
++	if ((off + ntohl(vrh.len_no)) != size)
++		return -EFAULT;
 +
-+		list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
-+			field = cfg_entry->field;
-+			if (count >= PAGE_SIZE)
-+				goto out;
++	write_lock_irqsave(&dataex.pak_lock, flags);
++	pak = packet_find_instance(&dataex.current_pak,
++				   ntohl(vrh.instance_no));
 +
-+			count += scnprintf(buf + count, PAGE_SIZE - count,
-+					   "\t\t%08x:%01x:%08x\n",
-+					   cfg_entry->base_offset + field->offset, 
-+					   field->size, field->mask);
-+		}
++	if (pak == NULL) {
++		write_unlock_irqrestore(&dataex.pak_lock, flags);
++		DPRINTK(KERN_ALERT "No associated packet! (inst=%d)\n",
++		        ntohl(vrh.instance_no));
++		return -EFAULT;
 +	}
 +
-+      out:
-+	spin_unlock_irqrestore(&device_ids_lock, flags);
++	del_singleshot_timer_sync(&pak->processing_timer);
++	list_del(&pak->next);
 +
-+	return count;
-+}
++	write_unlock_irqrestore(&dataex.pak_lock, flags);
 +
-+DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show, pcistub_quirk_add);
++	/*
++	 * The first 'off' bytes must be the instance number - skip them.
++	 */
++	size -= off;
 +
-+static ssize_t permissive_add(struct device_driver *drv, const char *buf,
-+			      size_t count)
-+{
-+	int domain, bus, slot, func;
-+	int err;
-+	struct pcistub_device *psdev;
-+	struct pciback_dev_data *dev_data;
-+	err = str_to_slot(buf, &domain, &bus, &slot, &func);
-+	if (err)
-+		goto out;
-+	psdev = pcistub_device_find(domain, bus, slot, func);
-+	if (!psdev) {
-+		err = -ENODEV;
-+		goto out;
-+	}
-+	if (!psdev->dev) {
-+		err = -ENODEV;
-+		goto release;
-+	}
-+	dev_data = pci_get_drvdata(psdev->dev);
-+	/* the driver data for a device should never be null at this point */
-+	if (!dev_data) {
-+		err = -ENXIO;
-+		goto release;
-+	}
-+	if (!dev_data->permissive) {
-+		dev_data->permissive = 1;
-+		/* Let user know that what they're doing could be unsafe */
-+		dev_warn(&psdev->dev->dev,
-+			 "enabling permissive mode configuration space accesses!\n");
-+		dev_warn(&psdev->dev->dev,
-+			 "permissive mode is potentially unsafe!\n");
++	rc = packet_write(pak, &data[off], size, 1);
++
++	if (rc > 0) {
++		/* add back the instance-number bytes skipped above */
++		rc += off;
 +	}
-+      release:
-+	pcistub_device_put(psdev);
-+      out:
-+	if (!err)
-+		err = count;
-+	return err;
++	packet_free(pak);
++	return rc;
 +}
 +
-+static ssize_t permissive_show(struct device_driver *drv, char *buf)
++static int vtpm_op_release(struct inode *inode, struct file *file)
 +{
-+	struct pcistub_device *psdev;
-+	struct pciback_dev_data *dev_data;
-+	size_t count = 0;
 +	unsigned long flags;
-+	spin_lock_irqsave(&pcistub_devices_lock, flags);
-+	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
-+		if (count >= PAGE_SIZE)
-+			break;
-+		if (!psdev->dev)
-+			continue;
-+		dev_data = pci_get_drvdata(psdev->dev);
-+		if (!dev_data || !dev_data->permissive)
-+			continue;
-+		count +=
-+		    scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
-+			      pci_name(psdev->dev));
-+	}
-+	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+	return count;
-+}
 +
-+DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show, permissive_add);
++	vtpm_release_packets(NULL, 1);
++	write_lock_irqsave(&dataex.pak_lock, flags);
++	dataex.has_opener = 0;
++	write_unlock_irqrestore(&dataex.pak_lock, flags);
++	return 0;
++}
 +
-+static void pcistub_exit(void)
++static unsigned int vtpm_op_poll(struct file *file,
++				 struct poll_table_struct *pts)
 +{
-+	driver_remove_file(&pciback_pci_driver.driver, &driver_attr_new_slot);
-+	driver_remove_file(&pciback_pci_driver.driver,
-+			   &driver_attr_remove_slot);
-+	driver_remove_file(&pciback_pci_driver.driver, &driver_attr_slots);
-+	driver_remove_file(&pciback_pci_driver.driver, &driver_attr_quirks);
-+	driver_remove_file(&pciback_pci_driver.driver, &driver_attr_permissive);
++	unsigned int flags = POLLOUT | POLLWRNORM;
 +
-+	pci_unregister_driver(&pciback_pci_driver);
++	poll_wait(file, &dataex.wait_queue, pts);
++	if (!list_empty(&dataex.pending_pak)) {
++		flags |= POLLIN | POLLRDNORM;
++	}
++	return flags;
 +}
 +
-+static int __init pcistub_init(void)
-+{
-+	int pos = 0;
-+	int err = 0;
-+	int domain, bus, slot, func;
-+	int parsed;
-+
-+	if (pci_devs_to_hide && *pci_devs_to_hide) {
-+		do {
-+			parsed = 0;
++static const struct file_operations vtpm_ops = {
++	.owner = THIS_MODULE,
++	.llseek = no_llseek,
++	.open = vtpm_op_open,
++	.read = vtpm_op_read,
++	.write = vtpm_op_write,
++	.release = vtpm_op_release,
++	.poll = vtpm_op_poll,
++};
 +
-+			err = sscanf(pci_devs_to_hide + pos,
-+				     " (%x:%x:%x.%x) %n",
-+				     &domain, &bus, &slot, &func, &parsed);
-+			if (err != 4) {
-+				domain = 0;
-+				err = sscanf(pci_devs_to_hide + pos,
-+					     " (%x:%x.%x) %n",
-+					     &bus, &slot, &func, &parsed);
-+				if (err != 3)
-+					goto parse_error;
-+			}
++static struct miscdevice vtpms_miscdevice = {
++	.minor = 225,
++	.name = "vtpm",
++	.fops = &vtpm_ops,
++};
 +
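++/*
++ * Userspace contract implied by the operations above: only one process
++ * may hold the vtpm device open at a time; each read() yields the
++ * 4-byte big-endian instance number followed by the request payload,
++ * and each write() must begin with a struct vtpm_resp_hdr naming the
++ * instance the response belongs to.
++ */
++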
-+			err = pcistub_device_id_add(domain, bus, slot, func);
-+			if (err)
-+				goto out;
++/***************************************************************
++ Utility functions
++***************************************************************/
 +
-+			/* if parsed<=0, we've reached the end of the string */
-+			pos += parsed;
-+		} while (parsed > 0 && pci_devs_to_hide[pos]);
-+	}
++static int tpm_send_fail_message(struct packet *pak, u8 req_tag)
++{
++	int rc;
++	static const unsigned char tpm_error_message_fail[] = {
++		0x00, 0x00,
++		0x00, 0x00, 0x00, 0x0a,
++		0x00, 0x00, 0x00, 0x09	/* TPM_FAIL */
++	};
++	unsigned char buffer[sizeof (tpm_error_message_fail)];
 +
-+	/* If we're the first PCI Device Driver to register, we're the
-+	 * first one to get offered PCI devices as they become
-+	 * available (and thus we can be the first to grab them)
++	memcpy(buffer, tpm_error_message_fail,
++	       sizeof (tpm_error_message_fail));
++	/*
++	 * Insert the right response tag depending on the given tag.
++	 * All response tags are '+3' to the request tag.
 +	 */
-+	err = pci_register_driver(&pciback_pci_driver);
-+	if (err < 0)
-+		goto out;
-+
-+	err = driver_create_file(&pciback_pci_driver.driver,
-+				 &driver_attr_new_slot);
-+	if (!err)
-+		err = driver_create_file(&pciback_pci_driver.driver,
-+					 &driver_attr_remove_slot);
-+	if (!err)
-+		err = driver_create_file(&pciback_pci_driver.driver,
-+					 &driver_attr_slots);
-+	if (!err)
-+		err = driver_create_file(&pciback_pci_driver.driver,
-+					 &driver_attr_quirks);
-+	if (!err)
-+		err = driver_create_file(&pciback_pci_driver.driver,
-+					 &driver_attr_permissive);
-+
-+	if (err)
-+		pcistub_exit();
++	buffer[1] = req_tag + 3;
 +
-+      out:
-+	return err;
++	/*
++	 * Write the data to shared memory and notify the front-end
++	 */
++	rc = packet_write(pak, buffer, sizeof (buffer), 0);
 +
-+      parse_error:
-+	printk(KERN_ERR "pciback: Error parsing pci_devs_to_hide at \"%s\"\n",
-+	       pci_devs_to_hide + pos);
-+	return -EINVAL;
++	return rc;
 +}
 +
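++/*
++ * Worked example (illustrative): for a request tagged TPM_TAG_RQU_COMMAND
++ * (0x00c1), tpm_send_fail_message() above emits
++ *
++ *	00 c4  00 00 00 0a  00 00 00 09
++ *
++ * i.e. TPM_TAG_RSP_COMMAND, paramSize 10, return code TPM_FAIL.
++ */
++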
-+#ifndef MODULE
-+/*
-+ * fs_initcall happens before device_initcall
-+ * so pciback *should* get called first (b/c we 
-+ * want to suck up any device before other drivers
-+ * get a chance by being the first pci device
-+ * driver to register)
-+ */
-+fs_initcall(pcistub_init);
-+#endif
-+
-+static int __init pciback_init(void)
++static int _vtpm_release_packets(struct list_head *head,
++				 tpmif_t * tpmif, int send_msgs)
 +{
-+	int err;
++	int aborted = 0;
++	int c = 0;
++	struct packet *pak;
++	struct list_head *pos, *tmp;
 +
-+	err = pciback_config_init();
-+	if (err)
-+		return err;
++	list_for_each_safe(pos, tmp, head) {
++		pak = list_entry(pos, struct packet, next);
++		c += 1;
 +
-+#ifdef MODULE
-+	err = pcistub_init();
-+	if (err < 0)
-+		return err;
-+#endif
++		if (tpmif == NULL || pak->tpmif == tpmif) {
++			int can_send = 0;
 +
-+	pcistub_init_devices_late();
-+	err = pciback_xenbus_register();
-+	if (err)
-+		pcistub_exit();
++			del_singleshot_timer_sync(&pak->processing_timer);
++			list_del(&pak->next);
 +
-+	return err;
++			if (pak->tpmif && pak->tpmif->status == CONNECTED) {
++				can_send = 1;
++			}
++
++			if (send_msgs && can_send) {
++				tpm_send_fail_message(pak, pak->req_tag);
++			}
++			packet_free(pak);
++			if (c == 1)
++				aborted = 1;
++		}
++	}
++	return aborted;
 +}
 +
-+static void __exit pciback_cleanup(void)
++int vtpm_release_packets(tpmif_t * tpmif, int send_msgs)
 +{
-+	pciback_xenbus_unregister();
-+	pcistub_exit();
-+}
++	unsigned long flags;
 +
-+module_init(pciback_init);
-+module_exit(pciback_cleanup);
++	write_lock_irqsave(&dataex.pak_lock, flags);
 +
-+MODULE_LICENSE("Dual BSD/GPL");
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/pciback/slot.c tmp-linux-2.6-xen.patch/drivers/xen/pciback/slot.c
---- pristine-linux-2.6.18.2/drivers/xen/pciback/slot.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/pciback/slot.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,151 @@
-+/*
-+ * PCI Backend - Provides a Virtual PCI bus (with real devices)
-+ *               to the frontend
-+ *
-+ *   Author: Ryan Wilson <hap9 at epoch.ncsc.mil> (vpci.c)
-+ *   Author: Tristan Gingold <tristan.gingold at bull.net>, from vpci.c
-+ */
++	dataex.aborted = _vtpm_release_packets(&dataex.pending_pak,
++					       tpmif,
++					       send_msgs);
++	_vtpm_release_packets(&dataex.current_pak, tpmif, send_msgs);
 +
-+#include <linux/list.h>
-+#include <linux/slab.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include "pciback.h"
++	write_unlock_irqrestore(&dataex.pak_lock, flags);
++	return 0;
++}
 +
-+/* There are at most 32 slots in a pci bus.  */
-+#define PCI_SLOT_MAX 32
++static int vtpm_queue_packet(struct packet *pak)
++{
++	int rc = 0;
 +
-+#define PCI_BUS_NBR 2
++	if (dataex.has_opener) {
++		unsigned long flags;
 +
-+struct slot_dev_data {
-+	/* Access to dev_list must be protected by lock */
-+	struct pci_dev *slots[PCI_BUS_NBR][PCI_SLOT_MAX];
-+	spinlock_t lock;
-+};
++		write_lock_irqsave(&dataex.pak_lock, flags);
++		list_add_tail(&pak->next, &dataex.pending_pak);
++		/* give the TPM some time to pick up the request */
++		mod_timer(&pak->processing_timer, jiffies + (30 * HZ));
++		write_unlock_irqrestore(&dataex.pak_lock, flags);
 +
-+struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
-+				    unsigned int domain, unsigned int bus,
-+				    unsigned int devfn)
-+{
-+	struct pci_dev *dev = NULL;
-+	struct slot_dev_data *slot_dev = pdev->pci_dev_data;
-+	unsigned long flags;
++		wake_up_interruptible(&dataex.wait_queue);
++	} else {
++		rc = -EFAULT;
++	}
++	return rc;
++}
 +
-+	if (domain != 0 || PCI_FUNC(devfn) != 0)
-+		return NULL;
++static int vtpm_receive(tpmif_t * tpmif, u32 size)
++{
++	int rc = 0;
++	unsigned char buffer[10];
++	__be32 *native_size;
++	struct packet *pak = packet_alloc(tpmif, size, 0, 0);
 +
-+	if (PCI_SLOT(devfn) >= PCI_SLOT_MAX || bus >= PCI_BUS_NBR)
-+		return NULL;
++	if (!pak)
++		return -ENOMEM;
++	/*
++	 * Read 10 bytes from the received buffer to test its
++	 * content for validity.
++	 */
++	if (sizeof (buffer) != packet_read(pak,
++					   sizeof (buffer), buffer,
++					   sizeof (buffer), 0)) {
++		goto failexit;
++	}
++	/*
++	 * Reset the packet read pointer so we can read all its
++	 * contents again.
++	 */
++	packet_reset(pak);
 +
-+	spin_lock_irqsave(&slot_dev->lock, flags);
-+	dev = slot_dev->slots[bus][PCI_SLOT(devfn)];
-+	spin_unlock_irqrestore(&slot_dev->lock, flags);
++	native_size = (__force __be32 *) (&buffer[4 + 2]);
++	/*
++	 * Verify that the size of the packet is correct
++	 * as indicated and that there's actually someone reading packets.
++	 * The minimum size of the packet is '10' for tag, size indicator
++	 * and ordinal.
++	 */
++	if (size < 10 ||
++	    be32_to_cpu(*native_size) != size ||
++	    0 == dataex.has_opener || tpmif->status != CONNECTED) {
++		rc = -EINVAL;
++		goto failexit;
++	} else {
++		rc = vtpm_queue_packet(pak);
++		if (rc < 0)
++			goto failexit;
++	}
++	return 0;
 +
-+	return dev;
++      failexit:
++	if (pak) {
++		tpm_send_fail_message(pak, buffer[4 + 1]);
++		packet_free(pak);
++	}
++	return rc;
 +}
 +
-+int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
++/*
++ * Timeout function that gets invoked when a packet has not been processed
++ * during the timeout period.
++ * The packet must be on a list when this function is invoked. This
++ * also means that once it is taken off a list, the timer must be
++ * destroyed as well.
++ */
++static void processing_timeout(unsigned long ptr)
 +{
-+	int err = 0, slot, bus;
-+	struct slot_dev_data *slot_dev = pdev->pci_dev_data;
++	struct packet *pak = (struct packet *)ptr;
 +	unsigned long flags;
 +
-+	if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
-+		err = -EFAULT;
-+		xenbus_dev_fatal(pdev->xdev, err,
-+				 "Can't export bridges on the virtual PCI bus");
-+		goto out;
++	write_lock_irqsave(&dataex.pak_lock, flags);
++	/*
++	 * Check whether the packet is still on one
++	 * of the lists before touching it.
++	 */
++	if (pak == packet_find_packet(&dataex.pending_pak, pak) ||
++	    pak == packet_find_packet(&dataex.current_pak, pak)) {
++		if ((pak->flags & PACKET_FLAG_DISCARD_RESPONSE) == 0) {
++			tpm_send_fail_message(pak, pak->req_tag);
++		}
++		/* discard future responses */
++		pak->flags |= PACKET_FLAG_DISCARD_RESPONSE;
 +	}
 +
-+	spin_lock_irqsave(&slot_dev->lock, flags);
++	write_unlock_irqrestore(&dataex.pak_lock, flags);
++}
 +
-+	/* Assign to a new slot on the virtual PCI bus */
-+	for (bus = 0; bus < PCI_BUS_NBR; bus++)
-+		for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+			if (slot_dev->slots[bus][slot] == NULL) {
-+				printk(KERN_INFO
-+				       "pciback: slot: %s: assign to virtual slot %d, bus %d\n",
-+				       pci_name(dev), slot, bus);
-+				slot_dev->slots[bus][slot] = dev;
-+				goto unlock;
-+			}
-+		}
++static void tpm_tx_action(unsigned long unused);
++static DECLARE_TASKLET(tpm_tx_tasklet, tpm_tx_action, 0);
 +
-+	err = -ENOMEM;
-+	xenbus_dev_fatal(pdev->xdev, err,
-+			 "No more space on root virtual PCI bus");
++static struct list_head tpm_schedule_list;
++static spinlock_t tpm_schedule_list_lock;
 +
-+      unlock:
-+	spin_unlock_irqrestore(&slot_dev->lock, flags);
-+      out:
-+	return err;
++static inline void maybe_schedule_tx_action(void)
++{
++	smp_mb();
++	tasklet_schedule(&tpm_tx_tasklet);
 +}
 +
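++/*
++ * Deferral sketch: tpmif_be_int() runs in interrupt context and only
++ * queues the interface; tpm_tx_action() later drains tpm_schedule_list
++ * in tasklet context and hands each pending request to vtpm_receive().
++ */
++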
-+void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
++static inline int __on_tpm_schedule_list(tpmif_t * tpmif)
 +{
-+	int slot, bus;
-+	struct slot_dev_data *slot_dev = pdev->pci_dev_data;
-+	struct pci_dev *found_dev = NULL;
-+	unsigned long flags;
++	return tpmif->list.next != NULL;
++}
 +
-+	spin_lock_irqsave(&slot_dev->lock, flags);
++static void remove_from_tpm_schedule_list(tpmif_t * tpmif)
++{
++	spin_lock_irq(&tpm_schedule_list_lock);
++	if (likely(__on_tpm_schedule_list(tpmif))) {
++		list_del(&tpmif->list);
++		tpmif->list.next = NULL;
++		tpmif_put(tpmif);
++	}
++	spin_unlock_irq(&tpm_schedule_list_lock);
++}
 +
-+	for (bus = 0; bus < PCI_BUS_NBR; bus++)
-+		for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+			if (slot_dev->slots[bus][slot] == dev) {
-+				slot_dev->slots[bus][slot] = NULL;
-+				found_dev = dev;
-+				goto out;
-+			}
-+		}
++static void add_to_tpm_schedule_list_tail(tpmif_t * tpmif)
++{
++	if (__on_tpm_schedule_list(tpmif))
++		return;
 +
-+      out:
-+	spin_unlock_irqrestore(&slot_dev->lock, flags);
++	spin_lock_irq(&tpm_schedule_list_lock);
++	if (!__on_tpm_schedule_list(tpmif) && tpmif->active) {
++		list_add_tail(&tpmif->list, &tpm_schedule_list);
++		tpmif_get(tpmif);
++	}
++	spin_unlock_irq(&tpm_schedule_list_lock);
++}
++
++void tpmif_schedule_work(tpmif_t * tpmif)
++{
++	add_to_tpm_schedule_list_tail(tpmif);
++	maybe_schedule_tx_action();
++}
 +
-+	if (found_dev)
-+		pcistub_put_pci_dev(found_dev);
++void tpmif_deschedule_work(tpmif_t * tpmif)
++{
++	remove_from_tpm_schedule_list(tpmif);
 +}
 +
-+int pciback_init_devices(struct pciback_device *pdev)
++static void tpm_tx_action(unsigned long unused)
 +{
-+	int slot, bus;
-+	struct slot_dev_data *slot_dev;
++	struct list_head *ent;
++	tpmif_t *tpmif;
++	tpmif_tx_request_t *tx;
 +
-+	slot_dev = kmalloc(sizeof(*slot_dev), GFP_KERNEL);
-+	if (!slot_dev)
-+		return -ENOMEM;
++	DPRINTK("%s: Getting data from front-end(s)!\n", __FUNCTION__);
 +
-+	spin_lock_init(&slot_dev->lock);
++	while (!list_empty(&tpm_schedule_list)) {
++		/* Get a tpmif from the list with work to do. */
++		ent = tpm_schedule_list.next;
++		tpmif = list_entry(ent, tpmif_t, list);
++		tpmif_get(tpmif);
++		remove_from_tpm_schedule_list(tpmif);
 +
-+	for (bus = 0; bus < PCI_BUS_NBR; bus++)
-+		for (slot = 0; slot < PCI_SLOT_MAX; slot++)
-+			slot_dev->slots[bus][slot] = NULL;
++		tx = &tpmif->tx->ring[0].req;
 +
-+	pdev->pci_dev_data = slot_dev;
++		/* pass it up */
++		vtpm_receive(tpmif, tx->size);
 +
-+	return 0;
++		tpmif_put(tpmif);
++	}
 +}
 +
-+int pciback_publish_pci_roots(struct pciback_device *pdev,
-+			      publish_pci_root_cb publish_cb)
++irqreturn_t tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs)
 +{
-+	/* The Virtual PCI bus has only one root */
-+	return publish_cb(pdev, 0, 0);
++	tpmif_t *tpmif = (tpmif_t *) dev_id;
++
++	add_to_tpm_schedule_list_tail(tpmif);
++	maybe_schedule_tx_action();
++	return IRQ_HANDLED;
 +}
 +
-+void pciback_release_devices(struct pciback_device *pdev)
++static int __init tpmback_init(void)
 +{
-+	int slot, bus;
-+	struct slot_dev_data *slot_dev = pdev->pci_dev_data;
-+	struct pci_dev *dev;
++	int rc;
 +
-+	for (bus = 0; bus < PCI_BUS_NBR; bus++)
-+		for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+			dev = slot_dev->slots[bus][slot];
-+			if (dev != NULL)
-+				pcistub_put_pci_dev(dev);
-+		}
++	if ((rc = misc_register(&vtpms_miscdevice)) != 0) {
++		printk(KERN_ALERT
++		       "Could not register misc device for TPM BE.\n");
++		return rc;
++	}
 +
-+	kfree(slot_dev);
-+	pdev->pci_dev_data = NULL;
-+}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/pciback/vpci.c tmp-linux-2.6-xen.patch/drivers/xen/pciback/vpci.c
---- pristine-linux-2.6.18.2/drivers/xen/pciback/vpci.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/pciback/vpci.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,204 @@
-+/*
-+ * PCI Backend - Provides a Virtual PCI bus (with real devices)
-+ *               to the frontend
-+ *
-+ *   Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
-+ */
++	dataex_init(&dataex);
 +
-+#include <linux/list.h>
-+#include <linux/slab.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include "pciback.h"
++	spin_lock_init(&tpm_schedule_list_lock);
++	INIT_LIST_HEAD(&tpm_schedule_list);
 +
-+#define PCI_SLOT_MAX 32
++	tpmif_interface_init();
++	tpmif_xenbus_init();
 +
-+struct vpci_dev_data {
-+	/* Access to dev_list must be protected by lock */
-+	struct list_head dev_list[PCI_SLOT_MAX];
-+	spinlock_t lock;
-+};
++	printk(KERN_ALERT "Successfully initialized TPM backend driver.\n");
 +
-+static inline struct list_head *list_first(struct list_head *head)
++	return 0;
++}
++
++module_init(tpmback_init);
++
++void __exit tpmback_exit(void)
 +{
-+	return head->next;
++	vtpm_release_packets(NULL, 0);
++	tpmif_xenbus_exit();
++	tpmif_interface_exit();
++	misc_deregister(&vtpms_miscdevice);
 +}
 +
-+struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
-+				    unsigned int domain, unsigned int bus,
-+				    unsigned int devfn)
++MODULE_LICENSE("Dual BSD/GPL");
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/tpmback/xenbus.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/tpmback/xenbus.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,289 @@
++/*  Xenbus code for tpmif backend
++    Copyright (C) 2005 IBM Corporation
++    Copyright (C) 2005 Rusty Russell <rusty at rustcorp.com.au>
++
++    This program is free software; you can redistribute it and/or modify
++    it under the terms of the GNU General Public License as published by
++    the Free Software Foundation; either version 2 of the License, or
++    (at your option) any later version.
++
++    This program is distributed in the hope that it will be useful,
++    but WITHOUT ANY WARRANTY; without even the implied warranty of
++    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++    GNU General Public License for more details.
++
++    You should have received a copy of the GNU General Public License
++    along with this program; if not, write to the Free Software
++    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++*/
++#include <stdarg.h>
++#include <linux/module.h>
++#include <xen/xenbus.h>
++#include "common.h"
++
++struct backend_info
 +{
-+	struct pci_dev_entry *entry;
-+	struct pci_dev *dev = NULL;
-+	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
-+	unsigned long flags;
++	struct xenbus_device *dev;
 +
-+	if (domain != 0 || bus != 0)
-+		return NULL;
++	/* our communications channel */
++	tpmif_t *tpmif;
 +
-+	if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
-+		spin_lock_irqsave(&vpci_dev->lock, flags);
++	long int frontend_id;
++	long int instance; // instance of TPM
++	u8 is_instance_set; // whether the instance number has been set
 +
-+		list_for_each_entry(entry,
-+				    &vpci_dev->dev_list[PCI_SLOT(devfn)],
-+				    list) {
-+			if (PCI_FUNC(entry->dev->devfn) == PCI_FUNC(devfn)) {
-+				dev = entry->dev;
-+				break;
-+			}
-+		}
++	/* watch front end for changes */
++	struct xenbus_watch backend_watch;
++};
 +
-+		spin_unlock_irqrestore(&vpci_dev->lock, flags);
-+	}
-+	return dev;
++static void maybe_connect(struct backend_info *be);
++static void connect(struct backend_info *be);
++static int connect_ring(struct backend_info *be);
++static void backend_changed(struct xenbus_watch *watch,
++			    const char **vec, unsigned int len);
++static void frontend_changed(struct xenbus_device *dev,
++			     enum xenbus_state frontend_state);
++
++long int tpmback_get_instance(struct backend_info *bi)
++{
++	long int res = -1;
++	if (bi && bi->is_instance_set)
++		res = bi->instance;
++	return res;
 +}
 +
-+static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
++static int tpmback_remove(struct xenbus_device *dev)
 +{
-+	if (pci_domain_nr(l->bus) == pci_domain_nr(r->bus)
-+	    && l->bus == r->bus && PCI_SLOT(l->devfn) == PCI_SLOT(r->devfn))
-+		return 1;
++	struct backend_info *be = dev->dev.driver_data;
 +
++	if (!be) return 0;
++
++	if (be->backend_watch.node) {
++		unregister_xenbus_watch(&be->backend_watch);
++		kfree(be->backend_watch.node);
++		be->backend_watch.node = NULL;
++	}
++	if (be->tpmif) {
++		be->tpmif->bi = NULL;
++		vtpm_release_packets(be->tpmif, 0);
++		tpmif_put(be->tpmif);
++		be->tpmif = NULL;
++	}
++	kfree(be);
++	dev->dev.driver_data = NULL;
 +	return 0;
 +}
 +
-+int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
++static int tpmback_probe(struct xenbus_device *dev,
++			 const struct xenbus_device_id *id)
 +{
-+	int err = 0, slot;
-+	struct pci_dev_entry *t, *dev_entry;
-+	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
-+	unsigned long flags;
++	int err;
++	struct backend_info *be = kzalloc(sizeof(struct backend_info),
++					  GFP_KERNEL);
 +
-+	if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
-+		err = -EFAULT;
-+		xenbus_dev_fatal(pdev->xdev, err,
-+				 "Can't export bridges on the virtual PCI bus");
-+		goto out;
++	if (!be) {
++		xenbus_dev_fatal(dev, -ENOMEM,
++				 "allocating backend structure");
++		return -ENOMEM;
 +	}
 +
-+	dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
-+	if (!dev_entry) {
-+		err = -ENOMEM;
-+		xenbus_dev_fatal(pdev->xdev, err,
-+				 "Error adding entry to virtual PCI bus");
-+		goto out;
++	be->is_instance_set = 0;
++	be->dev = dev;
++	dev->dev.driver_data = be;
++
++	err = xenbus_watch_path2(dev, dev->nodename,
++				 "instance", &be->backend_watch,
++				 backend_changed);
++	if (err) {
++		goto fail;
 +	}
 +
-+	dev_entry->dev = dev;
++	err = xenbus_switch_state(dev, XenbusStateInitWait);
++	if (err) {
++		goto fail;
++	}
++	return 0;
++fail:
++	tpmback_remove(dev);
++	return err;
++}
 +
-+	spin_lock_irqsave(&vpci_dev->lock, flags);
 +
-+	/* Keep multi-function devices together on the virtual PCI bus */
-+	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+		if (!list_empty(&vpci_dev->dev_list[slot])) {
-+			t = list_entry(list_first(&vpci_dev->dev_list[slot]),
-+				       struct pci_dev_entry, list);
++static void backend_changed(struct xenbus_watch *watch,
++			    const char **vec, unsigned int len)
++{
++	int err;
++	long instance;
++	struct backend_info *be
++		= container_of(watch, struct backend_info, backend_watch);
++	struct xenbus_device *dev = be->dev;
 +
-+			if (match_slot(dev, t->dev)) {
-+				pr_info("pciback: vpci: %s: "
-+					"assign to virtual slot %d func %d\n",
-+					pci_name(dev), slot,
-+					PCI_FUNC(dev->devfn));
-+				list_add_tail(&dev_entry->list,
-+					      &vpci_dev->dev_list[slot]);
-+				goto unlock;
-+			}
-+		}
++	err = xenbus_scanf(XBT_NIL, dev->nodename,
++			   "instance","%li", &instance);
++	if (XENBUS_EXIST_ERR(err)) {
++		return;
 +	}
 +
-+	/* Assign to a new slot on the virtual PCI bus */
-+	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+		if (list_empty(&vpci_dev->dev_list[slot])) {
-+			printk(KERN_INFO
-+			       "pciback: vpci: %s: assign to virtual slot %d\n",
-+			       pci_name(dev), slot);
-+			list_add_tail(&dev_entry->list,
-+				      &vpci_dev->dev_list[slot]);
-+			goto unlock;
-+		}
++	if (err != 1) {
++		xenbus_dev_fatal(dev, err, "reading instance");
++		return;
 +	}
 +
-+	err = -ENOMEM;
-+	xenbus_dev_fatal(pdev->xdev, err,
-+			 "No more space on root virtual PCI bus");
-+
-+      unlock:
-+	spin_unlock_irqrestore(&vpci_dev->lock, flags);
-+      out:
-+	return err;
++	if (be->is_instance_set == 0) {
++		be->instance = instance;
++		be->is_instance_set = 1;
++	}
 +}
 +
-+void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
++
++static void frontend_changed(struct xenbus_device *dev,
++			     enum xenbus_state frontend_state)
 +{
-+	int slot;
-+	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
-+	struct pci_dev *found_dev = NULL;
-+	unsigned long flags;
++	struct backend_info *be = dev->dev.driver_data;
++	int err;
 +
-+	spin_lock_irqsave(&vpci_dev->lock, flags);
++	switch (frontend_state) {
++	case XenbusStateInitialising:
++	case XenbusStateInitialised:
++		break;
 +
-+	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+		struct pci_dev_entry *e, *tmp;
-+		list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
-+					 list) {
-+			if (e->dev == dev) {
-+				list_del(&e->list);
-+				found_dev = e->dev;
-+				kfree(e);
-+				goto out;
-+			}
++	case XenbusStateConnected:
++		err = connect_ring(be);
++		if (err) {
++			return;
 +		}
-+	}
++		maybe_connect(be);
++		break;
 +
-+      out:
-+	spin_unlock_irqrestore(&vpci_dev->lock, flags);
++	case XenbusStateClosing:
++		be->instance = -1;
++		xenbus_switch_state(dev, XenbusStateClosing);
++		break;
 +
-+	if (found_dev)
-+		pcistub_put_pci_dev(found_dev);
++	case XenbusStateUnknown: /* keep it here */
++	case XenbusStateClosed:
++		xenbus_switch_state(dev, XenbusStateClosed);
++		device_unregister(&be->dev->dev);
++		tpmback_remove(dev);
++		break;
++
++	default:
++		xenbus_dev_fatal(dev, -EINVAL,
++				 "saw state %d at frontend",
++				 frontend_state);
++		break;
++	}
 +}
 +
-+int pciback_init_devices(struct pciback_device *pdev)
++
++
++static void maybe_connect(struct backend_info *be)
 +{
-+	int slot;
-+	struct vpci_dev_data *vpci_dev;
++	if (be->tpmif == NULL || be->tpmif->status == CONNECTED)
++		return;
 +
-+	vpci_dev = kmalloc(sizeof(*vpci_dev), GFP_KERNEL);
-+	if (!vpci_dev)
-+		return -ENOMEM;
++	connect(be);
++}
 +
-+	spin_lock_init(&vpci_dev->lock);
 +
-+	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+		INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);
++static void connect(struct backend_info *be)
++{
++	struct xenbus_transaction xbt;
++	int err;
++	struct xenbus_device *dev = be->dev;
++	unsigned long ready = 1;
++
++again:
++	err = xenbus_transaction_start(&xbt);
++	if (err) {
++		xenbus_dev_fatal(be->dev, err, "starting transaction");
++		return;
 +	}
 +
-+	pdev->pci_dev_data = vpci_dev;
++	err = xenbus_printf(xbt, be->dev->nodename,
++			    "ready", "%lu", ready);
++	if (err) {
++		xenbus_dev_fatal(be->dev, err, "writing 'ready'");
++		goto abort;
++	}
 +
-+	return 0;
-+}
++	err = xenbus_transaction_end(xbt, 0);
++	if (err == -EAGAIN)
++		goto again;
++	if (err)
++		xenbus_dev_fatal(be->dev, err, "end of transaction");
 +
-+int pciback_publish_pci_roots(struct pciback_device *pdev,
-+			      publish_pci_root_cb publish_cb)
-+{
-+	/* The Virtual PCI bus has only one root */
-+	return publish_cb(pdev, 0, 0);
++	err = xenbus_switch_state(dev, XenbusStateConnected);
++	if (!err)
++		be->tpmif->status = CONNECTED;
++	return;
++abort:
++	xenbus_transaction_end(xbt, 1);
 +}
 +
-+void pciback_release_devices(struct pciback_device *pdev)
++
++static int connect_ring(struct backend_info *be)
 +{
-+	int slot;
-+	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
++	struct xenbus_device *dev = be->dev;
++	unsigned long ring_ref;
++	unsigned int evtchn;
++	int err;
 +
-+	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+		struct pci_dev_entry *e, *tmp;
-+		list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
-+					 list) {
-+			list_del(&e->list);
-+			pcistub_put_pci_dev(e->dev);
-+			kfree(e);
++	err = xenbus_gather(XBT_NIL, dev->otherend,
++			    "ring-ref", "%lu", &ring_ref,
++			    "event-channel", "%u", &evtchn, NULL);
++	if (err) {
++		xenbus_dev_error(dev, err,
++				 "reading %s/ring-ref and event-channel",
++				 dev->otherend);
++		return err;
++	}
++
++	if (!be->tpmif) {
++		be->tpmif = tpmif_find(dev->otherend_id, be);
++		if (IS_ERR(be->tpmif)) {
++			err = PTR_ERR(be->tpmif);
++			be->tpmif = NULL;
++			xenbus_dev_fatal(dev, err, "creating vtpm interface");
++			return err;
 +		}
 +	}
 +
-+	kfree(vpci_dev);
-+	pdev->pci_dev_data = NULL;
++	if (be->tpmif != NULL) {
++		err = tpmif_map(be->tpmif, ring_ref, evtchn);
++		if (err) {
++			xenbus_dev_error(dev, err,
++					 "mapping shared-frame %lu port %u",
++					 ring_ref, evtchn);
++			return err;
++		}
++	}
++	return 0;
 +}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/pciback/xenbus.c tmp-linux-2.6-xen.patch/drivers/xen/pciback/xenbus.c
---- pristine-linux-2.6.18.2/drivers/xen/pciback/xenbus.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/pciback/xenbus.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,454 @@
-+/*
-+ * PCI Backend Xenbus Setup - handles setup with frontend and xend
-+ *
-+ *   Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
-+ */
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/list.h>
-+#include <linux/vmalloc.h>
-+#include <xen/xenbus.h>
-+#include <xen/evtchn.h>
-+#include "pciback.h"
-+
-+#define INVALID_EVTCHN_IRQ  (-1)
 +
-+static struct pciback_device *alloc_pdev(struct xenbus_device *xdev)
-+{
-+	struct pciback_device *pdev;
-+
-+	pdev = kzalloc(sizeof(struct pciback_device), GFP_KERNEL);
-+	if (pdev == NULL)
-+		goto out;
-+	dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev);
 +
-+	pdev->xdev = xdev;
-+	xdev->dev.driver_data = pdev;
++static const struct xenbus_device_id tpmback_ids[] = {
++	{ "vtpm" },
++	{ "" }
++};
 +
-+	spin_lock_init(&pdev->dev_lock);
 +
-+	pdev->sh_area = NULL;
-+	pdev->sh_info = NULL;
-+	pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
-+	pdev->be_watching = 0;
++static struct xenbus_driver tpmback = {
++	.name = "vtpm",
++	.owner = THIS_MODULE,
++	.ids = tpmback_ids,
++	.probe = tpmback_probe,
++	.remove = tpmback_remove,
++	.otherend_changed = frontend_changed,
++};
 +
-+	INIT_WORK(&pdev->op_work, pciback_do_op, pdev);
 +
-+	if (pciback_init_devices(pdev)) {
-+		kfree(pdev);
-+		pdev = NULL;
-+	}
-+      out:
-+	return pdev;
++void tpmif_xenbus_init(void)
++{
++	xenbus_register_backend(&tpmback);
 +}
 +
-+static void free_pdev(struct pciback_device *pdev)
++void tpmif_xenbus_exit(void)
 +{
-+	if (pdev->be_watching)
-+		unregister_xenbus_watch(&pdev->be_watch);
-+
-+	/* Ensure the guest can't trigger our handler before removing devices */
-+	if (pdev->evtchn_irq != INVALID_EVTCHN_IRQ)
-+		unbind_from_irqhandler(pdev->evtchn_irq, pdev);
++	xenbus_unregister_driver(&tpmback);
++}
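
[Note: the new tpmback/xenbus.c above follows the standard xenbus backend
skeleton of the 2.6.18 Xen tree: probe() allocates per-device state, sets any
watches, and switches to InitWait; otherend_changed() tracks the frontend
through the XenbusState machine; remove() tears everything down. A minimal
sketch of that same skeleton follows, hedged against the API exactly as it
appears in this patch; every demo_* identifier is hypothetical.]

/* Minimal xenbus backend skeleton, modelled on tpmback above.
 * Assumes the 2.6.18-era <xen/xenbus.h> used throughout this patch;
 * all demo_* names are hypothetical. */
#include <linux/module.h>
#include <linux/slab.h>
#include <xen/xenbus.h>

struct demo_info {
	struct xenbus_device *dev;
};

static int demoback_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	struct demo_info *info = kzalloc(sizeof(*info), GFP_KERNEL);

	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}
	info->dev = dev;
	dev->dev.driver_data = info;	/* 2.6.18 field, as in the patch */

	/* Tell the toolstack and frontend we are ready to negotiate. */
	return xenbus_switch_state(dev, XenbusStateInitWait);
}

static int demoback_remove(struct xenbus_device *dev)
{
	kfree(dev->dev.driver_data);
	dev->dev.driver_data = NULL;
	return 0;
}

static void demoback_frontend_changed(struct xenbus_device *dev,
				      enum xenbus_state frontend_state)
{
	switch (frontend_state) {
	case XenbusStateConnected:
		/* A real backend would map the ring here (cf. connect_ring
		 * in tpmback) before acknowledging the connection. */
		xenbus_switch_state(dev, XenbusStateConnected);
		break;
	case XenbusStateClosing:
		xenbus_switch_state(dev, XenbusStateClosing);
		break;
	case XenbusStateClosed:
		xenbus_switch_state(dev, XenbusStateClosed);
		device_unregister(&dev->dev);
		break;
	default:
		break;
	}
}

static const struct xenbus_device_id demoback_ids[] = {
	{ "demo" },
	{ "" }
};

static struct xenbus_driver demoback = {
	.name = "demo",
	.owner = THIS_MODULE,
	.ids = demoback_ids,
	.probe = demoback_probe,
	.remove = demoback_remove,
	.otherend_changed = demoback_frontend_changed,
};

static int __init demoback_init(void)
{
	return xenbus_register_backend(&demoback);
}
module_init(demoback_init);
MODULE_LICENSE("GPL");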
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/util.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/util.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,65 @@
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <asm/uaccess.h>
++#include <xen/driver_util.h>
 +
-+	/* If the driver domain started an op, make sure we complete it or
-+	 * delete it before releasing the shared memory */
-+	cancel_delayed_work(&pdev->op_work);
-+	flush_scheduled_work();
++struct class *get_xen_class(void)
++{
++	static struct class *xen_class;
 +
-+	if (pdev->sh_info)
-+		xenbus_unmap_ring_vfree(pdev->xdev, pdev->sh_area);
++	if (xen_class)
++		return xen_class;
 +
-+	pciback_release_devices(pdev);
++	xen_class = class_create(THIS_MODULE, "xen");
++	if (IS_ERR(xen_class)) {
++		printk("Failed to create xen sysfs class.\n");
++		xen_class = NULL;
++	}
 +
-+	pdev->xdev->dev.driver_data = NULL;
-+	pdev->xdev = NULL;
++	return xen_class;
++}
++EXPORT_SYMBOL_GPL(get_xen_class);
 +
-+	kfree(pdev);
++#ifdef CONFIG_X86
++static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
++{
++	/* apply_to_page_range() does all the hard work. */
++	return 0;
 +}
 +
-+static int pciback_do_attach(struct pciback_device *pdev, int gnt_ref,
-+			     int remote_evtchn)
++struct vm_struct *alloc_vm_area(unsigned long size)
 +{
-+	int err = 0;
 +	struct vm_struct *area;
 +
-+	dev_dbg(&pdev->xdev->dev,
-+		"Attaching to frontend resources - gnt_ref=%d evtchn=%d\n",
-+		gnt_ref, remote_evtchn);
++	area = get_vm_area(size, VM_IOREMAP);
++	if (area == NULL)
++		return NULL;
 +
-+	area = xenbus_map_ring_valloc(pdev->xdev, gnt_ref);
-+	if (IS_ERR(area)) {
-+		err = PTR_ERR(area);
-+		goto out;
++	/*
++	 * This ensures that page tables are constructed for this region
++	 * of kernel virtual address space and mapped into init_mm.
++	 */
++	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
++				area->size, f, NULL)) {
++		free_vm_area(area);
++		return NULL;
 +	}
-+	pdev->sh_area = area;
-+	pdev->sh_info = area->addr;
 +
-+	err = bind_interdomain_evtchn_to_irqhandler(
-+		pdev->xdev->otherend_id, remote_evtchn, pciback_handle_event,
-+		SA_SAMPLE_RANDOM, "pciback", pdev);
-+	if (err < 0) {
-+		xenbus_dev_fatal(pdev->xdev, err,
-+				 "Error binding event channel to IRQ");
-+		goto out;
-+	}
-+	pdev->evtchn_irq = err;
-+	err = 0;
++	/* Map page directories into every address space. */
++	vmalloc_sync_all();
 +
-+	dev_dbg(&pdev->xdev->dev, "Attached!\n");
-+      out:
-+	return err;
++	return area;
 +}
++EXPORT_SYMBOL_GPL(alloc_vm_area);
 +
-+static int pciback_attach(struct pciback_device *pdev)
++void free_vm_area(struct vm_struct *area)
 +{
-+	int err = 0;
-+	int gnt_ref, remote_evtchn;
-+	char *magic = NULL;
++	struct vm_struct *ret;
++	ret = remove_vm_area(area->addr);
++	BUG_ON(ret != area);
++	kfree(area);
++}
++EXPORT_SYMBOL_GPL(free_vm_area);
++#endif /* CONFIG_X86 */
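
[Note: alloc_vm_area() above deserves a remark: the no-op callback f passed to
apply_to_page_range() does nothing per PTE, but the walk itself forces all
intermediate page tables for the region to be constructed in init_mm, and
vmalloc_sync_all() then propagates the pgd entries, so a later grant-table map
into that VA cannot fault. A small hedged sketch of the intended pairing;
demo_* names are hypothetical.]

#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <xen/driver_util.h>

/* Reserve one page of kernel VA with page tables pre-built, so a
 * subsequent GNTTABOP_map_grant_ref into area->addr cannot fault. */
static struct vm_struct *demo_reserve_page(void)
{
	return alloc_vm_area(PAGE_SIZE);	/* NULL on failure */
}

static void demo_release_page(struct vm_struct *area)
{
	if (area)
		free_vm_area(area);	/* BUG()s if the VA was not ours */
}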
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/xenbus/Makefile
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/xenbus/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,9 @@
++obj-y += xenbus_client.o xenbus_comms.o xenbus_xs.o xenbus_probe.o
++obj-$(CONFIG_XEN_BACKEND) += xenbus_be.o
 +
-+	spin_lock(&pdev->dev_lock);
++xenbus_be-objs =
++xenbus_be-objs += xenbus_backend_client.o
 +
-+	/* Make sure we only do this setup once */
-+	if (xenbus_read_driver_state(pdev->xdev->nodename) !=
-+	    XenbusStateInitialised)
-+		goto out;
++xenbus-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o
++obj-y += $(xenbus-y) $(xenbus-m)
++obj-$(CONFIG_XEN_XENBUS_DEV) += xenbus_dev.o
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/xenbus/xenbus_backend_client.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/xenbus/xenbus_backend_client.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,147 @@
++/******************************************************************************
++ * Backend-client-facing interface for the Xenbus driver.  In other words, the
++ * interface between the Xenbus and the device-specific code in the backend
++ * driver.
++ *
++ * Copyright (C) 2005-2006 XenSource Ltd
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
 +
-+	/* Wait for frontend to state that it has published the configuration */
-+	if (xenbus_read_driver_state(pdev->xdev->otherend) !=
-+	    XenbusStateInitialised)
-+		goto out;
++#include <linux/err.h>
++#include <xen/gnttab.h>
++#include <xen/xenbus.h>
++#include <xen/driver_util.h>
 +
-+	dev_dbg(&pdev->xdev->dev, "Reading frontend config\n");
++/* Based on Rusty Russell's skeleton driver's map_page */
++struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref)
++{
++	struct gnttab_map_grant_ref op;
++	struct vm_struct *area;
 +
-+	err = xenbus_gather(XBT_NIL, pdev->xdev->otherend,
-+			    "pci-op-ref", "%u", &gnt_ref,
-+			    "event-channel", "%u", &remote_evtchn,
-+			    "magic", NULL, &magic, NULL);
-+	if (err) {
-+		/* If configuration didn't get read correctly, wait longer */
-+		xenbus_dev_fatal(pdev->xdev, err,
-+				 "Error reading configuration from frontend");
-+		goto out;
-+	}
++	area = alloc_vm_area(PAGE_SIZE);
++	if (!area)
++		return ERR_PTR(-ENOMEM);
 +
-+	if (magic == NULL || strcmp(magic, XEN_PCI_MAGIC) != 0) {
-+		xenbus_dev_fatal(pdev->xdev, -EFAULT,
-+				 "version mismatch (%s/%s) with pcifront - "
-+				 "halting pciback",
-+				 magic, XEN_PCI_MAGIC);
-+		goto out;
++	gnttab_set_map_op(&op, (unsigned long)area->addr, GNTMAP_host_map,
++			  gnt_ref, dev->otherend_id);
++	
++	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++		BUG();
++
++	if (op.status != GNTST_okay) {
++		free_vm_area(area);
++		xenbus_dev_fatal(dev, op.status,
++				 "mapping in shared page %d from domain %d",
++				 gnt_ref, dev->otherend_id);
++		BUG_ON(!IS_ERR(ERR_PTR(op.status)));
++		return ERR_PTR(op.status);
 +	}
 +
-+	err = pciback_do_attach(pdev, gnt_ref, remote_evtchn);
-+	if (err)
-+		goto out;
++	/* Stuff the handle in an unused field */
++	area->phys_addr = (unsigned long)op.handle;
 +
-+	dev_dbg(&pdev->xdev->dev, "Connecting...\n");
++	return area;
++}
++EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
 +
-+	err = xenbus_switch_state(pdev->xdev, XenbusStateConnected);
-+	if (err)
-+		xenbus_dev_fatal(pdev->xdev, err,
-+				 "Error switching to connected state!");
 +
-+	dev_dbg(&pdev->xdev->dev, "Connected? %d\n", err);
-+      out:
-+	spin_unlock(&pdev->dev_lock);
++int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
++		   grant_handle_t *handle, void *vaddr)
++{
++	struct gnttab_map_grant_ref op;
++	
++	gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map,
++			  gnt_ref, dev->otherend_id);
++	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++		BUG();
 +
-+	if (magic)
-+		kfree(magic);
++	if (op.status != GNTST_okay) {
++		xenbus_dev_fatal(dev, op.status,
++				 "mapping in shared page %d from domain %d",
++				 gnt_ref, dev->otherend_id);
++	} else
++		*handle = op.handle;
 +
-+	return err;
++	return op.status;
 +}
++EXPORT_SYMBOL_GPL(xenbus_map_ring);
 +
-+static void pciback_frontend_changed(struct xenbus_device *xdev,
-+				     enum xenbus_state fe_state)
-+{
-+	struct pciback_device *pdev = xdev->dev.driver_data;
 +
-+	dev_dbg(&xdev->dev, "fe state changed %d\n", fe_state);
++/* Based on Rusty Russell's skeleton driver's unmap_page */
++int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *area)
++{
++	struct gnttab_unmap_grant_ref op;
 +
-+	switch (fe_state) {
-+	case XenbusStateInitialised:
-+		pciback_attach(pdev);
-+		break;
++	gnttab_set_unmap_op(&op, (unsigned long)area->addr, GNTMAP_host_map,
++			    (grant_handle_t)area->phys_addr);
 +
-+	case XenbusStateClosing:
-+		xenbus_switch_state(xdev, XenbusStateClosing);
-+		break;
++	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++		BUG();
 +
-+	case XenbusStateUnknown:
-+	case XenbusStateClosed:
-+		dev_dbg(&xdev->dev, "frontend is gone! unregister device\n");
-+		device_unregister(&xdev->dev);
-+		break;
++	if (op.status == GNTST_okay)
++		free_vm_area(area);
++	else
++		xenbus_dev_error(dev, op.status,
++				 "unmapping page at handle %d error %d",
++				 (int16_t)area->phys_addr, op.status);
 +
-+	default:
-+		break;
-+	}
++	return op.status;
 +}
++EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
 +
-+static int pciback_publish_pci_root(struct pciback_device *pdev,
-+				    unsigned int domain, unsigned int bus)
++
++int xenbus_unmap_ring(struct xenbus_device *dev,
++		     grant_handle_t handle, void *vaddr)
 +{
-+	unsigned int d, b;
-+	int i, root_num, len, err;
-+	char str[64];
++	struct gnttab_unmap_grant_ref op;
 +
-+	dev_dbg(&pdev->xdev->dev, "Publishing pci roots\n");
++	gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map,
++			    handle);
++	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++		BUG();
 +
-+	err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
-+			   "root_num", "%d", &root_num);
-+	if (err == 0 || err == -ENOENT)
-+		root_num = 0;
-+	else if (err < 0)
-+		goto out;
++	if (op.status != GNTST_okay)
++		xenbus_dev_error(dev, op.status,
++				 "unmapping page at handle %d error %d",
++				 handle, op.status);
 +
-+	/* Verify that we haven't already published this pci root */
-+	for (i = 0; i < root_num; i++) {
-+		len = snprintf(str, sizeof(str), "root-%d", i);
-+		if (unlikely(len >= (sizeof(str) - 1))) {
-+			err = -ENOMEM;
-+			goto out;
-+		}
++	return op.status;
++}
++EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
 +
-+		err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
-+				   str, "%x:%x", &d, &b);
-+		if (err < 0)
-+			goto out;
-+		if (err != 2) {
-+			err = -EINVAL;
-+			goto out;
-+		}
++int xenbus_dev_is_online(struct xenbus_device *dev)
++{
++	int rc, val;
 +
-+		if (d == domain && b == bus) {
-+			err = 0;
-+			goto out;
-+		}
-+	}
++	rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val);
++	if (rc != 1)
++		val = 0; /* no online node present */
 +
-+	len = snprintf(str, sizeof(str), "root-%d", root_num);
-+	if (unlikely(len >= (sizeof(str) - 1))) {
-+		err = -ENOMEM;
-+		goto out;
-+	}
++	return val;
++}
++EXPORT_SYMBOL_GPL(xenbus_dev_is_online);
 +
-+	dev_dbg(&pdev->xdev->dev, "writing root %d at %04x:%02x\n",
-+		root_num, domain, bus);
++MODULE_LICENSE("Dual BSD/GPL");
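
[Note: xenbus_map_ring_valloc() above smuggles the grant handle out through
area->phys_addr ("stuff the handle in an unused field"), and
xenbus_unmap_ring_vfree() casts it back for the unmap. A backend caller is
expected to pair them roughly as follows; this is a sketch modelled on the
pciback/tpmback callers in this patch, with hypothetical demo_* names.]

#include <linux/err.h>
#include <xen/xenbus.h>
#include <xen/driver_util.h>

static struct vm_struct *demo_ring_area;	/* hypothetical state */

static int demo_attach_ring(struct xenbus_device *dev, int gnt_ref)
{
	struct vm_struct *area = xenbus_map_ring_valloc(dev, gnt_ref);

	if (IS_ERR(area))
		return PTR_ERR(area);	/* xenbus_dev_fatal() already ran */

	demo_ring_area = area;		/* area->addr is the shared page */
	return 0;
}

static void demo_detach_ring(struct xenbus_device *dev)
{
	if (demo_ring_area) {
		/* Recovers the grant handle from area->phys_addr. */
		xenbus_unmap_ring_vfree(dev, demo_ring_area);
		demo_ring_area = NULL;
	}
}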
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/xenbus/xenbus_client.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/xenbus/xenbus_client.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,284 @@
++/******************************************************************************
++ * Client-facing interface for the Xenbus driver.  In other words, the
++ * interface between the Xenbus and the device-specific code, be it the
++ * frontend or the backend of that driver.
++ *
++ * Copyright (C) 2005 XenSource Ltd
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
 +
-+	err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
-+			    "%04x:%02x", domain, bus);
-+	if (err)
-+		goto out;
++#include <linux/slab.h>
++#include <xen/evtchn.h>
++#include <xen/gnttab.h>
++#include <xen/xenbus.h>
++#include <xen/driver_util.h>
 +
-+	err = xenbus_printf(XBT_NIL, pdev->xdev->nodename,
-+			    "root_num", "%d", (root_num + 1));
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
 +
-+      out:
-+	return err;
++#define DPRINTK(fmt, args...) \
++    pr_debug("xenbus_client (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
++
++const char *xenbus_strstate(enum xenbus_state state)
++{
++	static const char *const name[] = {
++		[ XenbusStateUnknown      ] = "Unknown",
++		[ XenbusStateInitialising ] = "Initialising",
++		[ XenbusStateInitWait     ] = "InitWait",
++		[ XenbusStateInitialised  ] = "Initialised",
++		[ XenbusStateConnected    ] = "Connected",
++		[ XenbusStateClosing      ] = "Closing",
++		[ XenbusStateClosed	  ] = "Closed",
++	};
++	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
 +}
++EXPORT_SYMBOL_GPL(xenbus_strstate);
 +
-+static int pciback_export_device(struct pciback_device *pdev,
-+				 int domain, int bus, int slot, int func)
++int xenbus_watch_path(struct xenbus_device *dev, const char *path,
++		      struct xenbus_watch *watch,
++		      void (*callback)(struct xenbus_watch *,
++				       const char **, unsigned int))
 +{
-+	struct pci_dev *dev;
-+	int err = 0;
++	int err;
 +
-+	dev_dbg(&pdev->xdev->dev, "exporting dom %x bus %x slot %x func %x\n",
-+		domain, bus, slot, func);
++	watch->node = path;
++	watch->callback = callback;
 +
-+	dev = pcistub_get_pci_dev_by_slot(pdev, domain, bus, slot, func);
-+	if (!dev) {
-+		err = -EINVAL;
-+		xenbus_dev_fatal(pdev->xdev, err,
-+				 "Couldn't locate PCI device "
-+				 "(%04x:%02x:%02x.%01x)! "
-+				 "perhaps already in-use?",
-+				 domain, bus, slot, func);
-+		goto out;
++	err = register_xenbus_watch(watch);
++
++	if (err) {
++		watch->node = NULL;
++		watch->callback = NULL;
++		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
 +	}
 +
-+	err = pciback_add_pci_dev(pdev, dev);
-+	if (err)
-+		goto out;
++	return err;
++}
++EXPORT_SYMBOL_GPL(xenbus_watch_path);
 +
-+	/* TODO: It'd be nice to export a bridge and have all of its children
-+	 * get exported with it. This may be best done in xend (which will
-+	 * have to calculate resource usage anyway) but we probably want to
-+	 * put something in here to ensure that if a bridge gets given to a
-+	 * driver domain, that all devices under that bridge are not given
-+	 * to other driver domains (as he who controls the bridge can disable
-+	 * it and stop the other devices from working).
-+	 */
-+      out:
++
++int xenbus_watch_path2(struct xenbus_device *dev, const char *path,
++		       const char *path2, struct xenbus_watch *watch,
++		       void (*callback)(struct xenbus_watch *,
++					const char **, unsigned int))
++{
++	int err;
++	char *state = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", path, path2);
++	if (!state) {
++		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
++		return -ENOMEM;
++	}
++	err = xenbus_watch_path(dev, state, watch, callback);
++
++	if (err)
++		kfree(state);
 +	return err;
 +}
++EXPORT_SYMBOL_GPL(xenbus_watch_path2);
 +
-+static int pciback_setup_backend(struct pciback_device *pdev)
++
++int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
 +{
-+	/* Get configuration from xend (if available now) */
-+	int domain, bus, slot, func;
-+	int err = 0;
-+	int i, num_devs;
-+	char dev_str[64];
++	/* We check whether the state is currently set to the given value, and
++	   if not, then the state is set.  We don't want to unconditionally
++	   write the given state, because we don't want to fire watches
++	   unnecessarily.  Furthermore, if the node has gone, we don't write
++	   to it, as the device will be tearing down, and we don't want to
++	   resurrect that directory.
 +
-+	spin_lock(&pdev->dev_lock);
++	   Note that, because of this cached value of our state, this function
++	   will not work inside a Xenstore transaction (something it was
++	   trying to do in the past) because dev->state would not get reset if
++	   the transaction was aborted.
 +
-+	/* It's possible we could get the call to setup twice, so make sure
-+	 * we're not already connected.
 +	 */
-+	if (xenbus_read_driver_state(pdev->xdev->nodename) !=
-+	    XenbusStateInitWait)
-+		goto out;
 +
-+	dev_dbg(&pdev->xdev->dev, "getting be setup\n");
++	int current_state;
++	int err;
 +
-+	err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
-+			   &num_devs);
-+	if (err != 1) {
-+		if (err >= 0)
-+			err = -EINVAL;
-+		xenbus_dev_fatal(pdev->xdev, err,
-+				 "Error reading number of devices");
-+		goto out;
++	if (state == dev->state)
++		return 0;
++
++	err = xenbus_scanf(XBT_NIL, dev->nodename, "state", "%d",
++			   &current_state);
++	if (err != 1)
++		return 0;
++
++	err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%d", state);
++	if (err) {
++		if (state != XenbusStateClosing) /* Avoid looping */
++			xenbus_dev_fatal(dev, err, "writing new state");
++		return err;
 +	}
 +
-+	for (i = 0; i < num_devs; i++) {
-+		int l = snprintf(dev_str, sizeof(dev_str), "dev-%d", i);
-+		if (unlikely(l >= (sizeof(dev_str) - 1))) {
-+			err = -ENOMEM;
-+			xenbus_dev_fatal(pdev->xdev, err,
-+					 "String overflow while reading "
-+					 "configuration");
-+			goto out;
-+		}
++	dev->state = state;
 +
-+		err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, dev_str,
-+				   "%x:%x:%x.%x", &domain, &bus, &slot, &func);
-+		if (err < 0) {
-+			xenbus_dev_fatal(pdev->xdev, err,
-+					 "Error reading device configuration");
-+			goto out;
-+		}
-+		if (err != 4) {
-+			err = -EINVAL;
-+			xenbus_dev_fatal(pdev->xdev, err,
-+					 "Error parsing pci device "
-+					 "configuration");
-+			goto out;
-+		}
++	return 0;
++}
++EXPORT_SYMBOL_GPL(xenbus_switch_state);
 +
-+		err = pciback_export_device(pdev, domain, bus, slot, func);
-+		if (err)
-+			goto out;
-+	}
++int xenbus_frontend_closed(struct xenbus_device *dev)
++{
++	xenbus_switch_state(dev, XenbusStateClosed);
++	complete(&dev->down);
++	return 0;
++}
++EXPORT_SYMBOL_GPL(xenbus_frontend_closed);
++
++/**
++ * Return the path to the error node for the given device, or NULL on failure.
++ * If the value returned is non-NULL, it is the caller's responsibility to kfree it.
++ */
++static char *error_path(struct xenbus_device *dev)
++{
++	return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
++}
 +
-+	err = pciback_publish_pci_roots(pdev, pciback_publish_pci_root);
-+	if (err) {
-+		xenbus_dev_fatal(pdev->xdev, err,
-+				 "Error while publish PCI root buses "
-+				 "for frontend");
-+		goto out;
-+	}
 +
-+	err = xenbus_switch_state(pdev->xdev, XenbusStateInitialised);
-+	if (err)
-+		xenbus_dev_fatal(pdev->xdev, err,
-+				 "Error switching to initialised state!");
++void _dev_error(struct xenbus_device *dev, int err, const char *fmt,
++		va_list ap)
++{
++	int ret;
++	unsigned int len;
++	char *printf_buffer = NULL, *path_buffer = NULL;
 +
-+      out:
-+	spin_unlock(&pdev->dev_lock);
++#define PRINTF_BUFFER_SIZE 4096
++	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
++	if (printf_buffer == NULL)
++		goto fail;
 +
-+	if (!err)
-+		/* see if pcifront is already configured (if not, we'll wait) */
-+		pciback_attach(pdev);
++	len = sprintf(printf_buffer, "%i ", -err);
++	ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);
 +
-+	return err;
-+}
++	BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);
 +
-+static void pciback_be_watch(struct xenbus_watch *watch,
-+			     const char **vec, unsigned int len)
-+{
-+	struct pciback_device *pdev =
-+	    container_of(watch, struct pciback_device, be_watch);
++	dev_err(&dev->dev, "%s\n", printf_buffer);
 +
-+	switch (xenbus_read_driver_state(pdev->xdev->nodename)) {
-+	case XenbusStateInitWait:
-+		pciback_setup_backend(pdev);
-+		break;
++	path_buffer = error_path(dev);
 +
-+	default:
-+		break;
++	if (path_buffer == NULL) {
++		printk("xenbus: failed to write error node for %s (%s)\n",
++		       dev->nodename, printf_buffer);
++		goto fail;
 +	}
-+}
-+
-+static int pciback_xenbus_probe(struct xenbus_device *dev,
-+				const struct xenbus_device_id *id)
-+{
-+	int err = 0;
-+	struct pciback_device *pdev = alloc_pdev(dev);
 +
-+	if (pdev == NULL) {
-+		err = -ENOMEM;
-+		xenbus_dev_fatal(dev, err,
-+				 "Error allocating pciback_device struct");
-+		goto out;
++	if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
++		printk("xenbus: failed to write error node for %s (%s)\n",
++		       dev->nodename, printf_buffer);
++		goto fail;
 +	}
 +
-+	/* wait for xend to configure us */
-+	err = xenbus_switch_state(dev, XenbusStateInitWait);
-+	if (err)
-+		goto out;
++fail:
++	if (printf_buffer)
++		kfree(printf_buffer);
++	if (path_buffer)
++		kfree(path_buffer);
++}
 +
-+	/* watch the backend node for backend configuration information */
-+	err = xenbus_watch_path(dev, dev->nodename, &pdev->be_watch,
-+				pciback_be_watch);
-+	if (err)
-+		goto out;
-+	pdev->be_watching = 1;
 +
-+	/* We need to force a call to our callback here in case
-+	 * xend already configured us!
-+	 */
-+	pciback_be_watch(&pdev->be_watch, NULL, 0);
++void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt,
++		      ...)
++{
++	va_list ap;
 +
-+      out:
-+	return err;
++	va_start(ap, fmt);
++	_dev_error(dev, err, fmt, ap);
++	va_end(ap);
 +}
++EXPORT_SYMBOL_GPL(xenbus_dev_error);
 +
-+static int pciback_xenbus_remove(struct xenbus_device *dev)
++
++void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt,
++		      ...)
 +{
-+	struct pciback_device *pdev = dev->dev.driver_data;
++	va_list ap;
 +
-+	if (pdev != NULL)
-+		free_pdev(pdev);
++	va_start(ap, fmt);
++	_dev_error(dev, err, fmt, ap);
++	va_end(ap);
 +
-+	return 0;
++	xenbus_switch_state(dev, XenbusStateClosing);
 +}
++EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
 +
-+static struct xenbus_device_id xenpci_ids[] = {
-+	{"pci"},
-+	{{0}},
-+};
-+
-+static struct xenbus_driver xenbus_pciback_driver = {
-+	.name 			= "pciback",
-+	.owner 			= THIS_MODULE,
-+	.ids 			= xenpci_ids,
-+	.probe 			= pciback_xenbus_probe,
-+	.remove 		= pciback_xenbus_remove,
-+	.otherend_changed 	= pciback_frontend_changed,
-+};
 +
-+int __init pciback_xenbus_register(void)
++int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
 +{
-+	if (!is_running_on_xen())
-+		return -ENODEV;
-+
-+	return xenbus_register_backend(&xenbus_pciback_driver);
++	int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
++	if (err < 0)
++		xenbus_dev_fatal(dev, err, "granting access to ring page");
++	return err;
 +}
++EXPORT_SYMBOL_GPL(xenbus_grant_ring);
 +
-+void __exit pciback_xenbus_unregister(void)
++
++int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
 +{
-+	xenbus_unregister_driver(&xenbus_pciback_driver);
-+}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/pcifront/Makefile tmp-linux-2.6-xen.patch/drivers/xen/pcifront/Makefile
---- pristine-linux-2.6.18.2/drivers/xen/pcifront/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/pcifront/Makefile	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,7 @@
-+obj-y += pcifront.o
++	struct evtchn_alloc_unbound alloc_unbound;
++	int err;
 +
-+pcifront-y := pci_op.o xenbus.o pci.o
++	alloc_unbound.dom        = DOMID_SELF;
++	alloc_unbound.remote_dom = dev->otherend_id;
 +
-+ifeq ($(CONFIG_XEN_PCIDEV_FE_DEBUG),y)
-+EXTRA_CFLAGS += -DDEBUG
-+endif
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/pcifront/pci.c tmp-linux-2.6-xen.patch/drivers/xen/pcifront/pci.c
---- pristine-linux-2.6.18.2/drivers/xen/pcifront/pci.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/pcifront/pci.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,46 @@
-+/*
-+ * PCI Frontend Operations - ensure only one PCI frontend runs at a time
-+ *
-+ *   Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
-+ */
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include "pcifront.h"
++	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
++					  &alloc_unbound);
++	if (err)
++		xenbus_dev_fatal(dev, err, "allocating event channel");
++	else
++		*port = alloc_unbound.port;
 +
-+DEFINE_SPINLOCK(pcifront_dev_lock);
-+static struct pcifront_device *pcifront_dev = NULL;
++	return err;
++}
++EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
 +
-+int pcifront_connect(struct pcifront_device *pdev)
-+{
-+	int err = 0;
 +
-+	spin_lock(&pcifront_dev_lock);
++int xenbus_free_evtchn(struct xenbus_device *dev, int port)
++{
++	struct evtchn_close close;
++	int err;
 +
-+	if (!pcifront_dev) {
-+		dev_info(&pdev->xdev->dev, "Installing PCI frontend\n");
-+		pcifront_dev = pdev;
-+	}
-+	else {
-+		dev_err(&pdev->xdev->dev, "PCI frontend already installed!\n");
-+		err = -EEXIST;
-+	}
++	close.port = port;
 +
-+	spin_unlock(&pcifront_dev_lock);
++	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
++	if (err)
++		xenbus_dev_error(dev, err, "freeing event channel %d", port);
 +
 +	return err;
 +}
++EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
 +
-+void pcifront_disconnect(struct pcifront_device *pdev)
-+{
-+	spin_lock(&pcifront_dev_lock);
 +
-+	if (pdev == pcifront_dev) {
-+		dev_info(&pdev->xdev->dev,
-+			 "Disconnecting PCI Frontend Buses\n");
-+		pcifront_dev = NULL;
-+	}
++enum xenbus_state xenbus_read_driver_state(const char *path)
++{
++	enum xenbus_state result;
++	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
++	if (err)
++		result = XenbusStateUnknown;
 +
-+	spin_unlock(&pcifront_dev_lock);
++	return result;
 +}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/pcifront/pcifront.h tmp-linux-2.6-xen.patch/drivers/xen/pcifront/pcifront.h
---- pristine-linux-2.6.18.2/drivers/xen/pcifront/pcifront.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/pcifront/pcifront.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,40 @@
-+/*
-+ * PCI Frontend - Common data structures & function declarations
++EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
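
[Note: the client helpers above are the frontend/backend negotiation toolkit:
xenbus_grant_ring() returns a grant reference (non-negative) or an error,
xenbus_alloc_evtchn() hands back an unbound port, and
xenbus_printf()/xenbus_gather() move the values through the store. A hedged
frontend-side sketch follows, assuming the virt_to_mfn() helper of the
Xen-patched 2.6.18 tree; demo_publish_ring is hypothetical, and the key names
mirror what tpmback's connect_ring() earlier in this patch reads.]

#include <xen/xenbus.h>

/* Grant one ring page to the backend and publish the connection
 * parameters under our own xenstore node. Sketch only. */
static int demo_publish_ring(struct xenbus_device *dev, void *ring_page)
{
	int gref, port, err;

	err = xenbus_grant_ring(dev, virt_to_mfn(ring_page));
	if (err < 0)
		return err;		/* xenbus_dev_fatal() already ran */
	gref = err;

	err = xenbus_alloc_evtchn(dev, &port);
	if (err)
		return err;

	err = xenbus_printf(XBT_NIL, dev->nodename, "ring-ref", "%d", gref);
	if (err)
		return err;
	return xenbus_printf(XBT_NIL, dev->nodename,
			     "event-channel", "%d", port);
}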
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/xenbus/xenbus_comms.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/xenbus/xenbus_comms.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,233 @@
++/******************************************************************************
++ * xenbus_comms.c
 + *
-+ *   Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
++ * Low level code to talk to Xen Store: ringbuffer and event channel.
++ *
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
 + */
-+#ifndef __XEN_PCIFRONT_H__
-+#define __XEN_PCIFRONT_H__
 +
-+#include <linux/spinlock.h>
-+#include <linux/pci.h>
++#include <linux/wait.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++#include <linux/err.h>
++#include <linux/ptrace.h>
++#include <linux/workqueue.h>
++#include <xen/evtchn.h>
 +#include <xen/xenbus.h>
-+#include <xen/interface/io/pciif.h>
-+#include <xen/pcifront.h>
 +
-+struct pci_bus_entry {
-+	struct list_head list;
-+	struct pci_bus *bus;
-+};
++#include <asm/hypervisor.h>
 +
-+struct pcifront_device {
-+	struct xenbus_device *xdev;
-+	struct list_head root_buses;
-+	spinlock_t dev_lock;
++#include "xenbus_comms.h"
 +
-+	int evtchn;
-+	int gnt_ref;
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
 +
-+	/* Lock this when doing any operations in sh_info */
-+	spinlock_t sh_info_lock;
-+	struct xen_pci_sharedinfo *sh_info;
-+};
++static int xenbus_irq;
 +
-+int pcifront_connect(struct pcifront_device *pdev);
-+void pcifront_disconnect(struct pcifront_device *pdev);
++extern void xenbus_probe(void *);
++extern int xenstored_ready;
++static DECLARE_WORK(probe_work, xenbus_probe, NULL);
 +
-+int pcifront_scan_root(struct pcifront_device *pdev,
-+		       unsigned int domain, unsigned int bus);
-+void pcifront_free_roots(struct pcifront_device *pdev);
++static DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
 +
-+#endif	/* __XEN_PCIFRONT_H__ */
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/pcifront/pci_op.c tmp-linux-2.6-xen.patch/drivers/xen/pcifront/pci_op.c
---- pristine-linux-2.6.18.2/drivers/xen/pcifront/pci_op.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/pcifront/pci_op.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,386 @@
-+/*
-+ * PCI Frontend Operations - Communicates with frontend
-+ *
-+ *   Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
-+ */
-+#include <linux/module.h>
-+#include <linux/version.h>
-+#include <linux/init.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include <linux/time.h>
-+#include <xen/evtchn.h>
-+#include "pcifront.h"
++static irqreturn_t wake_waiting(int irq, void *unused, struct pt_regs *regs)
++{
++	if (unlikely(xenstored_ready == 0)) {
++		xenstored_ready = 1;
++		schedule_work(&probe_work);
++	}
 +
-+static int verbose_request = 0;
-+module_param(verbose_request, int, 0644);
++	wake_up(&xb_waitq);
++	return IRQ_HANDLED;
++}
 +
-+#ifdef __ia64__
-+static void pcifront_init_sd(struct pcifront_sd *sd,
-+			     unsigned int domain, unsigned int bus,
-+			     struct pcifront_device *pdev)
++static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod)
 +{
-+	int err, i, j, k, len, root_num, res_count;
-+	struct acpi_resource res;
-+	unsigned int d, b, byte;
-+	unsigned long magic;
-+	char str[64], tmp[3];
-+	unsigned char *buf, *bufp;
-+	u8 *ptr;
++	return ((prod - cons) <= XENSTORE_RING_SIZE);
++}
 +
-+	memset(sd, 0, sizeof(*sd));
++static void *get_output_chunk(XENSTORE_RING_IDX cons,
++			      XENSTORE_RING_IDX prod,
++			      char *buf, uint32_t *len)
++{
++	*len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
++	if ((XENSTORE_RING_SIZE - (prod - cons)) < *len)
++		*len = XENSTORE_RING_SIZE - (prod - cons);
++	return buf + MASK_XENSTORE_IDX(prod);
++}
 +
-+	sd->segment = domain;
-+	sd->node = -1;	/* Revisit for NUMA */
-+	sd->platform_data = pdev;
++static const void *get_input_chunk(XENSTORE_RING_IDX cons,
++				   XENSTORE_RING_IDX prod,
++				   const char *buf, uint32_t *len)
++{
++	*len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
++	if ((prod - cons) < *len)
++		*len = prod - cons;
++	return buf + MASK_XENSTORE_IDX(cons);
++}
 +
-+	/* Look for resources for this controller in xenbus. */
-+	err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, "root_num",
-+			   "%d", &root_num);
-+	if (err != 1)
-+		return;
++int xb_write(const void *data, unsigned len)
++{
++	struct xenstore_domain_interface *intf = xen_store_interface;
++	XENSTORE_RING_IDX cons, prod;
++	int rc;
 +
-+	for (i = 0; i < root_num; i++) {
-+		len = snprintf(str, sizeof(str), "root-%d", i);
-+		if (unlikely(len >= (sizeof(str) - 1)))
-+			return;
++	while (len != 0) {
++		void *dst;
++		unsigned int avail;
 +
-+		err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
-+				   str, "%x:%x", &d, &b);
-+		if (err != 2)
-+			return;
++		rc = wait_event_interruptible(
++			xb_waitq,
++			(intf->req_prod - intf->req_cons) !=
++			XENSTORE_RING_SIZE);
++		if (rc < 0)
++			return rc;
 +
-+		if (d == domain && b == bus)
-+			break;
-+	}
++		/* Read indexes, then verify. */
++		cons = intf->req_cons;
++		prod = intf->req_prod;
++		if (!check_indexes(cons, prod)) {
++			intf->req_cons = intf->req_prod = 0;
++			return -EIO;
++		}
 +
-+	if (i == root_num)
-+		return;
++		dst = get_output_chunk(cons, prod, intf->req, &avail);
++		if (avail == 0)
++			continue;
++		if (avail > len)
++			avail = len;
 +
-+	len = snprintf(str, sizeof(str), "root-resource-magic");
++		/* Must write data /after/ reading the consumer index. */
++		mb();
 +
-+	err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
-+			   str, "%lx", &magic);
++		memcpy(dst, data, avail);
++		data += avail;
++		len -= avail;
 +
-+	if (err != 1)
-+		return; /* No resources, nothing to do */
++		/* Other side must not see new producer until data is there. */
++		wmb();
++		intf->req_prod += avail;
 +
-+	if (magic != (sizeof(res) * 2) + 1) {
-+		printk(KERN_WARNING "pcifront: resource magic mismatch\n");
-+		return;
++		/* Implies mb(): other side will see the updated producer. */
++		notify_remote_via_evtchn(xen_store_evtchn);
 +	}
 +
-+	len = snprintf(str, sizeof(str), "root-%d-resources", i);
-+	if (unlikely(len >= (sizeof(str) - 1)))
-+		return;
-+
-+	err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
-+			   str, "%d", &res_count);
-+
-+	if (err != 1)
-+		return; /* No resources, nothing to do */
-+
-+	sd->window = kzalloc(sizeof(*sd->window) * res_count, GFP_KERNEL);
-+	if (!sd->window)
-+		return;
-+
-+	/* magic is also the size of the byte stream in xenbus */
-+	buf = kmalloc(magic, GFP_KERNEL);
-+	if (!buf) {
-+		kfree(sd->window);
-+		sd->window = NULL;
-+		return;
-+	}
++	return 0;
++}
 +
-+	/* Read the resources out of xenbus */
-+	for (j = 0; j < res_count; j++) {
-+		memset(&res, 0, sizeof(res));
-+		memset(buf, 0, magic);
++int xb_data_to_read(void)
++{
++	struct xenstore_domain_interface *intf = xen_store_interface;
++	return (intf->rsp_cons != intf->rsp_prod);
++}
 +
-+		len = snprintf(str, sizeof(str), "root-%d-resource-%d", i, j);
-+		if (unlikely(len >= (sizeof(str) - 1)))
-+			return;
++int xb_wait_for_data_to_read(void)
++{
++	return wait_event_interruptible(xb_waitq, xb_data_to_read());
++}
 +
-+		err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
-+				   "%s", buf);
-+		if (err != 1) {
-+			printk(KERN_WARNING "pcifront: error reading "
-+			       "resource %d on bus %04x:%02x\n",
-+			       j, domain, bus);
-+			continue;
-+		}
++int xb_read(void *data, unsigned len)
++{
++	struct xenstore_domain_interface *intf = xen_store_interface;
++	XENSTORE_RING_IDX cons, prod;
++	int rc;
 +
-+		bufp = buf;
-+		ptr = (u8 *)&res;
-+		memset(tmp, 0, sizeof(tmp));
++	while (len != 0) {
++		unsigned int avail;
++		const char *src;
 +
-+		/* Copy ASCII byte stream into structure */
-+		for (k = 0; k < magic - 1; k += 2) {
-+			memcpy(tmp, bufp, 2);
-+			bufp += 2;
++		rc = xb_wait_for_data_to_read();
++		if (rc < 0)
++			return rc;
 +
-+			sscanf(tmp, "%02x", &byte);
-+			*ptr = byte;
-+			ptr++;
++		/* Read indexes, then verify. */
++		cons = intf->rsp_cons;
++		prod = intf->rsp_prod;
++		if (!check_indexes(cons, prod)) {
++			intf->rsp_cons = intf->rsp_prod = 0;
++			return -EIO;
 +		}
 +
-+		xen_add_resource(sd, domain, bus, &res);
-+		sd->windows++;
-+	}
-+	kfree(buf);
-+}
-+#endif
++		src = get_input_chunk(cons, prod, intf->rsp, &avail);
++		if (avail == 0)
++			continue;
++		if (avail > len)
++			avail = len;
 +
-+static int errno_to_pcibios_err(int errno)
-+{
-+	switch (errno) {
-+	case XEN_PCI_ERR_success:
-+		return PCIBIOS_SUCCESSFUL;
++		/* Must read data /after/ reading the producer index. */
++		rmb();
 +
-+	case XEN_PCI_ERR_dev_not_found:
-+		return PCIBIOS_DEVICE_NOT_FOUND;
++		memcpy(data, src, avail);
++		data += avail;
++		len -= avail;
 +
-+	case XEN_PCI_ERR_invalid_offset:
-+	case XEN_PCI_ERR_op_failed:
-+		return PCIBIOS_BAD_REGISTER_NUMBER;
++		/* Other side must not see free space until we've copied out */
++		mb();
++		intf->rsp_cons += avail;
 +
-+	case XEN_PCI_ERR_not_implemented:
-+		return PCIBIOS_FUNC_NOT_SUPPORTED;
++		pr_debug("Finished read of %i bytes (%i to go)\n", avail, len);
 +
-+	case XEN_PCI_ERR_access_denied:
-+		return PCIBIOS_SET_FAILED;
++		/* Implies mb(): other side will see the updated consumer. */
++		notify_remote_via_evtchn(xen_store_evtchn);
 +	}
-+	return errno;
++
++	return 0;
 +}
 +
-+static int do_pci_op(struct pcifront_device *pdev, struct xen_pci_op *op)
++/* Set up interrupt handler off store event channel. */
++int xb_init_comms(void)
 +{
-+	int err = 0;
-+	struct xen_pci_op *active_op = &pdev->sh_info->op;
-+	unsigned long irq_flags;
-+	evtchn_port_t port = pdev->evtchn;
-+	s64 ns, ns_timeout;
-+	struct timeval tv;
-+
-+	spin_lock_irqsave(&pdev->sh_info_lock, irq_flags);
-+
-+	memcpy(active_op, op, sizeof(struct xen_pci_op));
++	struct xenstore_domain_interface *intf = xen_store_interface;
++	int err;
 +
-+	/* Go */
-+	wmb();
-+	set_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
-+	notify_remote_via_evtchn(port);
++	if (intf->req_prod != intf->req_cons)
++		printk(KERN_ERR "XENBUS request ring is not quiescent "
++		       "(%08x:%08x)!\n", intf->req_cons, intf->req_prod);
 +
-+	/*
-+	 * We set a poll timeout of 3 seconds but give up on return after
-+	 * 2 seconds. It is better to time out too late rather than too early
-+	 * (in the latter case we end up continually re-executing poll() with a
-+	 * timeout in the past). 1s difference gives plenty of slack for error.
-+	 */
-+	do_gettimeofday(&tv);
-+	ns_timeout = timeval_to_ns(&tv) + 2 * (s64)NSEC_PER_SEC;
++	if (intf->rsp_prod != intf->rsp_cons) {
++		printk(KERN_WARNING "XENBUS response ring is not quiescent "
++		       "(%08x:%08x): fixing up\n",
++		       intf->rsp_cons, intf->rsp_prod);
++		intf->rsp_cons = intf->rsp_prod;
++	}
 +
-+	clear_evtchn(port);
++	if (xenbus_irq)
++		unbind_from_irqhandler(xenbus_irq, &xb_waitq);
 +
-+	while (test_bit(_XEN_PCIF_active,
-+			(unsigned long *)&pdev->sh_info->flags)) {
-+		if (HYPERVISOR_poll(&port, 1, jiffies + 3*HZ))
-+			BUG();
-+		clear_evtchn(port);
-+		do_gettimeofday(&tv);
-+		ns = timeval_to_ns(&tv);
-+		if (ns > ns_timeout) {
-+			dev_err(&pdev->xdev->dev,
-+				"pciback not responding!!!\n");
-+			clear_bit(_XEN_PCIF_active,
-+				  (unsigned long *)&pdev->sh_info->flags);
-+			err = XEN_PCI_ERR_dev_not_found;
-+			goto out;
-+		}
++	err = bind_caller_port_to_irqhandler(
++		xen_store_evtchn, wake_waiting,
++		0, "xenbus", &xb_waitq);
++	if (err <= 0) {
++		printk(KERN_ERR "XENBUS request irq failed %i\n", err);
++		return err;
 +	}
 +
-+	memcpy(op, active_op, sizeof(struct xen_pci_op));
++	xenbus_irq = err;
 +
-+	err = op->err;
-+      out:
-+	spin_unlock_irqrestore(&pdev->sh_info_lock, irq_flags);
-+	return err;
++	return 0;
 +}
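
[Note: the barrier discipline in xb_write()/xb_read() above is the classic
single-producer/single-consumer ring protocol: snapshot both indexes, validate
them (a corrupted or hostile peer must not induce an out-of-bounds copy),
order the data copy against the index update, and let the event-channel
notification publish the result. Condensed below into one hypothetical
producer step; no-wrap case only, where the real code chunks through
get_output_chunk() to handle wrap-around.]

/* Hypothetical single producer step over the same ring layout;
 * check_indexes() is the static helper defined in the file above. */
static int demo_produce(struct xenstore_domain_interface *intf,
			const char *src, unsigned int n)
{
	XENSTORE_RING_IDX cons = intf->req_cons;	/* snapshot */
	XENSTORE_RING_IDX prod = intf->req_prod;

	if (!check_indexes(cons, prod))
		return -EIO;				/* corrupt ring */
	if (XENSTORE_RING_SIZE - (prod - cons) < n)
		return -EAGAIN;				/* no space yet */

	mb();	/* read cons before writing data, as in xb_write() */
	memcpy(intf->req + MASK_XENSTORE_IDX(prod), src, n);
	wmb();	/* data must be visible before the new producer index */
	intf->req_prod = prod + n;
	/* Implies mb(); the other side sees the updated producer. */
	notify_remote_via_evtchn(xen_store_evtchn);
	return 0;
}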
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/xenbus/xenbus_comms.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/xenbus/xenbus_comms.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,46 @@
++/*
++ * Private include for xenbus communications.
++ * 
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
 +
-+/* Access to this function is spinlocked in drivers/pci/access.c */
-+static int pcifront_bus_read(struct pci_bus *bus, unsigned int devfn,
-+			     int where, int size, u32 * val)
-+{
-+	int err = 0;
-+	struct xen_pci_op op = {
-+		.cmd    = XEN_PCI_OP_conf_read,
-+		.domain = pci_domain_nr(bus),
-+		.bus    = bus->number,
-+		.devfn  = devfn,
-+		.offset = where,
-+		.size   = size,
-+	};
-+	struct pcifront_sd *sd = bus->sysdata;
-+	struct pcifront_device *pdev = pcifront_get_pdev(sd);
++#ifndef _XENBUS_COMMS_H
++#define _XENBUS_COMMS_H
 +
-+	if (verbose_request)
-+		dev_info(&pdev->xdev->dev,
-+			 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
-+			 pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
-+			 PCI_FUNC(devfn), where, size);
++int xs_init(void);
++int xb_init_comms(void);
 +
-+	err = do_pci_op(pdev, &op);
++/* Low level routines. */
++int xb_write(const void *data, unsigned len);
++int xb_read(void *data, unsigned len);
++int xb_data_to_read(void);
++int xb_wait_for_data_to_read(void);
++int xs_input_avail(void);
++extern struct xenstore_domain_interface *xen_store_interface;
++extern int xen_store_evtchn;
 +
-+	if (likely(!err)) {
-+		if (verbose_request)
-+			dev_info(&pdev->xdev->dev, "read got back value %x\n",
-+				 op.value);
++#endif /* _XENBUS_COMMS_H */
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/xenbus/xenbus_dev.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/xenbus/xenbus_dev.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,408 @@
++/*
++ * xenbus_dev.c
++ * 
++ * Driver giving user-space access to the kernel's xenbus connection
++ * to xenstore.
++ * 
++ * Copyright (c) 2005, Christian Limpach
++ * Copyright (c) 2005, Rusty Russell, IBM Corporation
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
 +
-+		*val = op.value;
-+	} else if (err == -ENODEV) {
-+		/* No device here, pretend that it just returned 0 */
-+		err = 0;
-+		*val = 0;
-+	}
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/uio.h>
++#include <linux/notifier.h>
++#include <linux/wait.h>
++#include <linux/fs.h>
++#include <linux/poll.h>
++#include <linux/mutex.h>
 +
-+	return errno_to_pcibios_err(err);
-+}
++#include "xenbus_comms.h"
 +
-+/* Access to this function is spinlocked in drivers/pci/access.c */
-+static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn,
-+			      int where, int size, u32 val)
-+{
-+	struct xen_pci_op op = {
-+		.cmd    = XEN_PCI_OP_conf_write,
-+		.domain = pci_domain_nr(bus),
-+		.bus    = bus->number,
-+		.devfn  = devfn,
-+		.offset = where,
-+		.size   = size,
-+		.value  = val,
-+	};
-+	struct pcifront_sd *sd = bus->sysdata;
-+	struct pcifront_device *pdev = pcifront_get_pdev(sd);
++#include <asm/uaccess.h>
++#include <asm/hypervisor.h>
++#include <xen/xenbus.h>
++#include <xen/xen_proc.h>
++#include <asm/hypervisor.h>
 +
-+	if (verbose_request)
-+		dev_info(&pdev->xdev->dev,
-+			 "write dev=%04x:%02x:%02x.%01x - "
-+			 "offset %x size %d val %x\n",
-+			 pci_domain_nr(bus), bus->number,
-+			 PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
 +
-+	return errno_to_pcibios_err(do_pci_op(pdev, &op));
-+}
++struct xenbus_dev_transaction {
++	struct list_head list;
++	struct xenbus_transaction handle;
++};
 +
-+struct pci_ops pcifront_bus_ops = {
-+	.read = pcifront_bus_read,
-+	.write = pcifront_bus_write,
++struct read_buffer {
++	struct list_head list;
++	unsigned int cons;
++	unsigned int len;
++	char msg[];
 +};
 +
-+/* Claim resources for the PCI frontend as-is, backend won't allow changes */
-+static void pcifront_claim_resource(struct pci_dev *dev, void *data)
-+{
-+	struct pcifront_device *pdev = data;
-+	int i;
-+	struct resource *r;
++struct xenbus_dev_data {
++	/* In-progress transaction. */
++	struct list_head transactions;
 +
-+	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
-+		r = &dev->resource[i];
++	/* Active watches. */
++	struct list_head watches;
 +
-+		if (!r->parent && r->start && r->flags) {
-+			dev_dbg(&pdev->xdev->dev, "claiming resource %s/%d\n",
-+				pci_name(dev), i);
-+			pci_claim_resource(dev, i);
-+		}
-+	}
-+}
++	/* Partial request. */
++	unsigned int len;
++	union {
++		struct xsd_sockmsg msg;
++		char buffer[PAGE_SIZE];
++	} u;
 +
-+int pcifront_scan_root(struct pcifront_device *pdev,
-+		       unsigned int domain, unsigned int bus)
-+{
-+	struct pci_bus *b;
-+	struct pcifront_sd *sd = NULL;
-+	struct pci_bus_entry *bus_entry = NULL;
-+	int err = 0;
++	/* Response queue. */
++	struct list_head read_buffers;
++	wait_queue_head_t read_waitq;
 +
-+#ifndef CONFIG_PCI_DOMAINS
-+	if (domain != 0) {
-+		dev_err(&pdev->xdev->dev,
-+			"PCI Root in non-zero PCI Domain! domain=%d\n", domain);
-+		dev_err(&pdev->xdev->dev,
-+			"Please compile with CONFIG_PCI_DOMAINS\n");
-+		err = -EINVAL;
-+		goto err_out;
-+	}
-+#endif
++	struct mutex reply_mutex;
++};
 +
-+	dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
-+		 domain, bus);
++static struct proc_dir_entry *xenbus_dev_intf;
 +
-+	bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
-+	sd = kmalloc(sizeof(*sd), GFP_KERNEL);
-+	if (!bus_entry || !sd) {
-+		err = -ENOMEM;
-+		goto err_out;
++static ssize_t xenbus_dev_read(struct file *filp,
++			       char __user *ubuf,
++			       size_t len, loff_t *ppos)
++{
++	struct xenbus_dev_data *u = filp->private_data;
++	struct read_buffer *rb;
++	int i, ret;
++
++	mutex_lock(&u->reply_mutex);
++	while (list_empty(&u->read_buffers)) {
++		mutex_unlock(&u->reply_mutex);
++		ret = wait_event_interruptible(u->read_waitq,
++					       !list_empty(&u->read_buffers));
++		if (ret)
++			return ret;
++		mutex_lock(&u->reply_mutex);
 +	}
-+	pcifront_init_sd(sd, domain, bus, pdev);
 +
-+	b = pci_scan_bus_parented(&pdev->xdev->dev, bus,
-+				  &pcifront_bus_ops, sd);
-+	if (!b) {
-+		dev_err(&pdev->xdev->dev,
-+			"Error creating PCI Frontend Bus!\n");
-+		err = -ENOMEM;
-+		goto err_out;
++	rb = list_entry(u->read_buffers.next, struct read_buffer, list);
++	for (i = 0; i < len;) {
++		put_user(rb->msg[rb->cons], ubuf + i);
++		i++;
++		rb->cons++;
++		if (rb->cons == rb->len) {
++			list_del(&rb->list);
++			kfree(rb);
++			if (list_empty(&u->read_buffers))
++				break;
++			rb = list_entry(u->read_buffers.next,
++					struct read_buffer, list);
++		}
 +	}
++	mutex_unlock(&u->reply_mutex);
 +
-+	pcifront_setup_root_resources(b, sd);
-+	bus_entry->bus = b;
++	return i;
++}
 +
-+	list_add(&bus_entry->list, &pdev->root_buses);
++static void queue_reply(struct xenbus_dev_data *u,
++			char *data, unsigned int len)
++{
++	struct read_buffer *rb;
 +
-+	/* Claim resources before going "live" with our devices */
-+	pci_walk_bus(b, pcifront_claim_resource, pdev);
++	if (len == 0)
++		return;
 +
-+	pci_bus_add_devices(b);
++	rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
++	BUG_ON(rb == NULL);
 +
-+	return 0;
++	rb->cons = 0;
++	rb->len = len;
 +
-+      err_out:
-+	kfree(bus_entry);
-+	kfree(sd);
++	memcpy(rb->msg, data, len);
 +
-+	return err;
++	list_add_tail(&rb->list, &u->read_buffers);
++
++	wake_up(&u->read_waitq);
 +}
 +
-+static void free_root_bus_devs(struct pci_bus *bus)
++struct watch_adapter
 +{
-+	struct pci_dev *dev;
++	struct list_head list;
++	struct xenbus_watch watch;
++	struct xenbus_dev_data *dev_data;
++	char *token;
++};
 +
-+	while (!list_empty(&bus->devices)) {
-+		dev = container_of(bus->devices.next, struct pci_dev,
-+				   bus_list);
-+		dev_dbg(&dev->dev, "removing device\n");
-+		pci_remove_bus_device(dev);
-+	}
++static void free_watch_adapter (struct watch_adapter *watch)
++{
++	kfree(watch->watch.node);
++	kfree(watch->token);
++	kfree(watch);
 +}
 +
-+void pcifront_free_roots(struct pcifront_device *pdev)
++static void watch_fired(struct xenbus_watch *watch,
++			const char **vec,
++			unsigned int len)
 +{
-+	struct pci_bus_entry *bus_entry, *t;
-+
-+	dev_dbg(&pdev->xdev->dev, "cleaning up root buses\n");
-+
-+	list_for_each_entry_safe(bus_entry, t, &pdev->root_buses, list) {
-+		list_del(&bus_entry->list);
-+
-+		free_root_bus_devs(bus_entry->bus);
++	struct watch_adapter *adap =
++            container_of(watch, struct watch_adapter, watch);
++	struct xsd_sockmsg hdr;
++	const char *path, *token;
++	int path_len, tok_len, body_len, data_len = 0;
++	
++	path = vec[XS_WATCH_PATH];
++	token = adap->token;
 +
-+		kfree(bus_entry->bus->sysdata);
++	path_len = strlen(path) + 1;
++	tok_len = strlen(token) + 1;
++	if (len > 2)
++		data_len = vec[len] - vec[2] + 1;
++	body_len = path_len + tok_len + data_len;
 +
-+		device_unregister(bus_entry->bus->bridge);
-+		pci_remove_bus(bus_entry->bus);
++	hdr.type = XS_WATCH_EVENT;
++	hdr.len = body_len;
 +
-+		kfree(bus_entry);
-+	}
++	mutex_lock(&adap->dev_data->reply_mutex);
++	queue_reply(adap->dev_data, (char *)&hdr, sizeof(hdr));
++	queue_reply(adap->dev_data, (char *)path, path_len);
++	queue_reply(adap->dev_data, (char *)token, tok_len);
++	if (len > 2)
++		queue_reply(adap->dev_data, (char *)vec[2], data_len);
++	mutex_unlock(&adap->dev_data->reply_mutex);
 +}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/pcifront/xenbus.c tmp-linux-2.6-xen.patch/drivers/xen/pcifront/xenbus.c
---- pristine-linux-2.6.18.2/drivers/xen/pcifront/xenbus.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/pcifront/xenbus.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,296 @@
-+/*
-+ * PCI Frontend Xenbus Setup - handles setup with backend (imports page/evtchn)
-+ *
-+ *   Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
-+ */
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/mm.h>
-+#include <xen/xenbus.h>
-+#include <xen/gnttab.h>
-+#include "pcifront.h"
 +
-+#define INVALID_GRANT_REF (0)
-+#define INVALID_EVTCHN    (-1)
++static LIST_HEAD(watch_list);
 +
-+static struct pcifront_device *alloc_pdev(struct xenbus_device *xdev)
++static ssize_t xenbus_dev_write(struct file *filp,
++				const char __user *ubuf,
++				size_t len, loff_t *ppos)
 +{
-+	struct pcifront_device *pdev;
++	struct xenbus_dev_data *u = filp->private_data;
++	struct xenbus_dev_transaction *trans = NULL;
++	uint32_t msg_type;
++	void *reply;
++	char *path, *token;
++	struct watch_adapter *watch, *tmp_watch;
++	int err, rc = len;
 +
-+	pdev = kmalloc(sizeof(struct pcifront_device), GFP_KERNEL);
-+	if (pdev == NULL)
++	if ((len + u->len) > sizeof(u->u.buffer)) {
++		rc = -EINVAL;
 +		goto out;
++	}
 +
-+	pdev->sh_info =
-+	    (struct xen_pci_sharedinfo *)__get_free_page(GFP_KERNEL);
-+	if (pdev->sh_info == NULL) {
-+		kfree(pdev);
-+		pdev = NULL;
++	if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0) {
++		rc = -EFAULT;
 +		goto out;
 +	}
-+	pdev->sh_info->flags = 0;
 +
-+	xdev->dev.driver_data = pdev;
-+	pdev->xdev = xdev;
++	u->len += len;
++	if ((u->len < sizeof(u->u.msg)) ||
++	    (u->len < (sizeof(u->u.msg) + u->u.msg.len)))
++		return rc;
++
++	msg_type = u->u.msg.type;
++
++	switch (msg_type) {
++	case XS_TRANSACTION_START:
++	case XS_TRANSACTION_END:
++	case XS_DIRECTORY:
++	case XS_READ:
++	case XS_GET_PERMS:
++	case XS_RELEASE:
++	case XS_GET_DOMAIN_PATH:
++	case XS_WRITE:
++	case XS_MKDIR:
++	case XS_RM:
++	case XS_SET_PERMS:
++		if (msg_type == XS_TRANSACTION_START) {
++			trans = kmalloc(sizeof(*trans), GFP_KERNEL);
++			if (!trans) {
++				rc = -ENOMEM;
++				goto out;
++			}
++		}
++
++		reply = xenbus_dev_request_and_reply(&u->u.msg);
++		if (IS_ERR(reply)) {
++			kfree(trans);
++			rc = PTR_ERR(reply);
++			goto out;
++		}
++
++		if (msg_type == XS_TRANSACTION_START) {
++			trans->handle.id = simple_strtoul(reply, NULL, 0);
++			list_add(&trans->list, &u->transactions);
++		} else if (msg_type == XS_TRANSACTION_END) {
++			list_for_each_entry(trans, &u->transactions, list)
++				if (trans->handle.id == u->u.msg.tx_id)
++					break;
++			BUG_ON(&trans->list == &u->transactions);
++			list_del(&trans->list);
++			kfree(trans);
++		}
++		mutex_lock(&u->reply_mutex);
++		queue_reply(u, (char *)&u->u.msg, sizeof(u->u.msg));
++		queue_reply(u, (char *)reply, u->u.msg.len);
++		mutex_unlock(&u->reply_mutex);
++		kfree(reply);
++		break;
++
++	case XS_WATCH:
++	case XS_UNWATCH: {
++		static const char *XS_RESP = "OK";
++		struct xsd_sockmsg hdr;
++
++		path = u->u.buffer + sizeof(u->u.msg);
++		token = memchr(path, 0, u->u.msg.len);
++		if (token == NULL) {
++			rc = -EILSEQ;
++			goto out;
++		}
++		token++;
++
++		if (msg_type == XS_WATCH) {
++			watch = kzalloc(sizeof(*watch), GFP_KERNEL);
++			watch->watch.node = kmalloc(strlen(path)+1,
++                                                    GFP_KERNEL);
++			strcpy((char *)watch->watch.node, path);
++			watch->watch.callback = watch_fired;
++			watch->token = kmalloc(strlen(token)+1, GFP_KERNEL);
++			strcpy(watch->token, token);
++			watch->dev_data = u;
 +
-+	INIT_LIST_HEAD(&pdev->root_buses);
++			err = register_xenbus_watch(&watch->watch);
++			if (err) {
++				free_watch_adapter(watch);
++				rc = err;
++				goto out;
++			}
++			
++			list_add(&watch->list, &u->watches);
++		} else {
++			list_for_each_entry_safe(watch, tmp_watch,
++                                                 &u->watches, list) {
++				if (!strcmp(watch->token, token) &&
++				    !strcmp(watch->watch.node, path))
++				{
++					unregister_xenbus_watch(&watch->watch);
++					list_del(&watch->list);
++					free_watch_adapter(watch);
++					break;
++				}
++			}
++		}
 +
-+	spin_lock_init(&pdev->dev_lock);
-+	spin_lock_init(&pdev->sh_info_lock);
++		hdr.type = msg_type;
++		hdr.len = strlen(XS_RESP) + 1;
++		mutex_lock(&u->reply_mutex);
++		queue_reply(u, (char *)&hdr, sizeof(hdr));
++		queue_reply(u, (char *)XS_RESP, hdr.len);
++		mutex_unlock(&u->reply_mutex);
++		break;
++	}
 +
-+	pdev->evtchn = INVALID_EVTCHN;
-+	pdev->gnt_ref = INVALID_GRANT_REF;
++	default:
++		rc = -EINVAL;
++		break;
++	}
 +
-+	dev_dbg(&xdev->dev, "Allocated pdev @ 0x%p pdev->sh_info @ 0x%p\n",
-+		pdev, pdev->sh_info);
-+      out:
-+	return pdev;
++ out:
++	u->len = 0;
++	return rc;
 +}
 +
-+static void free_pdev(struct pcifront_device *pdev)
++static int xenbus_dev_open(struct inode *inode, struct file *filp)
 +{
-+	dev_dbg(&pdev->xdev->dev, "freeing pdev @ 0x%p\n", pdev);
-+
-+	pcifront_free_roots(pdev);
++	struct xenbus_dev_data *u;
 +
-+	if (pdev->evtchn != INVALID_EVTCHN)
-+		xenbus_free_evtchn(pdev->xdev, pdev->evtchn);
++	if (xen_store_evtchn == 0)
++		return -ENOENT;
 +
-+	if (pdev->gnt_ref != INVALID_GRANT_REF)
-+		gnttab_end_foreign_access(pdev->gnt_ref, 0,
-+					  (unsigned long)pdev->sh_info);
++	nonseekable_open(inode, filp);
 +
-+	pdev->xdev->dev.driver_data = NULL;
++	u = kzalloc(sizeof(*u), GFP_KERNEL);
++	if (u == NULL)
++		return -ENOMEM;
 +
-+	kfree(pdev);
-+}
++	INIT_LIST_HEAD(&u->transactions);
++	INIT_LIST_HEAD(&u->watches);
++	INIT_LIST_HEAD(&u->read_buffers);
++	init_waitqueue_head(&u->read_waitq);
 +
-+static int pcifront_publish_info(struct pcifront_device *pdev)
-+{
-+	int err = 0;
-+	struct xenbus_transaction trans;
++	mutex_init(&u->reply_mutex);
 +
-+	err = xenbus_grant_ring(pdev->xdev, virt_to_mfn(pdev->sh_info));
-+	if (err < 0)
-+		goto out;
++	filp->private_data = u;
 +
-+	pdev->gnt_ref = err;
++	return 0;
++}
 +
-+	err = xenbus_alloc_evtchn(pdev->xdev, &pdev->evtchn);
-+	if (err)
-+		goto out;
++static int xenbus_dev_release(struct inode *inode, struct file *filp)
++{
++	struct xenbus_dev_data *u = filp->private_data;
++	struct xenbus_dev_transaction *trans, *tmp;
++	struct watch_adapter *watch, *tmp_watch;
 +
-+      do_publish:
-+	err = xenbus_transaction_start(&trans);
-+	if (err) {
-+		xenbus_dev_fatal(pdev->xdev, err,
-+				 "Error writing configuration for backend "
-+				 "(start transaction)");
-+		goto out;
++	list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
++		xenbus_transaction_end(trans->handle, 1);
++		list_del(&trans->list);
++		kfree(trans);
 +	}
 +
-+	err = xenbus_printf(trans, pdev->xdev->nodename,
-+			    "pci-op-ref", "%u", pdev->gnt_ref);
-+	if (!err)
-+		err = xenbus_printf(trans, pdev->xdev->nodename,
-+				    "event-channel", "%u", pdev->evtchn);
-+	if (!err)
-+		err = xenbus_printf(trans, pdev->xdev->nodename,
-+				    "magic", XEN_PCI_MAGIC);
-+
-+	if (err) {
-+		xenbus_transaction_end(trans, 1);
-+		xenbus_dev_fatal(pdev->xdev, err,
-+				 "Error writing configuration for backend");
-+		goto out;
-+	} else {
-+		err = xenbus_transaction_end(trans, 0);
-+		if (err == -EAGAIN)
-+			goto do_publish;
-+		else if (err) {
-+			xenbus_dev_fatal(pdev->xdev, err,
-+					 "Error completing transaction "
-+					 "for backend");
-+			goto out;
-+		}
++	list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
++		unregister_xenbus_watch(&watch->watch);
++		list_del(&watch->list);
++		free_watch_adapter(watch);
 +	}
 +
-+	xenbus_switch_state(pdev->xdev, XenbusStateInitialised);
-+
-+	dev_dbg(&pdev->xdev->dev, "publishing successful!\n");
++	kfree(u);
 +
-+      out:
-+	return err;
++	return 0;
 +}
 +
-+static int pcifront_try_connect(struct pcifront_device *pdev)
++static unsigned int xenbus_dev_poll(struct file *file, poll_table *wait)
 +{
-+	int err = -EFAULT;
-+	int i, num_roots, len;
-+	char str[64];
-+	unsigned int domain, bus;
++	struct xenbus_dev_data *u = file->private_data;
 +
-+	spin_lock(&pdev->dev_lock);
++	poll_wait(file, &u->read_waitq, wait);
++	if (!list_empty(&u->read_buffers))
++		return POLLIN | POLLRDNORM;
++	return 0;
++}
 +
-+	/* Only connect once */
-+	if (xenbus_read_driver_state(pdev->xdev->nodename) !=
-+	    XenbusStateInitialised)
-+		goto out;
++static const struct file_operations xenbus_dev_file_ops = {
++	.read = xenbus_dev_read,
++	.write = xenbus_dev_write,
++	.open = xenbus_dev_open,
++	.release = xenbus_dev_release,
++	.poll = xenbus_dev_poll,
++};
 +
-+	err = pcifront_connect(pdev);
-+	if (err) {
-+		xenbus_dev_fatal(pdev->xdev, err,
-+				 "Error connecting PCI Frontend");
-+		goto out;
-+	}
++int xenbus_dev_init(void)
++{
++	xenbus_dev_intf = create_xen_proc_entry("xenbus", 0400);
++	if (xenbus_dev_intf)
++		xenbus_dev_intf->proc_fops = &xenbus_dev_file_ops;
 +
-+	err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
-+			   "root_num", "%d", &num_roots);
-+	if (err == -ENOENT) {
-+		xenbus_dev_error(pdev->xdev, err,
-+				 "No PCI Roots found, trying 0000:00");
-+		err = pcifront_scan_root(pdev, 0, 0);
-+		num_roots = 0;
-+	} else if (err != 1) {
-+		if (err == 0)
-+			err = -EINVAL;
-+		xenbus_dev_fatal(pdev->xdev, err,
-+				 "Error reading number of PCI roots");
-+		goto out;
-+	}
++	return 0;
++}
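The device above speaks a simple length-prefixed protocol: every request is a struct xsd_sockmsg header followed by msg.len payload bytes, and xenbus_dev_read() hands replies back in the same framing. A minimal user-space client might look like the sketch below; the struct layout and the XS_READ value mirror xen/interface/io/xs_wire.h, the node name is an arbitrary example, and short-read/short-write handling is elided:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    struct xsd_sockmsg {                 /* mirrors xs_wire.h */
            uint32_t type;               /* XS_READ, XS_WRITE, ... */
            uint32_t req_id;             /* echoed back in the reply */
            uint32_t tx_id;              /* 0 = not in a transaction */
            uint32_t len;                /* payload bytes that follow */
    };
    #define XS_READ 2                    /* enum xsd_sockmsg_type */

    int main(void)
    {
            const char path[] = "domid"; /* example node, NUL included */
            struct xsd_sockmsg msg = { XS_READ, 1, 0, sizeof(path) };
            char reply[1024];
            int fd = open("/proc/xen/xenbus", O_RDWR);

            if (fd < 0) { perror("open"); return 1; }

            /* The driver buffers partial writes until it has seen
             * sizeof(msg) + msg.len bytes, so two writes are fine. */
            write(fd, &msg, sizeof(msg));
            write(fd, path, sizeof(path));

            /* The reply is again a header followed by msg.len bytes. */
            if (read(fd, &msg, sizeof(msg)) == sizeof(msg) &&
                msg.len < sizeof(reply)) {
                    ssize_t n = read(fd, reply, msg.len);
                    if (n > 0)
                            printf("%.*s\n", (int)n, reply);
            }
            close(fd);
            return 0;
    }
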
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/xenbus/xenbus_probe.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/xenbus/xenbus_probe.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1153 @@
++/******************************************************************************
++ * Talks to Xen Store to figure out what devices we have.
++ *
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ * Copyright (C) 2005 Mike Wray, Hewlett-Packard
++ * Copyright (C) 2005, 2006 XenSource Ltd
++ * Copyright (C) 2007 Solarflare Communications, Inc.
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
 +
-+	for (i = 0; i < num_roots; i++) {
-+		len = snprintf(str, sizeof(str), "root-%d", i);
-+		if (unlikely(len >= (sizeof(str) - 1))) {
-+			err = -ENOMEM;
-+			goto out;
-+		}
++#define DPRINTK(fmt, args...)				\
++	pr_debug("xenbus_probe (%s:%d) " fmt ".\n",	\
++		 __FUNCTION__, __LINE__, ##args)
 +
-+		err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
-+				   "%x:%x", &domain, &bus);
-+		if (err != 2) {
-+			if (err >= 0)
-+				err = -EINVAL;
-+			xenbus_dev_fatal(pdev->xdev, err,
-+					 "Error reading PCI root %d", i);
-+			goto out;
-+		}
++#include <linux/kernel.h>
++#include <linux/err.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++#include <linux/fcntl.h>
++#include <linux/mm.h>
++#include <linux/notifier.h>
++#include <linux/mutex.h>
++#include <linux/module.h>
 +
-+		err = pcifront_scan_root(pdev, domain, bus);
-+		if (err) {
-+			xenbus_dev_fatal(pdev->xdev, err,
-+					 "Error scanning PCI root %04x:%02x",
-+					 domain, bus);
-+			goto out;
-+		}
-+	}
++#include <asm/io.h>
++#include <asm/page.h>
++#include <asm/maddr.h>
++#include <asm/pgtable.h>
++#include <asm/hypervisor.h>
++#include <xen/xenbus.h>
++#include <xen/xen_proc.h>
++#include <xen/evtchn.h>
++#include <xen/features.h>
++#ifdef MODULE
++#include <xen/hvm.h>
++#endif
 +
-+	err = xenbus_switch_state(pdev->xdev, XenbusStateConnected);
-+	if (err)
-+		goto out;
++#include "xenbus_comms.h"
++#include "xenbus_probe.h"
 +
-+      out:
-+	spin_unlock(&pdev->dev_lock);
-+	return err;
-+}
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
 +
-+static int pcifront_try_disconnect(struct pcifront_device *pdev)
-+{
-+	int err = 0;
-+	enum xenbus_state prev_state;
++int xen_store_evtchn;
++struct xenstore_domain_interface *xen_store_interface;
++static unsigned long xen_store_mfn;
 +
-+	spin_lock(&pdev->dev_lock);
++extern struct mutex xenwatch_mutex;
 +
-+	prev_state = xenbus_read_driver_state(pdev->xdev->nodename);
++static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
 +
-+	if (prev_state < XenbusStateClosing)
-+		err = xenbus_switch_state(pdev->xdev, XenbusStateClosing);
++static void wait_for_devices(struct xenbus_driver *xendrv);
 +
-+	if (!err && prev_state == XenbusStateConnected)
-+		pcifront_disconnect(pdev);
++static int xenbus_probe_frontend(const char *type, const char *name);
 +
-+	spin_unlock(&pdev->dev_lock);
++static void xenbus_dev_shutdown(struct device *_dev);
 +
-+	return err;
++/* If something in array of ids matches this device, return it. */
++static const struct xenbus_device_id *
++match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
++{
++	for (; *arr->devicetype != '\0'; arr++) {
++		if (!strcmp(arr->devicetype, dev->devicetype))
++			return arr;
++	}
++	return NULL;
 +}
 +
-+static void pcifront_backend_changed(struct xenbus_device *xdev,
-+				     enum xenbus_state be_state)
++int xenbus_match(struct device *_dev, struct device_driver *_drv)
 +{
-+	struct pcifront_device *pdev = xdev->dev.driver_data;
-+
-+	switch (be_state) {
-+	case XenbusStateClosing:
-+		dev_warn(&xdev->dev, "backend going away!\n");
-+		pcifront_try_disconnect(pdev);
-+		break;
++	struct xenbus_driver *drv = to_xenbus_driver(_drv);
 +
-+	case XenbusStateUnknown:
-+	case XenbusStateClosed:
-+		dev_warn(&xdev->dev, "backend went away!\n");
-+		pcifront_try_disconnect(pdev);
++	if (!drv->ids)
++		return 0;
 +
-+		device_unregister(&pdev->xdev->dev);
-+		break;
++	return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
++}
 +
-+	case XenbusStateConnected:
-+		pcifront_try_connect(pdev);
-+		break;
++/* device/<type>/<id> => <type>-<id> */
++static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
++{
++	nodename = strchr(nodename, '/');
++	if (!nodename || strlen(nodename + 1) >= BUS_ID_SIZE) {
++		printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
++		return -EINVAL;
++	}
 +
-+	default:
-+		break;
++	strlcpy(bus_id, nodename + 1, BUS_ID_SIZE);
++	if (!strchr(bus_id, '/')) {
++		printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
++		return -EINVAL;
 +	}
++	*strchr(bus_id, '/') = '-';
++	return 0;
 +}
 +
-+static int pcifront_xenbus_probe(struct xenbus_device *xdev,
-+				 const struct xenbus_device_id *id)
-+{
-+	int err = 0;
-+	struct pcifront_device *pdev = alloc_pdev(xdev);
 +
-+	if (pdev == NULL) {
-+		err = -ENOMEM;
-+		xenbus_dev_fatal(xdev, err,
-+				 "Error allocating pcifront_device struct");
-+		goto out;
-+	}
++static void free_otherend_details(struct xenbus_device *dev)
++{
++	kfree(dev->otherend);
++	dev->otherend = NULL;
++}
 +
-+	err = pcifront_publish_info(pdev);
 +
-+      out:
-+	return err;
++static void free_otherend_watch(struct xenbus_device *dev)
++{
++	if (dev->otherend_watch.node) {
++		unregister_xenbus_watch(&dev->otherend_watch);
++		kfree(dev->otherend_watch.node);
++		dev->otherend_watch.node = NULL;
++	}
 +}
 +
-+static int pcifront_xenbus_remove(struct xenbus_device *xdev)
++
++int read_otherend_details(struct xenbus_device *xendev,
++				 char *id_node, char *path_node)
 +{
-+	if (xdev->dev.driver_data)
-+		free_pdev(xdev->dev.driver_data);
++	int err = xenbus_gather(XBT_NIL, xendev->nodename,
++				id_node, "%i", &xendev->otherend_id,
++				path_node, NULL, &xendev->otherend,
++				NULL);
++	if (err) {
++		xenbus_dev_fatal(xendev, err,
++				 "reading other end details from %s",
++				 xendev->nodename);
++		return err;
++	}
++	if (strlen(xendev->otherend) == 0 ||
++	    !xenbus_exists(XBT_NIL, xendev->otherend, "")) {
++		xenbus_dev_fatal(xendev, -ENOENT,
++				 "unable to read other end from %s.  "
++				 "missing or inaccessible.",
++				 xendev->nodename);
++		free_otherend_details(xendev);
++		return -ENOENT;
++	}
 +
 +	return 0;
 +}
 +
-+static struct xenbus_device_id xenpci_ids[] = {
-+	{"pci"},
-+	{{0}},
-+};
-+MODULE_ALIAS("xen:pci");
-+
-+static struct xenbus_driver xenbus_pcifront_driver = {
-+	.name 			= "pcifront",
-+	.owner 			= THIS_MODULE,
-+	.ids 			= xenpci_ids,
-+	.probe 			= pcifront_xenbus_probe,
-+	.remove 		= pcifront_xenbus_remove,
-+	.otherend_changed 	= pcifront_backend_changed,
-+};
 +
-+static int __init pcifront_init(void)
++static int read_backend_details(struct xenbus_device *xendev)
 +{
-+	if (!is_running_on_xen())
-+		return -ENODEV;
-+
-+	return xenbus_register_frontend(&xenbus_pcifront_driver);
++	return read_otherend_details(xendev, "backend-id", "backend");
 +}
 +
-+/* Initialize after the Xen PCI Frontend Stub is initialized */
-+subsys_initcall(pcifront_init);
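pcifront_publish_info() above follows the standard xenbus transaction idiom: start a transaction, write the keys, try to commit, and replay the whole block when the commit returns -EAGAIN because another writer raced. Stripped of the PCI specifics, the shape is roughly as below; the key names are illustrative, the calls are the in-tree xenbus API:

    /* Sketch of the xenbus transaction-retry idiom; the key names are
     * made up, the signatures are the ones the pcifront code above
     * uses (include/xen/xenbus.h). */
    static int publish_ring_info(struct xenbus_device *dev,
                                 unsigned int ref, unsigned int evtchn)
    {
            struct xenbus_transaction xbt;
            int err;

    again:
            err = xenbus_transaction_start(&xbt);
            if (err)
                    return err;

            err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u", ref);
            if (!err)
                    err = xenbus_printf(xbt, dev->nodename,
                                        "event-channel", "%u", evtchn);
            if (err) {
                    xenbus_transaction_end(xbt, 1);  /* abort */
                    return err;
            }

            err = xenbus_transaction_end(xbt, 0);    /* commit */
            if (err == -EAGAIN)
                    goto again;                      /* lost a race: replay */
            return err;
    }
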
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/privcmd/Makefile tmp-linux-2.6-xen.patch/drivers/xen/privcmd/Makefile
---- pristine-linux-2.6.18.2/drivers/xen/privcmd/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/privcmd/Makefile	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,2 @@
-+
-+obj-$(CONFIG_XEN_PRIVCMD)	:= privcmd.o
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/privcmd/privcmd.c tmp-linux-2.6-xen.patch/drivers/xen/privcmd/privcmd.c
---- pristine-linux-2.6.18.2/drivers/xen/privcmd/privcmd.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/privcmd/privcmd.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,286 @@
-+/******************************************************************************
-+ * privcmd.c
-+ * 
-+ * Interface to privileged domain-0 commands.
-+ * 
-+ * Copyright (c) 2002-2004, K A Fraser, B Dragovic
-+ */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++static int xenbus_uevent_frontend(struct device *dev, char **envp,
++				  int num_envp, char *buffer, int buffer_size)
++{
++	struct xenbus_device *xdev;
++	int length = 0, i = 0;
 +
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/mm.h>
-+#include <linux/mman.h>
-+#include <linux/swap.h>
-+#include <linux/smp_lock.h>
-+#include <linux/highmem.h>
-+#include <linux/pagemap.h>
-+#include <linux/seq_file.h>
-+#include <linux/kthread.h>
-+#include <asm/hypervisor.h>
++	if (dev == NULL)
++		return -ENODEV;
++	xdev = to_xenbus_device(dev);
++	if (xdev == NULL)
++		return -ENODEV;
 +
-+#include <asm/pgalloc.h>
-+#include <asm/pgtable.h>
-+#include <asm/uaccess.h>
-+#include <asm/tlb.h>
-+#include <asm/hypervisor.h>
-+#include <xen/public/privcmd.h>
-+#include <xen/interface/xen.h>
-+#include <xen/xen_proc.h>
++	/* stuff we want to pass to /sbin/hotplug */
++	add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++		       "XENBUS_TYPE=%s", xdev->devicetype);
++	add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++		       "XENBUS_PATH=%s", xdev->nodename);
++	add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++		       "MODALIAS=xen:%s", xdev->devicetype);
 +
-+static struct proc_dir_entry *privcmd_intf;
-+static struct proc_dir_entry *capabilities_intf;
++	return 0;
++}
++#endif
 +
-+#ifndef HAVE_ARCH_PRIVCMD_MMAP
-+static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
++/* Bus type for frontend drivers. */
++static struct xen_bus_type xenbus_frontend = {
++	.root = "device",
++	.levels = 2, 		/* device/type/<id> */
++	.get_bus_id = frontend_bus_id,
++	.probe = xenbus_probe_frontend,
++	.error = -ENODEV,
++	.bus = {
++		.name     = "xen",
++		.match    = xenbus_match,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++		.probe    = xenbus_dev_probe,
++		.remove   = xenbus_dev_remove,
++		.shutdown = xenbus_dev_shutdown,
++		.uevent   = xenbus_uevent_frontend,
 +#endif
++	},
++	.dev = {
++		.bus_id = "xen",
++	},
++};
 +
-+static long privcmd_ioctl(struct file *file,
-+			  unsigned int cmd, unsigned long data)
++static void otherend_changed(struct xenbus_watch *watch,
++			     const char **vec, unsigned int len)
 +{
-+	int ret = -ENOSYS;
-+	void __user *udata = (void __user *) data;
++	struct xenbus_device *dev =
++		container_of(watch, struct xenbus_device, otherend_watch);
++	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
++	enum xenbus_state state;
++
++	/* Protect us against watches firing on old details when the otherend
++	   details change, say immediately after a resume. */
++	if (!dev->otherend ||
++	    strncmp(dev->otherend, vec[XS_WATCH_PATH],
++		    strlen(dev->otherend))) {
++		DPRINTK("Ignoring watch at %s", vec[XS_WATCH_PATH]);
++		return;
++	}
 +
-+	switch (cmd) {
-+	case IOCTL_PRIVCMD_HYPERCALL: {
-+		privcmd_hypercall_t hypercall;
-+  
-+		if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
-+			return -EFAULT;
++	state = xenbus_read_driver_state(dev->otherend);
 +
-+#if defined(__i386__)
-+		if (hypercall.op >= (PAGE_SIZE >> 5))
-+			break;
-+		__asm__ __volatile__ (
-+			"pushl %%ebx; pushl %%ecx; pushl %%edx; "
-+			"pushl %%esi; pushl %%edi; "
-+			"movl  8(%%eax),%%ebx ;"
-+			"movl 16(%%eax),%%ecx ;"
-+			"movl 24(%%eax),%%edx ;"
-+			"movl 32(%%eax),%%esi ;"
-+			"movl 40(%%eax),%%edi ;"
-+			"movl   (%%eax),%%eax ;"
-+			"shll $5,%%eax ;"
-+			"addl $hypercall_page,%%eax ;"
-+			"call *%%eax ;"
-+			"popl %%edi; popl %%esi; popl %%edx; "
-+			"popl %%ecx; popl %%ebx"
-+			: "=a" (ret) : "0" (&hypercall) : "memory" );
-+#elif defined (__x86_64__)
-+		if (hypercall.op < (PAGE_SIZE >> 5)) {
-+			long ign1, ign2, ign3;
-+			__asm__ __volatile__ (
-+				"movq %8,%%r10; movq %9,%%r8;"
-+				"shll $5,%%eax ;"
-+				"addq $hypercall_page,%%rax ;"
-+				"call *%%rax"
-+				: "=a" (ret), "=D" (ign1),
-+				  "=S" (ign2), "=d" (ign3)
-+				: "0" ((unsigned int)hypercall.op),
-+				"1" (hypercall.arg[0]),
-+				"2" (hypercall.arg[1]),
-+				"3" (hypercall.arg[2]),
-+				"g" (hypercall.arg[3]),
-+				"g" (hypercall.arg[4])
-+				: "r8", "r10", "memory" );
-+		}
-+#elif defined (__ia64__)
-+		ret = privcmd_hypercall(&hypercall);
-+#endif
++	DPRINTK("state is %d (%s), %s, %s", state, xenbus_strstate(state),
++		dev->otherend_watch.node, vec[XS_WATCH_PATH]);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++	/*
++	 * Ignore xenbus transitions during shutdown. This prevents us doing
++	 * work that can fail e.g., when the rootfs is gone.
++	 */
++	if (system_state > SYSTEM_RUNNING) {
++		struct xen_bus_type *bus = bus;
++		bus = container_of(dev->dev.bus, struct xen_bus_type, bus);
++		/* If we're frontend, drive the state machine to Closed. */
++		/* This should cause the backend to release our resources. */
++		if ((bus == &xenbus_frontend) && (state == XenbusStateClosing))
++			xenbus_frontend_closed(dev);
++		return;
 +	}
-+	break;
++#endif
 +
-+	case IOCTL_PRIVCMD_MMAP: {
-+		privcmd_mmap_t mmapcmd;
-+		privcmd_mmap_entry_t msg;
-+		privcmd_mmap_entry_t __user *p;
-+		struct mm_struct *mm = current->mm;
-+		struct vm_area_struct *vma;
-+		unsigned long va;
-+		int i, rc;
++	if (drv->otherend_changed)
++		drv->otherend_changed(dev, state);
++}
 +
-+		if (!is_initial_xendomain())
-+			return -EPERM;
 +
-+		if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
-+			return -EFAULT;
++static int talk_to_otherend(struct xenbus_device *dev)
++{
++	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
 +
-+		p = mmapcmd.entry;
-+		if (copy_from_user(&msg, p, sizeof(msg)))
-+			return -EFAULT;
++	free_otherend_watch(dev);
++	free_otherend_details(dev);
 +
-+		down_read(&mm->mmap_sem);
++	return drv->read_otherend_details(dev);
++}
 +
-+		vma = find_vma(mm, msg.va);
-+		rc = -EINVAL;
-+		if (!vma || (msg.va != vma->vm_start) ||
-+		    !privcmd_enforce_singleshot_mapping(vma))
-+			goto mmap_out;
 +
-+		va = vma->vm_start;
++static int watch_otherend(struct xenbus_device *dev)
++{
++	return xenbus_watch_path2(dev, dev->otherend, "state",
++				  &dev->otherend_watch, otherend_changed);
++}
 +
-+		for (i = 0; i < mmapcmd.num; i++) {
-+			rc = -EFAULT;
-+			if (copy_from_user(&msg, p, sizeof(msg)))
-+				goto mmap_out;
 +
-+			/* Do not allow range to wrap the address space. */
-+			rc = -EINVAL;
-+			if ((msg.npages > (LONG_MAX >> PAGE_SHIFT)) ||
-+			    ((unsigned long)(msg.npages << PAGE_SHIFT) >= -va))
-+				goto mmap_out;
++int xenbus_dev_probe(struct device *_dev)
++{
++	struct xenbus_device *dev = to_xenbus_device(_dev);
++	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
++	const struct xenbus_device_id *id;
++	int err;
 +
-+			/* Range chunks must be contiguous in va space. */
-+			if ((msg.va != va) ||
-+			    ((msg.va+(msg.npages<<PAGE_SHIFT)) > vma->vm_end))
-+				goto mmap_out;
++	DPRINTK("%s", dev->nodename);
 +
-+			if ((rc = direct_remap_pfn_range(
-+				vma,
-+				msg.va & PAGE_MASK, 
-+				msg.mfn, 
-+				msg.npages << PAGE_SHIFT, 
-+				vma->vm_page_prot,
-+				mmapcmd.dom)) < 0)
-+				goto mmap_out;
++	if (!drv->probe) {
++		err = -ENODEV;
++		goto fail;
++	}
 +
-+			p++;
-+			va += msg.npages << PAGE_SHIFT;
-+		}
++	id = match_device(drv->ids, dev);
++	if (!id) {
++		err = -ENODEV;
++		goto fail;
++	}
 +
-+		rc = 0;
++	err = talk_to_otherend(dev);
++	if (err) {
++		printk(KERN_WARNING
++		       "xenbus_probe: talk_to_otherend on %s failed.\n",
++		       dev->nodename);
++		return err;
++	}
 +
-+	mmap_out:
-+		up_read(&mm->mmap_sem);
-+		ret = rc;
++	err = drv->probe(dev, id);
++	if (err)
++		goto fail;
++
++	err = watch_otherend(dev);
++	if (err) {
++		printk(KERN_WARNING
++		       "xenbus_probe: watch_otherend on %s failed.\n",
++		       dev->nodename);
++		return err;
 +	}
-+	break;
 +
-+	case IOCTL_PRIVCMD_MMAPBATCH: {
-+		privcmd_mmapbatch_t m;
-+		struct mm_struct *mm = current->mm;
-+		struct vm_area_struct *vma;
-+		xen_pfn_t __user *p;
-+		unsigned long addr, mfn, nr_pages;
-+		int i;
++	return 0;
++fail:
++	xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
++	xenbus_switch_state(dev, XenbusStateClosed);
++	return -ENODEV;
++}
 +
-+		if (!is_initial_xendomain())
-+			return -EPERM;
++int xenbus_dev_remove(struct device *_dev)
++{
++	struct xenbus_device *dev = to_xenbus_device(_dev);
++	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
 +
-+		if (copy_from_user(&m, udata, sizeof(m)))
-+			return -EFAULT;
++	DPRINTK("%s", dev->nodename);
 +
-+		nr_pages = m.num;
-+		if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
-+			return -EINVAL;
++	free_otherend_watch(dev);
++	free_otherend_details(dev);
 +
-+		down_read(&mm->mmap_sem);
++	if (drv->remove)
++		drv->remove(dev);
 +
-+		vma = find_vma(mm, m.addr);
-+		if (!vma ||
-+		    (m.addr != vma->vm_start) ||
-+		    ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
-+		    !privcmd_enforce_singleshot_mapping(vma)) {
-+			up_read(&mm->mmap_sem);
-+			return -EINVAL;
-+		}
++	xenbus_switch_state(dev, XenbusStateClosed);
++	return 0;
++}
 +
-+		p = m.arr;
-+		addr = m.addr;
-+		for (i = 0; i < nr_pages; i++, addr += PAGE_SIZE, p++) {
-+			if (get_user(mfn, p)) {
-+				up_read(&mm->mmap_sem);
-+				return -EFAULT;
-+			}
++static void xenbus_dev_shutdown(struct device *_dev)
++{
++	struct xenbus_device *dev = to_xenbus_device(_dev);
++	unsigned long timeout = 5*HZ;
 +
-+			ret = direct_remap_pfn_range(vma, addr & PAGE_MASK,
-+						     mfn, PAGE_SIZE,
-+						     vma->vm_page_prot, m.dom);
-+			if (ret < 0)
-+				put_user(0xF0000000 | mfn, p);
-+		}
++	DPRINTK("%s", dev->nodename);
 +
-+		up_read(&mm->mmap_sem);
-+		ret = 0;
-+	}
-+	break;
++	if (is_initial_xendomain())
++		return;
 +
-+	default:
-+		ret = -EINVAL;
-+		break;
++	get_device(&dev->dev);
++	if (dev->state != XenbusStateConnected) {
++		printk("%s: %s: %s != Connected, skipping\n", __FUNCTION__,
++		       dev->nodename, xenbus_strstate(dev->state));
++		goto out;
 +	}
++	xenbus_switch_state(dev, XenbusStateClosing);
++	timeout = wait_for_completion_timeout(&dev->down, timeout);
++	if (!timeout)
++		printk("%s: %s timeout closing device\n", __FUNCTION__, dev->nodename);
++ out:
++	put_device(&dev->dev);
++}
++
++int xenbus_register_driver_common(struct xenbus_driver *drv,
++				  struct xen_bus_type *bus)
++{
++	int ret;
++
++	if (bus->error)
++		return bus->error;
++
++	drv->driver.name = drv->name;
++	drv->driver.bus = &bus->bus;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
++	drv->driver.owner = drv->owner;
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
++	drv->driver.probe = xenbus_dev_probe;
++	drv->driver.remove = xenbus_dev_remove;
++	drv->driver.shutdown = xenbus_dev_shutdown;
++#endif
 +
++	mutex_lock(&xenwatch_mutex);
++	ret = driver_register(&drv->driver);
++	mutex_unlock(&xenwatch_mutex);
 +	return ret;
 +}
 +
-+#ifndef HAVE_ARCH_PRIVCMD_MMAP
-+static struct page *privcmd_nopage(struct vm_area_struct *vma,
-+				   unsigned long address,
-+				   int *type)
++int xenbus_register_frontend(struct xenbus_driver *drv)
 +{
-+	return NOPAGE_SIGBUS;
-+}
++	int ret;
 +
-+static struct vm_operations_struct privcmd_vm_ops = {
-+	.nopage = privcmd_nopage
-+};
++	drv->read_otherend_details = read_backend_details;
 +
-+static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
-+{
-+#ifndef __powerpc__ /* PowerPC has a trick to safely do this. */
-+	/* Unsupported for auto-translate guests. */
-+	if (xen_feature(XENFEAT_auto_translated_physmap))
-+		return -ENOSYS;
-+#endif
++	ret = xenbus_register_driver_common(drv, &xenbus_frontend);
++	if (ret)
++		return ret;
 +
-+	/* DONTCOPY is essential for Xen as copy_page_range is broken. */
-+	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
-+	vma->vm_ops = &privcmd_vm_ops;
-+	vma->vm_private_data = NULL;
++	/* If this driver is loaded as a module wait for devices to attach. */
++	wait_for_devices(drv);
 +
 +	return 0;
 +}
++EXPORT_SYMBOL_GPL(xenbus_register_frontend);
 +
-+static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
++void xenbus_unregister_driver(struct xenbus_driver *drv)
 +{
-+	return (xchg(&vma->vm_private_data, (void *)1) == NULL);
++	driver_unregister(&drv->driver);
 +}
-+#endif
++EXPORT_SYMBOL_GPL(xenbus_unregister_driver);
 +
-+static const struct file_operations privcmd_file_ops = {
-+	.unlocked_ioctl = privcmd_ioctl,
-+	.mmap = privcmd_mmap,
++struct xb_find_info
++{
++	struct xenbus_device *dev;
++	const char *nodename;
 +};
 +
-+static int capabilities_read(char *page, char **start, off_t off,
-+			     int count, int *eof, void *data)
++static int cmp_dev(struct device *dev, void *data)
 +{
-+	int len = 0;
-+	*page = 0;
++	struct xenbus_device *xendev = to_xenbus_device(dev);
++	struct xb_find_info *info = data;
 +
-+	if (is_initial_xendomain())
-+		len = sprintf( page, "control_d\n" );
++	if (!strcmp(xendev->nodename, info->nodename)) {
++		info->dev = xendev;
++		get_device(dev);
++		return 1;
++	}
++	return 0;
++}
 +
-+	*eof = 1;
-+	return len;
++struct xenbus_device *xenbus_device_find(const char *nodename,
++					 struct bus_type *bus)
++{
++	struct xb_find_info info = { .dev = NULL, .nodename = nodename };
++
++	bus_for_each_dev(bus, NULL, &info, cmp_dev);
++	return info.dev;
 +}
 +
-+static int __init privcmd_init(void)
++static int cleanup_dev(struct device *dev, void *data)
 +{
-+	if (!is_running_on_xen())
-+		return -ENODEV;
++	struct xenbus_device *xendev = to_xenbus_device(dev);
++	struct xb_find_info *info = data;
++	int len = strlen(info->nodename);
 +
-+	privcmd_intf = create_xen_proc_entry("privcmd", 0400);
-+	if (privcmd_intf != NULL)
-+		privcmd_intf->proc_fops = &privcmd_file_ops;
++	DPRINTK("%s", info->nodename);
 +
-+	capabilities_intf = create_xen_proc_entry("capabilities", 0400 );
-+	if (capabilities_intf != NULL)
-+		capabilities_intf->read_proc = capabilities_read;
++	/* Match the info->nodename path, or any subdirectory of that path. */
++	if (strncmp(xendev->nodename, info->nodename, len))
++		return 0;
 +
-+	return 0;
++	/* If the node name is longer, ensure it really is a subdirectory. */
++	if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/'))
++		return 0;
++
++	info->dev = xendev;
++	get_device(dev);
++	return 1;
 +}
 +
-+__initcall(privcmd_init);
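For context on how IOCTL_PRIVCMD_HYPERCALL above gets used: a privileged tool in dom0 fills in a privcmd_hypercall_t and lets the driver dispatch it through the hypercall page. A hedged sketch, assuming the structure and ioctl encoding from xen/public/privcmd.h and the constants from the Xen public headers:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    typedef struct privcmd_hypercall {   /* mirrors xen/public/privcmd.h */
            uint64_t op;
            uint64_t arg[5];
    } privcmd_hypercall_t;

    #define IOCTL_PRIVCMD_HYPERCALL \
            _IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t))
    #define __HYPERVISOR_xen_version 17  /* xen/interface/xen.h */
    #define XENVER_version 0             /* xen/interface/version.h */

    int main(void)
    {
            privcmd_hypercall_t call = {
                    .op  = __HYPERVISOR_xen_version,
                    .arg = { XENVER_version, 0, 0, 0, 0 },
            };
            long ver;
            int fd = open("/proc/xen/privcmd", O_RDWR);

            if (fd < 0) { perror("open"); return 1; }

            /* The driver copies the struct in and jumps through the
             * hypercall page; the hypercall's return value becomes
             * the ioctl return value. */
            ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
            if (ver >= 0)
                    printf("Xen %ld.%ld\n", ver >> 16, ver & 0xffff);
            close(fd);
            return 0;
    }
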
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/tpmback/common.h tmp-linux-2.6-xen.patch/drivers/xen/tpmback/common.h
---- pristine-linux-2.6.18.2/drivers/xen/tpmback/common.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/tpmback/common.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,85 @@
-+/******************************************************************************
-+ * drivers/xen/tpmback/common.h
-+ */
++static void xenbus_cleanup_devices(const char *path, struct bus_type *bus)
++{
++	struct xb_find_info info = { .nodename = path };
 +
-+#ifndef __TPM__BACKEND__COMMON_H__
-+#define __TPM__BACKEND__COMMON_H__
++	do {
++		info.dev = NULL;
++		bus_for_each_dev(bus, NULL, &info, cleanup_dev);
++		if (info.dev) {
++			device_unregister(&info.dev->dev);
++			put_device(&info.dev->dev);
++		}
++	} while (info.dev);
++}
 +
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/interrupt.h>
-+#include <linux/slab.h>
-+#include <xen/evtchn.h>
-+#include <xen/driver_util.h>
-+#include <xen/interface/grant_table.h>
-+#include <xen/interface/io/tpmif.h>
-+#include <asm/io.h>
-+#include <asm/pgalloc.h>
++static void xenbus_dev_release(struct device *dev)
++{
++	if (dev)
++		kfree(to_xenbus_device(dev));
++}
 +
-+#define DPRINTK(_f, _a...)			\
-+	pr_debug("(file=%s, line=%d) " _f,	\
-+		 __FILE__ , __LINE__ , ## _a )
++static ssize_t xendev_show_nodename(struct device *dev,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
++				    struct device_attribute *attr,
++#endif
++				    char *buf)
++{
++	return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
++}
++DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL);
 +
-+struct backend_info;
++static ssize_t xendev_show_devtype(struct device *dev,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
++				   struct device_attribute *attr,
++#endif
++				   char *buf)
++{
++	return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
++}
++DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL);
 +
-+typedef struct tpmif_st {
-+	struct list_head tpmif_list;
-+	/* Unique identifier for this interface. */
-+	domid_t domid;
-+	unsigned int handle;
 +
-+	/* Physical parameters of the comms window. */
-+	unsigned int irq;
++int xenbus_probe_node(struct xen_bus_type *bus,
++		      const char *type,
++		      const char *nodename)
++{
++	int err;
++	struct xenbus_device *xendev;
++	size_t stringlen;
++	char *tmpstring;
 +
-+	/* The shared rings and indexes. */
-+	tpmif_tx_interface_t *tx;
-+	struct vm_struct *tx_area;
++	enum xenbus_state state = xenbus_read_driver_state(nodename);
 +
-+	/* Miscellaneous private stuff. */
-+	enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
-+	int active;
++	if (bus->error)
++		return bus->error;
 +
-+	struct tpmif_st *hash_next;
-+	struct list_head list;	/* scheduling list */
-+	atomic_t refcnt;
++	if (state != XenbusStateInitialising) {
++		/* Device is not new, so ignore it.  This can happen if a
++		   device is going away after switching to Closed.  */
++		return 0;
++	}
 +
-+	struct backend_info *bi;
++	stringlen = strlen(nodename) + 1 + strlen(type) + 1;
++	xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL);
++	if (!xendev)
++		return -ENOMEM;
 +
-+	grant_handle_t shmem_handle;
-+	grant_ref_t shmem_ref;
-+	struct page **mmap_pages;
++	xendev->state = XenbusStateInitialising;
 +
-+	char devname[20];
-+} tpmif_t;
++	/* Copy the strings into the extra space. */
 +
-+void tpmif_disconnect_complete(tpmif_t * tpmif);
-+tpmif_t *tpmif_find(domid_t domid, struct backend_info *bi);
-+void tpmif_interface_init(void);
-+void tpmif_interface_exit(void);
-+void tpmif_schedule_work(tpmif_t * tpmif);
-+void tpmif_deschedule_work(tpmif_t * tpmif);
-+void tpmif_xenbus_init(void);
-+void tpmif_xenbus_exit(void);
-+int tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn);
-+irqreturn_t tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++	tmpstring = (char *)(xendev + 1);
++	strcpy(tmpstring, nodename);
++	xendev->nodename = tmpstring;
 +
-+long int tpmback_get_instance(struct backend_info *bi);
++	tmpstring += strlen(tmpstring) + 1;
++	strcpy(tmpstring, type);
++	xendev->devicetype = tmpstring;
++	init_completion(&xendev->down);
 +
-+int vtpm_release_packets(tpmif_t * tpmif, int send_msgs);
++	xendev->dev.parent = &bus->dev;
++	xendev->dev.bus = &bus->bus;
++	xendev->dev.release = xenbus_dev_release;
 +
++	err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename);
++	if (err)
++		goto fail;
 +
-+#define tpmif_get(_b) (atomic_inc(&(_b)->refcnt))
-+#define tpmif_put(_b)					\
-+	do {						\
-+		if (atomic_dec_and_test(&(_b)->refcnt))	\
-+			tpmif_disconnect_complete(_b);	\
-+	} while (0)
++	/* Register with generic device framework. */
++	err = device_register(&xendev->dev);
++	if (err)
++		goto fail;
 +
-+extern int num_frontends;
++	err = device_create_file(&xendev->dev, &dev_attr_nodename);
++	if (err)
++		goto unregister;
++	err = device_create_file(&xendev->dev, &dev_attr_devtype);
++	if (err)
++		goto unregister;
 +
-+static inline unsigned long idx_to_kaddr(tpmif_t *t, unsigned int idx)
-+{
-+	return (unsigned long)pfn_to_kaddr(page_to_pfn(t->mmap_pages[idx]));
++	return 0;
++unregister:
++	device_remove_file(&xendev->dev, &dev_attr_nodename);
++	device_remove_file(&xendev->dev, &dev_attr_devtype);
++	device_unregister(&xendev->dev);
++fail:
++	kfree(xendev);
++	return err;
 +}
 +
-+#endif /* __TPM__BACKEND__COMMON_H__ */
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/tpmback/interface.c tmp-linux-2.6-xen.patch/drivers/xen/tpmback/interface.c
---- pristine-linux-2.6.18.2/drivers/xen/tpmback/interface.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/tpmback/interface.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,167 @@
-+ /*****************************************************************************
-+ * drivers/xen/tpmback/interface.c
-+ *
-+ * Virtual TPM interface management.
-+ *
-+ * Copyright (c) 2005, IBM Corporation
-+ *
-+ * Author: Stefan Berger, stefanb at us.ibm.com
-+ *
-+ * This code has been derived from drivers/xen/netback/interface.c
-+ * Copyright (c) 2004, Keir Fraser
-+ */
++/* device/<typename>/<name> */
++static int xenbus_probe_frontend(const char *type, const char *name)
++{
++	char *nodename;
++	int err;
 +
-+#include "common.h"
-+#include <xen/balloon.h>
-+#include <xen/gnttab.h>
++	nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_frontend.root, type, name);
++	if (!nodename)
++		return -ENOMEM;
 +
-+static kmem_cache_t *tpmif_cachep;
-+int num_frontends = 0;
++	DPRINTK("%s", nodename);
 +
-+LIST_HEAD(tpmif_list);
++	err = xenbus_probe_node(&xenbus_frontend, type, nodename);
++	kfree(nodename);
++	return err;
++}
 +
-+static tpmif_t *alloc_tpmif(domid_t domid, struct backend_info *bi)
++static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
 +{
-+	tpmif_t *tpmif;
++	int err = 0;
++	char **dir;
++	unsigned int dir_n = 0;
++	int i;
 +
-+	tpmif = kmem_cache_alloc(tpmif_cachep, GFP_KERNEL);
-+	if (tpmif == NULL)
-+		goto out_of_memory;
++	dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n);
++	if (IS_ERR(dir))
++		return PTR_ERR(dir);
 +
-+	memset(tpmif, 0, sizeof (*tpmif));
-+	tpmif->domid = domid;
-+	tpmif->status = DISCONNECTED;
-+	tpmif->bi = bi;
-+	snprintf(tpmif->devname, sizeof(tpmif->devname), "tpmif%d", domid);
-+	atomic_set(&tpmif->refcnt, 1);
++	for (i = 0; i < dir_n; i++) {
++		err = bus->probe(type, dir[i]);
++		if (err)
++			break;
++	}
++	kfree(dir);
++	return err;
++}
 +
-+	tpmif->mmap_pages = alloc_empty_pages_and_pagevec(TPMIF_TX_RING_SIZE);
-+	if (tpmif->mmap_pages == NULL)
-+		goto out_of_memory;
++int xenbus_probe_devices(struct xen_bus_type *bus)
++{
++	int err = 0;
++	char **dir;
++	unsigned int i, dir_n;
 +
-+	list_add(&tpmif->tpmif_list, &tpmif_list);
-+	num_frontends++;
++	if (bus->error)
++		return bus->error;
 +
-+	return tpmif;
++	dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n);
++	if (IS_ERR(dir))
++		return PTR_ERR(dir);
 +
-+ out_of_memory:
-+	if (tpmif != NULL)
-+		kmem_cache_free(tpmif_cachep, tpmif);
-+	printk("%s: out of memory\n", __FUNCTION__);
-+	return ERR_PTR(-ENOMEM);
++	for (i = 0; i < dir_n; i++) {
++		err = xenbus_probe_device_type(bus, dir[i]);
++		if (err)
++			break;
++	}
++	kfree(dir);
++	return err;
 +}
 +
-+static void free_tpmif(tpmif_t * tpmif)
++static unsigned int char_count(const char *str, char c)
 +{
-+	num_frontends--;
-+	list_del(&tpmif->tpmif_list);
-+	free_empty_pages_and_pagevec(tpmif->mmap_pages, TPMIF_TX_RING_SIZE);
-+	kmem_cache_free(tpmif_cachep, tpmif);
++	unsigned int i, ret = 0;
++
++	for (i = 0; str[i]; i++)
++		if (str[i] == c)
++			ret++;
++	return ret;
 +}
 +
-+tpmif_t *tpmif_find(domid_t domid, struct backend_info *bi)
++static int strsep_len(const char *str, char c, unsigned int len)
 +{
-+	tpmif_t *tpmif;
++	unsigned int i;
 +
-+	list_for_each_entry(tpmif, &tpmif_list, tpmif_list) {
-+		if (tpmif->bi == bi) {
-+			if (tpmif->domid == domid) {
-+				tpmif_get(tpmif);
-+				return tpmif;
-+			} else {
-+				return ERR_PTR(-EEXIST);
-+			}
++	for (i = 0; str[i]; i++)
++		if (str[i] == c) {
++			if (len == 0)
++				return i;
++			len--;
 +		}
++	return (len == 0) ? i : -ERANGE;
++}
++
++void dev_changed(const char *node, struct xen_bus_type *bus)
++{
++	int exists, rootlen;
++	struct xenbus_device *dev;
++	char type[BUS_ID_SIZE];
++	const char *p, *root;
++
++	if (bus->error || char_count(node, '/') < 2)
++ 		return;
++
++	exists = xenbus_exists(XBT_NIL, node, "");
++	if (!exists) {
++		xenbus_cleanup_devices(node, &bus->bus);
++		return;
 +	}
 +
-+	return alloc_tpmif(domid, bi);
++	/* backend/<type>/... or device/<type>/... */
++	p = strchr(node, '/') + 1;
++	snprintf(type, BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p);
++	type[BUS_ID_SIZE-1] = '\0';
++
++	rootlen = strsep_len(node, '/', bus->levels);
++	if (rootlen < 0)
++		return;
++	root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node);
++	if (!root)
++		return;
++
++	dev = xenbus_device_find(root, &bus->bus);
++	if (!dev)
++		xenbus_probe_node(bus, type, root);
++	else
++		put_device(&dev->dev);
++
++	kfree(root);
 +}
 +
-+static int map_frontend_page(tpmif_t *tpmif, unsigned long shared_page)
++static void frontend_changed(struct xenbus_watch *watch,
++			     const char **vec, unsigned int len)
 +{
-+	struct gnttab_map_grant_ref op;
++	DPRINTK("");
 +
-+	gnttab_set_map_op(&op, (unsigned long)tpmif->tx_area->addr,
-+			  GNTMAP_host_map, shared_page, tpmif->domid);
++	dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
++}
 +
-+	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-+		BUG();
++/* We watch for devices appearing and vanishing. */
++static struct xenbus_watch fe_watch = {
++	.node = "device",
++	.callback = frontend_changed,
++};
 +
-+	if (op.status) {
-+		DPRINTK(" Grant table operation failure !\n");
-+		return op.status;
-+	}
++static int suspend_dev(struct device *dev, void *data)
++{
++	int err = 0;
++	struct xenbus_driver *drv;
++	struct xenbus_device *xdev;
 +
-+	tpmif->shmem_ref = shared_page;
-+	tpmif->shmem_handle = op.handle;
++	DPRINTK("");
 +
++	if (dev->driver == NULL)
++		return 0;
++	drv = to_xenbus_driver(dev->driver);
++	xdev = container_of(dev, struct xenbus_device, dev);
++	if (drv->suspend)
++		err = drv->suspend(xdev);
++	if (err)
++		printk(KERN_WARNING
++		       "xenbus: suspend %s failed: %i\n", dev->bus_id, err);
 +	return 0;
 +}
 +
-+static void unmap_frontend_page(tpmif_t *tpmif)
++static int suspend_cancel_dev(struct device *dev, void *data)
 +{
-+	struct gnttab_unmap_grant_ref op;
++	int err = 0;
++	struct xenbus_driver *drv;
++	struct xenbus_device *xdev;
 +
-+	gnttab_set_unmap_op(&op, (unsigned long)tpmif->tx_area->addr,
-+			    GNTMAP_host_map, tpmif->shmem_handle);
++	DPRINTK("");
 +
-+	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
-+		BUG();
++	if (dev->driver == NULL)
++		return 0;
++	drv = to_xenbus_driver(dev->driver);
++	xdev = container_of(dev, struct xenbus_device, dev);
++	if (drv->suspend_cancel)
++		err = drv->suspend_cancel(xdev);
++	if (err)
++		printk(KERN_WARNING
++		       "xenbus: suspend_cancel %s failed: %i\n",
++		       dev->bus_id, err);
++	return 0;
 +}
 +
-+int tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn)
++static int resume_dev(struct device *dev, void *data)
 +{
 +	int err;
++	struct xenbus_driver *drv;
++	struct xenbus_device *xdev;
 +
-+	if (tpmif->irq)
++	DPRINTK("");
++
++	if (dev->driver == NULL)
 +		return 0;
 +
-+	if ((tpmif->tx_area = alloc_vm_area(PAGE_SIZE)) == NULL)
-+		return -ENOMEM;
++	drv = to_xenbus_driver(dev->driver);
++	xdev = container_of(dev, struct xenbus_device, dev);
 +
-+	err = map_frontend_page(tpmif, shared_page);
++	err = talk_to_otherend(xdev);
 +	if (err) {
-+		free_vm_area(tpmif->tx_area);
++		printk(KERN_WARNING
++		       "xenbus: resume (talk_to_otherend) %s failed: %i\n",
++		       dev->bus_id, err);
 +		return err;
 +	}
 +
-+	tpmif->tx = (tpmif_tx_interface_t *)tpmif->tx_area->addr;
++	xdev->state = XenbusStateInitialising;
 +
-+	err = bind_interdomain_evtchn_to_irqhandler(
-+		tpmif->domid, evtchn, tpmif_be_int, 0, tpmif->devname, tpmif);
-+	if (err < 0) {
-+		unmap_frontend_page(tpmif);
-+		free_vm_area(tpmif->tx_area);
-+		return err;
++	if (drv->resume) {
++		err = drv->resume(xdev);
++		if (err) { 
++			printk(KERN_WARNING
++			       "xenbus: resume %s failed: %i\n", 
++			       dev->bus_id, err);
++			return err;
++		}
 +	}
-+	tpmif->irq = err;
 +
-+	tpmif->shmem_ref = shared_page;
-+	tpmif->active = 1;
++	err = watch_otherend(xdev);
++	if (err) {
++		printk(KERN_WARNING
++		       "xenbus_probe: resume (watch_otherend) %s failed: "
++		       "%d.\n", dev->bus_id, err);
++		return err;
++	}
 +
 +	return 0;
 +}
 +
-+void tpmif_disconnect_complete(tpmif_t *tpmif)
++void xenbus_suspend(void)
 +{
-+	if (tpmif->irq)
-+		unbind_from_irqhandler(tpmif->irq, tpmif);
-+
-+	if (tpmif->tx) {
-+		unmap_frontend_page(tpmif);
-+		free_vm_area(tpmif->tx_area);
-+	}
++	DPRINTK("");
 +
-+	free_tpmif(tpmif);
++	if (!xenbus_frontend.error)
++		bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev);
++	xenbus_backend_suspend(suspend_dev);
++	xs_suspend();
 +}
++EXPORT_SYMBOL_GPL(xenbus_suspend);
 +
-+void __init tpmif_interface_init(void)
++void xenbus_resume(void)
 +{
-+	tpmif_cachep = kmem_cache_create("tpmif_cache", sizeof (tpmif_t),
-+					 0, 0, NULL, NULL);
++	xb_init_comms();
++	xs_resume();
++	if (!xenbus_frontend.error)
++		bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev);
++	xenbus_backend_resume(resume_dev);
 +}
++EXPORT_SYMBOL_GPL(xenbus_resume);
 +
-+void __exit tpmif_interface_exit(void)
++void xenbus_suspend_cancel(void)
 +{
-+	kmem_cache_destroy(tpmif_cachep);
++	xs_suspend_cancel();
++	if (!xenbus_frontend.error)
++		bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev);
++	xenbus_backend_resume(suspend_cancel_dev);
 +}
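map_frontend_page()/unmap_frontend_page() above are the backend half of the shared-page handshake. The frontend half is symmetric: grant the page to the peer and advertise the reference over xenstore (compare pcifront_publish_info() earlier in this patch). A sketch with an invented helper name, using the in-tree grant-table API from xen/gnttab.h:

    /* Illustrative frontend counterpart to map_frontend_page() above;
     * grant_shared_page() is a made-up helper, the gnttab_* calls are
     * the in-tree API (xen/gnttab.h). */
    static int grant_shared_page(struct xenbus_device *dev, void *page,
                                 grant_ref_t *ref_out)
    {
            int ref = gnttab_grant_foreign_access(dev->otherend_id,
                                                  virt_to_mfn(page),
                                                  0 /* read-write */);
            if (ref < 0)
                    return ref;              /* no free grant entries */
            *ref_out = ref;

            /* Teardown mirrors free_pdev() earlier in this patch:
             * gnttab_end_foreign_access(ref, 0, (unsigned long)page); */
            return 0;
    }
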
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/tpmback/Makefile tmp-linux-2.6-xen.patch/drivers/xen/tpmback/Makefile
---- pristine-linux-2.6.18.2/drivers/xen/tpmback/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/tpmback/Makefile	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,4 @@
-+
-+obj-$(CONFIG_XEN_TPMDEV_BACKEND)	+= tpmbk.o
-+
-+tpmbk-y += tpmback.o interface.o xenbus.o
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/tpmback/tpmback.c tmp-linux-2.6-xen.patch/drivers/xen/tpmback/tpmback.c
---- pristine-linux-2.6.18.2/drivers/xen/tpmback/tpmback.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/tpmback/tpmback.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,944 @@
-+/******************************************************************************
-+ * drivers/xen/tpmback/tpmback.c
-+ *
-+ * Copyright (c) 2005, IBM Corporation
-+ *
-+ * Author: Stefan Berger, stefanb at us.ibm.com
-+ * Grant table support: Mahadevan Gomathisankaran
-+ *
-+ * This code has been derived from drivers/xen/netback/netback.c
-+ * Copyright (c) 2002-2004, K A Fraser
-+ *
-+ */
-+
-+#include "common.h"
-+#include <xen/evtchn.h>
-+
-+#include <linux/types.h>
-+#include <linux/list.h>
-+#include <linux/miscdevice.h>
-+#include <linux/poll.h>
-+#include <asm/uaccess.h>
-+#include <xen/xenbus.h>
-+#include <xen/interface/grant_table.h>
-+#include <xen/gnttab.h>
-+
-+/* local data structures */
-+struct data_exchange {
-+	struct list_head pending_pak;
-+	struct list_head current_pak;
-+	unsigned int copied_so_far;
-+	u8 has_opener:1;
-+	u8 aborted:1;
-+	rwlock_t pak_lock;	// protects all of the previous fields
-+	wait_queue_head_t wait_queue;
-+};
-+
-+struct vtpm_resp_hdr {
-+	uint32_t instance_no;
-+	uint16_t tag_no;
-+	uint32_t len_no;
-+	uint32_t ordinal_no;
-+} __attribute__ ((packed));
-+
-+struct packet {
-+	struct list_head next;
-+	unsigned int data_len;
-+	u8 *data_buffer;
-+	tpmif_t *tpmif;
-+	u32 tpm_instance;
-+	u8 req_tag;
-+	u32 last_read;
-+	u8 flags;
-+	struct timer_list processing_timer;
-+};
-+
-+enum {
-+	PACKET_FLAG_DISCARD_RESPONSE = 1,
-+};
++EXPORT_SYMBOL_GPL(xenbus_suspend_cancel);
 +
-+/* local variables */
-+static struct data_exchange dataex;
++/* A flag to determine if xenstored is 'ready' (i.e. has started) */
++int xenstored_ready = 0;
 +
-+/* local function prototypes */
-+static int _packet_write(struct packet *pak,
-+			 const char *data, size_t size, int userbuffer);
-+static void processing_timeout(unsigned long ptr);
-+static int packet_read_shmem(struct packet *pak,
-+			     tpmif_t * tpmif,
-+			     u32 offset,
-+			     char *buffer, int isuserbuffer, u32 left);
-+static int vtpm_queue_packet(struct packet *pak);
 +
-+/***************************************************************
-+ Buffer copying fo user and kernel space buffes.
-+***************************************************************/
-+static inline int copy_from_buffer(void *to,
-+				   const void *from, unsigned long size,
-+				   int isuserbuffer)
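++/*
++ * If xenstored is already up, fire the notifier callback immediately
++ * instead of queueing it on the chain.
++ */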
++int register_xenstore_notifier(struct notifier_block *nb)
 +{
-+	if (isuserbuffer) {
-+		if (copy_from_user(to, (void __user *)from, size))
-+			return -EFAULT;
-+	} else {
-+		memcpy(to, from, size);
-+	}
-+	return 0;
-+}
++	int ret = 0;
 +
-+static inline int copy_to_buffer(void *to,
-+				 const void *from, unsigned long size,
-+				 int isuserbuffer)
-+{
-+	if (isuserbuffer) {
-+		if (copy_to_user((void __user *)to, from, size))
-+			return -EFAULT;
-+	} else {
-+		memcpy(to, from, size);
-+	}
-+	return 0;
-+}
++	if (xenstored_ready > 0)
++		ret = nb->notifier_call(nb, 0, NULL);
++	else
++		blocking_notifier_chain_register(&xenstore_chain, nb);
 +
++	return ret;
++}
++EXPORT_SYMBOL_GPL(register_xenstore_notifier);
 +
-+static void dataex_init(struct data_exchange *dataex)
++void unregister_xenstore_notifier(struct notifier_block *nb)
 +{
-+	INIT_LIST_HEAD(&dataex->pending_pak);
-+	INIT_LIST_HEAD(&dataex->current_pak);
-+	dataex->has_opener = 0;
-+	rwlock_init(&dataex->pak_lock);
-+	init_waitqueue_head(&dataex->wait_queue);
++	blocking_notifier_chain_unregister(&xenstore_chain, nb);
 +}
++EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
 +
-+/***************************************************************
-+ Packet-related functions
-+***************************************************************/
 +
-+static struct packet *packet_find_instance(struct list_head *head,
-+					   u32 tpm_instance)
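++/* Only safe to call once xenstored is known to be up (see BUG_ON below). */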
++void xenbus_probe(void *unused)
 +{
-+	struct packet *pak;
-+	struct list_head *p;
++	BUG_ON(xenstored_ready <= 0);
 +
-+	/*
-+	 * traverse the list of packets and return the first
-+	 * one with the given instance number
-+	 */
-+	list_for_each(p, head) {
-+		pak = list_entry(p, struct packet, next);
++	/* Enumerate devices in xenstore and watch for changes. */
++	xenbus_probe_devices(&xenbus_frontend);
++	register_xenbus_watch(&fe_watch);
++	xenbus_backend_probe_and_watch();
 +
-+		if (pak->tpm_instance == tpm_instance) {
-+			return pak;
-+		}
-+	}
-+	return NULL;
++	/* Notify others that xenstore is up */
++	blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
 +}
 +
-+static struct packet *packet_find_packet(struct list_head *head, void *packet)
-+{
-+	struct packet *pak;
-+	struct list_head *p;
-+
-+	/*
-+	 * traverse the list of packets and return the first
-+	 * one with the given instance number
-+	 */
-+	list_for_each(p, head) {
-+		pak = list_entry(p, struct packet, next);
 +
-+		if (pak == packet) {
-+			return pak;
-+		}
-+	}
-+	return NULL;
-+}
++#if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST)
++static struct file_operations xsd_kva_fops;
++static struct proc_dir_entry *xsd_kva_intf;
++static struct proc_dir_entry *xsd_port_intf;
 +
-+static struct packet *packet_alloc(tpmif_t * tpmif,
-+				   u32 size, u8 req_tag, u8 flags)
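++/*
++ * Let the local xenstored map the single shared store ring page;
++ * reject mappings larger than one page or at a non-zero offset.
++ */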
++static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma)
 +{
-+	struct packet *pak = NULL;
-+	pak = kzalloc(sizeof (struct packet), GFP_ATOMIC);
-+	if (NULL != pak) {
-+		if (tpmif) {
-+			pak->tpmif = tpmif;
-+			pak->tpm_instance = tpmback_get_instance(tpmif->bi);
-+			tpmif_get(tpmif);
-+		}
-+		pak->data_len = size;
-+		pak->req_tag = req_tag;
-+		pak->last_read = 0;
-+		pak->flags = flags;
-+
-+		/*
-+		 * cannot do tpmif_get(tpmif); bad things happen
-+		 * on the last tpmif_put()
-+		 */
-+		init_timer(&pak->processing_timer);
-+		pak->processing_timer.function = processing_timeout;
-+		pak->processing_timer.data = (unsigned long)pak;
-+	}
-+	return pak;
-+}
++	size_t size = vma->vm_end - vma->vm_start;
 +
-+static void inline packet_reset(struct packet *pak)
-+{
-+	pak->last_read = 0;
-+}
++	if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
++		return -EINVAL;
 +
-+static void packet_free(struct packet *pak)
-+{
-+	if (timer_pending(&pak->processing_timer)) {
-+		BUG();
-+	}
++	if (remap_pfn_range(vma, vma->vm_start, mfn_to_pfn(xen_store_mfn),
++			    size, vma->vm_page_prot))
++		return -EAGAIN;
 +
-+	if (pak->tpmif)
-+		tpmif_put(pak->tpmif);
-+	kfree(pak->data_buffer);
-+	/*
-+	 * cannot do tpmif_put(pak->tpmif); bad things happen
-+	 * on the last tpmif_put()
-+	 */
-+	kfree(pak);
++	return 0;
 +}
 +
-+
-+/*
-+ * Write data to the shared memory and send it to the FE.
-+ */
-+static int packet_write(struct packet *pak,
-+			const char *data, size_t size, int isuserbuffer)
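++/* /proc/xen/xsd_kva read handler: reports where the store page is mapped. */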
++static int xsd_kva_read(char *page, char **start, off_t off,
++			int count, int *eof, void *data)
 +{
-+	int rc = 0;
-+
-+	if (0 != (pak->flags & PACKET_FLAG_DISCARD_RESPONSE)) {
-+		/* Don't send a respone to this packet. Just acknowledge it. */
-+		rc = size;
-+	} else {
-+		rc = _packet_write(pak, data, size, isuserbuffer);
-+	}
++	int len;
 +
-+	return rc;
++	len  = sprintf(page, "0x%p", xen_store_interface);
++	*eof = 1;
++	return len;
 +}
 +
-+int _packet_write(struct packet *pak,
-+		  const char *data, size_t size, int isuserbuffer)
++static int xsd_port_read(char *page, char **start, off_t off,
++			 int count, int *eof, void *data)
 +{
-+	/*
-+	 * Write into the shared memory pages directly
-+	 * and send it to the front end.
-+	 */
-+	tpmif_t *tpmif = pak->tpmif;
-+	grant_handle_t handle;
-+	int rc = 0;
-+	unsigned int i = 0;
-+	unsigned int offset = 0;
++	int len;
++
++	len  = sprintf(page, "%d", xen_store_evtchn);
++	*eof = 1;
++	return len;
++}
++#endif
 +
-+	if (tpmif == NULL) {
-+		return -EFAULT;
-+	}
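++/*
++ * One-time setup: register the xenbus buses, work out where the
++ * xenstore ring and event channel live (allocated fresh in dom0, taken
++ * from start info or HVM parameters otherwise), bring up the comms,
++ * and, for domU, probe the devices immediately.
++ */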
++static int xenbus_probe_init(void)
++{
++	int err = 0;
++	unsigned long page = 0;
 +
-+	if (tpmif->status == DISCONNECTED) {
-+		return size;
-+	}
++	DPRINTK("");
 +
-+	while (offset < size && i < TPMIF_TX_RING_SIZE) {
-+		unsigned int tocopy;
-+		struct gnttab_map_grant_ref map_op;
-+		struct gnttab_unmap_grant_ref unmap_op;
-+		tpmif_tx_request_t *tx;
++	if (!is_running_on_xen())
++		return -ENODEV;
 +
-+		tx = &tpmif->tx->ring[i].req;
++	/* Register ourselves with the kernel bus subsystem */
++	xenbus_frontend.error = bus_register(&xenbus_frontend.bus);
++	if (xenbus_frontend.error)
++		printk(KERN_WARNING
++		       "XENBUS: Error registering frontend bus: %i\n",
++		       xenbus_frontend.error);
++	xenbus_backend_bus_register();
 +
-+		if (0 == tx->addr) {
-+			DPRINTK("ERROR: Buffer for outgoing packet NULL?! i=%d\n", i);
-+			return 0;
-+		}
++	/*
++	 * Domain0 doesn't have a store_evtchn or store_mfn yet.
++	 */
++	if (is_initial_xendomain()) {
++		struct evtchn_alloc_unbound alloc_unbound;
 +
-+		gnttab_set_map_op(&map_op, idx_to_kaddr(tpmif, i),
-+				  GNTMAP_host_map, tx->ref, tpmif->domid);
++		/* Allocate page. */
++		page = get_zeroed_page(GFP_KERNEL);
++		if (!page)
++			return -ENOMEM;
 +
-+		if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
-+						       &map_op, 1))) {
-+			BUG();
-+		}
++		xen_store_mfn = xen_start_info->store_mfn =
++			pfn_to_mfn(virt_to_phys((void *)page) >>
++				   PAGE_SHIFT);
 +
-+		handle = map_op.handle;
++		/* Next allocate a local port which xenstored can bind to */
++		alloc_unbound.dom        = DOMID_SELF;
++		alloc_unbound.remote_dom = 0;
 +
-+		if (map_op.status) {
-+			DPRINTK(" Grant table operation failure !\n");
-+			return 0;
++		err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
++						  &alloc_unbound);
++		if (err == -ENOSYS)
++			goto err;
++		BUG_ON(err);
++		xen_store_evtchn = xen_start_info->store_evtchn =
++			alloc_unbound.port;
++
++#if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST)
++		/* And finally publish the above info in /proc/xen */
++		xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0600);
++		if (xsd_kva_intf) {
++			memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops,
++			       sizeof(xsd_kva_fops));
++			xsd_kva_fops.mmap = xsd_kva_mmap;
++			xsd_kva_intf->proc_fops = &xsd_kva_fops;
++			xsd_kva_intf->read_proc = xsd_kva_read;
 +		}
++		xsd_port_intf = create_xen_proc_entry("xsd_port", 0400);
++		if (xsd_port_intf)
++			xsd_port_intf->read_proc = xsd_port_read;
++#endif
++		xen_store_interface = mfn_to_virt(xen_store_mfn);
++	} else {
++		xenstored_ready = 1;
++#ifdef CONFIG_XEN
++		xen_store_evtchn = xen_start_info->store_evtchn;
++		xen_store_mfn = xen_start_info->store_mfn;
++		xen_store_interface = mfn_to_virt(xen_store_mfn);
++#else
++		xen_store_evtchn = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN);
++		xen_store_mfn = hvm_get_parameter(HVM_PARAM_STORE_PFN);
++		xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT,
++					      PAGE_SIZE);
++#endif
++	}
 +
-+		tocopy = min_t(size_t, size - offset, PAGE_SIZE);
 +
-+		if (copy_from_buffer((void *)(idx_to_kaddr(tpmif, i) |
-+					      (tx->addr & ~PAGE_MASK)),
-+				     &data[offset], tocopy, isuserbuffer)) {
-+			tpmif_put(tpmif);
-+			return -EFAULT;
-+		}
-+		tx->size = tocopy;
++	xenbus_dev_init();
 +
-+		gnttab_set_unmap_op(&unmap_op, idx_to_kaddr(tpmif, i),
-+				    GNTMAP_host_map, handle);
++	/* Initialize the interface to xenstore. */
++	err = xs_init();
++	if (err) {
++		printk(KERN_WARNING
++		       "XENBUS: Error initializing xenstore comms: %i\n", err);
++		goto err;
++	}
 +
-+		if (unlikely
-+		    (HYPERVISOR_grant_table_op
-+		     (GNTTABOP_unmap_grant_ref, &unmap_op, 1))) {
-+			BUG();
++	/* Register ourselves with the kernel device subsystem */
++	if (!xenbus_frontend.error) {
++		xenbus_frontend.error = device_register(&xenbus_frontend.dev);
++		if (xenbus_frontend.error) {
++			bus_unregister(&xenbus_frontend.bus);
++			printk(KERN_WARNING
++			       "XENBUS: Error registering frontend device: %i\n",
++			       xenbus_frontend.error);
 +		}
-+
-+		offset += tocopy;
-+		i++;
 +	}
++	xenbus_backend_device_register();
 +
-+	rc = offset;
-+	DPRINTK("Notifying frontend via irq %d\n", tpmif->irq);
-+	notify_remote_via_irq(tpmif->irq);
++	if (!is_initial_xendomain())
++		xenbus_probe(NULL);
 +
-+	return rc;
-+}
++	return 0;
 +
-+/*
-+ * Read data from the shared memory and copy it directly into the
-+ * provided buffer. Advance the read_last indicator which tells
-+ * how many bytes have already been read.
-+ */
-+static int packet_read(struct packet *pak, size_t numbytes,
-+		       char *buffer, size_t buffersize, int isuserbuffer)
-+{
-+	tpmif_t *tpmif = pak->tpmif;
++ err:
++	if (page)
++		free_page(page);
 +
 +	/*
-+	 * Read 'numbytes' of data from the buffer. The first 4
-+	 * bytes are the instance number in network byte order,
-+	 * after that come the data from the shared memory buffer.
++	 * Do not unregister the xenbus front/backend buses here. The buses
++	 * must exist because front/backend drivers will use them when they are
++	 * registered.
 +	 */
-+	u32 to_copy;
-+	u32 offset = 0;
-+	u32 room_left = buffersize;
-+
-+	if (pak->last_read < 4) {
-+		/*
-+		 * copy the instance number into the buffer
-+		 */
-+		u32 instance_no = htonl(pak->tpm_instance);
-+		u32 last_read = pak->last_read;
 +
-+		to_copy = min_t(size_t, 4 - last_read, numbytes);
++	return err;
++}
 +
-+		if (copy_to_buffer(&buffer[0],
-+				   &(((u8 *) & instance_no)[last_read]),
-+				   to_copy, isuserbuffer)) {
-+			return -EFAULT;
-+		}
++#ifdef CONFIG_XEN
++postcore_initcall(xenbus_probe_init);
++MODULE_LICENSE("Dual BSD/GPL");
++#else
++int xenbus_init(void)
++{
++	return xenbus_probe_init();
++}
++#endif
 +
-+		pak->last_read += to_copy;
-+		offset += to_copy;
-+		room_left -= to_copy;
-+	}
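++/*
++ * bus_for_each_dev() callback: non-zero for a device that has a driver
++ * bound but is not yet connected (or whose driver reports not ready),
++ * i.e. one still worth waiting for.
++ */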
++static int is_disconnected_device(struct device *dev, void *data)
++{
++	struct xenbus_device *xendev = to_xenbus_device(dev);
++	struct device_driver *drv = data;
++	struct xenbus_driver *xendrv;
 +
 +	/*
-+	 * If the packet has a data buffer appended, read from it...
++	 * A device with no driver will never connect. We care only about
++	 * devices which should currently be in the process of connecting.
 +	 */
++	if (!dev->driver)
++		return 0;
 +
-+	if (room_left > 0) {
-+		if (pak->data_buffer) {
-+			u32 to_copy = min_t(u32, pak->data_len - offset, room_left);
-+			u32 last_read = pak->last_read - 4;
++	/* Is this search limited to a particular driver? */
++	if (drv && (dev->driver != drv))
++		return 0;
 +
-+			if (copy_to_buffer(&buffer[offset],
-+					   &pak->data_buffer[last_read],
-+					   to_copy, isuserbuffer)) {
-+				return -EFAULT;
-+			}
-+			pak->last_read += to_copy;
-+			offset += to_copy;
-+		} else {
-+			offset = packet_read_shmem(pak,
-+						   tpmif,
-+						   offset,
-+						   buffer,
-+						   isuserbuffer, room_left);
-+		}
-+	}
-+	return offset;
++	xendrv = to_xenbus_driver(dev->driver);
++	return (xendev->state < XenbusStateConnected ||
++		(xendrv->is_ready && !xendrv->is_ready(xendev)));
 +}
 +
-+static int packet_read_shmem(struct packet *pak,
-+			     tpmif_t * tpmif,
-+			     u32 offset, char *buffer, int isuserbuffer,
-+			     u32 room_left)
++static int exists_disconnected_device(struct device_driver *drv)
 +{
-+	u32 last_read = pak->last_read - 4;
-+	u32 i = (last_read / PAGE_SIZE);
-+	u32 pg_offset = last_read & (PAGE_SIZE - 1);
-+	u32 to_copy;
-+	grant_handle_t handle;
++	if (xenbus_frontend.error)
++		return xenbus_frontend.error;
++	return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
++				is_disconnected_device);
++}
 +
-+	tpmif_tx_request_t *tx;
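++/* Called once the wait finishes: log any device that still looks unready. */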
++static int print_device_status(struct device *dev, void *data)
++{
++	struct xenbus_device *xendev = to_xenbus_device(dev);
++	struct device_driver *drv = data;
++	struct xenbus_driver *xendrv;
 +
-+	tx = &tpmif->tx->ring[0].req;
-+	/*
-+	 * Start copying data at the page with index 'index'
-+	 * and within that page at offset 'offset'.
-+	 * Copy a maximum of 'room_left' bytes.
-+	 */
-+	to_copy = min_t(u32, PAGE_SIZE - pg_offset, room_left);
-+	while (to_copy > 0) {
-+		void *src;
-+		struct gnttab_map_grant_ref map_op;
-+		struct gnttab_unmap_grant_ref unmap_op;
++	/* Is this operation limited to a particular driver? */
++	if (drv && (dev->driver != drv))
++		return 0;
 +
-+		tx = &tpmif->tx->ring[i].req;
++	if (!dev->driver) {
++		/* Information only: is this too noisy? */
++		printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
++		       xendev->nodename);
++		return 0;
++	}
 +
-+		gnttab_set_map_op(&map_op, idx_to_kaddr(tpmif, i),
-+				  GNTMAP_host_map, tx->ref, tpmif->domid);
++	if (xendev->state < XenbusStateConnected) {
++		enum xenbus_state rstate = XenbusStateUnknown;
++		if (xendev->otherend)
++			rstate = xenbus_read_driver_state(xendev->otherend);
++		printk(KERN_WARNING "XENBUS: Timeout connecting "
++		       "to device: %s (local state %d, remote state %d)\n",
++		       xendev->nodename, xendev->state, rstate);
++	}
 +
-+		if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
-+						       &map_op, 1))) {
-+			BUG();
-+		}
++	xendrv = to_xenbus_driver(dev->driver);
++	if (xendrv->is_ready && !xendrv->is_ready(xendev))
++		printk(KERN_WARNING "XENBUS: Device not ready: %s\n",
++		       xendev->nodename);
 +
-+		if (map_op.status) {
-+			DPRINTK(" Grant table operation failure !\n");
-+			return -EFAULT;
-+		}
++	return 0;
++}
 +
-+		handle = map_op.handle;
++/* We only wait for device setup after most initcalls have run. */
++static int ready_to_wait_for_devices;
 +
-+		if (to_copy > tx->size) {
-+			/*
-+			 * User requests more than what's available
-+			 */
-+			to_copy = min_t(u32, tx->size, to_copy);
-+		}
++/*
++ * On a 5-minute timeout, wait for all devices currently configured.  We need
++ * to do this to guarantee that the filesystems and/or network devices
++ * needed for boot are available, before we can allow the boot to proceed.
++ *
++ * This needs to be on a late_initcall, to happen after the frontend device
++ * drivers have been initialised, but before the root fs is mounted.
++ *
++ * A possible improvement here would be to have the tools add a per-device
++ * flag to the store entry, indicating whether it is needed at boot time.
++ * This would allow people who knew what they were doing to accelerate their
++ * boot slightly, but of course needs tools or manual intervention to set up
++ * those flags correctly.
++ */
++static void wait_for_devices(struct xenbus_driver *xendrv)
++{
++	unsigned long start = jiffies;
++	struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
++	unsigned int seconds_waited = 0;
 +
-+		DPRINTK("Copying from mapped memory at %08lx\n",
-+			(unsigned long)(idx_to_kaddr(tpmif, i) |
-+					(tx->addr & ~PAGE_MASK)));
++	if (!ready_to_wait_for_devices || !is_running_on_xen())
++		return;
 +
-+		src = (void *)(idx_to_kaddr(tpmif, i) |
-+			       ((tx->addr & ~PAGE_MASK) + pg_offset));
-+		if (copy_to_buffer(&buffer[offset],
-+				   src, to_copy, isuserbuffer)) {
-+			return -EFAULT;
++	while (exists_disconnected_device(drv)) {
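++		/* Report progress every 5 seconds, counting down the 300s budget. */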
++		if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
++			if (!seconds_waited)
++				printk(KERN_WARNING "XENBUS: Waiting for "
++				       "devices to initialise: ");
++			seconds_waited += 5;
++			printk("%us...", 300 - seconds_waited);
++			if (seconds_waited == 300)
++				break;
 +		}
++
++		schedule_timeout_interruptible(HZ/10);
++	}
 +
-+		DPRINTK("Data from TPM-FE of domain %d are %d %d %d %d\n",
-+			tpmif->domid, buffer[offset], buffer[offset + 1],
-+			buffer[offset + 2], buffer[offset + 3]);
++	if (seconds_waited)
++		printk("\n");
 +
-+		gnttab_set_unmap_op(&unmap_op, idx_to_kaddr(tpmif, i),
-+				    GNTMAP_host_map, handle);
++	bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
++			 print_device_status);
++}
 +
-+		if (unlikely
-+		    (HYPERVISOR_grant_table_op
-+		     (GNTTABOP_unmap_grant_ref, &unmap_op, 1))) {
-+			BUG();
-+		}
++#ifndef MODULE
++static int __init boot_wait_for_devices(void)
++{
++	if (!xenbus_frontend.error) {
++		ready_to_wait_for_devices = 1;
++		wait_for_devices(NULL);
++	}
++	return 0;
++}
 +
-+		offset += to_copy;
-+		pg_offset = 0;
-+		last_read += to_copy;
-+		room_left -= to_copy;
++late_initcall(boot_wait_for_devices);
++#endif
 +
-+		to_copy = min_t(u32, PAGE_SIZE, room_left);
-+		i++;
-+	}			/* while (to_copy > 0) */
-+	/*
-+	 * Adjust the last_read pointer
-+	 */
-+	pak->last_read = last_read + 4;
-+	return offset;
++int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *))
++{
++	return bus_for_each_dev(&xenbus_frontend.bus, NULL, arg, fn);
 +}
-+
-+/* ============================================================
-+ * The file layer for reading data from this device
-+ * ============================================================
++EXPORT_SYMBOL_GPL(xenbus_for_each_frontend);
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/xenbus/xenbus_probe.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/xenbus/xenbus_probe.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,75 @@
++/******************************************************************************
++ * xenbus_probe.h
++ *
++ * Talks to Xen Store to figure out what devices we have.
++ *
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ * Copyright (C) 2005 XenSource Ltd.
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
 + */
-+static int vtpm_op_open(struct inode *inode, struct file *f)
-+{
-+	int rc = 0;
-+	unsigned long flags;
 +
-+	write_lock_irqsave(&dataex.pak_lock, flags);
-+	if (dataex.has_opener == 0) {
-+		dataex.has_opener = 1;
-+	} else {
-+		rc = -EPERM;
-+	}
-+	write_unlock_irqrestore(&dataex.pak_lock, flags);
-+	return rc;
-+}
++#ifndef _XENBUS_PROBE_H
++#define _XENBUS_PROBE_H
 +
-+static ssize_t vtpm_op_read(struct file *file,
-+			    char __user * data, size_t size, loff_t * offset)
++#if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE)
++extern void xenbus_backend_suspend(int (*fn)(struct device *, void *));
++extern void xenbus_backend_resume(int (*fn)(struct device *, void *));
++extern void xenbus_backend_probe_and_watch(void);
++extern void xenbus_backend_bus_register(void);
++extern void xenbus_backend_device_register(void);
++#else
++static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {}
++static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {}
++static inline void xenbus_backend_probe_and_watch(void) {}
++static inline void xenbus_backend_bus_register(void) {}
++static inline void xenbus_backend_device_register(void) {}
++#endif
++
++struct xen_bus_type
 +{
-+	int ret_size = -ENODATA;
-+	struct packet *pak = NULL;
-+	unsigned long flags;
++	char *root;
++	int error;
++	unsigned int levels;
++	int (*get_bus_id)(char bus_id[BUS_ID_SIZE], const char *nodename);
++	int (*probe)(const char *type, const char *dir);
++	struct bus_type bus;
++	struct device dev;
++};
 +
-+	write_lock_irqsave(&dataex.pak_lock, flags);
-+	if (dataex.aborted) {
-+		dataex.aborted = 0;
-+		dataex.copied_so_far = 0;
-+		write_unlock_irqrestore(&dataex.pak_lock, flags);
-+		return -EIO;
-+	}
++extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
++extern int xenbus_dev_probe(struct device *_dev);
++extern int xenbus_dev_remove(struct device *_dev);
++extern int xenbus_register_driver_common(struct xenbus_driver *drv,
++					 struct xen_bus_type *bus);
++extern int xenbus_probe_node(struct xen_bus_type *bus,
++			     const char *type,
++			     const char *nodename);
++extern int xenbus_probe_devices(struct xen_bus_type *bus);
++
++extern void dev_changed(const char *node, struct xen_bus_type *bus);
++
++#endif
++
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/xenbus/xenbus_probe_backend.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/xenbus/xenbus_probe_backend.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,292 @@
++/******************************************************************************
++ * Talks to Xen Store to figure out what devices we have (backend half).
++ *
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ * Copyright (C) 2005 Mike Wray, Hewlett-Packard
++ * Copyright (C) 2005, 2006 XenSource Ltd
++ * Copyright (C) 2007 Solarflare Communications, Inc.
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#define DPRINTK(fmt, args...)				\
++	pr_debug("xenbus_probe (%s:%d) " fmt ".\n",	\
++		 __FUNCTION__, __LINE__, ##args)
++
++#include <linux/kernel.h>
++#include <linux/err.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++#include <linux/fcntl.h>
++#include <linux/mm.h>
++#include <linux/notifier.h>
 +
-+	if (list_empty(&dataex.pending_pak)) {
-+		write_unlock_irqrestore(&dataex.pak_lock, flags);
-+		wait_event_interruptible(dataex.wait_queue,
-+					 !list_empty(&dataex.pending_pak));
-+		write_lock_irqsave(&dataex.pak_lock, flags);
-+		dataex.copied_so_far = 0;
-+	}
++#include <asm/io.h>
++#include <asm/page.h>
++#include <asm/maddr.h>
++#include <asm/pgtable.h>
++#include <asm/hypervisor.h>
++#include <xen/xenbus.h>
++#include <xen/xen_proc.h>
++#include <xen/evtchn.h>
++#include <xen/features.h>
 +
-+	if (!list_empty(&dataex.pending_pak)) {
-+		unsigned int left;
++#include "xenbus_comms.h"
++#include "xenbus_probe.h"
 +
-+		pak = list_entry(dataex.pending_pak.next, struct packet, next);
-+		left = pak->data_len - dataex.copied_so_far;
-+		list_del(&pak->next);
-+		write_unlock_irqrestore(&dataex.pak_lock, flags);
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
 +
-+		DPRINTK("size given by app: %d, available: %d\n", size, left);
++static int xenbus_uevent_backend(struct device *dev, char **envp,
++				 int num_envp, char *buffer, int buffer_size);
++static int xenbus_probe_backend(const char *type, const char *domid);
 +
-+		ret_size = min_t(size_t, size, left);
++extern int read_otherend_details(struct xenbus_device *xendev,
++				 char *id_node, char *path_node);
 +
-+		ret_size = packet_read(pak, ret_size, data, size, 1);
++static int read_frontend_details(struct xenbus_device *xendev)
++{
++	return read_otherend_details(xendev, "frontend-id", "frontend");
++}
 +
-+		write_lock_irqsave(&dataex.pak_lock, flags);
++/* backend/<type>/<fe-uuid>/<id> => <type>-<fe-domid>-<id> */
++static int backend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
++{
++	int domid, err;
++	const char *devid, *type, *frontend;
++	unsigned int typelen;
 +
-+		if (ret_size < 0) {
-+			del_singleshot_timer_sync(&pak->processing_timer);
-+			packet_free(pak);
-+			dataex.copied_so_far = 0;
-+		} else {
-+			DPRINTK("Copied %d bytes to user buffer\n", ret_size);
++	type = strchr(nodename, '/');
++	if (!type)
++		return -EINVAL;
++	type++;
++	typelen = strcspn(type, "/");
++	if (!typelen || type[typelen] != '/')
++		return -EINVAL;
 +
-+			dataex.copied_so_far += ret_size;
-+			if (dataex.copied_so_far >= pak->data_len + 4) {
-+				DPRINTK("All data from this packet given to app.\n");
-+				/* All data given to app */
++	devid = strrchr(nodename, '/') + 1;
 +
-+				del_singleshot_timer_sync(&pak->
-+							  processing_timer);
-+				list_add_tail(&pak->next, &dataex.current_pak);
-+				/*
-+				 * The more fontends that are handled at the same time,
-+				 * the more time we give the TPM to process the request.
-+				 */
-+				mod_timer(&pak->processing_timer,
-+					  jiffies + (num_frontends * 60 * HZ));
-+				dataex.copied_so_far = 0;
-+			} else {
-+				list_add(&pak->next, &dataex.pending_pak);
-+			}
-+		}
-+	}
-+	write_unlock_irqrestore(&dataex.pak_lock, flags);
++	err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid,
++			    "frontend", NULL, &frontend,
++			    NULL);
++	if (err)
++		return err;
++	if (strlen(frontend) == 0)
++		err = -ERANGE;
++	if (!err && !xenbus_exists(XBT_NIL, frontend, ""))
++		err = -ENOENT;
++	kfree(frontend);
 +
-+	DPRINTK("Returning result from read to app: %d\n", ret_size);
++	if (err)
++		return err;
 +
-+	return ret_size;
++	if (snprintf(bus_id, BUS_ID_SIZE,
++		     "%.*s-%i-%s", typelen, type, domid, devid) >= BUS_ID_SIZE)
++		return -ENOSPC;
++	return 0;
 +}
 +
-+/*
-+ * Write operation - only works after a previous read operation!
-+ */
-+static ssize_t vtpm_op_write(struct file *file,
-+			     const char __user * data, size_t size,
-+			     loff_t * offset)
-+{
-+	struct packet *pak;
-+	int rc = 0;
-+	unsigned int off = 4;
-+	unsigned long flags;
-+	struct vtpm_resp_hdr vrh;
-+
-+	/*
-+	 * Minimum required packet size is:
-+	 * 4 bytes for instance number
-+	 * 2 bytes for tag
-+	 * 4 bytes for paramSize
-+	 * 4 bytes for the ordinal
-+	 * sum: 14 bytes
-+	 */
-+	if (size < sizeof (vrh))
-+		return -EFAULT;
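++/*
++ * error starts out as -ENODEV so the backend helpers below stay no-ops
++ * until xenbus_backend_bus_register() has succeeded.
++ */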
++static struct xen_bus_type xenbus_backend = {
++	.root = "backend",
++	.levels = 3, 		/* backend/type/<frontend>/<id> */
++	.get_bus_id = backend_bus_id,
++	.probe = xenbus_probe_backend,
++	.error = -ENODEV,
++	.bus = {
++		.name     = "xen-backend",
++		.match    = xenbus_match,
++		.probe    = xenbus_dev_probe,
++		.remove   = xenbus_dev_remove,
++//		.shutdown = xenbus_dev_shutdown,
++		.uevent   = xenbus_uevent_backend,
++	},
++	.dev = {
++		.bus_id = "xen-backend",
++	},
++};
 +
-+	if (copy_from_user(&vrh, data, sizeof (vrh)))
-+		return -EFAULT;
++static int xenbus_uevent_backend(struct device *dev, char **envp,
++				 int num_envp, char *buffer, int buffer_size)
++{
++	struct xenbus_device *xdev;
++	struct xenbus_driver *drv;
++	int i = 0;
++	int length = 0;
 +
-+	/* malformed packet? */
-+	if ((off + ntohl(vrh.len_no)) != size)
-+		return -EFAULT;
++	DPRINTK("");
 +
-+	write_lock_irqsave(&dataex.pak_lock, flags);
-+	pak = packet_find_instance(&dataex.current_pak,
-+				   ntohl(vrh.instance_no));
++	if (dev == NULL)
++		return -ENODEV;
 +
-+	if (pak == NULL) {
-+		write_unlock_irqrestore(&dataex.pak_lock, flags);
-+		DPRINTK(KERN_ALERT "No associated packet! (inst=%d)\n",
-+		        ntohl(vrh.instance_no));
-+		return -EFAULT;
-+	}
++	xdev = to_xenbus_device(dev);
++	if (xdev == NULL)
++		return -ENODEV;
 +
-+	del_singleshot_timer_sync(&pak->processing_timer);
-+	list_del(&pak->next);
++	/* stuff we want to pass to /sbin/hotplug */
++	add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++		       "XENBUS_TYPE=%s", xdev->devicetype);
 +
-+	write_unlock_irqrestore(&dataex.pak_lock, flags);
++	add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++		       "XENBUS_PATH=%s", xdev->nodename);
 +
-+	/*
-+	 * The first 'offset' bytes must be the instance number - skip them.
-+	 */
-+	size -= off;
++	add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++		       "XENBUS_BASE_PATH=%s", xenbus_backend.root);
 +
-+	rc = packet_write(pak, &data[off], size, 1);
++	/* terminate, set to next free slot, shrink available space */
++	envp[i] = NULL;
++	envp = &envp[i];
++	num_envp -= i;
++	buffer = &buffer[length];
++	buffer_size -= length;
 +
-+	if (rc > 0) {
-+		/* I neglected the first 4 bytes */
-+		rc += off;
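++	/* If a driver is bound, give it a chance to add its own variables. */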
++	if (dev->driver) {
++		drv = to_xenbus_driver(dev->driver);
++		if (drv && drv->uevent)
++			return drv->uevent(xdev, envp, num_envp, buffer,
++					   buffer_size);
 +	}
-+	packet_free(pak);
-+	return rc;
-+}
 +
-+static int vtpm_op_release(struct inode *inode, struct file *file)
-+{
-+	unsigned long flags;
-+
-+	vtpm_release_packets(NULL, 1);
-+	write_lock_irqsave(&dataex.pak_lock, flags);
-+	dataex.has_opener = 0;
-+	write_unlock_irqrestore(&dataex.pak_lock, flags);
 +	return 0;
 +}
 +
-+static unsigned int vtpm_op_poll(struct file *file,
-+				 struct poll_table_struct *pts)
++int xenbus_register_backend(struct xenbus_driver *drv)
 +{
-+	unsigned int flags = POLLOUT | POLLWRNORM;
++	drv->read_otherend_details = read_frontend_details;
 +
-+	poll_wait(file, &dataex.wait_queue, pts);
-+	if (!list_empty(&dataex.pending_pak)) {
-+		flags |= POLLIN | POLLRDNORM;
-+	}
-+	return flags;
++	return xenbus_register_driver_common(drv, &xenbus_backend);
 +}
++EXPORT_SYMBOL_GPL(xenbus_register_backend);
 +
-+static const struct file_operations vtpm_ops = {
-+	.owner = THIS_MODULE,
-+	.llseek = no_llseek,
-+	.open = vtpm_op_open,
-+	.read = vtpm_op_read,
-+	.write = vtpm_op_write,
-+	.release = vtpm_op_release,
-+	.poll = vtpm_op_poll,
-+};
-+
-+static struct miscdevice vtpms_miscdevice = {
-+	.minor = 225,
-+	.name = "vtpm",
-+	.fops = &vtpm_ops,
-+};
-+
-+/***************************************************************
-+ Utility functions
-+***************************************************************/
-+
-+static int tpm_send_fail_message(struct packet *pak, u8 req_tag)
++/* backend/<typename>/<frontend-uuid>/<name> */
++static int xenbus_probe_backend_unit(const char *dir,
++				     const char *type,
++				     const char *name)
 +{
-+	int rc;
-+	static const unsigned char tpm_error_message_fail[] = {
-+		0x00, 0x00,
-+		0x00, 0x00, 0x00, 0x0a,
-+		0x00, 0x00, 0x00, 0x09	/* TPM_FAIL */
-+	};
-+	unsigned char buffer[sizeof (tpm_error_message_fail)];
++	char *nodename;
++	int err;
 +
-+	memcpy(buffer, tpm_error_message_fail,
-+	       sizeof (tpm_error_message_fail));
-+	/*
-+	 * Insert the right response tag depending on the given tag
-+	 * All response tags are '+3' to the request tag.
-+	 */
-+	buffer[1] = req_tag + 3;
++	nodename = kasprintf(GFP_KERNEL, "%s/%s", dir, name);
++	if (!nodename)
++		return -ENOMEM;
 +
-+	/*
-+	 * Write the data to shared memory and notify the front-end
-+	 */
-+	rc = packet_write(pak, buffer, sizeof (buffer), 0);
++	DPRINTK("%s\n", nodename);
 +
-+	return rc;
++	err = xenbus_probe_node(&xenbus_backend, type, nodename);
++	kfree(nodename);
++	return err;
 +}
 +
-+static int _vtpm_release_packets(struct list_head *head,
-+				 tpmif_t * tpmif, int send_msgs)
++/* backend/<typename>/<frontend-domid> */
++static int xenbus_probe_backend(const char *type, const char *domid)
 +{
-+	int aborted = 0;
-+	int c = 0;
-+	struct packet *pak;
-+	struct list_head *pos, *tmp;
-+
-+	list_for_each_safe(pos, tmp, head) {
-+		pak = list_entry(pos, struct packet, next);
-+		c += 1;
++	char *nodename;
++	int err = 0;
++	char **dir;
++	unsigned int i, dir_n = 0;
 +
-+		if (tpmif == NULL || pak->tpmif == tpmif) {
-+			int can_send = 0;
++	DPRINTK("");
 +
-+			del_singleshot_timer_sync(&pak->processing_timer);
-+			list_del(&pak->next);
++	nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_backend.root, type, domid);
++	if (!nodename)
++		return -ENOMEM;
 +
-+			if (pak->tpmif && pak->tpmif->status == CONNECTED) {
-+				can_send = 1;
-+			}
++	dir = xenbus_directory(XBT_NIL, nodename, "", &dir_n);
++	if (IS_ERR(dir)) {
++		kfree(nodename);
++		return PTR_ERR(dir);
++	}
 +
-+			if (send_msgs && can_send) {
-+				tpm_send_fail_message(pak, pak->req_tag);
-+			}
-+			packet_free(pak);
-+			if (c == 1)
-+				aborted = 1;
-+		}
++	for (i = 0; i < dir_n; i++) {
++		err = xenbus_probe_backend_unit(nodename, type, dir[i]);
++		if (err)
++			break;
 +	}
-+	return aborted;
++	kfree(dir);
++	kfree(nodename);
++	return err;
 +}
 +
-+int vtpm_release_packets(tpmif_t * tpmif, int send_msgs)
++static void backend_changed(struct xenbus_watch *watch,
++			    const char **vec, unsigned int len)
 +{
-+	unsigned long flags;
++	DPRINTK("");
 +
-+	write_lock_irqsave(&dataex.pak_lock, flags);
++	dev_changed(vec[XS_WATCH_PATH], &xenbus_backend);
++}
 +
-+	dataex.aborted = _vtpm_release_packets(&dataex.pending_pak,
-+					       tpmif,
-+					       send_msgs);
-+	_vtpm_release_packets(&dataex.current_pak, tpmif, send_msgs);
++static struct xenbus_watch be_watch = {
++	.node = "backend",
++	.callback = backend_changed,
++};
 +
-+	write_unlock_irqrestore(&dataex.pak_lock, flags);
-+	return 0;
++void xenbus_backend_suspend(int (*fn)(struct device *, void *))
++{
++	DPRINTK("");
++	if (!xenbus_backend.error)
++		bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn);
 +}
 +
-+static int vtpm_queue_packet(struct packet *pak)
++void xenbus_backend_resume(int (*fn)(struct device *, void *))
 +{
-+	int rc = 0;
++	DPRINTK("");
++	if (!xenbus_backend.error)
++		bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn);
++}
 +
-+	if (dataex.has_opener) {
-+		unsigned long flags;
++void xenbus_backend_probe_and_watch(void)
++{
++	xenbus_probe_devices(&xenbus_backend);
++	register_xenbus_watch(&be_watch);
++}
 +
-+		write_lock_irqsave(&dataex.pak_lock, flags);
-+		list_add_tail(&pak->next, &dataex.pending_pak);
-+		/* give the TPM some time to pick up the request */
-+		mod_timer(&pak->processing_timer, jiffies + (30 * HZ));
-+		write_unlock_irqrestore(&dataex.pak_lock, flags);
++void xenbus_backend_bus_register(void)
++{
++	xenbus_backend.error = bus_register(&xenbus_backend.bus);
++	if (xenbus_backend.error)
++		printk(KERN_WARNING
++		       "XENBUS: Error registering backend bus: %i\n",
++		       xenbus_backend.error);
++}
 +
-+		wake_up_interruptible(&dataex.wait_queue);
-+	} else {
-+		rc = -EFAULT;
++void xenbus_backend_device_register(void)
++{
++	if (xenbus_backend.error)
++		return;
++
++	xenbus_backend.error = device_register(&xenbus_backend.dev);
++	if (xenbus_backend.error) {
++		bus_unregister(&xenbus_backend.bus);
++		printk(KERN_WARNING
++		       "XENBUS: Error registering backend device: %i\n",
++		       xenbus_backend.error);
 +	}
-+	return rc;
 +}
 +
-+static int vtpm_receive(tpmif_t * tpmif, u32 size)
++int xenbus_for_each_backend(void *arg, int (*fn)(struct device *, void *))
 +{
-+	int rc = 0;
-+	unsigned char buffer[10];
-+	__be32 *native_size;
-+	struct packet *pak = packet_alloc(tpmif, size, 0, 0);
++	return bus_for_each_dev(&xenbus_backend.bus, NULL, arg, fn);
++}
++EXPORT_SYMBOL_GPL(xenbus_for_each_backend);
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/xenbus/xenbus_xs.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/xenbus/xenbus_xs.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,902 @@
++/******************************************************************************
++ * xenbus_xs.c
++ *
++ * This is the kernel equivalent of the "xs" library.  We don't need everything
++ * and we use xenbus_comms for communication.
++ *
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
 +
-+	if (!pak)
-+		return -ENOMEM;
-+	/*
-+	 * Read 10 bytes from the received buffer to test its
-+	 * content for validity.
-+	 */
-+	if (sizeof (buffer) != packet_read(pak,
-+					   sizeof (buffer), buffer,
-+					   sizeof (buffer), 0)) {
-+		goto failexit;
-+	}
-+	/*
-+	 * Reset the packet read pointer so we can read all its
-+	 * contents again.
-+	 */
-+	packet_reset(pak);
++#include <linux/unistd.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/uio.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/err.h>
++#include <linux/slab.h>
++#include <linux/fcntl.h>
++#include <linux/kthread.h>
++#include <linux/rwsem.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <xen/xenbus.h>
++#include "xenbus_comms.h"
 +
-+	native_size = (__force __be32 *) (&buffer[4 + 2]);
-+	/*
-+	 * Verify that the size of the packet is correct
-+	 * as indicated and that there's actually someone reading packets.
-+	 * The minimum size of the packet is '10' for tag, size indicator
-+	 * and ordinal.
-+	 */
-+	if (size < 10 ||
-+	    be32_to_cpu(*native_size) != size ||
-+	    0 == dataex.has_opener || tpmif->status != CONNECTED) {
-+		rc = -EINVAL;
-+		goto failexit;
-+	} else {
-+		rc = vtpm_queue_packet(pak);
-+		if (rc < 0)
-+			goto failexit;
-+	}
-+	return 0;
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
 +
-+      failexit:
-+	if (pak) {
-+		tpm_send_fail_message(pak, buffer[4 + 1]);
-+		packet_free(pak);
-+	}
-+	return rc;
-+}
++#ifndef PF_NOFREEZE /* Old kernel (pre-2.6.6). */
++#define PF_NOFREEZE	0
++#endif
 +
-+/*
-+ * Timeout function that gets invoked when a packet has not been processed
-+ * during the timeout period.
-+ * The packet must be on a list when this function is invoked. This
-+ * also means that once its taken off a list, the timer must be
-+ * destroyed as well.
-+ */
-+static void processing_timeout(unsigned long ptr)
-+{
-+	struct packet *pak = (struct packet *)ptr;
-+	unsigned long flags;
++struct xs_stored_msg {
++	struct list_head list;
++
++	struct xsd_sockmsg hdr;
++
++	union {
++		/* Queued replies. */
++		struct {
++			char *body;
++		} reply;
++
++		/* Queued watch events. */
++		struct {
++			struct xenbus_watch *handle;
++			char **vec;
++			unsigned int vec_size;
++		} watch;
++	} u;
++};
++
++struct xs_handle {
++	/* A list of replies. Currently only one will ever be outstanding. */
++	struct list_head reply_list;
++	spinlock_t reply_lock;
++	wait_queue_head_t reply_waitq;
 +
-+	write_lock_irqsave(&dataex.pak_lock, flags);
 +	/*
-+	 * The packet needs to be searched whether it
-+	 * is still on the list.
++	 * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex.
++	 * response_mutex is never taken simultaneously with the other three.
 +	 */
-+	if (pak == packet_find_packet(&dataex.pending_pak, pak) ||
-+	    pak == packet_find_packet(&dataex.current_pak, pak)) {
-+		if ((pak->flags & PACKET_FLAG_DISCARD_RESPONSE) == 0) {
-+			tpm_send_fail_message(pak, pak->req_tag);
-+		}
-+		/* discard future responses */
-+		pak->flags |= PACKET_FLAG_DISCARD_RESPONSE;
-+	}
 +
-+	write_unlock_irqrestore(&dataex.pak_lock, flags);
-+}
++	/* One request at a time. */
++	struct mutex request_mutex;
 +
-+static void tpm_tx_action(unsigned long unused);
-+static DECLARE_TASKLET(tpm_tx_tasklet, tpm_tx_action, 0);
++	/* Protect xenbus reader thread against save/restore. */
++	struct mutex response_mutex;
 +
-+static struct list_head tpm_schedule_list;
-+static spinlock_t tpm_schedule_list_lock;
++	/* Protect transactions against save/restore. */
++	struct rw_semaphore transaction_mutex;
 +
-+static inline void maybe_schedule_tx_action(void)
-+{
-+	smp_mb();
-+	tasklet_schedule(&tpm_tx_tasklet);
-+}
++	/* Protect watch (de)register against save/restore. */
++	struct rw_semaphore watch_mutex;
++};
 +
-+static inline int __on_tpm_schedule_list(tpmif_t * tpmif)
-+{
-+	return tpmif->list.next != NULL;
-+}
++static struct xs_handle xs_state;
 +
-+static void remove_from_tpm_schedule_list(tpmif_t * tpmif)
-+{
-+	spin_lock_irq(&tpm_schedule_list_lock);
-+	if (likely(__on_tpm_schedule_list(tpmif))) {
-+		list_del(&tpmif->list);
-+		tpmif->list.next = NULL;
-+		tpmif_put(tpmif);
-+	}
-+	spin_unlock_irq(&tpm_schedule_list_lock);
-+}
++/* List of registered watches, and a lock to protect it. */
++static LIST_HEAD(watches);
++static DEFINE_SPINLOCK(watches_lock);
 +
-+static void add_to_tpm_schedule_list_tail(tpmif_t * tpmif)
-+{
-+	if (__on_tpm_schedule_list(tpmif))
-+		return;
++/* List of pending watch callback events, and a lock to protect it. */
++static LIST_HEAD(watch_events);
++static DEFINE_SPINLOCK(watch_events_lock);
 +
-+	spin_lock_irq(&tpm_schedule_list_lock);
-+	if (!__on_tpm_schedule_list(tpmif) && tpmif->active) {
-+		list_add_tail(&tpmif->list, &tpm_schedule_list);
-+		tpmif_get(tpmif);
-+	}
-+	spin_unlock_irq(&tpm_schedule_list_lock);
-+}
++/*
++ * Details of the xenwatch callback kernel thread. The thread waits on the
++ * watch_events_waitq for work to do (queued on watch_events list). When it
++ * wakes up it acquires the xenwatch_mutex before reading the list and
++ * carrying out work.
++ */
++static pid_t xenwatch_pid;
++/* static */ DEFINE_MUTEX(xenwatch_mutex);
++static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq);
 +
-+void tpmif_schedule_work(tpmif_t * tpmif)
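++/* Translate an error string from xenstored into a positive errno value. */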
++static int get_error(const char *errorstring)
 +{
-+	add_to_tpm_schedule_list_tail(tpmif);
-+	maybe_schedule_tx_action();
-+}
++	unsigned int i;
 +
-+void tpmif_deschedule_work(tpmif_t * tpmif)
-+{
-+	remove_from_tpm_schedule_list(tpmif);
++	for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) {
++		if (i == ARRAY_SIZE(xsd_errors) - 1) {
++			printk(KERN_WARNING
++			       "XENBUS xen store gave: unknown error %s",
++			       errorstring);
++			return EINVAL;
++		}
++	}
++	return xsd_errors[i].errnum;
 +}
 +
-+static void tpm_tx_action(unsigned long unused)
++static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len)
 +{
-+	struct list_head *ent;
-+	tpmif_t *tpmif;
-+	tpmif_tx_request_t *tx;
++	struct xs_stored_msg *msg;
++	char *body;
 +
-+	DPRINTK("%s: Getting data from front-end(s)!\n", __FUNCTION__);
++	spin_lock(&xs_state.reply_lock);
 +
-+	while (!list_empty(&tpm_schedule_list)) {
-+		/* Get a tpmif from the list with work to do. */
-+		ent = tpm_schedule_list.next;
-+		tpmif = list_entry(ent, tpmif_t, list);
-+		tpmif_get(tpmif);
-+		remove_from_tpm_schedule_list(tpmif);
++	while (list_empty(&xs_state.reply_list)) {
++		spin_unlock(&xs_state.reply_lock);
++		/* XXX FIXME: Avoid synchronous wait for response here. */
++		wait_event(xs_state.reply_waitq,
++			   !list_empty(&xs_state.reply_list));
++		spin_lock(&xs_state.reply_lock);
++	}
 +
-+		tx = &tpmif->tx->ring[0].req;
++	msg = list_entry(xs_state.reply_list.next,
++			 struct xs_stored_msg, list);
++	list_del(&msg->list);
 +
-+		/* pass it up */
-+		vtpm_receive(tpmif, tx->size);
++	spin_unlock(&xs_state.reply_lock);
 +
-+		tpmif_put(tpmif);
-+	}
-+}
++	*type = msg->hdr.type;
++	if (len)
++		*len = msg->hdr.len;
++	body = msg->u.reply.body;
 +
-+irqreturn_t tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs)
-+{
-+	tpmif_t *tpmif = (tpmif_t *) dev_id;
++	kfree(msg);
 +
-+	add_to_tpm_schedule_list_tail(tpmif);
-+	maybe_schedule_tx_action();
-+	return IRQ_HANDLED;
++	return body;
 +}
 +
-+static int __init tpmback_init(void)
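++/*
++ * Send a caller-built message straight to xenstored and hand back the
++ * raw reply.  Starting a transaction takes the transaction lock, which
++ * is released when the transaction ends or when starting one fails.
++ */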
++void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
 +{
-+	int rc;
++	void *ret;
++	struct xsd_sockmsg req_msg = *msg;
++	int err;
 +
-+	if ((rc = misc_register(&vtpms_miscdevice)) != 0) {
-+		printk(KERN_ALERT
-+		       "Could not register misc device for TPM BE.\n");
-+		return rc;
-+	}
++	if (req_msg.type == XS_TRANSACTION_START)
++		down_read(&xs_state.transaction_mutex);
 +
-+	dataex_init(&dataex);
++	mutex_lock(&xs_state.request_mutex);
 +
-+	spin_lock_init(&tpm_schedule_list_lock);
-+	INIT_LIST_HEAD(&tpm_schedule_list);
++	err = xb_write(msg, sizeof(*msg) + msg->len);
++	if (err) {
++		msg->type = XS_ERROR;
++		ret = ERR_PTR(err);
++	} else
++		ret = read_reply(&msg->type, &msg->len);
 +
-+	tpmif_interface_init();
-+	tpmif_xenbus_init();
++	mutex_unlock(&xs_state.request_mutex);
 +
-+	printk(KERN_ALERT "Successfully initialized TPM backend driver.\n");
++	if ((req_msg.type == XS_TRANSACTION_END) ||
++	    ((req_msg.type == XS_TRANSACTION_START) &&
++	     (msg->type == XS_ERROR)))
++		up_read(&xs_state.transaction_mutex);
 +
-+	return 0;
++	return ret;
 +}
 +
-+module_init(tpmback_init);
-+
-+void __exit tpmback_exit(void)
++/* Send message to xs, get kmalloc'ed reply.  ERR_PTR() on error. */
++static void *xs_talkv(struct xenbus_transaction t,
++		      enum xsd_sockmsg_type type,
++		      const struct kvec *iovec,
++		      unsigned int num_vecs,
++		      unsigned int *len)
 +{
-+	vtpm_release_packets(NULL, 0);
-+	tpmif_xenbus_exit();
-+	tpmif_interface_exit();
-+	misc_deregister(&vtpms_miscdevice);
-+}
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/tpmback/xenbus.c tmp-linux-2.6-xen.patch/drivers/xen/tpmback/xenbus.c
---- pristine-linux-2.6.18.2/drivers/xen/tpmback/xenbus.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/tpmback/xenbus.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,289 @@
-+/*  Xenbus code for tpmif backend
-+    Copyright (C) 2005 IBM Corporation
-+    Copyright (C) 2005 Rusty Russell <rusty at rustcorp.com.au>
++	struct xsd_sockmsg msg;
++	void *ret = NULL;
++	unsigned int i;
++	int err;
 +
-+    This program is free software; you can redistribute it and/or modify
-+    it under the terms of the GNU General Public License as published by
-+    the Free Software Foundation; either version 2 of the License, or
-+    (at your option) any later version.
++	msg.tx_id = t.id;
++	msg.req_id = 0;
++	msg.type = type;
++	msg.len = 0;
++	for (i = 0; i < num_vecs; i++)
++		msg.len += iovec[i].iov_len;
 +
-+    This program is distributed in the hope that it will be useful,
-+    but WITHOUT ANY WARRANTY; without even the implied warranty of
-+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+    GNU General Public License for more details.
++	mutex_lock(&xs_state.request_mutex);
 +
-+    You should have received a copy of the GNU General Public License
-+    along with this program; if not, write to the Free Software
-+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-+*/
-+#include <stdarg.h>
-+#include <linux/module.h>
-+#include <xen/xenbus.h>
-+#include "common.h"
++	err = xb_write(&msg, sizeof(msg));
++	if (err) {
++		mutex_unlock(&xs_state.request_mutex);
++		return ERR_PTR(err);
++	}
 +
-+struct backend_info
-+{
-+	struct xenbus_device *dev;
++	for (i = 0; i < num_vecs; i++) {
++		err = xb_write(iovec[i].iov_base, iovec[i].iov_len);
++		if (err) {
++			mutex_unlock(&xs_state.request_mutex);
++			return ERR_PTR(err);
++		}
++	}
 +
-+	/* our communications channel */
-+	tpmif_t *tpmif;
++	ret = read_reply(&msg.type, len);
 +
-+	long int frontend_id;
-+	long int instance; // instance of TPM
-+	u8 is_instance_set;// whether instance number has been set
++	mutex_unlock(&xs_state.request_mutex);
 +
-+	/* watch front end for changes */
-+	struct xenbus_watch backend_watch;
-+};
++	if (IS_ERR(ret))
++		return ret;
 +
-+static void maybe_connect(struct backend_info *be);
-+static void connect(struct backend_info *be);
-+static int connect_ring(struct backend_info *be);
-+static void backend_changed(struct xenbus_watch *watch,
-+			    const char **vec, unsigned int len);
-+static void frontend_changed(struct xenbus_device *dev,
-+			     enum xenbus_state frontend_state);
++	if (msg.type == XS_ERROR) {
++		err = get_error(ret);
++		kfree(ret);
++		return ERR_PTR(-err);
++	}
 +
-+long int tpmback_get_instance(struct backend_info *bi)
-+{
-+	long int res = -1;
-+	if (bi && bi->is_instance_set)
-+		res = bi->instance;
-+	return res;
++	if (msg.type != type) {
++		if (printk_ratelimit())
++			printk(KERN_WARNING
++			       "XENBUS unexpected type [%d], expected [%d]\n",
++			       msg.type, type);
++		kfree(ret);
++		return ERR_PTR(-EINVAL);
++	}
++	return ret;
 +}
 +
-+static int tpmback_remove(struct xenbus_device *dev)
++/* Simplified version of xs_talkv: single message. */
++static void *xs_single(struct xenbus_transaction t,
++		       enum xsd_sockmsg_type type,
++		       const char *string,
++		       unsigned int *len)
 +{
-+	struct backend_info *be = dev->dev.driver_data;
++	struct kvec iovec;
 +
-+	if (!be) return 0;
++	iovec.iov_base = (void *)string;
++	iovec.iov_len = strlen(string) + 1;
++	return xs_talkv(t, type, &iovec, 1, len);
++}
 +
-+	if (be->backend_watch.node) {
-+		unregister_xenbus_watch(&be->backend_watch);
-+		kfree(be->backend_watch.node);
-+		be->backend_watch.node = NULL;
-+	}
-+	if (be->tpmif) {
-+		be->tpmif->bi = NULL;
-+		vtpm_release_packets(be->tpmif, 0);
-+		tpmif_put(be->tpmif);
-+		be->tpmif = NULL;
-+	}
-+	kfree(be);
-+	dev->dev.driver_data = NULL;
++/* Many commands only need an ack, don't care what it says. */
++static int xs_error(char *reply)
++{
++	if (IS_ERR(reply))
++		return PTR_ERR(reply);
++	kfree(reply);
 +	return 0;
 +}
 +
-+static int tpmback_probe(struct xenbus_device *dev,
-+			 const struct xenbus_device_id *id)
++static unsigned int count_strings(const char *strings, unsigned int len)
 +{
-+	int err;
-+	struct backend_info *be = kzalloc(sizeof(struct backend_info),
-+					  GFP_KERNEL);
++	unsigned int num;
++	const char *p;
 +
-+	if (!be) {
-+		xenbus_dev_fatal(dev, -ENOMEM,
-+				 "allocating backend structure");
-+		return -ENOMEM;
-+	}
++	for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1)
++		num++;
 +
-+	be->is_instance_set = 0;
-+	be->dev = dev;
-+	dev->dev.driver_data = be;
++	return num;
++}
 +
-+	err = xenbus_watch_path2(dev, dev->nodename,
-+				 "instance", &be->backend_watch,
-+				 backend_changed);
-+	if (err) {
-+		goto fail;
-+	}
++/* Return the path to dir with /name appended. Buffer must be kfree()'ed. */
++static char *join(const char *dir, const char *name)
++{
++	char *buffer;
 +
-+	err = xenbus_switch_state(dev, XenbusStateInitWait);
-+	if (err) {
-+		goto fail;
-+	}
-+	return 0;
-+fail:
-+	tpmback_remove(dev);
-+	return err;
++	if (strlen(name) == 0)
++		buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s", dir);
++	else
++		buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", dir, name);
++	return (!buffer) ? ERR_PTR(-ENOMEM) : buffer;
 +}
 +
-+
-+static void backend_changed(struct xenbus_watch *watch,
-+			    const char **vec, unsigned int len)
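++/*
++ * Turn a buffer of NUL-separated strings (consumed here) into an array
++ * of pointers; the array and the copied data share one allocation, so
++ * a single kfree() of the result releases everything.
++ */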
++static char **split(char *strings, unsigned int len, unsigned int *num)
 +{
-+	int err;
-+	long instance;
-+	struct backend_info *be
-+		= container_of(watch, struct backend_info, backend_watch);
-+	struct xenbus_device *dev = be->dev;
++	char *p, **ret;
 +
-+	err = xenbus_scanf(XBT_NIL, dev->nodename,
-+			   "instance","%li", &instance);
-+	if (XENBUS_EXIST_ERR(err)) {
-+		return;
-+	}
++	/* Count the strings. */
++	*num = count_strings(strings, len) + 1;
 +
-+	if (err != 1) {
-+		xenbus_dev_fatal(dev, err, "reading instance");
-+		return;
++	/* Transfer to one big alloc for easy freeing. */
++	ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH);
++	if (!ret) {
++		kfree(strings);
++		return ERR_PTR(-ENOMEM);
 +	}
++	memcpy(&ret[*num], strings, len);
++	kfree(strings);
 +
-+	if (be->is_instance_set == 0) {
-+		be->instance = instance;
-+		be->is_instance_set = 1;
-+	}
-+}
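++	/* The string data now sits after the pointer array; point each
++	 * slot at one string and add a sentinel entry just past the
++	 * last string. */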
++	strings = (char *)&ret[*num];
++	for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1)
++		ret[(*num)++] = p;
++	ret[*num] = strings + len;
 +
++	return ret;
++}
 +
-+static void frontend_changed(struct xenbus_device *dev,
-+			     enum xenbus_state frontend_state)
++char **xenbus_directory(struct xenbus_transaction t,
++			const char *dir, const char *node, unsigned int *num)
 +{
-+	struct backend_info *be = dev->dev.driver_data;
-+	int err;
++	char *strings, *path;
++	unsigned int len;
 +
-+	switch (frontend_state) {
-+	case XenbusStateInitialising:
-+	case XenbusStateInitialised:
-+		break;
++	path = join(dir, node);
++	if (IS_ERR(path))
++		return (char **)path;
 +
-+	case XenbusStateConnected:
-+		err = connect_ring(be);
-+		if (err) {
-+			return;
-+		}
-+		maybe_connect(be);
-+		break;
++	strings = xs_single(t, XS_DIRECTORY, path, &len);
++	kfree(path);
++	if (IS_ERR(strings))
++		return (char **)strings;
 +
-+	case XenbusStateClosing:
-+		be->instance = -1;
-+		xenbus_switch_state(dev, XenbusStateClosing);
-+		break;
++	return split(strings, len, num);
++}
++EXPORT_SYMBOL_GPL(xenbus_directory);
 +
-+	case XenbusStateUnknown: /* keep it here */
-+	case XenbusStateClosed:
-+		xenbus_switch_state(dev, XenbusStateClosed);
-+		device_unregister(&be->dev->dev);
-+		tpmback_remove(dev);
-+		break;
++/* Check if a path exists. Return 1 if it does. */
++int xenbus_exists(struct xenbus_transaction t,
++		  const char *dir, const char *node)
++{
++	char **d;
++	int dir_n;
 +
-+	default:
-+		xenbus_dev_fatal(dev, -EINVAL,
-+				 "saw state %d at frontend",
-+				 frontend_state);
-+		break;
-+	}
++	d = xenbus_directory(t, dir, node, &dir_n);
++	if (IS_ERR(d))
++		return 0;
++	kfree(d);
++	return 1;
 +}
++EXPORT_SYMBOL_GPL(xenbus_exists);
++
++/* Get the value of a single file.
++ * Returns a kmalloced value: call kfree() on it after use.
++ * len indicates length in bytes.
++ */
++void *xenbus_read(struct xenbus_transaction t,
++		  const char *dir, const char *node, unsigned int *len)
++{
++	char *path;
++	void *ret;
 +
++	path = join(dir, node);
++	if (IS_ERR(path))
++		return (void *)path;
 +
++	ret = xs_single(t, XS_READ, path, len);
++	kfree(path);
++	return ret;
++}
++EXPORT_SYMBOL_GPL(xenbus_read);
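++/*
++ * A sketch of typical use ("protocol" is a hypothetical node name,
++ * not part of this patch):
++ *
++ *	char *val = xenbus_read(XBT_NIL, dev->nodename, "protocol", NULL);
++ *	if (!IS_ERR(val)) {
++ *		... use val ...
++ *		kfree(val);
++ *	}
++ */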
 +
-+static void maybe_connect(struct backend_info *be)
++/* Write the value of a single file.
++ * Returns -err on failure.
++ */
++int xenbus_write(struct xenbus_transaction t,
++		 const char *dir, const char *node, const char *string)
 +{
-+	if (be->tpmif == NULL || be->tpmif->status == CONNECTED)
-+		return;
++	const char *path;
++	struct kvec iovec[2];
++	int ret;
 +
-+	connect(be);
-+}
++	path = join(dir, node);
++	if (IS_ERR(path))
++		return PTR_ERR(path);
++
++	iovec[0].iov_base = (void *)path;
++	iovec[0].iov_len = strlen(path) + 1;
++	iovec[1].iov_base = (void *)string;
++	iovec[1].iov_len = strlen(string);
 +
++	ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL));
++	kfree(path);
++	return ret;
++}
++EXPORT_SYMBOL_GPL(xenbus_write);
 +
-+static void connect(struct backend_info *be)
++/* Create a new directory. */
++int xenbus_mkdir(struct xenbus_transaction t,
++		 const char *dir, const char *node)
 +{
-+	struct xenbus_transaction xbt;
-+	int err;
-+	struct xenbus_device *dev = be->dev;
-+	unsigned long ready = 1;
++	char *path;
++	int ret;
 +
-+again:
-+	err = xenbus_transaction_start(&xbt);
-+	if (err) {
-+		xenbus_dev_fatal(be->dev, err, "starting transaction");
-+		return;
-+	}
++	path = join(dir, node);
++	if (IS_ERR(path))
++		return PTR_ERR(path);
++
++	ret = xs_error(xs_single(t, XS_MKDIR, path, NULL));
++	kfree(path);
++	return ret;
++}
++EXPORT_SYMBOL_GPL(xenbus_mkdir);
 +
-+	err = xenbus_printf(xbt, be->dev->nodename,
-+			    "ready", "%lu", ready);
-+	if (err) {
-+		xenbus_dev_fatal(be->dev, err, "writing 'ready'");
-+		goto abort;
-+	}
++/* Destroy a file or directory (directories must be empty). */
++int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node)
++{
++	char *path;
++	int ret;
 +
-+	err = xenbus_transaction_end(xbt, 0);
-+	if (err == -EAGAIN)
-+		goto again;
-+	if (err)
-+		xenbus_dev_fatal(be->dev, err, "end of transaction");
++	path = join(dir, node);
++	if (IS_ERR(path))
++		return PTR_ERR(path);
 +
-+	err = xenbus_switch_state(dev, XenbusStateConnected);
-+	if (!err)
-+		be->tpmif->status = CONNECTED;
-+	return;
-+abort:
-+	xenbus_transaction_end(xbt, 1);
++	ret = xs_error(xs_single(t, XS_RM, path, NULL));
++	kfree(path);
++	return ret;
 +}
++EXPORT_SYMBOL_GPL(xenbus_rm);
 +
-+
-+static int connect_ring(struct backend_info *be)
++/* Start a transaction: changes by others will not be seen during this
++ * transaction, and changes will not be visible to others until end.
++ */
++int xenbus_transaction_start(struct xenbus_transaction *t)
 +{
-+	struct xenbus_device *dev = be->dev;
-+	unsigned long ring_ref;
-+	unsigned int evtchn;
-+	int err;
++	char *id_str;
 +
-+	err = xenbus_gather(XBT_NIL, dev->otherend,
-+			    "ring-ref", "%lu", &ring_ref,
-+			    "event-channel", "%u", &evtchn, NULL);
-+	if (err) {
-+		xenbus_dev_error(dev, err,
-+				 "reading %s/ring-ref and event-channel",
-+				 dev->otherend);
-+		return err;
-+	}
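++	/*
++	 * Read-acquire: any number of transactions may be open at once;
++	 * xs_suspend() takes this rwsem for write to quiesce them all.
++	 */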
++	down_read(&xs_state.transaction_mutex);
 +
-+	if (!be->tpmif) {
-+		be->tpmif = tpmif_find(dev->otherend_id, be);
-+		if (IS_ERR(be->tpmif)) {
-+			err = PTR_ERR(be->tpmif);
-+			be->tpmif = NULL;
-+			xenbus_dev_fatal(dev,err,"creating vtpm interface");
-+			return err;
-+		}
++	id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL);
++	if (IS_ERR(id_str)) {
++		up_read(&xs_state.transaction_mutex);
++		return PTR_ERR(id_str);
 +	}
 +
-+	if (be->tpmif != NULL) {
-+		err = tpmif_map(be->tpmif, ring_ref, evtchn);
-+		if (err) {
-+			xenbus_dev_error(dev, err,
-+					 "mapping shared-frame %lu port %u",
-+					 ring_ref, evtchn);
-+			return err;
-+		}
-+	}
++	t->id = simple_strtoul(id_str, NULL, 0);
++	kfree(id_str);
 +	return 0;
 +}
++EXPORT_SYMBOL_GPL(xenbus_transaction_start);
 +
++/* End a transaction.
++ * If abort is true, the transaction is discarded instead of committed.
++ */
++int xenbus_transaction_end(struct xenbus_transaction t, int abort)
++{
++	char abortstr[2];
++	int err;
 +
-+static struct xenbus_device_id tpmback_ids[] = {
-+	{ "vtpm" },
-+	{ "" }
-+};
-+
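++	/* xenstored expects "T" to commit the transaction, "F" to abort. */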
++	if (abort)
++		strcpy(abortstr, "F");
++	else
++		strcpy(abortstr, "T");
 +
-+static struct xenbus_driver tpmback = {
-+	.name = "vtpm",
-+	.owner = THIS_MODULE,
-+	.ids = tpmback_ids,
-+	.probe = tpmback_probe,
-+	.remove = tpmback_remove,
-+	.otherend_changed = frontend_changed,
-+};
++	err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL));
 +
++	up_read(&xs_state.transaction_mutex);
 +
-+void tpmif_xenbus_init(void)
-+{
-+	xenbus_register_backend(&tpmback);
++	return err;
 +}
++EXPORT_SYMBOL_GPL(xenbus_transaction_end);
 +
-+void tpmif_xenbus_exit(void)
++/* Single read and scanf: returns -errno or num scanned. */
++int xenbus_scanf(struct xenbus_transaction t,
++		 const char *dir, const char *node, const char *fmt, ...)
 +{
-+	xenbus_unregister_driver(&tpmback);
++	va_list ap;
++	int ret;
++	char *val;
++
++	val = xenbus_read(t, dir, node, NULL);
++	if (IS_ERR(val))
++		return PTR_ERR(val);
++
++	va_start(ap, fmt);
++	ret = vsscanf(val, fmt, ap);
++	va_end(ap);
++	kfree(val);
++	/* Distinctive errno. */
++	if (ret == 0)
++		return -ERANGE;
++	return ret;
 +}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/util.c tmp-linux-2.6-xen.patch/drivers/xen/util.c
---- pristine-linux-2.6.18.2/drivers/xen/util.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/util.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,70 @@
-+#include <linux/mm.h>
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/vmalloc.h>
-+#include <asm/uaccess.h>
-+#include <xen/driver_util.h>
++EXPORT_SYMBOL_GPL(xenbus_scanf);
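++/*
++ * A sketch of typical use, mirroring how the old tpmback code read
++ * its "instance" node:
++ *
++ *	long instance;
++ *	err = xenbus_scanf(XBT_NIL, dev->nodename, "instance", "%li",
++ *			   &instance);
++ *	if (err != 1)
++ *		... handle error ...
++ */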
 +
-+struct class *get_xen_class(void)
++/* Single printf and write: returns -errno or 0. */
++int xenbus_printf(struct xenbus_transaction t,
++		  const char *dir, const char *node, const char *fmt, ...)
 +{
-+	static struct class *xen_class;
++	va_list ap;
++	int ret;
++#define PRINTF_BUFFER_SIZE 4096
++	char *printf_buffer;
 +
-+	if (xen_class)
-+		return xen_class;
++	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH);
++	if (printf_buffer == NULL)
++		return -ENOMEM;
 +
-+	xen_class = class_create(THIS_MODULE, "xen");
-+	if (IS_ERR(xen_class)) {
-+		printk("Failed to create xen sysfs class.\n");
-+		xen_class = NULL;
-+	}
++	va_start(ap, fmt);
++	ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap);
++	va_end(ap);
 +
-+	return xen_class;
-+}
-+EXPORT_SYMBOL_GPL(get_xen_class);
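++	/* vsnprintf returns the untruncated length, so this catches
++	 * overflow of the 4k buffer as a caller bug. */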
++	BUG_ON(ret > PRINTF_BUFFER_SIZE-1);
++	ret = xenbus_write(t, dir, node, printf_buffer);
 +
-+/* Todo: merge ia64 ('auto-translate physmap') versions of these functions. */
-+#ifndef __ia64__
++	kfree(printf_buffer);
 +
-+static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
-+{
-+	/* apply_to_page_range() does all the hard work. */
-+	return 0;
++	return ret;
 +}
++EXPORT_SYMBOL_GPL(xenbus_printf);
 +
-+struct vm_struct *alloc_vm_area(unsigned long size)
++/* Takes tuples of names, scanf-style args, and void **, NULL terminated. */
++int xenbus_gather(struct xenbus_transaction t, const char *dir, ...)
 +{
-+	struct vm_struct *area;
++	va_list ap;
++	const char *name;
++	int ret = 0;
 +
-+	area = get_vm_area(size, VM_IOREMAP);
-+	if (area == NULL)
-+		return NULL;
++	va_start(ap, dir);
++	while (ret == 0 && (name = va_arg(ap, char *)) != NULL) {
++		const char *fmt = va_arg(ap, char *);
++		void *result = va_arg(ap, void *);
++		char *p;
 +
-+	/*
-+	 * This ensures that page tables are constructed for this region
-+	 * of kernel virtual address space and mapped into init_mm.
-+	 */
-+	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
-+				area->size, f, NULL)) {
-+		free_vm_area(area);
-+		return NULL;
++		p = xenbus_read(t, dir, name, NULL);
++		if (IS_ERR(p)) {
++			ret = PTR_ERR(p);
++			break;
++		}
++		if (fmt) {
++			if (sscanf(p, fmt, result) == 0)
++				ret = -EINVAL;
++			kfree(p);
++		} else
++			*(char **)result = p;
 +	}
++	va_end(ap);
++	return ret;
++}
++EXPORT_SYMBOL_GPL(xenbus_gather);
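++/*
++ * A sketch of typical use, mirroring the old tpmback connect_ring():
++ * gather several nodes in one call, terminated by NULL.
++ *
++ *	err = xenbus_gather(XBT_NIL, dev->otherend,
++ *			    "ring-ref", "%lu", &ring_ref,
++ *			    "event-channel", "%u", &evtchn, NULL);
++ */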
 +
-+	/* Map page directories into every address space. */
-+#ifdef CONFIG_X86
-+	vmalloc_sync_all();
-+#endif
++static int xs_watch(const char *path, const char *token)
++{
++	struct kvec iov[2];
 +
-+	return area;
++	iov[0].iov_base = (void *)path;
++	iov[0].iov_len = strlen(path) + 1;
++	iov[1].iov_base = (void *)token;
++	iov[1].iov_len = strlen(token) + 1;
++
++	return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov,
++				 ARRAY_SIZE(iov), NULL));
 +}
-+EXPORT_SYMBOL_GPL(alloc_vm_area);
 +
-+void free_vm_area(struct vm_struct *area)
++static int xs_unwatch(const char *path, const char *token)
 +{
-+	struct vm_struct *ret;
-+	ret = remove_vm_area(area->addr);
-+	BUG_ON(ret != area);
-+	kfree(area);
++	struct kvec iov[2];
++
++	iov[0].iov_base = (char *)path;
++	iov[0].iov_len = strlen(path) + 1;
++	iov[1].iov_base = (char *)token;
++	iov[1].iov_len = strlen(token) + 1;
++
++	return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov,
++				 ARRAY_SIZE(iov), NULL));
 +}
-+EXPORT_SYMBOL_GPL(free_vm_area);
 +
-+#endif /* !__ia64__ */
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/xenbus/Makefile tmp-linux-2.6-xen.patch/drivers/xen/xenbus/Makefile
---- pristine-linux-2.6.18.2/drivers/xen/xenbus/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/xenbus/Makefile	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,9 @@
-+obj-y += xenbus_client.o xenbus_comms.o xenbus_xs.o xenbus_probe.o
-+obj-$(CONFIG_XEN_BACKEND) += xenbus_be.o
++static struct xenbus_watch *find_watch(const char *token)
++{
++	struct xenbus_watch *i, *cmp;
 +
-+xenbus_be-objs =
-+xenbus_be-objs += xenbus_backend_client.o
++	cmp = (void *)simple_strtoul(token, NULL, 16);
 +
-+xenbus-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o
-+obj-y += $(xenbus-y) $(xenbus-m)
-+obj-$(CONFIG_XEN_XENBUS_DEV) += xenbus_dev.o
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/xenbus/xenbus_backend_client.c tmp-linux-2.6-xen.patch/drivers/xen/xenbus/xenbus_backend_client.c
---- pristine-linux-2.6.18.2/drivers/xen/xenbus/xenbus_backend_client.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/xenbus/xenbus_backend_client.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,147 @@
-+/******************************************************************************
-+ * Backend-client-facing interface for the Xenbus driver.  In other words, the
-+ * interface between the Xenbus and the device-specific code in the backend
-+ * driver.
-+ *
-+ * Copyright (C) 2005-2006 XenSource Ltd
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
++	list_for_each_entry(i, &watches, list)
++		if (i == cmp)
++			return i;
 +
-+#include <linux/err.h>
-+#include <xen/gnttab.h>
-+#include <xen/xenbus.h>
-+#include <xen/driver_util.h>
++	return NULL;
++}
 +
-+/* Based on Rusty Russell's skeleton driver's map_page */
-+struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref)
++/* Register callback to watch this node. */
++int register_xenbus_watch(struct xenbus_watch *watch)
 +{
-+	struct gnttab_map_grant_ref op;
-+	struct vm_struct *area;
++	/* Pointer in ascii is the token. */
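++	/* Two hex digits per pointer byte, plus a NUL terminator. */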
++	char token[sizeof(watch) * 2 + 1];
++	int err;
 +
-+	area = alloc_vm_area(PAGE_SIZE);
-+	if (!area)
-+		return ERR_PTR(-ENOMEM);
++	sprintf(token, "%lX", (long)watch);
 +
-+	gnttab_set_map_op(&op, (unsigned long)area->addr, GNTMAP_host_map,
-+			  gnt_ref, dev->otherend_id);
-+	
-+	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-+		BUG();
++	down_read(&xs_state.watch_mutex);
 +
-+	if (op.status != GNTST_okay) {
-+		free_vm_area(area);
-+		xenbus_dev_fatal(dev, op.status,
-+				 "mapping in shared page %d from domain %d",
-+				 gnt_ref, dev->otherend_id);
-+		BUG_ON(!IS_ERR(ERR_PTR(op.status)));
-+		return ERR_PTR(op.status);
++	spin_lock(&watches_lock);
++	BUG_ON(find_watch(token));
++	list_add(&watch->list, &watches);
++	spin_unlock(&watches_lock);
++
++	err = xs_watch(watch->node, token);
++
++	/* Ignore errors due to multiple registration. */
++	if ((err != 0) && (err != -EEXIST)) {
++		spin_lock(&watches_lock);
++		list_del(&watch->list);
++		spin_unlock(&watches_lock);
 +	}
 +
-+	/* Stuff the handle in an unused field */
-+	area->phys_addr = (unsigned long)op.handle;
++	up_read(&xs_state.watch_mutex);
 +
-+	return area;
++	return err;
 +}
-+EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
-+
++EXPORT_SYMBOL_GPL(register_xenbus_watch);
 +
-+int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
-+		   grant_handle_t *handle, void *vaddr)
++void unregister_xenbus_watch(struct xenbus_watch *watch)
 +{
-+	struct gnttab_map_grant_ref op;
-+	
-+	gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map,
-+			  gnt_ref, dev->otherend_id);
-+	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-+		BUG();
++	struct xs_stored_msg *msg, *tmp;
++	char token[sizeof(watch) * 2 + 1];
++	int err;
 +
-+	if (op.status != GNTST_okay) {
-+		xenbus_dev_fatal(dev, op.status,
-+				 "mapping in shared page %d from domain %d",
-+				 gnt_ref, dev->otherend_id);
-+	} else
-+		*handle = op.handle;
++	BUG_ON(watch->flags & XBWF_new_thread);
 +
-+	return op.status;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_map_ring);
++	sprintf(token, "%lX", (long)watch);
 +
++	down_read(&xs_state.watch_mutex);
 +
-+/* Based on Rusty Russell's skeleton driver's unmap_page */
-+int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *area)
-+{
-+	struct gnttab_unmap_grant_ref op;
++	spin_lock(&watches_lock);
++	BUG_ON(!find_watch(token));
++	list_del(&watch->list);
++	spin_unlock(&watches_lock);
 +
-+	gnttab_set_unmap_op(&op, (unsigned long)area->addr, GNTMAP_host_map,
-+			    (grant_handle_t)area->phys_addr);
++	err = xs_unwatch(watch->node, token);
++	if (err)
++		printk(KERN_WARNING
++		       "XENBUS Failed to release watch %s: %i\n",
++		       watch->node, err);
 +
-+	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
-+		BUG();
++	up_read(&xs_state.watch_mutex);
 +
-+	if (op.status == GNTST_okay)
-+		free_vm_area(area);
-+	else
-+		xenbus_dev_error(dev, op.status,
-+				 "unmapping page at handle %d error %d",
-+				 (int16_t)area->phys_addr, op.status);
++	/* Cancel pending watch events. */
++	spin_lock(&watch_events_lock);
++	list_for_each_entry_safe(msg, tmp, &watch_events, list) {
++		if (msg->u.watch.handle != watch)
++			continue;
++		list_del(&msg->list);
++		kfree(msg->u.watch.vec);
++		kfree(msg);
++	}
++	spin_unlock(&watch_events_lock);
 +
-+	return op.status;
++	/* Flush any currently-executing callback, unless we are it. :-) */
++	if (current->pid != xenwatch_pid) {
++		mutex_lock(&xenwatch_mutex);
++		mutex_unlock(&xenwatch_mutex);
++	}
 +}
-+EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
++EXPORT_SYMBOL_GPL(unregister_xenbus_watch);
 +
++void xs_suspend(void)
++{
++	down_write(&xs_state.transaction_mutex);
++	down_write(&xs_state.watch_mutex);
++	mutex_lock(&xs_state.request_mutex);
++	mutex_lock(&xs_state.response_mutex);
++}
 +
-+int xenbus_unmap_ring(struct xenbus_device *dev,
-+		     grant_handle_t handle, void *vaddr)
++void xs_resume(void)
 +{
-+	struct gnttab_unmap_grant_ref op;
++	struct xenbus_watch *watch;
++	char token[sizeof(watch) * 2 + 1];
 +
-+	gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map,
-+			    handle);
-+	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
-+		BUG();
++	mutex_unlock(&xs_state.response_mutex);
++	mutex_unlock(&xs_state.request_mutex);
++	up_write(&xs_state.transaction_mutex);
 +
-+	if (op.status != GNTST_okay)
-+		xenbus_dev_error(dev, op.status,
-+				 "unmapping page at handle %d error %d",
-+				 handle, op.status);
++	/* No need for watches_lock: the watch_mutex is sufficient. */
++	list_for_each_entry(watch, &watches, list) {
++		sprintf(token, "%lX", (long)watch);
++		xs_watch(watch->node, token);
++	}
 +
-+	return op.status;
++	up_write(&xs_state.watch_mutex);
 +}
-+EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
 +
-+int xenbus_dev_is_online(struct xenbus_device *dev)
++void xs_suspend_cancel(void)
 +{
-+	int rc, val;
++	mutex_unlock(&xs_state.response_mutex);
++	mutex_unlock(&xs_state.request_mutex);
++	up_write(&xs_state.watch_mutex);
++	up_write(&xs_state.transaction_mutex);
++}
 +
-+	rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val);
-+	if (rc != 1)
-+		val = 0; /* no online node present */
++static int xenwatch_handle_callback(void *data)
++{
++	struct xs_stored_msg *msg = data;
 +
-+	return val;
++	msg->u.watch.handle->callback(msg->u.watch.handle,
++				      (const char **)msg->u.watch.vec,
++				      msg->u.watch.vec_size);
++
++	kfree(msg->u.watch.vec);
++	kfree(msg);
++
++	/* Kill this kthread if we were spawned just for this callback. */
++	if (current->pid != xenwatch_pid)
++		do_exit(0);
++
++	return 0;
 +}
-+EXPORT_SYMBOL_GPL(xenbus_dev_is_online);
 +
-+MODULE_LICENSE("Dual BSD/GPL");
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/xenbus/xenbus_client.c tmp-linux-2.6-xen.patch/drivers/xen/xenbus/xenbus_client.c
---- pristine-linux-2.6.18.2/drivers/xen/xenbus/xenbus_client.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/xenbus/xenbus_client.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,283 @@
-+/******************************************************************************
-+ * Client-facing interface for the Xenbus driver.  In other words, the
-+ * interface between the Xenbus and the device-specific code, be it the
-+ * frontend or the backend of that driver.
-+ *
-+ * Copyright (C) 2005 XenSource Ltd
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
++static int xenwatch_thread(void *unused)
++{
++	struct list_head *ent;
++	struct xs_stored_msg *msg;
++
++	current->flags |= PF_NOFREEZE;
++	for (;;) {
++		wait_event_interruptible(watch_events_waitq,
++					 !list_empty(&watch_events));
++
++		if (kthread_should_stop())
++			break;
++
++		mutex_lock(&xenwatch_mutex);
++
++		spin_lock(&watch_events_lock);
++		ent = watch_events.next;
++		if (ent != &watch_events)
++			list_del(ent);
++		spin_unlock(&watch_events_lock);
 +
-+#include <xen/evtchn.h>
-+#include <xen/gnttab.h>
-+#include <xen/xenbus.h>
-+#include <xen/driver_util.h>
++		if (ent == &watch_events) {
++			mutex_unlock(&xenwatch_mutex);
++			continue;
++		}
 +
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
++		msg = list_entry(ent, struct xs_stored_msg, list);
 +
-+#define DPRINTK(fmt, args...) \
-+    pr_debug("xenbus_client (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
++		/*
++		 * Unlock the mutex before running an XBWF_new_thread
++		 * handler. kthread_run can block which can deadlock
++		 * against unregister_xenbus_watch() if we need to
++		 * unregister other watches in order to make
++		 * progress. This can occur on resume before the swap
++		 * device is attached.
++		 */
++		if (msg->u.watch.handle->flags & XBWF_new_thread) {
++			mutex_unlock(&xenwatch_mutex);
++			kthread_run(xenwatch_handle_callback,
++				    msg, "xenwatch_cb");
++		} else {
++			xenwatch_handle_callback(msg);
++			mutex_unlock(&xenwatch_mutex);
++		}
++	}
 +
-+const char *xenbus_strstate(enum xenbus_state state)
-+{
-+	static const char *const name[] = {
-+		[ XenbusStateUnknown      ] = "Unknown",
-+		[ XenbusStateInitialising ] = "Initialising",
-+		[ XenbusStateInitWait     ] = "InitWait",
-+		[ XenbusStateInitialised  ] = "Initialised",
-+		[ XenbusStateConnected    ] = "Connected",
-+		[ XenbusStateClosing      ] = "Closing",
-+		[ XenbusStateClosed	  ] = "Closed",
-+	};
-+	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
++	return 0;
 +}
-+EXPORT_SYMBOL_GPL(xenbus_strstate);
 +
-+int xenbus_watch_path(struct xenbus_device *dev, const char *path,
-+		      struct xenbus_watch *watch,
-+		      void (*callback)(struct xenbus_watch *,
-+				       const char **, unsigned int))
++static int process_msg(void)
 +{
++	struct xs_stored_msg *msg;
++	char *body;
 +	int err;
 +
-+	watch->node = path;
-+	watch->callback = callback;
++	/*
++	 * We must disallow save/restore while reading a xenstore message.
++	 * A partial read across s/r leaves us out of sync with xenstored.
++	 */
++	for (;;) {
++		err = xb_wait_for_data_to_read();
++		if (err)
++			return err;
++		mutex_lock(&xs_state.response_mutex);
++		if (xb_data_to_read())
++			break;
++		/* We raced with save/restore: pending data 'disappeared'. */
++		mutex_unlock(&xs_state.response_mutex);
++	}
 +
-+	err = register_xenbus_watch(watch);
 +
++	msg = kmalloc(sizeof(*msg), GFP_NOIO | __GFP_HIGH);
++	if (msg == NULL) {
++		err = -ENOMEM;
++		goto out;
++	}
++
++	err = xb_read(&msg->hdr, sizeof(msg->hdr));
 +	if (err) {
-+		watch->node = NULL;
-+		watch->callback = NULL;
-+		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
++		kfree(msg);
++		goto out;
 +	}
 +
-+	return err;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_watch_path);
++	body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH);
++	if (body == NULL) {
++		kfree(msg);
++		err = -ENOMEM;
++		goto out;
++	}
 +
++	err = xb_read(body, msg->hdr.len);
++	if (err) {
++		kfree(body);
++		kfree(msg);
++		goto out;
++	}
++	body[msg->hdr.len] = '\0';
 +
-+int xenbus_watch_path2(struct xenbus_device *dev, const char *path,
-+		       const char *path2, struct xenbus_watch *watch,
-+		       void (*callback)(struct xenbus_watch *,
-+					const char **, unsigned int))
-+{
-+	int err;
-+	char *state = kasprintf(GFP_KERNEL, "%s/%s", path, path2);
-+	if (!state) {
-+		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
-+		return -ENOMEM;
++	if (msg->hdr.type == XS_WATCH_EVENT) {
++		msg->u.watch.vec = split(body, msg->hdr.len,
++					 &msg->u.watch.vec_size);
++		if (IS_ERR(msg->u.watch.vec)) {
++			err = PTR_ERR(msg->u.watch.vec);
++			kfree(msg);
++			goto out;
++		}
++
++		spin_lock(&watches_lock);
++		msg->u.watch.handle = find_watch(
++			msg->u.watch.vec[XS_WATCH_TOKEN]);
++		if (msg->u.watch.handle != NULL) {
++			spin_lock(&watch_events_lock);
++			list_add_tail(&msg->list, &watch_events);
++			wake_up(&watch_events_waitq);
++			spin_unlock(&watch_events_lock);
++		} else {
++			kfree(msg->u.watch.vec);
++			kfree(msg);
++		}
++		spin_unlock(&watches_lock);
++	} else {
++		msg->u.reply.body = body;
++		spin_lock(&xs_state.reply_lock);
++		list_add_tail(&msg->list, &xs_state.reply_list);
++		spin_unlock(&xs_state.reply_lock);
++		wake_up(&xs_state.reply_waitq);
 +	}
-+	err = xenbus_watch_path(dev, state, watch, callback);
 +
-+	if (err)
-+		kfree(state);
++ out:
++	mutex_unlock(&xs_state.response_mutex);
 +	return err;
 +}
-+EXPORT_SYMBOL_GPL(xenbus_watch_path2);
-+
 +
-+int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
++static int xenbus_thread(void *unused)
 +{
-+	/* We check whether the state is currently set to the given value, and
-+	   if not, then the state is set.  We don't want to unconditionally
-+	   write the given state, because we don't want to fire watches
-+	   unnecessarily.  Furthermore, if the node has gone, we don't write
-+	   to it, as the device will be tearing down, and we don't want to
-+	   resurrect that directory.
++	int err;
 +
-+	   Note that, because of this cached value of our state, this function
-+	   will not work inside a Xenstore transaction (something it was
-+	   trying to in the past) because dev->state would not get reset if
-+	   the transaction was aborted.
++	current->flags |= PF_NOFREEZE;
++	for (;;) {
++		err = process_msg();
++		if (err)
++			printk(KERN_WARNING "XENBUS error %d while reading "
++			       "message\n", err);
++		if (kthread_should_stop())
++			break;
++	}
 +
-+	 */
++	return 0;
++}
 +
-+	int current_state;
++int xs_init(void)
++{
 +	int err;
++	struct task_struct *task;
 +
-+	if (state == dev->state)
-+		return 0;
++	INIT_LIST_HEAD(&xs_state.reply_list);
++	spin_lock_init(&xs_state.reply_lock);
++	init_waitqueue_head(&xs_state.reply_waitq);
 +
-+	err = xenbus_scanf(XBT_NIL, dev->nodename, "state", "%d",
-+			   &current_state);
-+	if (err != 1)
-+		return 0;
++	mutex_init(&xs_state.request_mutex);
++	mutex_init(&xs_state.response_mutex);
++	init_rwsem(&xs_state.transaction_mutex);
++	init_rwsem(&xs_state.watch_mutex);
 +
-+	err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%d", state);
-+	if (err) {
-+		if (state != XenbusStateClosing) /* Avoid looping */
-+			xenbus_dev_fatal(dev, err, "writing new state");
++	/* Initialize the shared memory rings to talk to xenstored */
++	err = xb_init_comms();
++	if (err)
 +		return err;
-+	}
 +
-+	dev->state = state;
++	task = kthread_run(xenwatch_thread, NULL, "xenwatch");
++	if (IS_ERR(task))
++		return PTR_ERR(task);
++	xenwatch_pid = task->pid;
 +
-+	return 0;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_switch_state);
++	task = kthread_run(xenbus_thread, NULL, "xenbus");
++	if (IS_ERR(task))
++		return PTR_ERR(task);
 +
-+int xenbus_frontend_closed(struct xenbus_device *dev)
-+{
-+	xenbus_switch_state(dev, XenbusStateClosed);
-+	complete(&dev->down);
 +	return 0;
 +}
-+EXPORT_SYMBOL_GPL(xenbus_frontend_closed);
-+
+diff -r d894e36cfc30 -r 0aa021803deb drivers/xen/xenoprof/xenoprofile.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/drivers/xen/xenoprof/xenoprofile.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,545 @@
 +/**
-+ * Return the path to the error node for the given device, or NULL on failure.
-+ * If the value returned is non-NULL, then it is the caller's to kfree.
++ * @file xenoprofile.c
++ *
++ * @remark Copyright 2002 OProfile authors
++ * @remark Read the file COPYING
++ *
++ * @author John Levon <levon at movementarian.org>
++ *
++ * Modified by Aravind Menon and Jose Renato Santos for Xen
++ * These modifications are:
++ * Copyright (C) 2005 Hewlett-Packard Co.
++ *
++ * Separated out arch-generic part
++ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
++ *                    VA Linux Systems Japan K.K.
 + */
-+static char *error_path(struct xenbus_device *dev)
-+{
-+	return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
-+}
 +
++#include <linux/init.h>
++#include <linux/notifier.h>
++#include <linux/smp.h>
++#include <linux/oprofile.h>
++#include <linux/sysdev.h>
++#include <linux/slab.h>
++#include <linux/interrupt.h>
++#include <linux/vmalloc.h>
++#include <asm/pgtable.h>
++#include <xen/evtchn.h>
++#include <xen/xenoprof.h>
++#include <xen/driver_util.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/xenoprof.h>
++#include "../../../drivers/oprofile/cpu_buffer.h"
++#include "../../../drivers/oprofile/event_buffer.h"
 +
-+void _dev_error(struct xenbus_device *dev, int err, const char *fmt,
-+		va_list ap)
-+{
-+	int ret;
-+	unsigned int len;
-+	char *printf_buffer = NULL, *path_buffer = NULL;
++#define MAX_XENOPROF_SAMPLES 16
 +
-+#define PRINTF_BUFFER_SIZE 4096
-+	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
-+	if (printf_buffer == NULL)
-+		goto fail;
++/* sample buffers shared with Xen */
++static xenoprof_buf_t *xenoprof_buf[MAX_VIRT_CPUS];
++/* Shared buffer area */
++static struct xenoprof_shared_buffer shared_buffer;
 +
-+	len = sprintf(printf_buffer, "%i ", -err);
-+	ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);
++/* Passive sample buffers shared with Xen */
++static xenoprof_buf_t *p_xenoprof_buf[MAX_OPROF_DOMAINS][MAX_VIRT_CPUS];
++/* Passive shared buffer area */
++static struct xenoprof_shared_buffer p_shared_buffer[MAX_OPROF_DOMAINS];
 +
-+	BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);
++static int xenoprof_start(void);
++static void xenoprof_stop(void);
 +
-+	dev_err(&dev->dev, "%s\n", printf_buffer);
++static int xenoprof_enabled = 0;
++static int xenoprof_is_primary = 0;
++static int active_defined;
 +
-+	path_buffer = error_path(dev);
++extern unsigned long backtrace_depth;
 +
-+	if (path_buffer == NULL) {
-+		printk("xenbus: failed to write error node for %s (%s)\n",
-+		       dev->nodename, printf_buffer);
-+		goto fail;
-+	}
++/* Number of buffers in shared area (one per VCPU) */
++static int nbuf;
++/* Mappings of VIRQ_XENOPROF to irq number (per cpu) */
++static int ovf_irq[NR_CPUS];
++/* cpu model type string - copied from Xen on XENOPROF_init command */
++static char cpu_type[XENOPROF_CPU_TYPE_SIZE];
 +
-+	if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
-+		printk("xenbus: failed to write error node for %s (%s)\n",
-+		       dev->nodename, printf_buffer);
-+		goto fail;
-+	}
++#ifdef CONFIG_PM
 +
-+fail:
-+	if (printf_buffer)
-+		kfree(printf_buffer);
-+	if (path_buffer)
-+		kfree(path_buffer);
++static int xenoprof_suspend(struct sys_device * dev, pm_message_t state)
++{
++	if (xenoprof_enabled == 1)
++		xenoprof_stop();
++	return 0;
 +}
 +
 +
-+void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt,
-+		      ...)
++static int xenoprof_resume(struct sys_device * dev)
 +{
-+	va_list ap;
-+
-+	va_start(ap, fmt);
-+	_dev_error(dev, err, fmt, ap);
-+	va_end(ap);
++	if (xenoprof_enabled == 1)
++		xenoprof_start();
++	return 0;
 +}
-+EXPORT_SYMBOL_GPL(xenbus_dev_error);
 +
 +
-+void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt,
-+		      ...)
-+{
-+	va_list ap;
++static struct sysdev_class oprofile_sysclass = {
++	set_kset_name("oprofile"),
++	.resume		= xenoprof_resume,
++	.suspend	= xenoprof_suspend
++};
 +
-+	va_start(ap, fmt);
-+	_dev_error(dev, err, fmt, ap);
-+	va_end(ap);
 +
-+	xenbus_switch_state(dev, XenbusStateClosing);
++static struct sys_device device_oprofile = {
++	.id	= 0,
++	.cls	= &oprofile_sysclass,
++};
++
++
++static int __init init_driverfs(void)
++{
++	int error;
++	if (!(error = sysdev_class_register(&oprofile_sysclass)))
++		error = sysdev_register(&device_oprofile);
++	return error;
 +}
-+EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
 +
 +
-+int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
++static void exit_driverfs(void)
 +{
-+	int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
-+	if (err < 0)
-+		xenbus_dev_fatal(dev, err, "granting access to ring page");
-+	return err;
++	sysdev_unregister(&device_oprofile);
++	sysdev_class_unregister(&oprofile_sysclass);
 +}
-+EXPORT_SYMBOL_GPL(xenbus_grant_ring);
 +
++#else
++#define init_driverfs() do { } while (0)
++#define exit_driverfs() do { } while (0)
++#endif /* CONFIG_PM */
++
++static unsigned long long oprofile_samples;
++static unsigned long long p_oprofile_samples;
 +
-+int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
++static unsigned int pdomains;
++static struct xenoprof_passive passive_domains[MAX_OPROF_DOMAINS];
++
++/* Check whether the given entry is an escape code */
++static int xenoprof_is_escape(xenoprof_buf_t * buf, int tail)
 +{
-+	struct evtchn_alloc_unbound alloc_unbound;
-+	int err;
++	return (buf->event_log[tail].eip == XENOPROF_ESCAPE_CODE);
++}
 +
-+	alloc_unbound.dom        = DOMID_SELF;
-+	alloc_unbound.remote_dom = dev->otherend_id;
++/* Get the event at the given entry  */
++static uint8_t xenoprof_get_event(xenoprof_buf_t * buf, int tail)
++{
++	return (buf->event_log[tail].event);
++}
 +
-+	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
-+					  &alloc_unbound);
-+	if (err)
-+		xenbus_dev_fatal(dev, err, "allocating event channel");
-+	else
-+		*port = alloc_unbound.port;
++static void xenoprof_add_pc(xenoprof_buf_t *buf, int is_passive)
++{
++	int head, tail, size;
++	int tracing = 0;
 +
-+	return err;
++	head = buf->event_head;
++	tail = buf->event_tail;
++	size = buf->event_size;
++
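++	/*
++	 * Single-producer/single-consumer ring: Xen appends at head, we
++	 * consume from tail and publish the new tail below so Xen can
++	 * reuse the slots.
++	 */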
++	while (tail != head) {
++		if (xenoprof_is_escape(buf, tail) &&
++		    xenoprof_get_event(buf, tail) == XENOPROF_TRACE_BEGIN) {
++			tracing = 1;
++			oprofile_add_pc(ESCAPE_CODE, buf->event_log[tail].mode,
++					CPU_TRACE_BEGIN);
++			if (!is_passive)
++				oprofile_samples++;
++			else
++				p_oprofile_samples++;
++
++		} else {
++			oprofile_add_pc(buf->event_log[tail].eip,
++					buf->event_log[tail].mode,
++					buf->event_log[tail].event);
++			if (!tracing) {
++				if (!is_passive)
++					oprofile_samples++;
++				else
++					p_oprofile_samples++;
++			}
++
++		}
++		tail++;
++		if (tail == size)
++			tail = 0;
++	}
++	buf->event_tail = tail;
 +}
-+EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
 +
++static void xenoprof_handle_passive(void)
++{
++	int i, j;
++	int flag_domain, flag_switch = 0;
++	
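++	/*
++	 * Bracket each passive domain's samples with a domain-switch
++	 * record so oprofile attributes them correctly, then switch
++	 * back to the coordinator domain when done.
++	 */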
++	for (i = 0; i < pdomains; i++) {
++		flag_domain = 0;
++		for (j = 0; j < passive_domains[i].nbuf; j++) {
++			xenoprof_buf_t *buf = p_xenoprof_buf[i][j];
++			if (buf->event_head == buf->event_tail)
++				continue;
++			if (!flag_domain) {
++				if (!oprofile_add_domain_switch(
++					passive_domains[i].domain_id))
++					goto done;
++				flag_domain = 1;
++			}
++			xenoprof_add_pc(buf, 1);
++			flag_switch = 1;
++		}
++	}
++done:
++	if (flag_switch)
++		oprofile_add_domain_switch(COORDINATOR_DOMAIN);
++}
 +
-+int xenbus_free_evtchn(struct xenbus_device *dev, int port)
++static irqreturn_t 
++xenoprof_ovf_interrupt(int irq, void * dev_id, struct pt_regs * regs)
 +{
-+	struct evtchn_close close;
-+	int err;
++	struct xenoprof_buf * buf;
++	static unsigned long flag;
 +
-+	close.port = port;
++	buf = xenoprof_buf[smp_processor_id()];
 +
-+	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
-+	if (err)
-+		xenbus_dev_error(dev, err, "freeing event channel %d", port);
++	xenoprof_add_pc(buf, 0);
 +
-+	return err;
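++	/* The static flag lets only one CPU drain the passive buffers. */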
++	if (xenoprof_is_primary && !test_and_set_bit(0, &flag)) {
++		xenoprof_handle_passive();
++		smp_mb__before_clear_bit();
++		clear_bit(0, &flag);
++	}
++
++	return IRQ_HANDLED;
 +}
-+EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
 +
 +
-+enum xenbus_state xenbus_read_driver_state(const char *path)
++static void unbind_virq(void)
 +{
-+	enum xenbus_state result;
-+	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
-+	if (err)
-+		result = XenbusStateUnknown;
++	unsigned int i;
 +
-+	return result;
++	for_each_online_cpu(i) {
++		if (ovf_irq[i] >= 0) {
++			unbind_from_irqhandler(ovf_irq[i], NULL);
++			ovf_irq[i] = -1;
++		}
++	}
 +}
-+EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/xenbus/xenbus_comms.c tmp-linux-2.6-xen.patch/drivers/xen/xenbus/xenbus_comms.c
---- pristine-linux-2.6.18.2/drivers/xen/xenbus/xenbus_comms.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/xenbus/xenbus_comms.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,232 @@
-+/******************************************************************************
-+ * xenbus_comms.c
-+ *
-+ * Low level code to talk to Xen Store: ringbuffer and event channel.
-+ *
-+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
 +
-+#include <linux/wait.h>
-+#include <linux/interrupt.h>
-+#include <linux/sched.h>
-+#include <linux/err.h>
-+#include <linux/ptrace.h>
-+#include <xen/evtchn.h>
-+#include <xen/xenbus.h>
 +
-+#include <asm/hypervisor.h>
++static int bind_virq(void)
++{
++	unsigned int i;
++	int result;
 +
-+#include "xenbus_comms.h"
++	for_each_online_cpu(i) {
++		result = bind_virq_to_irqhandler(VIRQ_XENOPROF,
++						 i,
++						 xenoprof_ovf_interrupt,
++						 SA_INTERRUPT,
++						 "xenoprof",
++						 NULL);
 +
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
++		if (result < 0) {
++			unbind_virq();
++			return result;
++		}
 +
-+static int xenbus_irq;
++		ovf_irq[i] = result;
++	}
++
++	return 0;
++}
 +
-+extern void xenbus_probe(void *);
-+extern int xenstored_ready;
-+static DECLARE_WORK(probe_work, xenbus_probe, NULL);
 +
-+static DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
++static void unmap_passive_list(void)
++{
++	int i;
++	for (i = 0; i < pdomains; i++)
++		xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[i]);
++	pdomains = 0;
++}
++
++
++static int map_xenoprof_buffer(int max_samples)
++{
++	struct xenoprof_get_buffer get_buffer;
++	struct xenoprof_buf *buf;
++	int ret, i;
++
++	if (shared_buffer.buffer)
++		return 0;
++
++	get_buffer.max_samples = max_samples;
++	ret = xenoprof_arch_map_shared_buffer(&get_buffer, &shared_buffer);
++	if (ret)
++		return ret;
++	nbuf = get_buffer.nbuf;
 +
-+static irqreturn_t wake_waiting(int irq, void *unused, struct pt_regs *regs)
-+{
-+	if (unlikely(xenstored_ready == 0)) {
-+		xenstored_ready = 1;
-+		schedule_work(&probe_work);
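++	/* Index the per-VCPU buffers by the vcpu_id Xen stamped in each. */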
++	for (i = 0; i < nbuf; i++) {
++		buf = (struct xenoprof_buf *)
++			&shared_buffer.buffer[i * get_buffer.bufsize];
++		BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
++		xenoprof_buf[buf->vcpu_id] = buf;
 +	}
 +
-+	wake_up(&xb_waitq);
-+	return IRQ_HANDLED;
-+}
-+
-+static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod)
-+{
-+	return ((prod - cons) <= XENSTORE_RING_SIZE);
++	return 0;
 +}
 +
-+static void *get_output_chunk(XENSTORE_RING_IDX cons,
-+			      XENSTORE_RING_IDX prod,
-+			      char *buf, uint32_t *len)
-+{
-+	*len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
-+	if ((XENSTORE_RING_SIZE - (prod - cons)) < *len)
-+		*len = XENSTORE_RING_SIZE - (prod - cons);
-+	return buf + MASK_XENSTORE_IDX(prod);
-+}
 +
-+static const void *get_input_chunk(XENSTORE_RING_IDX cons,
-+				   XENSTORE_RING_IDX prod,
-+				   const char *buf, uint32_t *len)
++static int xenoprof_setup(void)
 +{
-+	*len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
-+	if ((prod - cons) < *len)
-+		*len = prod - cons;
-+	return buf + MASK_XENSTORE_IDX(cons);
-+}
++	int ret;
 +
-+int xb_write(const void *data, unsigned len)
-+{
-+	struct xenstore_domain_interface *intf = xen_store_interface;
-+	XENSTORE_RING_IDX cons, prod;
-+	int rc;
++	if ((ret = map_xenoprof_buffer(MAX_XENOPROF_SAMPLES)))
++		return ret;
 +
-+	while (len != 0) {
-+		void *dst;
-+		unsigned int avail;
++	if ((ret = bind_virq()))
++		return ret;
 +
-+		rc = wait_event_interruptible(
-+			xb_waitq,
-+			(intf->req_prod - intf->req_cons) !=
-+			XENSTORE_RING_SIZE);
-+		if (rc < 0)
-+			return rc;
++	if (xenoprof_is_primary) {
++		/* Define dom0 as an active domain if not done yet */
++		if (!active_defined) {
++			domid_t domid;
++			ret = HYPERVISOR_xenoprof_op(
++				XENOPROF_reset_active_list, NULL);
++			if (ret)
++				goto err;
++			domid = 0;
++			ret = HYPERVISOR_xenoprof_op(
++				XENOPROF_set_active, &domid);
++			if (ret)
++				goto err;
++			active_defined = 1;
++		}
 +
-+		/* Read indexes, then verify. */
-+		cons = intf->req_cons;
-+		prod = intf->req_prod;
-+		if (!check_indexes(cons, prod)) {
-+			intf->req_cons = intf->req_prod = 0;
-+			return -EIO;
++		if (backtrace_depth > 0) {
++			ret = HYPERVISOR_xenoprof_op(XENOPROF_set_backtrace, 
++						     &backtrace_depth);
++			if (ret)
++				backtrace_depth = 0;
 +		}
 +
-+		dst = get_output_chunk(cons, prod, intf->req, &avail);
-+		if (avail == 0)
-+			continue;
-+		if (avail > len)
-+			avail = len;
++		ret = HYPERVISOR_xenoprof_op(XENOPROF_reserve_counters, NULL);
++		if (ret)
++			goto err;
++
++		xenoprof_arch_counter();
++		ret = HYPERVISOR_xenoprof_op(XENOPROF_setup_events, NULL);
++		if (ret)
++			goto err;
++	}
 +
-+		/* Must write data /after/ reading the consumer index. */
-+		mb();
++	ret = HYPERVISOR_xenoprof_op(XENOPROF_enable_virq, NULL);
++	if (ret)
++		goto err;
 +
-+		memcpy(dst, data, avail);
-+		data += avail;
-+		len -= avail;
++	xenoprof_enabled = 1;
++	return 0;
++ err:
++	unbind_virq();
++	return ret;
++}
 +
-+		/* Other side must not see new producer until data is there. */
-+		wmb();
-+		intf->req_prod += avail;
 +
-+		/* Implies mb(): other side will see the updated producer. */
-+		notify_remote_via_evtchn(xen_store_evtchn);
++static void xenoprof_shutdown(void)
++{
++	xenoprof_enabled = 0;
++
++	WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_disable_virq, NULL));
++
++	if (xenoprof_is_primary) {
++		WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_release_counters,
++					       NULL));
++		active_defined = 0;
 +	}
 +
-+	return 0;
++	unbind_virq();
++
++	xenoprof_arch_unmap_shared_buffer(&shared_buffer);
++	if (xenoprof_is_primary)
++		unmap_passive_list();
 +}
 +
-+int xb_data_to_read(void)
++
++static int xenoprof_start(void)
 +{
-+	struct xenstore_domain_interface *intf = xen_store_interface;
-+	return (intf->rsp_cons != intf->rsp_prod);
++	int ret = 0;
++
++	if (xenoprof_is_primary)
++		ret = HYPERVISOR_xenoprof_op(XENOPROF_start, NULL);
++	if (!ret)
++		xenoprof_arch_start();
++	return ret;
 +}
 +
-+int xb_wait_for_data_to_read(void)
++
++static void xenoprof_stop(void)
 +{
-+	return wait_event_interruptible(xb_waitq, xb_data_to_read());
++	if (xenoprof_is_primary)
++		WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_stop, NULL));
++	xenoprof_arch_stop();
 +}
 +
-+int xb_read(void *data, unsigned len)
++
++static int xenoprof_set_active(int * active_domains,
++			       unsigned int adomains)
 +{
-+	struct xenstore_domain_interface *intf = xen_store_interface;
-+	XENSTORE_RING_IDX cons, prod;
-+	int rc;
++	int ret = 0;
++	int i;
++	int set_dom0 = 0;
++	domid_t domid;
 +
-+	while (len != 0) {
-+		unsigned int avail;
-+		const char *src;
++	if (!xenoprof_is_primary)
++		return 0;
 +
-+		rc = xb_wait_for_data_to_read();
-+		if (rc < 0)
-+			return rc;
++	if (adomains > MAX_OPROF_DOMAINS)
++		return -E2BIG;
 +
-+		/* Read indexes, then verify. */
-+		cons = intf->rsp_cons;
-+		prod = intf->rsp_prod;
-+		if (!check_indexes(cons, prod)) {
-+			intf->rsp_cons = intf->rsp_prod = 0;
-+			return -EIO;
++	ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
++	if (ret)
++		return ret;
++
++	for (i = 0; i < adomains; i++) {
++		domid = active_domains[i];
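++		/* domid_t is narrower than int: the compare below catches
++		 * values that do not fit. */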
++		if (domid != active_domains[i]) {
++			ret = -EINVAL;
++			goto out;
 +		}
++		ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
++		if (ret)
++			goto out;
++		if (active_domains[i] == 0)
++			set_dom0 = 1;
++	}
++	/* dom0 must always be active but may not be in the list */ 
++	if (!set_dom0) {
++		domid = 0;
++		ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
++	}
 +
-+		src = get_input_chunk(cons, prod, intf->rsp, &avail);
-+		if (avail == 0)
-+			continue;
-+		if (avail > len)
-+			avail = len;
++out:
++	if (ret)
++		WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list,
++					       NULL));
++	active_defined = !ret;
++	return ret;
++}
 +
-+		/* Must read data /after/ reading the producer index. */
-+		rmb();
++static int xenoprof_set_passive(int * p_domains,
++                                unsigned int pdoms)
++{
++	int ret;
++	unsigned int i, j;
++	struct xenoprof_buf *buf;
 +
-+		memcpy(data, src, avail);
-+		data += avail;
-+		len -= avail;
++	if (!xenoprof_is_primary)
++		return 0;
 +
-+		/* Other side must not see free space until we've copied out */
-+		mb();
-+		intf->rsp_cons += avail;
++	if (pdoms > MAX_OPROF_DOMAINS)
++		return -E2BIG;
 +
-+		pr_debug("Finished read of %i bytes (%i to go)\n", avail, len);
++	ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_passive_list, NULL);
++	if (ret)
++		return ret;
++	unmap_passive_list();
 +
-+		/* Implies mb(): other side will see the updated consumer. */
-+		notify_remote_via_evtchn(xen_store_evtchn);
++	for (i = 0; i < pdoms; i++) {
++		passive_domains[i].domain_id = p_domains[i];
++		passive_domains[i].max_samples = 2048;
++		ret = xenoprof_arch_set_passive(&passive_domains[i],
++						&p_shared_buffer[i]);
++		if (ret)
++			goto out;
++		for (j = 0; j < passive_domains[i].nbuf; j++) {
++			buf = (struct xenoprof_buf *)
++				&p_shared_buffer[i].buffer[
++				j * passive_domains[i].bufsize];
++			BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
++			p_xenoprof_buf[i][buf->vcpu_id] = buf;
++		}
 +	}
 +
++	pdomains = pdoms;
 +	return 0;
++
++out:
++	for (j = 0; j < i; j++)
++		xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[j]);
++
++	return ret;
 +}
 +
-+/* Set up interrupt handler off store event channel. */
-+int xb_init_comms(void)
++
++/* A dummy backtrace function to keep oprofile happy.
++ * The real backtrace is done in Xen.
++ */
++static void xenoprof_dummy_backtrace(struct pt_regs * const regs, 
++				     unsigned int depth)
 +{
-+	struct xenstore_domain_interface *intf = xen_store_interface;
-+	int err;
++	/* this should never be called */
++	BUG();
++	return;
++}
 +
-+	if (intf->req_prod != intf->req_cons)
-+		printk(KERN_ERR "XENBUS request ring is not quiescent "
-+		       "(%08x:%08x)!\n", intf->req_cons, intf->req_prod);
 +
-+	if (intf->rsp_prod != intf->rsp_cons) {
-+		printk(KERN_WARNING "XENBUS response ring is not quiescent "
-+		       "(%08x:%08x): fixing up\n",
-+		       intf->rsp_cons, intf->rsp_prod);
-+		intf->rsp_cons = intf->rsp_prod;
-+	}
++static struct oprofile_operations xenoprof_ops = {
++#ifdef HAVE_XENOPROF_CREATE_FILES
++	.create_files 	= xenoprof_create_files,
++#endif
++	.set_active	= xenoprof_set_active,
++	.set_passive    = xenoprof_set_passive,
++	.setup 		= xenoprof_setup,
++	.shutdown	= xenoprof_shutdown,
++	.start		= xenoprof_start,
++	.stop		= xenoprof_stop,
++	.backtrace	= xenoprof_dummy_backtrace
++};
 +
-+	if (xenbus_irq)
-+		unbind_from_irqhandler(xenbus_irq, &xb_waitq);
 +
-+	err = bind_caller_port_to_irqhandler(
-+		xen_store_evtchn, wake_waiting,
-+		0, "xenbus", &xb_waitq);
-+	if (err <= 0) {
-+		printk(KERN_ERR "XENBUS request irq failed %i\n", err);
-+		return err;
-+	}
++/* in order to get driverfs right */
++static int using_xenoprof;
 +
-+	xenbus_irq = err;
++int __init xenoprofile_init(struct oprofile_operations * ops)
++{
++	struct xenoprof_init init;
++	unsigned int i;
++	int ret;
 +
-+	return 0;
-+}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/xenbus/xenbus_comms.h tmp-linux-2.6-xen.patch/drivers/xen/xenbus/xenbus_comms.h
---- pristine-linux-2.6.18.2/drivers/xen/xenbus/xenbus_comms.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/xenbus/xenbus_comms.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,46 @@
-+/*
-+ * Private include for xenbus communications.
-+ * 
-+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
++	ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
++	if (!ret) {
++		xenoprof_arch_init_counter(&init);
++		xenoprof_is_primary = init.is_primary;
 +
-+#ifndef _XENBUS_COMMS_H
-+#define _XENBUS_COMMS_H
++		/*  cpu_type is detected by Xen */
++		cpu_type[XENOPROF_CPU_TYPE_SIZE-1] = 0;
++		strncpy(cpu_type, init.cpu_type, XENOPROF_CPU_TYPE_SIZE - 1);
++		xenoprof_ops.cpu_type = cpu_type;
 +
-+int xs_init(void);
-+int xb_init_comms(void);
++		init_driverfs();
++		using_xenoprof = 1;
++		*ops = xenoprof_ops;
 +
-+/* Low level routines. */
-+int xb_write(const void *data, unsigned len);
-+int xb_read(void *data, unsigned len);
-+int xb_data_to_read(void);
-+int xb_wait_for_data_to_read(void);
-+int xs_input_avail(void);
-+extern struct xenstore_domain_interface *xen_store_interface;
-+extern int xen_store_evtchn;
++		for (i=0; i<NR_CPUS; i++)
++			ovf_irq[i] = -1;
 +
-+#endif /* _XENBUS_COMMS_H */
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/xenbus/xenbus_dev.c tmp-linux-2.6-xen.patch/drivers/xen/xenbus/xenbus_dev.c
---- pristine-linux-2.6.18.2/drivers/xen/xenbus/xenbus_dev.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/xenbus/xenbus_dev.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,404 @@
-+/*
-+ * xenbus_dev.c
-+ * 
-+ * Driver giving user-space access to the kernel's xenbus connection
-+ * to xenstore.
-+ * 
-+ * Copyright (c) 2005, Christian Limpach
-+ * Copyright (c) 2005, Rusty Russell, IBM Corporation
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
++		active_defined = 0;
++	}
 +
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/uio.h>
-+#include <linux/notifier.h>
-+#include <linux/wait.h>
-+#include <linux/fs.h>
-+#include <linux/poll.h>
-+#include <linux/mutex.h>
++	printk(KERN_INFO "%s: ret %d, events %d, xenoprof_is_primary %d\n",
++	       __func__, ret, init.num_events, xenoprof_is_primary);
++	return ret;
++}
 +
-+#include "xenbus_comms.h"
 +
-+#include <asm/uaccess.h>
-+#include <asm/hypervisor.h>
-+#include <xen/xenbus.h>
-+#include <xen/xen_proc.h>
-+#include <asm/hypervisor.h>
++void xenoprofile_exit(void)
++{
++	if (using_xenoprof)
++		exit_driverfs();
 +
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
++	xenoprof_arch_unmap_shared_buffer(&shared_buffer);
++	if (xenoprof_is_primary) {
++		unmap_passive_list();
++		WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_shutdown, NULL));
++	}
++}
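
The hunk above ends the xenoprof core: XENOPROF_init reports the event
count, the primary-domain flag and a hypervisor-detected cpu_type, and
only the primary domain issues XENOPROF_shutdown on exit. A hedged
sketch of how the arch oprofile entry point would defer to it; the
is_running_on_xen() guard and the exact signatures are assumptions
based on this tree's conventions:

    #include <linux/oprofile.h>
    #include <linux/errno.h>
    #include <asm/hypervisor.h>

    int __init oprofile_arch_init(struct oprofile_operations *ops)
    {
    	/* assumption: a paravirtualized guest cannot program the real
    	 * performance counters itself, so defer to xenoprof */
    	if (is_running_on_xen())
    		return xenoprofile_init(ops);
    	return -ENODEV;	/* caller falls back to timer mode */
    }
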
+diff -r d894e36cfc30 -r 0aa021803deb fs/Kconfig
+--- a/fs/Kconfig	Tue Sep 09 11:37:38 2008 +0200
++++ b/fs/Kconfig	Wed Sep 10 10:54:08 2008 +0100
+@@ -826,6 +826,11 @@
+         help
+         Exports the dump image of crashed kernel in ELF format.
+ 
++config PROC_IOMEM_MACHINE
++	bool
++	depends on PROC_FS && EXPERIMENTAL && KEXEC && XEN && IA64
++	default y
++
+ config SYSFS
+ 	bool "sysfs file system support" if EMBEDDED
+ 	default y
+@@ -865,6 +870,7 @@
+ config HUGETLBFS
+ 	bool "HugeTLB file system support"
+ 	depends X86 || IA64 || PPC64 || SPARC64 || SUPERH || BROKEN
++	depends on !XEN
+ 	help
+ 	  hugetlbfs is a filesystem backing for HugeTLB pages, based on
+ 	  ramfs. For architectures that support it, say Y here and read
+diff -r d894e36cfc30 -r 0aa021803deb fs/aio.c
+--- a/fs/aio.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/fs/aio.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -33,6 +33,11 @@
+ #include <asm/kmap_types.h>
+ #include <asm/uaccess.h>
+ #include <asm/mmu_context.h>
++
++#ifdef CONFIG_EPOLL
++#include <linux/poll.h>
++#include <linux/eventpoll.h>
++#endif
+ 
+ #if DEBUG > 1
+ #define dprintk		printk
+@@ -1015,6 +1020,10 @@
+ 	if (waitqueue_active(&ctx->wait))
+ 		wake_up(&ctx->wait);
+ 
++#ifdef CONFIG_EPOLL
++	if (ctx->file && waitqueue_active(&ctx->poll_wait))
++		wake_up(&ctx->poll_wait);
++#endif
+ 	if (ret)
+ 		put_ioctx(ctx);
+ 
+@@ -1024,6 +1033,8 @@
+ /* aio_read_evt
+  *	Pull an event off of the ioctx's event ring.  Returns the number of 
+  *	events fetched (0 or 1 ;-)
++ *	If the ent parameter is 0, just returns the number of events that would
++ *	be fetched.
+  *	FIXME: make this use cmpxchg.
+  *	TODO: make the ringbuffer user mmap()able (requires FIXME).
+  */
+@@ -1046,13 +1057,18 @@
+ 
+ 	head = ring->head % info->nr;
+ 	if (head != ring->tail) {
+-		struct io_event *evp = aio_ring_event(info, head, KM_USER1);
+-		*ent = *evp;
+-		head = (head + 1) % info->nr;
+-		smp_mb(); /* finish reading the event before updatng the head */
+-		ring->head = head;
+-		ret = 1;
+-		put_aio_ring_event(evp, KM_USER1);
++		if (ent) { /* event requested */
++			struct io_event *evp =
++				aio_ring_event(info, head, KM_USER1);
++			*ent = *evp;
++			head = (head + 1) % info->nr;
++			/* finish reading the event before updating the head */
++			smp_mb();
++			ring->head = head;
++			ret = 1;
++			put_aio_ring_event(evp, KM_USER1);
++		} else /* only need to know availability */
++			ret = 1;
+ 	}
+ 	spin_unlock(&info->ring_lock);
+ 
+@@ -1235,8 +1251,77 @@
+ 
+ 	aio_cancel_all(ioctx);
+ 	wait_for_all_aios(ioctx);
++#ifdef CONFIG_EPOLL
++	/* forget the poll file, but it's up to the user to close it */
++	if (ioctx->file) {
++		ioctx->file->private_data = 0;
++		ioctx->file = 0;
++	}
 +#endif
+ 	put_ioctx(ioctx);	/* once for the lookup */
+ }
 +
-+struct xenbus_dev_transaction {
-+	struct list_head list;
-+	struct xenbus_transaction handle;
-+};
++#ifdef CONFIG_EPOLL
 +
-+struct read_buffer {
-+	struct list_head list;
-+	unsigned int cons;
-+	unsigned int len;
-+	char msg[];
-+};
++static int aio_queue_fd_close(struct inode *inode, struct file *file)
++{
++	struct kioctx *ioctx = file->private_data;
++	if (ioctx) {
++		file->private_data = 0;
++		spin_lock_irq(&ioctx->ctx_lock);
++		ioctx->file = 0;
++		spin_unlock_irq(&ioctx->ctx_lock);
++	}
++	return 0;
++}
 +
-+struct xenbus_dev_data {
-+	/* In-progress transaction. */
-+	struct list_head transactions;
++static unsigned int aio_queue_fd_poll(struct file *file, poll_table *wait)
++{	unsigned int pollflags = 0;
++	struct kioctx *ioctx = file->private_data;
 +
-+	/* Active watches. */
-+	struct list_head watches;
++	if (ioctx) {
 +
-+	/* Partial request. */
-+	unsigned int len;
-+	union {
-+		struct xsd_sockmsg msg;
-+		char buffer[PAGE_SIZE];
-+	} u;
++		spin_lock_irq(&ioctx->ctx_lock);
++		/* Insert inside our poll wait queue */
++		poll_wait(file, &ioctx->poll_wait, wait);
 +
-+	/* Response queue. */
-+	struct list_head read_buffers;
-+	wait_queue_head_t read_waitq;
++		/* Check our condition */
++		if (aio_read_evt(ioctx, 0))
++			pollflags = POLLIN | POLLRDNORM;
++		spin_unlock_irq(&ioctx->ctx_lock);
++	}
 +
-+	struct mutex reply_mutex;
++	return pollflags;
++}
++
++static const struct file_operations aioq_fops = {
++	.release	= aio_queue_fd_close,
++	.poll		= aio_queue_fd_poll
 +};
 +
-+static struct proc_dir_entry *xenbus_dev_intf;
++/* make_aio_fd:
++ *  Create a file descriptor that can be used to poll the event queue.
++ *  Based on and piggybacked onto the excellent epoll code.
++ */
 +
-+static ssize_t xenbus_dev_read(struct file *filp,
-+			       char __user *ubuf,
-+			       size_t len, loff_t *ppos)
++static int make_aio_fd(struct kioctx *ioctx)
 +{
-+	struct xenbus_dev_data *u = filp->private_data;
-+	struct read_buffer *rb;
-+	int i, ret;
-+
-+	mutex_lock(&u->reply_mutex);
-+	while (list_empty(&u->read_buffers)) {
-+		mutex_unlock(&u->reply_mutex);
-+		ret = wait_event_interruptible(u->read_waitq,
-+					       !list_empty(&u->read_buffers));
-+		if (ret)
-+			return ret;
-+		mutex_lock(&u->reply_mutex);
-+	}
++	int error, fd;
++	struct inode *inode;
++	struct file *file;
 +
-+	rb = list_entry(u->read_buffers.next, struct read_buffer, list);
-+	for (i = 0; i < len;) {
-+		put_user(rb->msg[rb->cons], ubuf + i);
-+		i++;
-+		rb->cons++;
-+		if (rb->cons == rb->len) {
-+			list_del(&rb->list);
-+			kfree(rb);
-+			if (list_empty(&u->read_buffers))
-+				break;
-+			rb = list_entry(u->read_buffers.next,
-+					struct read_buffer, list);
-+		}
-+	}
-+	mutex_unlock(&u->reply_mutex);
++	error = ep_getfd(&fd, &inode, &file, NULL, &aioq_fops);
++	if (error)
++		return error;
 +
-+	return i;
++	/* associate the file with the IO context */
++	file->private_data = ioctx;
++	ioctx->file = file;
++	init_waitqueue_head(&ioctx->poll_wait);
++	return fd;
 +}
++#endif
 +
-+static void queue_reply(struct xenbus_dev_data *u,
-+			char *data, unsigned int len)
-+{
-+	struct read_buffer *rb;
+ 
+ /* sys_io_setup:
+  *	Create an aio_context capable of receiving at least nr_events.
+@@ -1250,18 +1335,30 @@
+  *	resources are available.  May fail with -EFAULT if an invalid
+  *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
+  *	implemented.
++ *
++ *	To request a selectable fd, the user context has to be initialized
++ *	to 1, instead of 0, and the return value is the fd.
++ *	This keeps the system call compatible, since a non-zero value
++ *	was not allowed so far.
+  */
+ asmlinkage long sys_io_setup(unsigned nr_events, aio_context_t __user *ctxp)
+ {
+ 	struct kioctx *ioctx = NULL;
+ 	unsigned long ctx;
+ 	long ret;
++	int make_fd = 0;
+ 
+ 	ret = get_user(ctx, ctxp);
+ 	if (unlikely(ret))
+ 		goto out;
+ 
+ 	ret = -EINVAL;
++#ifdef CONFIG_EPOLL
++	if (ctx == 1) {
++		make_fd = 1;
++		ctx = 0;
++	}
++#endif
+ 	if (unlikely(ctx || nr_events == 0)) {
+ 		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
+ 		         ctx, nr_events);
+@@ -1272,8 +1369,12 @@
+ 	ret = PTR_ERR(ioctx);
+ 	if (!IS_ERR(ioctx)) {
+ 		ret = put_user(ioctx->user_id, ctxp);
+-		if (!ret)
+-			return 0;
++#ifdef CONFIG_EPOLL
++		if (make_fd && ret >= 0)
++			ret = make_aio_fd(ioctx);
++#endif
++		if (ret >= 0)
++			return ret;
+ 
+ 		get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
+ 		io_destroy(ioctx);
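
The sys_io_setup() comment above fixes the new user-visible convention:
initialize the context word to 1 instead of 0 and, on a CONFIG_EPOLL
kernel with this patch, the call stores the real context id through
ctxp and returns a pollable file descriptor rather than 0. A minimal
user-space sketch, assuming only the stock 2.6.18 syscall ABI:

    #include <stdio.h>
    #include <unistd.h>
    #include <poll.h>
    #include <sys/syscall.h>
    #include <linux/aio_abi.h>

    int main(void)
    {
    	aio_context_t ctx = 1;	/* 1, not 0: request a selectable fd */
    	struct pollfd pfd;
    	long fd;

    	fd = syscall(__NR_io_setup, 128, &ctx);
    	if (fd < 0) {
    		perror("io_setup");
    		return 1;
    	}
    	/* ctx now holds the real context id; the fd becomes readable
    	 * when completions are pending, so it can sit in a poll/epoll
    	 * set next to sockets before calling io_getevents(). */
    	pfd.fd = fd;
    	pfd.events = POLLIN;
    	poll(&pfd, 1, 0);

    	syscall(__NR_io_destroy, ctx);
    	close(fd);	/* per the patch, closing the fd is the user's job */
    	return 0;
    }
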
+diff -r d894e36cfc30 -r 0aa021803deb fs/binfmt_elf.c
+--- a/fs/binfmt_elf.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/fs/binfmt_elf.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -1179,6 +1179,10 @@
+  */
+ static int maydump(struct vm_area_struct *vma)
+ {
++	/* The vma can be set up to tell us the answer directly.  */
++	if (vma->vm_flags & VM_ALWAYSDUMP)
++		return 1;
 +
-+	if (len == 0)
-+		return;
+ 	/* Do not dump I/O mapped devices or special mappings */
+ 	if (vma->vm_flags & (VM_IO | VM_RESERVED))
+ 		return 0;
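
maydump() now honours a per-vma override before its VM_IO/VM_RESERVED
checks. A hedged sketch of the intended use from a driver's mmap
handler; mydrv_mmap is illustrative and VM_ALWAYSDUMP is assumed to be
defined elsewhere in this tree:

    #include <linux/fs.h>
    #include <linux/mm.h>

    static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
    {
    	/* force this mapping into ELF core dumps even though its
    	 * VM_IO/VM_RESERVED flags would normally exclude it */
    	vma->vm_flags |= VM_ALWAYSDUMP;
    	/* ... remap the device pages as usual ... */
    	return 0;
    }
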
+diff -r d894e36cfc30 -r 0aa021803deb fs/bio.c
+--- a/fs/bio.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/fs/bio.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -112,7 +112,8 @@
+ 
+ 	BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
+ 
+-	mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
++	if (bio->bi_io_vec)
++		mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
+ 	mempool_free(bio, bio_set->bio_pool);
+ }
+ 
+diff -r d894e36cfc30 -r 0aa021803deb fs/compat_ioctl.c
+--- a/fs/compat_ioctl.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/fs/compat_ioctl.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -122,6 +122,13 @@
+ #include <linux/dvb/frontend.h>
+ #include <linux/dvb/video.h>
+ #include <linux/lp.h>
 +
-+	rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
-+	BUG_ON(rb == NULL);
++#ifdef CONFIG_XEN
++#include <xen/interface/xen.h>
++#include <xen/public/evtchn.h>
++#include <xen/public/privcmd.h>
++#include <xen/compat_ioctl.h>
++#endif
+ 
+ /* Aiee. Someone does not find a difference between int and long */
+ #define EXT2_IOC32_GETFLAGS               _IOR('f', 1, int)
+@@ -2899,6 +2906,18 @@
+ /*LPGETSTATS not implemented, but no kernels seem to compile it in anyways*/
+ COMPATIBLE_IOCTL(LPGETFLAGS)
+ HANDLE_IOCTL(LPSETTIMEOUT, lp_timeout_trans)
++
++#ifdef CONFIG_XEN
++HANDLE_IOCTL(IOCTL_PRIVCMD_MMAP_32, privcmd_ioctl_32)
++HANDLE_IOCTL(IOCTL_PRIVCMD_MMAPBATCH_32, privcmd_ioctl_32)
++COMPATIBLE_IOCTL(IOCTL_PRIVCMD_HYPERCALL)
++COMPATIBLE_IOCTL(IOCTL_EVTCHN_BIND_VIRQ)
++COMPATIBLE_IOCTL(IOCTL_EVTCHN_BIND_INTERDOMAIN)
++COMPATIBLE_IOCTL(IOCTL_EVTCHN_BIND_UNBOUND_PORT)
++COMPATIBLE_IOCTL(IOCTL_EVTCHN_UNBIND)
++COMPATIBLE_IOCTL(IOCTL_EVTCHN_NOTIFY)
++COMPATIBLE_IOCTL(IOCTL_EVTCHN_RESET)
++#endif
+ };
+ 
+ int ioctl_table_size = ARRAY_SIZE(ioctl_start);
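
The table above routes the two mapping ioctls through a 32->64-bit
translation handler and passes the rest straight through, so a 32-bit
Xen toolstack keeps working on a 64-bit dom0 kernel. A hedged
user-space sketch of the pass-through case; the struct layout, the
ioctl encoding and opcode 17 mirror this tree's xen/public headers,
but treat the whole snippet as illustrative:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>

    /* mirrors struct privcmd_hypercall from xen/public/privcmd.h */
    typedef struct privcmd_hypercall {
    	unsigned long long op;
    	unsigned long long arg[5];
    } privcmd_hypercall_t;

    #define IOCTL_PRIVCMD_HYPERCALL \
    	_IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t))

    int xen_version(void)
    {
    	privcmd_hypercall_t call = {
    		.op  = 17,	/* __HYPERVISOR_xen_version */
    		.arg = { 0 },	/* XENVER_version */
    	};
    	int fd = open("/proc/xen/privcmd", O_RDWR);
    	int ver;

    	if (fd < 0)
    		return -1;
    	ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
    	close(fd);
    	return ver;	/* (major << 16) | minor on success */
    }
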
+diff -r d894e36cfc30 -r 0aa021803deb fs/eventpoll.c
+--- a/fs/eventpoll.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/fs/eventpoll.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -236,8 +236,6 @@
+ 
+ static void ep_poll_safewake_init(struct poll_safewake *psw);
+ static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq);
+-static int ep_getfd(int *efd, struct inode **einode, struct file **efile,
+-		    struct eventpoll *ep);
+ static int ep_alloc(struct eventpoll **pep);
+ static void ep_free(struct eventpoll *ep);
+ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd);
+@@ -267,7 +265,7 @@
+ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
+ 		   int maxevents, long timeout);
+ static int eventpollfs_delete_dentry(struct dentry *dentry);
+-static struct inode *ep_eventpoll_inode(void);
++static struct inode *ep_eventpoll_inode(const struct file_operations *fops);
+ static int eventpollfs_get_sb(struct file_system_type *fs_type,
+ 			      int flags, const char *dev_name,
+ 			      void *data, struct vfsmount *mnt);
+@@ -517,7 +515,7 @@
+ 	 * Creates all the items needed to setup an eventpoll file. That is,
+ 	 * a file structure, and inode and a free file descriptor.
+ 	 */
+-	error = ep_getfd(&fd, &inode, &file, ep);
++	error = ep_getfd(&fd, &inode, &file, ep, &eventpoll_fops);
+ 	if (error)
+ 		goto eexit_2;
+ 
+@@ -702,8 +700,8 @@
+ /*
+  * Creates the file descriptor to be used by the epoll interface.
+  */
+-static int ep_getfd(int *efd, struct inode **einode, struct file **efile,
+-		    struct eventpoll *ep)
++int ep_getfd(int *efd, struct inode **einode, struct file **efile,
++		    struct eventpoll *ep, const struct file_operations *fops)
+ {
+ 	struct qstr this;
+ 	char name[32];
+@@ -719,7 +717,7 @@
+ 		goto eexit_1;
+ 
+ 	/* Allocates an inode from the eventpoll file system */
+-	inode = ep_eventpoll_inode();
++	inode = ep_eventpoll_inode(fops);
+ 	error = PTR_ERR(inode);
+ 	if (IS_ERR(inode))
+ 		goto eexit_2;
+@@ -750,7 +748,7 @@
+ 
+ 	file->f_pos = 0;
+ 	file->f_flags = O_RDONLY;
+-	file->f_op = &eventpoll_fops;
++	file->f_op = fops;
+ 	file->f_mode = FMODE_READ;
+ 	file->f_version = 0;
+ 	file->private_data = ep;
+@@ -1569,7 +1567,7 @@
+ }
+ 
+ 
+-static struct inode *ep_eventpoll_inode(void)
++static struct inode *ep_eventpoll_inode(const struct file_operations *fops)
+ {
+ 	int error = -ENOMEM;
+ 	struct inode *inode = new_inode(eventpoll_mnt->mnt_sb);
+@@ -1577,7 +1575,7 @@
+ 	if (!inode)
+ 		goto eexit_1;
+ 
+-	inode->i_fop = &eventpoll_fops;
++	inode->i_fop = fops;
+ 
+ 	/*
+ 	 * Mark the inode dirty from the very beginning,
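
The eventpoll change exports ep_getfd() and lets callers supply their
own file_operations; make_aio_fd() in the fs/aio.c hunk above is the
first user. A hedged sketch of the same pattern for any other
subsystem that wants to hand out a pollable fd; all myq_* names are
illustrative:

    #include <linux/fs.h>
    #include <linux/poll.h>
    #include <linux/eventpoll.h>

    static int myq_close(struct inode *inode, struct file *file)
    {
    	file->private_data = NULL;
    	return 0;
    }

    static unsigned int myq_poll(struct file *file, poll_table *wait)
    {
    	/* wait on the queue's waitqueue, report POLLIN when ready */
    	return 0;
    }

    static const struct file_operations myq_fops = {
    	.release = myq_close,
    	.poll    = myq_poll,
    };

    static int make_myq_fd(void *queue)
    {
    	int error, fd;
    	struct inode *inode;
    	struct file *file;

    	/* NULL eventpoll context: we only borrow the fd plumbing */
    	error = ep_getfd(&fd, &inode, &file, NULL, &myq_fops);
    	if (error)
    		return error;
    	file->private_data = queue;	/* recovered in myq_poll() */
    	return fd;
    }
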
+diff -r d894e36cfc30 -r 0aa021803deb fs/lockd/svc.c
+--- a/fs/lockd/svc.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/fs/lockd/svc.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -323,9 +323,6 @@
+  * Sysctl parameters (same as module parameters, different interface).
+  */
+ 
+-/* Something that isn't CTL_ANY, CTL_NONE or a value that may clash. */
+-#define CTL_UNNUMBERED		-2
+-
+ static ctl_table nlm_sysctls[] = {
+ 	{
+ 		.ctl_name	= CTL_UNNUMBERED,
+diff -r d894e36cfc30 -r 0aa021803deb fs/nfs/sysctl.c
+--- a/fs/nfs/sysctl.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/fs/nfs/sysctl.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -18,11 +18,6 @@
+ static const int nfs_set_port_min = 0;
+ static const int nfs_set_port_max = 65535;
+ static struct ctl_table_header *nfs_callback_sysctl_table;
+-/*
+- * Something that isn't CTL_ANY, CTL_NONE or a value that may clash.
+- * Use the same values as fs/lockd/svc.c
+- */
+-#define CTL_UNNUMBERED -2
+ 
+ static ctl_table nfs_cb_sysctls[] = {
+ #ifdef CONFIG_NFS_V4
+diff -r d894e36cfc30 -r 0aa021803deb fs/proc/proc_misc.c
+--- a/fs/proc/proc_misc.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/fs/proc/proc_misc.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -471,7 +471,7 @@
+ 		(unsigned long long)cputime64_to_clock_t(irq),
+ 		(unsigned long long)cputime64_to_clock_t(softirq),
+ 		(unsigned long long)cputime64_to_clock_t(steal));
+-	for_each_online_cpu(i) {
++	for_each_possible_cpu(i) {
+ 
+ 		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
+ 		user = kstat_cpu(i).cpustat.user;
+diff -r d894e36cfc30 -r 0aa021803deb fs/proc/vmcore.c
+--- a/fs/proc/vmcore.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/fs/proc/vmcore.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -514,7 +514,7 @@
+ 	/* Do some basic Verification. */
+ 	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
+ 		(ehdr.e_type != ET_CORE) ||
+-		!elf_check_arch(&ehdr) ||
++		!vmcore_elf_check_arch(&ehdr) ||
+ 		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
+ 		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
+ 		ehdr.e_version != EV_CURRENT ||
+diff -r d894e36cfc30 -r 0aa021803deb fs/xfs/linux-2.6/xfs_buf.c
+--- a/fs/xfs/linux-2.6/xfs_buf.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/fs/xfs/linux-2.6/xfs_buf.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -181,6 +181,19 @@
+ 	void		*addr)
+ {
+ 	a_list_t	*aentry;
 +
-+	rb->cons = 0;
-+	rb->len = len;
++#ifdef CONFIG_XEN
++	/*
++	 * Xen needs to be able to make sure it can get an exclusive
++	 * RO mapping of pages it wants to turn into a pagetable.  If
++	 * a newly allocated page is also still being vmap()ed by xfs,
++	 * it will cause pagetable construction to fail.  This is a
++	 * quick workaround to always eagerly unmap pages so that Xen
++	 * is happy.
++	 */
++	vunmap(addr);
++	return;
++#endif
+ 
+ 	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
+ 	if (likely(aentry)) {
+diff -r d894e36cfc30 -r 0aa021803deb include/acpi/aclocal.h
+--- a/include/acpi/aclocal.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/acpi/aclocal.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -708,7 +708,7 @@
+  * must be preserved.
+  */
+ #define ACPI_PM1_STATUS_PRESERVED_BITS          0x0800	/* Bit 11 */
+-#define ACPI_PM1_CONTROL_PRESERVED_BITS         0x0201	/* Bit 9, Bit 0 (SCI_EN) */
++#define ACPI_PM1_CONTROL_PRESERVED_BITS         0x0200	/* Bit 9 (whatever) */
+ 
+ /*
+  * Register IDs
+diff -r d894e36cfc30 -r 0aa021803deb include/acpi/pdc_intel.h
+--- a/include/acpi/pdc_intel.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/acpi/pdc_intel.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -13,6 +13,7 @@
+ #define ACPI_PDC_SMP_C_SWCOORD		(0x0040)
+ #define ACPI_PDC_SMP_T_SWCOORD		(0x0080)
+ #define ACPI_PDC_C_C1_FFH		(0x0100)
++#define ACPI_PDC_C_C2C3_FFH		(0x0200)
+ 
+ #define ACPI_PDC_EST_CAPABILITY_SMP	(ACPI_PDC_SMP_C1PT | \
+ 					 ACPI_PDC_C_C1_HALT | \
+@@ -25,6 +26,8 @@
+ 
+ #define ACPI_PDC_C_CAPABILITY_SMP	(ACPI_PDC_SMP_C2C3 | \
+ 					 ACPI_PDC_SMP_C1PT | \
+-					 ACPI_PDC_C_C1_HALT)
++					 ACPI_PDC_C_C1_HALT | \
++					 ACPI_PDC_C_C1_FFH | \
++					 ACPI_PDC_C_C2C3_FFH)
+ 
+ #endif				/* __PDC_INTEL_H__ */
+diff -r d894e36cfc30 -r 0aa021803deb include/acpi/processor.h
+--- a/include/acpi/processor.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/acpi/processor.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -21,6 +21,12 @@
+ #define ACPI_PSD_REV0_REVISION		0 /* Support for _PSD as in ACPI 3.0 */
+ #define ACPI_PSD_REV0_ENTRIES		5
+ 
++#ifdef CONFIG_XEN
++#define NR_ACPI_CPUS			(NR_CPUS < 256 ? 256 : NR_CPUS)
++#else
++#define NR_ACPI_CPUS			NR_CPUS
++#endif /* CONFIG_XEN */
 +
-+	memcpy(rb->msg, data, len);
+ /*
+  * Types of coordination defined in ACPI 3.0. Same macros can be used across
+  * P, C and T states
+@@ -32,6 +38,17 @@
+ /* Power Management */
+ 
+ struct acpi_processor_cx;
++
++#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL
++struct acpi_csd_package {
++	acpi_integer num_entries;
++	acpi_integer revision;
++	acpi_integer domain;
++	acpi_integer coord_type;
++	acpi_integer num_processors;
++	acpi_integer index;
++} __attribute__ ((packed));
++#endif
+ 
+ struct acpi_power_register {
+ 	u8 descriptor;
+@@ -63,6 +80,12 @@
+ 	u32 power;
+ 	u32 usage;
+ 	u64 time;
++#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL
++	/* Require raw information for external control logic */
++	struct acpi_power_register reg;
++	u32 csd_count;
++	struct acpi_csd_package *domain_info;
++#endif
+ 	struct acpi_processor_cx_policy promotion;
+ 	struct acpi_processor_cx_policy demotion;
+ };
+@@ -231,6 +254,9 @@
+ {
+ 	return;
+ }
++#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL
++int acpi_processor_ppc_has_changed(struct acpi_processor *pr);
++#else
+ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
+ {
+ 	static unsigned int printout = 1;
+@@ -243,6 +269,7 @@
+ 	}
+ 	return 0;
+ }
++#endif				/* CONFIG_PROCESSOR_EXTERNAL_CONTROL */
+ #endif				/* CONFIG_CPU_FREQ */
+ 
+ /* in processor_throttling.c */
+@@ -275,4 +302,79 @@
+ }
+ #endif
+ 
++/* 
++ * Following are interfaces geared to external processor PM control
++ * logic like a VMM
++ */
++/* Events notified to external control logic */
++#define PROCESSOR_PM_INIT	1
++#define PROCESSOR_PM_CHANGE	2
++#define PROCESSOR_HOTPLUG	3
 +
-+	list_add_tail(&rb->list, &u->read_buffers);
++/* Objects for the PM events */
++#define PM_TYPE_IDLE		0
++#define PM_TYPE_PERF		1
++#define PM_TYPE_THR		2
++#define PM_TYPE_MAX		3
 +
-+	wake_up(&u->read_waitq);
-+}
++/* Processor hotplug events */
++#define HOTPLUG_TYPE_ADD	0
++#define HOTPLUG_TYPE_REMOVE	1
 +
-+struct watch_adapter
-+{
-+	struct list_head list;
-+	struct xenbus_watch watch;
-+	struct xenbus_dev_data *dev_data;
-+	char *token;
++#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL
++struct processor_extcntl_ops {
++	/* Transfer processor PM events to external control logic */
++	int (*pm_ops[PM_TYPE_MAX])(struct acpi_processor *pr, int event);
++	/* Notify physical processor status to external control logic */
++	int (*hotplug)(struct acpi_processor *pr, int event);
 +};
++extern const struct processor_extcntl_ops *processor_extcntl_ops;
 +
-+static void free_watch_adapter (struct watch_adapter *watch)
++static inline int processor_cntl_external(void)
 +{
-+	kfree(watch->watch.node);
-+	kfree(watch->token);
-+	kfree(watch);
++	return (processor_extcntl_ops != NULL);
 +}
 +
-+static void watch_fired(struct xenbus_watch *watch,
-+			const char **vec,
-+			unsigned int len)
++static inline int processor_pm_external(void)
 +{
-+	struct watch_adapter *adap =
-+            container_of(watch, struct watch_adapter, watch);
-+	struct xsd_sockmsg hdr;
-+	const char *path, *token;
-+	int path_len, tok_len, body_len;
-+
-+	path = vec[XS_WATCH_PATH];
-+	token = adap->token;
-+
-+	path_len = strlen(path) + 1;
-+	tok_len = strlen(token) + 1;
-+	body_len = path_len + tok_len;
-+
-+	hdr.type = XS_WATCH_EVENT;
-+	hdr.len = body_len;
-+
-+	mutex_lock(&adap->dev_data->reply_mutex);
-+	queue_reply(adap->dev_data, (char *)&hdr, sizeof(hdr));
-+	queue_reply(adap->dev_data, (char *)path, path_len);
-+	queue_reply(adap->dev_data, (char *)token, tok_len);
-+	mutex_unlock(&adap->dev_data->reply_mutex);
++	return processor_cntl_external() &&
++		(processor_extcntl_ops->pm_ops[PM_TYPE_IDLE] != NULL);
 +}
 +
-+static LIST_HEAD(watch_list);
++static inline int processor_pmperf_external(void)
++{
++	return processor_cntl_external() &&
++		(processor_extcntl_ops->pm_ops[PM_TYPE_PERF] != NULL);
++}
 +
-+static ssize_t xenbus_dev_write(struct file *filp,
-+				const char __user *ubuf,
-+				size_t len, loff_t *ppos)
++static inline int processor_pmthr_external(void)
 +{
-+	struct xenbus_dev_data *u = filp->private_data;
-+	struct xenbus_dev_transaction *trans = NULL;
-+	uint32_t msg_type;
-+	void *reply;
-+	char *path, *token;
-+	struct watch_adapter *watch, *tmp_watch;
-+	int err, rc = len;
++	return processor_cntl_external() &&
++		(processor_extcntl_ops->pm_ops[PM_TYPE_THR] != NULL);
++}
 +
-+	if ((len + u->len) > sizeof(u->u.buffer)) {
-+		rc = -EINVAL;
-+		goto out;
-+	}
++extern int processor_notify_external(struct acpi_processor *pr,
++			int event, int type);
++extern void processor_extcntl_init(void);
++extern int processor_extcntl_prepare(struct acpi_processor *pr);
++extern int acpi_processor_get_performance_info(struct acpi_processor *pr);
++extern int acpi_processor_get_psd(struct acpi_processor *pr);
++void arch_acpi_processor_init_extcntl(const struct processor_extcntl_ops **);
++#else
++static inline int processor_cntl_external(void) {return 0;}
++static inline int processor_pm_external(void) {return 0;}
++static inline int processor_pmperf_external(void) {return 0;}
++static inline int processor_pmthr_external(void) {return 0;}
++static inline int processor_notify_external(struct acpi_processor *pr,
++			int event, int type)
++{
++	return 0;
++}
++static inline void processor_extcntl_init(void) {}
++static inline int processor_extcntl_prepare(struct acpi_processor *pr)
++{
++	return 0;
++}
++#endif /* CONFIG_PROCESSOR_EXTERNAL_CONTROL */
 +
-+	if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0) {
-+		rc = -EFAULT;
-+		goto out;
-+	}
+ #endif
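
processor.h now splits ACPI processor PM into the native path and an
external-control path, with no-op stubs when
CONFIG_PROCESSOR_EXTERNAL_CONTROL is off. A hedged sketch of what an
arch backend could register through arch_acpi_processor_init_extcntl();
the xen_* names are illustrative:

    #include <acpi/processor.h>

    static int xen_pm_notify(struct acpi_processor *pr, int event)
    {
    	/* forward the parsed _CST/_PSS data to the hypervisor here */
    	return 0;
    }

    static int xen_hotplug_notify(struct acpi_processor *pr, int event)
    {
    	/* event is HOTPLUG_TYPE_ADD or HOTPLUG_TYPE_REMOVE */
    	return 0;
    }

    static const struct processor_extcntl_ops xen_extcntl_ops = {
    	.pm_ops[PM_TYPE_IDLE] = xen_pm_notify,
    	.pm_ops[PM_TYPE_PERF] = xen_pm_notify,
    	/* PM_TYPE_THR left NULL: processor_pmthr_external() stays 0 */
    	.hotplug = xen_hotplug_notify,
    };

    void arch_acpi_processor_init_extcntl(const struct processor_extcntl_ops **ops)
    {
    	*ops = &xen_extcntl_ops;  /* processor_cntl_external() turns true */
    }
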
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-generic/pci.h
+--- a/include/asm-generic/pci.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-generic/pci.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -43,7 +43,9 @@
+ 	return root;
+ }
+ 
++#ifndef pcibios_scan_all_fns
+ #define pcibios_scan_all_fns(a, b)	0
++#endif
+ 
+ #ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
+ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-generic/pgtable.h
+--- a/include/asm-generic/pgtable.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-generic/pgtable.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -188,6 +188,10 @@
+ })
+ #endif
+ 
++#ifndef arch_change_pte_range
++#define arch_change_pte_range(mm, pmd, addr, end, newprot) 0
++#endif
 +
-+	u->len += len;
-+	if ((u->len < sizeof(u->u.msg)) ||
-+	    (u->len < (sizeof(u->u.msg) + u->u.msg.len)))
-+		return rc;
+ #ifndef __ASSEMBLY__
+ /*
+  * When walking page tables, we usually want to skip any p?d_none entries;
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-generic/vmlinux.lds.h
+--- a/include/asm-generic/vmlinux.lds.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-generic/vmlinux.lds.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -194,3 +194,6 @@
+ 		.stab.index 0 : { *(.stab.index) }			\
+ 		.stab.indexstr 0 : { *(.stab.indexstr) }		\
+ 		.comment 0 : { *(.comment) }
 +
-+	msg_type = u->u.msg.type;
++#define NOTES								\
++		.notes : { *(.note.*) } :note
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/acpi.h
+--- a/include/asm-i386/acpi.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-i386/acpi.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -31,6 +31,9 @@
+ #include <acpi/pdc_intel.h>
+ 
+ #include <asm/system.h>		/* defines cmpxchg */
++#ifdef CONFIG_XEN
++#include <xen/interface/platform.h>
++#endif
+ 
+ #define COMPILER_DEPENDENT_INT64   long long
+ #define COMPILER_DEPENDENT_UINT64  unsigned long long
+@@ -156,6 +159,27 @@
+ }
+ extern int acpi_irq_balance_set(char *str);
+ 
++#ifdef CONFIG_XEN
++static inline int acpi_notify_hypervisor_state(u8 sleep_state,
++					       u32 pm1a_cnt_val,
++					       u32 pm1b_cnt_val)
++{
++	struct xen_platform_op op = {
++		.cmd = XENPF_enter_acpi_sleep,
++		.interface_version = XENPF_INTERFACE_VERSION,
++		.u = {
++			.enter_acpi_sleep = {
++				.pm1a_cnt_val = pm1a_cnt_val,
++				.pm1b_cnt_val = pm1b_cnt_val,
++				.sleep_state = sleep_state,
++			},
++		},
++	};
 +
-+	switch (msg_type) {
-+	case XS_TRANSACTION_START:
-+	case XS_TRANSACTION_END:
-+	case XS_DIRECTORY:
-+	case XS_READ:
-+	case XS_GET_PERMS:
-+	case XS_RELEASE:
-+	case XS_GET_DOMAIN_PATH:
-+	case XS_WRITE:
-+	case XS_MKDIR:
-+	case XS_RM:
-+	case XS_SET_PERMS:
-+		if (msg_type == XS_TRANSACTION_START) {
-+			trans = kmalloc(sizeof(*trans), GFP_KERNEL);
-+			if (!trans) {
-+				rc = -ENOMEM;
-+				goto out;
-+			}
-+		}
++	return HYPERVISOR_platform_op(&op);
++}
++#endif /* CONFIG_XEN */
 +
-+		reply = xenbus_dev_request_and_reply(&u->u.msg);
-+		if (IS_ERR(reply)) {
-+			kfree(trans);
-+			rc = PTR_ERR(reply);
-+			goto out;
-+		}
+ #else	/* !CONFIG_ACPI */
+ 
+ #define acpi_lapic 0
+@@ -181,7 +205,9 @@
+ 
+ extern u8 x86_acpiid_to_apicid[];
+ 
++#ifndef CONFIG_XEN
+ #define ARCH_HAS_POWER_INIT	1
++#endif
+ 
+ #endif /*__KERNEL__*/
+ 
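
acpi_notify_hypervisor_state() lets the ACPI sleep path hand the final
PM1 control writes to Xen as one platform hypercall. A hedged sketch of
the call site it is meant for, mirroring the shape of
drivers/acpi/hardware/hwsleep.c; enter_sleep and its arguments are
illustrative:

    static acpi_status enter_sleep(u8 sleep_state, u32 pm1a, u32 pm1b)
    {
    #ifdef CONFIG_XEN
    	/* the hypervisor performs the actual sleep transition */
    	return acpi_notify_hypervisor_state(sleep_state, pm1a, pm1b)
    		? AE_ERROR : AE_OK;
    #else
    	/* native: write the chipset registers directly */
    	acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
    			       ACPI_REGISTER_PM1A_CONTROL, pm1a);
    	acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
    			       ACPI_REGISTER_PM1B_CONTROL, pm1b);
    	return AE_OK;
    #endif
    }
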
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/agp.h
+--- a/include/asm-i386/agp.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-i386/agp.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -12,8 +12,10 @@
+  * data corruption on some CPUs.
+  */
+ 
+-int map_page_into_agp(struct page *page);
+-int unmap_page_from_agp(struct page *page);
++/* Caller's responsibility to call global_flush_tlb() for
++ * performance reasons */
++#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)
++#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL)
+ #define flush_agp_mappings() global_flush_tlb()
+ 
+ /* Could use CLFLUSH here if the cpu supports it. But then it would
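
map_page_into_agp()/unmap_page_from_agp() become change_page_attr()
wrappers that deliberately skip the TLB flush; per the comment, the
caller batches the attribute changes and flushes once. A short sketch
of that calling convention (agp_map_pages is illustrative):

    static void agp_map_pages(struct page **pages, int nr)
    {
    	int i;

    	for (i = 0; i < nr; i++)
    		map_page_into_agp(pages[i]);	/* attribute change only */
    	global_flush_tlb();	/* one flush for the whole batch */
    }
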
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/apic.h
+--- a/include/asm-i386/apic.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-i386/apic.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -119,10 +119,12 @@
+ 
+ extern int disable_timer_pin_1;
+ 
++#ifndef CONFIG_XEN
+ void smp_send_timer_broadcast_ipi(struct pt_regs *regs);
+ void switch_APIC_timer_to_ipi(void *cpumask);
+ void switch_ipi_to_APIC_timer(void *cpumask);
+ #define ARCH_APICTIMER_STOPS_ON_C3	1
++#endif
+ 
+ extern int timer_over_8254;
+ 
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/e820.h
+--- a/include/asm-i386/e820.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-i386/e820.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -38,6 +38,7 @@
+ 
+ extern int e820_all_mapped(unsigned long start, unsigned long end,
+ 			   unsigned type);
++extern int e820_any_mapped(u64 start, u64 end, unsigned type);
+ 
+ #endif/*!__ASSEMBLY__*/
+ 
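
e820_any_mapped() complements e820_all_mapped(): it reports whether any
byte of the range carries the given type rather than all of it. A
hedged usage sketch (claim_mmio_window is illustrative):

    #include <asm/e820.h>

    static int __init claim_mmio_window(u64 start, u64 size)
    {
    	/* refuse a window that overlaps real RAM anywhere */
    	if (e820_any_mapped(start, start + size, E820_RAM))
    		return -EBUSY;
    	return 0;
    }
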
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/elf.h
+--- a/include/asm-i386/elf.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-i386/elf.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -169,50 +169,6 @@
+ 		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_COMPAT_BASE);	\
+ } while (0)
+ 
+-/*
+- * These macros parameterize elf_core_dump in fs/binfmt_elf.c to write out
+- * extra segments containing the vsyscall DSO contents.  Dumping its
+- * contents makes post-mortem fully interpretable later without matching up
+- * the same kernel and hardware config to see what PC values meant.
+- * Dumping its extra ELF program headers includes all the other information
+- * a debugger needs to easily find how the vsyscall DSO was being used.
+- */
+-#define ELF_CORE_EXTRA_PHDRS		(VDSO_HIGH_EHDR->e_phnum)
+-#define ELF_CORE_WRITE_EXTRA_PHDRS					      \
+-do {									      \
+-	const struct elf_phdr *const vsyscall_phdrs =			      \
+-		(const struct elf_phdr *) (VDSO_HIGH_BASE		      \
+-					   + VDSO_HIGH_EHDR->e_phoff);    \
+-	int i;								      \
+-	Elf32_Off ofs = 0;						      \
+-	for (i = 0; i < VDSO_HIGH_EHDR->e_phnum; ++i) {		      \
+-		struct elf_phdr phdr = vsyscall_phdrs[i];		      \
+-		if (phdr.p_type == PT_LOAD) {				      \
+-			BUG_ON(ofs != 0);				      \
+-			ofs = phdr.p_offset = offset;			      \
+-			phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz);	      \
+-			phdr.p_filesz = phdr.p_memsz;			      \
+-			offset += phdr.p_filesz;			      \
+-		}							      \
+-		else							      \
+-			phdr.p_offset += ofs;				      \
+-		phdr.p_paddr = 0; /* match other core phdrs */		      \
+-		DUMP_WRITE(&phdr, sizeof(phdr));			      \
+-	}								      \
+-} while (0)
+-#define ELF_CORE_WRITE_EXTRA_DATA					      \
+-do {									      \
+-	const struct elf_phdr *const vsyscall_phdrs =			      \
+-		(const struct elf_phdr *) (VDSO_HIGH_BASE		      \
+-					   + VDSO_HIGH_EHDR->e_phoff);    \
+-	int i;								      \
+-	for (i = 0; i < VDSO_HIGH_EHDR->e_phnum; ++i) {		      \
+-		if (vsyscall_phdrs[i].p_type == PT_LOAD)		      \
+-			DUMP_WRITE((void *) vsyscall_phdrs[i].p_vaddr,	      \
+-				   PAGE_ALIGN(vsyscall_phdrs[i].p_memsz));    \
+-	}								      \
+-} while (0)
+-
+ #endif
+ 
+ #endif
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/fixmap.h
+--- a/include/asm-i386/fixmap.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-i386/fixmap.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -19,7 +19,7 @@
+  * Leave one empty page between vmalloc'ed areas and
+  * the start of the fixmap.
+  */
+-#define __FIXADDR_TOP	0xfffff000
++extern unsigned long __FIXADDR_TOP;
+ 
+ #ifndef __ASSEMBLY__
+ #include <linux/kernel.h>
+@@ -94,6 +94,8 @@
+ extern void __set_fixmap (enum fixed_addresses idx,
+ 					unsigned long phys, pgprot_t flags);
+ 
++extern void set_fixaddr_top(unsigned long top);
 +
-+		if (msg_type == XS_TRANSACTION_START) {
-+			trans->handle.id = simple_strtoul(reply, NULL, 0);
-+			list_add(&trans->list, &u->transactions);
-+		} else if (msg_type == XS_TRANSACTION_END) {
-+			list_for_each_entry(trans, &u->transactions, list)
-+				if (trans->handle.id == u->u.msg.tx_id)
-+					break;
-+			BUG_ON(&trans->list == &u->transactions);
-+			list_del(&trans->list);
-+			kfree(trans);
-+		}
-+		mutex_lock(&u->reply_mutex);
-+		queue_reply(u, (char *)&u->u.msg, sizeof(u->u.msg));
-+		queue_reply(u, (char *)reply, u->u.msg.len);
-+		mutex_unlock(&u->reply_mutex);
-+		kfree(reply);
-+		break;
+ #define set_fixmap(idx, phys) \
+ 		__set_fixmap(idx, phys, PAGE_KERNEL)
+ /*
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/io_apic.h
+--- a/include/asm-i386/io_apic.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-i386/io_apic.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -12,7 +12,7 @@
+ 
+ #ifdef CONFIG_X86_IO_APIC
+ 
+-#ifdef CONFIG_PCI_MSI
++#if defined(CONFIG_PCI_MSI) && !defined(CONFIG_XEN)
+ static inline int use_pci_vector(void)	{return 1;}
+ static inline void disable_edge_ioapic_vector(unsigned int vector) { }
+ static inline void mask_and_ack_level_ioapic_vector(unsigned int vector) { }
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/kexec.h
+--- a/include/asm-i386/kexec.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-i386/kexec.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -1,5 +1,25 @@
+ #ifndef _I386_KEXEC_H
+ #define _I386_KEXEC_H
 +
-+	case XS_WATCH:
-+	case XS_UNWATCH: {
-+		static const char *XS_RESP = "OK";
-+		struct xsd_sockmsg hdr;
++#define PA_CONTROL_PAGE  0
++#define VA_CONTROL_PAGE  1
++#define PA_PGD           2
++#define VA_PGD           3
++#define PA_PTE_0         4
++#define VA_PTE_0         5
++#define PA_PTE_1         6
++#define VA_PTE_1         7
++#ifdef CONFIG_X86_PAE
++#define PA_PMD_0         8
++#define VA_PMD_0         9
++#define PA_PMD_1         10
++#define VA_PMD_1         11
++#define PAGES_NR         12
++#else
++#define PAGES_NR         8
++#endif
 +
-+		path = u->u.buffer + sizeof(u->u.msg);
-+		token = memchr(path, 0, u->u.msg.len);
-+		if (token == NULL) {
-+			rc = -EILSEQ;
-+			goto out;
-+		}
-+		token++;
++#ifndef __ASSEMBLY__
+ 
+ #include <asm/fixmap.h>
+ #include <asm/ptrace.h>
+@@ -26,6 +46,9 @@
+ 
+ /* The native architecture */
+ #define KEXEC_ARCH KEXEC_ARCH_386
 +
-+		if (msg_type == XS_WATCH) {
-+			watch = kmalloc(sizeof(*watch), GFP_KERNEL);
-+			watch->watch.node = kmalloc(strlen(path)+1,
-+                                                    GFP_KERNEL);
-+			strcpy((char *)watch->watch.node, path);
-+			watch->watch.callback = watch_fired;
-+			watch->token = kmalloc(strlen(token)+1, GFP_KERNEL);
-+			strcpy(watch->token, token);
-+			watch->dev_data = u;
++/* We can also handle crash dumps from 64 bit kernel. */
++#define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
+ 
+ #define MAX_NOTE_BYTES 1024
+ 
+@@ -72,5 +95,26 @@
+                newregs->eip = (unsigned long)current_text_addr();
+        }
+ }
++asmlinkage NORET_TYPE void
++relocate_kernel(unsigned long indirection_page,
++		unsigned long control_page,
++		unsigned long start_address,
++		unsigned int has_pae) ATTRIB_NORET;
 +
-+			err = register_xenbus_watch(&watch->watch);
-+			if (err) {
-+				free_watch_adapter(watch);
-+				rc = err;
-+				goto out;
-+			}
-+			
-+			list_add(&watch->list, &u->watches);
-+		} else {
-+			list_for_each_entry_safe(watch, tmp_watch,
-+                                                 &u->watches, list) {
-+				if (!strcmp(watch->token, token) &&
-+				    !strcmp(watch->watch.node, path))
-+				{
-+					unregister_xenbus_watch(&watch->watch);
-+					list_del(&watch->list);
-+					free_watch_adapter(watch);
-+					break;
-+				}
-+			}
-+		}
 +
-+		hdr.type = msg_type;
-+		hdr.len = strlen(XS_RESP) + 1;
-+		mutex_lock(&u->reply_mutex);
-+		queue_reply(u, (char *)&hdr, sizeof(hdr));
-+		queue_reply(u, (char *)XS_RESP, hdr.len);
-+		mutex_unlock(&u->reply_mutex);
-+		break;
-+	}
++/* Under Xen we need to work with machine addresses. These macros give the
++ * machine address of a certain page to the generic kexec code instead of 
++ * the pseudo physical address which would be given by the default macros.
++ */
 +
-+	default:
-+		rc = -EINVAL;
-+		break;
-+	}
++#ifdef CONFIG_XEN
++#define KEXEC_ARCH_HAS_PAGE_MACROS
++#define kexec_page_to_pfn(page)  pfn_to_mfn(page_to_pfn(page))
++#define kexec_pfn_to_page(pfn)   pfn_to_page(mfn_to_pfn(pfn))
++#define kexec_virt_to_phys(addr) virt_to_machine(addr)
++#define kexec_phys_to_virt(addr) phys_to_virt(machine_to_phys(addr))
++#endif
 +
-+ out:
-+	u->len = 0;
-+	return rc;
++#endif /* __ASSEMBLY__ */
+ 
+ #endif /* _I386_KEXEC_H */
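
With KEXEC_ARCH_HAS_PAGE_MACROS defined, generic kexec records machine
addresses under Xen; a pseudo-physical frame number would be
meaningless to the relocated kernel. A small sketch of the effect
(kexec_entry_for is illustrative):

    static unsigned long kexec_entry_for(struct page *page)
    {
    	/* natively this is page_to_pfn(); under Xen pfn_to_mfn() runs
    	 * first, so the indirection page holds a machine address */
    	return kexec_page_to_pfn(page) << PAGE_SHIFT;
    }
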
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-default/mach_traps.h
+--- a/include/asm-i386/mach-default/mach_traps.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-i386/mach-default/mach_traps.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -12,6 +12,18 @@
+ static inline void clear_mem_error(unsigned char reason)
+ {
+ 	reason = (reason & 0xf) | 4;
++	outb(reason, 0x61);
 +}
 +
-+static int xenbus_dev_open(struct inode *inode, struct file *filp)
++static inline void clear_io_check_error(unsigned char reason)
 +{
-+	struct xenbus_dev_data *u;
++	unsigned long i;
 +
-+	if (xen_store_evtchn == 0)
-+		return -ENOENT;
++	reason = (reason & 0xf) | 8;
++	outb(reason, 0x61);
++	i = 2000;
++	while (--i) udelay(1000);
++	reason &= ~8;
+ 	outb(reason, 0x61);
+ }
+ 
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/agp.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/agp.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,44 @@
++#ifndef AGP_H
++#define AGP_H 1
 +
-+	nonseekable_open(inode, filp);
++#include <asm/pgtable.h>
++#include <asm/cacheflush.h>
++#include <asm/system.h>
 +
-+	u = kzalloc(sizeof(*u), GFP_KERNEL);
-+	if (u == NULL)
-+		return -ENOMEM;
++/* 
++ * Functions to keep the agpgart mappings coherent with the MMU.
++ * The GART gives the CPU a physical alias of pages in memory. The alias region is
++ * mapped uncacheable. Make sure there are no conflicting mappings
++ * with different cacheability attributes for the same page. This avoids
++ * data corruption on some CPUs.
++ */
 +
-+	INIT_LIST_HEAD(&u->transactions);
-+	INIT_LIST_HEAD(&u->watches);
-+	INIT_LIST_HEAD(&u->read_buffers);
-+	init_waitqueue_head(&u->read_waitq);
++/* Caller's responsibility to call global_flush_tlb() for
++ * performance reasons */
++#define map_page_into_agp(page) ( \
++	xen_create_contiguous_region((unsigned long)page_address(page), 0, 32) \
++	?: change_page_attr(page, 1, PAGE_KERNEL_NOCACHE))
++#define unmap_page_from_agp(page) ( \
++	xen_destroy_contiguous_region((unsigned long)page_address(page), 0), \
++	/* only a fallback: xen_destroy_contiguous_region uses PAGE_KERNEL */ \
++	change_page_attr(page, 1, PAGE_KERNEL))
++#define flush_agp_mappings() global_flush_tlb()
 +
-+	mutex_init(&u->reply_mutex);
++/* Could use CLFLUSH here if the cpu supports it. But then it would
++   need to be called for each cacheline of the whole page so it may not be 
++   worth it. Would need a page for it. */
++#define flush_agp_cache() wbinvd()
 +
-+	filp->private_data = u;
++/* Convert a physical address to an address suitable for the GART. */
++#define phys_to_gart(x) phys_to_machine(x)
++#define gart_to_phys(x) machine_to_phys(x)
 +
-+	return 0;
-+}
++/* GATT allocation. Returns/accepts GATT kernel virtual address. */
++#define alloc_gatt_pages(order)	({                                          \
++	char *_t; dma_addr_t _d;                                            \
++	_t = dma_alloc_coherent(NULL,PAGE_SIZE<<(order),&_d,GFP_KERNEL);    \
++	_t; })
++#define free_gatt_pages(table, order)	\
++	dma_free_coherent(NULL,PAGE_SIZE<<(order),(table),virt_to_bus(table))
 +
-+static int xenbus_dev_release(struct inode *inode, struct file *filp)
-+{
-+	struct xenbus_dev_data *u = filp->private_data;
-+	struct xenbus_dev_transaction *trans, *tmp;
-+	struct watch_adapter *watch, *tmp_watch;
++#endif
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/desc.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/desc.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,166 @@
++#ifndef __ARCH_DESC_H
++#define __ARCH_DESC_H
 +
-+	list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
-+		xenbus_transaction_end(trans->handle, 1);
-+		list_del(&trans->list);
-+		kfree(trans);
-+	}
++#include <asm/ldt.h>
++#include <asm/segment.h>
 +
-+	list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
-+		unregister_xenbus_watch(&watch->watch);
-+		list_del(&watch->list);
-+		free_watch_adapter(watch);
-+	}
++#define CPU_16BIT_STACK_SIZE 1024
 +
-+	kfree(u);
++#ifndef __ASSEMBLY__
 +
-+	return 0;
-+}
++#include <linux/preempt.h>
++#include <linux/smp.h>
 +
-+static unsigned int xenbus_dev_poll(struct file *file, poll_table *wait)
-+{
-+	struct xenbus_dev_data *u = file->private_data;
++#include <asm/mmu.h>
 +
-+	poll_wait(file, &u->read_waitq, wait);
-+	if (!list_empty(&u->read_buffers))
-+		return POLLIN | POLLRDNORM;
-+	return 0;
-+}
++extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
 +
-+static const struct file_operations xenbus_dev_file_ops = {
-+	.read = xenbus_dev_read,
-+	.write = xenbus_dev_write,
-+	.open = xenbus_dev_open,
-+	.release = xenbus_dev_release,
-+	.poll = xenbus_dev_poll,
-+};
++DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
 +
-+int xenbus_dev_init(void)
-+{
-+	xenbus_dev_intf = create_xen_proc_entry("xenbus", 0400);
-+	if (xenbus_dev_intf)
-+		xenbus_dev_intf->proc_fops = &xenbus_dev_file_ops;
++struct Xgt_desc_struct {
++	unsigned short size;
++	unsigned long address __attribute__((packed));
++	unsigned short pad;
++} __attribute__ ((packed));
 +
-+	return 0;
-+}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/xenbus/xenbus_probe_backend.c tmp-linux-2.6-xen.patch/drivers/xen/xenbus/xenbus_probe_backend.c
---- pristine-linux-2.6.18.2/drivers/xen/xenbus/xenbus_probe_backend.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/xenbus/xenbus_probe_backend.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,287 @@
-+/******************************************************************************
-+ * Talks to Xen Store to figure out what devices we have (backend half).
-+ *
-+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
-+ * Copyright (C) 2005 Mike Wray, Hewlett-Packard
-+ * Copyright (C) 2005, 2006 XenSource Ltd
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
++extern struct Xgt_desc_struct idt_descr;
++DECLARE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
 +
-+#define DPRINTK(fmt, args...)				\
-+	pr_debug("xenbus_probe (%s:%d) " fmt ".\n",	\
-+		 __FUNCTION__, __LINE__, ##args)
 +
-+#include <linux/kernel.h>
-+#include <linux/err.h>
-+#include <linux/string.h>
-+#include <linux/ctype.h>
-+#include <linux/fcntl.h>
-+#include <linux/mm.h>
-+#include <linux/notifier.h>
-+#include <linux/kthread.h>
++static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
++{
++	return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
++}
 +
-+#include <asm/io.h>
-+#include <asm/page.h>
-+#include <asm/maddr.h>
-+#include <asm/pgtable.h>
-+#include <asm/hypervisor.h>
-+#include <xen/xenbus.h>
-+#include <xen/xen_proc.h>
-+#include <xen/evtchn.h>
-+#include <xen/features.h>
-+#include <xen/hvm.h>
++#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
++#define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
 +
-+#include "xenbus_comms.h"
-+#include "xenbus_probe.h"
++#define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
++#define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
++#define load_tr(tr) __asm__ __volatile("ltr %0"::"mr" (tr))
++#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"mr" (ldt))
 +
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
++#define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
++#define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
++#define store_tr(tr) __asm__ ("str %0":"=mr" (tr))
++#define store_ldt(ldt) __asm__ ("sldt %0":"=mr" (ldt))
 +
-+static int xenbus_uevent_backend(struct device *dev, char **envp,
-+				 int num_envp, char *buffer, int buffer_size);
-+static int xenbus_probe_backend(const char *type, const char *domid);
++/*
++ * This is the ldt that every process will get unless we need
++ * something other than this.
++ */
++extern struct desc_struct default_ldt[];
++extern void set_intr_gate(unsigned int irq, void * addr);
 +
-+extern int read_otherend_details(struct xenbus_device *xendev,
-+				 char *id_node, char *path_node);
++#define _set_tssldt_desc(n,addr,limit,type) \
++__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
++	"movw %w1,2(%2)\n\t" \
++	"rorl $16,%1\n\t" \
++	"movb %b1,4(%2)\n\t" \
++	"movb %4,5(%2)\n\t" \
++	"movb $0,6(%2)\n\t" \
++	"movb %h1,7(%2)\n\t" \
++	"rorl $16,%1" \
++	: "=m"(*(n)) : "q" (addr), "r"(n), "ir"(limit), "i"(type))
 +
-+static int read_frontend_details(struct xenbus_device *xendev)
++#ifndef CONFIG_X86_NO_TSS
++static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
 +{
-+	return read_otherend_details(xendev, "frontend-id", "frontend");
++	_set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
++		offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
 +}
 +
-+/* backend/<type>/<fe-uuid>/<id> => <type>-<fe-domid>-<id> */
-+static int backend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
++#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
++#endif
++
++static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
 +{
-+	int domid, err;
-+	const char *devid, *type, *frontend;
-+	unsigned int typelen;
++	_set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
++}
 +
-+	type = strchr(nodename, '/');
-+	if (!type)
-+		return -EINVAL;
-+	type++;
-+	typelen = strcspn(type, "/");
-+	if (!typelen || type[typelen] != '/')
-+		return -EINVAL;
++#define LDT_entry_a(info) \
++	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
 +
-+	devid = strrchr(nodename, '/') + 1;
++#define LDT_entry_b(info) \
++	(((info)->base_addr & 0xff000000) | \
++	(((info)->base_addr & 0x00ff0000) >> 16) | \
++	((info)->limit & 0xf0000) | \
++	(((info)->read_exec_only ^ 1) << 9) | \
++	((info)->contents << 10) | \
++	(((info)->seg_not_present ^ 1) << 15) | \
++	((info)->seg_32bit << 22) | \
++	((info)->limit_in_pages << 23) | \
++	((info)->useable << 20) | \
++	0x7000)
 +
-+	err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid,
-+			    "frontend", NULL, &frontend,
-+			    NULL);
-+	if (err)
-+		return err;
-+	if (strlen(frontend) == 0)
-+		err = -ERANGE;
-+	if (!err && !xenbus_exists(XBT_NIL, frontend, ""))
-+		err = -ENOENT;
-+	kfree(frontend);
++#define LDT_empty(info) (\
++	(info)->base_addr	== 0	&& \
++	(info)->limit		== 0	&& \
++	(info)->contents	== 0	&& \
++	(info)->read_exec_only	== 1	&& \
++	(info)->seg_32bit	== 0	&& \
++	(info)->limit_in_pages	== 0	&& \
++	(info)->seg_not_present	== 1	&& \
++	(info)->useable		== 0	)
 +
-+	if (err)
-+		return err;
++extern int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b);
 +
-+	if (snprintf(bus_id, BUS_ID_SIZE,
-+		     "%.*s-%i-%s", typelen, type, domid, devid) >= BUS_ID_SIZE)
-+		return -ENOSPC;
-+	return 0;
++#if TLS_SIZE != 24
++# error update this code.
++#endif
++
++static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
++{
++#define C(i) if (HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), \
++					       *(u64 *)&t->tls_array[i])) \
++		BUG();
++	C(0); C(1); C(2);
++#undef C
 +}
 +
-+static struct xen_bus_type xenbus_backend = {
-+	.root = "backend",
-+	.levels = 3, 		/* backend/type/<frontend>/<id> */
-+	.get_bus_id = backend_bus_id,
-+	.probe = xenbus_probe_backend,
-+	.error = -ENODEV,
-+	.bus = {
-+		.name     = "xen-backend",
-+		.match    = xenbus_match,
-+		.probe    = xenbus_dev_probe,
-+		.remove   = xenbus_dev_remove,
-+//		.shutdown = xenbus_dev_shutdown,
-+		.uevent   = xenbus_uevent_backend,
-+	},
-+	.dev = {
-+		.bus_id = "xen-backend",
-+	},
-+};
++static inline void clear_LDT(void)
++{
++	int cpu = get_cpu();
 +
-+static int xenbus_uevent_backend(struct device *dev, char **envp,
-+				 int num_envp, char *buffer, int buffer_size)
++	/*
++	 * NB. We load the default_ldt for lcall7/27 handling on demand, as
++	 * it slows down context switching. No one uses it anyway.
++	 */
++	cpu = cpu;		/* XXX avoid compiler warning */
++	xen_set_ldt(NULL, 0);
++	put_cpu();
++}
++
++/*
++ * load one particular LDT into the current CPU
++ */
++static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
 +{
-+	struct xenbus_device *xdev;
-+	struct xenbus_driver *drv;
-+	int i = 0;
-+	int length = 0;
++	void *segments = pc->ldt;
++	int count = pc->size;
 +
-+	DPRINTK("");
++	if (likely(!count))
++		segments = NULL;
 +
-+	if (dev == NULL)
-+		return -ENODEV;
++	xen_set_ldt(segments, count);
++}
 +
-+	xdev = to_xenbus_device(dev);
-+	if (xdev == NULL)
-+		return -ENODEV;
++static inline void load_LDT(mm_context_t *pc)
++{
++	int cpu = get_cpu();
++	load_LDT_nolock(pc, cpu);
++	put_cpu();
++}
 +
-+	/* stuff we want to pass to /sbin/hotplug */
-+	add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
-+		       "XENBUS_TYPE=%s", xdev->devicetype);
++static inline unsigned long get_desc_base(unsigned long *desc)
++{
++	unsigned long base;
++	base = ((desc[0] >> 16)  & 0x0000ffff) |
++		((desc[1] << 16) & 0x00ff0000) |
++		(desc[1] & 0xff000000);
++	return base;
++}
 +
-+	add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
-+		       "XENBUS_PATH=%s", xdev->nodename);
++#endif /* !__ASSEMBLY__ */
 +
-+	add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
-+		       "XENBUS_BASE_PATH=%s", xenbus_backend.root);
++#endif
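
Every descriptor write in the Xen desc.h goes through a hypercall,
because the hypervisor keeps the pages backing a loaded guest GDT
read-only. A hedged sketch of a single-slot update following the
load_TLS() pattern above (set_gdt_entry is illustrative):

    static void set_gdt_entry(unsigned int cpu, unsigned int entry, u64 desc)
    {
    	/* a direct store into cpu_gdt_table would fault; Xen wants the
    	 * machine address of the slot plus the new 8-byte descriptor */
    	if (HYPERVISOR_update_descriptor(
    		    virt_to_machine(&get_cpu_gdt_table(cpu)[entry]), desc))
    		BUG();
    }
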
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/dma-mapping.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/dma-mapping.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,151 @@
++#ifndef _ASM_I386_DMA_MAPPING_H
++#define _ASM_I386_DMA_MAPPING_H
 +
-+	/* terminate, set to next free slot, shrink available space */
-+	envp[i] = NULL;
-+	envp = &envp[i];
-+	num_envp -= i;
-+	buffer = &buffer[length];
-+	buffer_size -= length;
++/*
++ * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
++ * documentation.
++ */
 +
-+	if (dev->driver) {
-+		drv = to_xenbus_driver(dev->driver);
-+		if (drv && drv->uevent)
-+			return drv->uevent(xdev, envp, num_envp, buffer,
-+					   buffer_size);
-+	}
++#include <linux/mm.h>
++#include <asm/cache.h>
++#include <asm/io.h>
++#include <asm/scatterlist.h>
++#include <asm/swiotlb.h>
 +
-+	return 0;
++static inline int
++address_needs_mapping(struct device *hwdev, dma_addr_t addr)
++{
++	dma_addr_t mask = 0xffffffff;
++	/* If the device has a mask, use it, otherwise default to 32 bits */
++	if (hwdev && hwdev->dma_mask)
++		mask = *hwdev->dma_mask;
++	return (addr & ~mask) != 0;
 +}
 +
-+int xenbus_register_backend(struct xenbus_driver *drv)
-+{
-+	drv->read_otherend_details = read_frontend_details;
++extern int range_straddles_page_boundary(paddr_t p, size_t size);
 +
-+	return xenbus_register_driver_common(drv, &xenbus_backend);
-+}
-+EXPORT_SYMBOL_GPL(xenbus_register_backend);
++#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
++#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 +
-+/* backend/<typename>/<frontend-uuid>/<name> */
-+static int xenbus_probe_backend_unit(const char *dir,
-+				     const char *type,
-+				     const char *name)
-+{
-+	char *nodename;
-+	int err;
++void *dma_alloc_coherent(struct device *dev, size_t size,
++			   dma_addr_t *dma_handle, gfp_t flag);
 +
-+	nodename = kasprintf(GFP_KERNEL, "%s/%s", dir, name);
-+	if (!nodename)
-+		return -ENOMEM;
++void dma_free_coherent(struct device *dev, size_t size,
++			 void *vaddr, dma_addr_t dma_handle);
 +
-+	DPRINTK("%s\n", nodename);
++extern dma_addr_t
++dma_map_single(struct device *dev, void *ptr, size_t size,
++	       enum dma_data_direction direction);
 +
-+	err = xenbus_probe_node(&xenbus_backend, type, nodename);
-+	kfree(nodename);
-+	return err;
-+}
++extern void
++dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
++		 enum dma_data_direction direction);
 +
-+/* backend/<typename>/<frontend-domid> */
-+static int xenbus_probe_backend(const char *type, const char *domid)
-+{
-+	char *nodename;
-+	int err = 0;
-+	char **dir;
-+	unsigned int i, dir_n = 0;
++extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
++		      int nents, enum dma_data_direction direction);
++extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
++			 int nents, enum dma_data_direction direction);
 +
-+	DPRINTK("");
++#ifdef CONFIG_HIGHMEM
++extern dma_addr_t
++dma_map_page(struct device *dev, struct page *page, unsigned long offset,
++	     size_t size, enum dma_data_direction direction);
 +
-+	nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_backend.root, type, domid);
-+	if (!nodename)
-+		return -ENOMEM;
++extern void
++dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
++	       enum dma_data_direction direction);
++#else
++#define dma_map_page(dev, page, offset, size, dir) \
++	dma_map_single(dev, page_address(page) + (offset), (size), (dir))
++#define dma_unmap_page dma_unmap_single
++#endif
 +
-+	dir = xenbus_directory(XBT_NIL, nodename, "", &dir_n);
-+	if (IS_ERR(dir)) {
-+		kfree(nodename);
-+		return PTR_ERR(dir);
-+	}
++extern void
++dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
++			enum dma_data_direction direction);
++
++extern void
++dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
++                           enum dma_data_direction direction);
 +
-+	for (i = 0; i < dir_n; i++) {
-+		err = xenbus_probe_backend_unit(nodename, type, dir[i]);
-+		if (err)
-+			break;
-+	}
-+	kfree(dir);
-+	kfree(nodename);
-+	return err;
++static inline void
++dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
++			      unsigned long offset, size_t size,
++			      enum dma_data_direction direction)
++{
++	dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
 +}
 +
-+static void backend_changed(struct xenbus_watch *watch,
-+			    const char **vec, unsigned int len)
++static inline void
++dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
++				 unsigned long offset, size_t size,
++				 enum dma_data_direction direction)
 +{
-+	DPRINTK("");
-+
-+	dev_changed(vec[XS_WATCH_PATH], &xenbus_backend);
++	dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
 +}
 +
-+static struct xenbus_watch be_watch = {
-+	.node = "backend",
-+	.callback = backend_changed,
-+};
-+
-+void xenbus_backend_suspend(int (*fn)(struct device *, void *))
++static inline void
++dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
++		    enum dma_data_direction direction)
 +{
-+	DPRINTK("");
-+	if (!xenbus_backend.error)
-+		bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn);
++	if (swiotlb)
++		swiotlb_sync_sg_for_cpu(dev,sg,nelems,direction);
++	flush_write_buffers();
 +}
 +
-+void xenbus_backend_resume(int (*fn)(struct device *, void *))
++static inline void
++dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
++		    enum dma_data_direction direction)
 +{
-+	DPRINTK("");
-+	if (!xenbus_backend.error)
-+		bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn);
++	if (swiotlb)
++		swiotlb_sync_sg_for_device(dev,sg,nelems,direction);
++	flush_write_buffers();
 +}
 +
-+void xenbus_backend_probe_and_watch(void)
++extern int
++dma_mapping_error(dma_addr_t dma_addr);
++
++extern int
++dma_supported(struct device *dev, u64 mask);
++
++static inline int
++dma_set_mask(struct device *dev, u64 mask)
 +{
-+	xenbus_probe_devices(&xenbus_backend);
-+	register_xenbus_watch(&be_watch);
++	if(!dev->dma_mask || !dma_supported(dev, mask))
++		return -EIO;
++
++	*dev->dma_mask = mask;
++
++	return 0;
 +}
 +
-+void xenbus_backend_bus_register(void)
++static inline int
++dma_get_cache_alignment(void)
 +{
-+	xenbus_backend.error = bus_register(&xenbus_backend.bus);
-+	if (xenbus_backend.error)
-+		printk(KERN_WARNING
-+		       "XENBUS: Error registering backend bus: %i\n",
-+		       xenbus_backend.error);
++	/* no easy way to get cache size on all x86, so return the
++	 * maximum possible, to be safe */
++	return (1 << INTERNODE_CACHE_SHIFT);
 +}
 +
-+void xenbus_backend_device_register(void)
-+{
-+	if (xenbus_backend.error)
-+		return;
++#define dma_is_consistent(d)	(1)
 +
-+	xenbus_backend.error = device_register(&xenbus_backend.dev);
-+	if (xenbus_backend.error) {
-+		bus_unregister(&xenbus_backend.bus);
-+		printk(KERN_WARNING
-+		       "XENBUS: Error registering backend device: %i\n",
-+		       xenbus_backend.error);
-+	}
++static inline void
++dma_cache_sync(void *vaddr, size_t size,
++	       enum dma_data_direction direction)
++{
++	flush_write_buffers();
 +}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/xenbus/xenbus_probe.c tmp-linux-2.6-xen.patch/drivers/xen/xenbus/xenbus_probe.c
---- pristine-linux-2.6.18.2/drivers/xen/xenbus/xenbus_probe.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/xenbus/xenbus_probe.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,1132 @@
-+/******************************************************************************
-+ * Talks to Xen Store to figure out what devices we have.
-+ *
-+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
-+ * Copyright (C) 2005 Mike Wray, Hewlett-Packard
-+ * Copyright (C) 2005, 2006 XenSource Ltd
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#define DPRINTK(fmt, args...)				\
-+	pr_debug("xenbus_probe (%s:%d) " fmt ".\n",	\
-+		 __FUNCTION__, __LINE__, ##args)
 +
-+#include <linux/kernel.h>
-+#include <linux/err.h>
-+#include <linux/string.h>
-+#include <linux/ctype.h>
-+#include <linux/fcntl.h>
-+#include <linux/mm.h>
-+#include <linux/notifier.h>
-+#include <linux/kthread.h>
-+#include <linux/mutex.h>
++#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
++extern int
++dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
++			    dma_addr_t device_addr, size_t size, int flags);
 +
-+#include <asm/io.h>
-+#include <asm/page.h>
-+#include <asm/maddr.h>
-+#include <asm/pgtable.h>
-+#include <asm/hypervisor.h>
-+#include <xen/xenbus.h>
-+#include <xen/xen_proc.h>
-+#include <xen/evtchn.h>
-+#include <xen/features.h>
-+#include <xen/hvm.h>
++extern void
++dma_release_declared_memory(struct device *dev);
 +
-+#include "xenbus_comms.h"
-+#include "xenbus_probe.h"
++extern void *
++dma_mark_declared_memory_occupied(struct device *dev,
++				  dma_addr_t device_addr, size_t size);
 +
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
 +#endif
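
A minimal, hedged usage sketch for the scatter-gather entry points
above (hwdev, sg and nents are placeholders supplied by the caller;
error handling elided):

	int n = dma_map_sg(hwdev, sg, nents, DMA_TO_DEVICE);
	/* hand the n coalesced entries to the device, run the transfer */
	dma_unmap_sg(hwdev, sg, nents, DMA_TO_DEVICE);
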
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/fixmap.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/fixmap.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,155 @@
++/*
++ * fixmap.h: compile-time virtual memory allocation
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (C) 1998 Ingo Molnar
++ *
++ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
++ */
 +
-+int xen_store_evtchn;
-+struct xenstore_domain_interface *xen_store_interface;
-+static unsigned long xen_store_mfn;
++#ifndef _ASM_FIXMAP_H
++#define _ASM_FIXMAP_H
 +
-+extern struct mutex xenwatch_mutex;
 +
-+static ATOMIC_NOTIFIER_HEAD(xenstore_chain);
++/* used by vmalloc.c, vsyscall.lds.S.
++ *
++ * Leave one empty page between vmalloc'ed areas and
++ * the start of the fixmap.
++ */
++extern unsigned long __FIXADDR_TOP;
 +
-+static void wait_for_devices(struct xenbus_driver *xendrv);
++#ifndef __ASSEMBLY__
++#include <linux/kernel.h>
++#include <asm/acpi.h>
++#include <asm/apicdef.h>
++#include <asm/page.h>
++#ifdef CONFIG_HIGHMEM
++#include <linux/threads.h>
++#include <asm/kmap_types.h>
++#endif
 +
-+static int xenbus_probe_frontend(const char *type, const char *name);
++/*
++ * Here we define all the compile-time 'special' virtual
++ * addresses. The point is to have a constant address at
++ * compile time, but to set the physical address only
++ * in the boot process. We allocate these special addresses
++ * from the end of virtual memory (0xfffff000) backwards.
++ * Also this lets us do fail-safe vmalloc(), we
++ * can guarantee that these special addresses and
++ * vmalloc()-ed addresses never overlap.
++ *
++ * these 'compile-time allocated' memory buffers are
++ * fixed-size 4k pages (or larger if used with an increment
++ * higher than 1). Use set_fixmap(idx, phys) to associate
++ * physical memory with fixmap indices.
++ *
++ * TLB entries of such buffers will not be flushed across
++ * task switches.
++ */
++enum fixed_addresses {
++	FIX_HOLE,
++	FIX_VDSO,
++#ifdef CONFIG_X86_LOCAL_APIC
++	FIX_APIC_BASE,	/* local (CPU) APIC -- required for SMP or not */
++#endif
++#ifdef CONFIG_X86_IO_APIC
++	FIX_IO_APIC_BASE_0,
++	FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
++#endif
++#ifdef CONFIG_X86_VISWS_APIC
++	FIX_CO_CPU,	/* Cobalt timer */
++	FIX_CO_APIC,	/* Cobalt APIC Redirection Table */ 
++	FIX_LI_PCIA,	/* Lithium PCI Bridge A */
++	FIX_LI_PCIB,	/* Lithium PCI Bridge B */
++#endif
++#ifdef CONFIG_X86_F00F_BUG
++	FIX_F00F_IDT,	/* Virtual mapping for IDT */
++#endif
++#ifdef CONFIG_X86_CYCLONE_TIMER
++	FIX_CYCLONE_TIMER, /*cyclone timer register*/
++#endif 
++#ifdef CONFIG_HIGHMEM
++	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
++	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
++#endif
++#ifdef CONFIG_ACPI
++	FIX_ACPI_BEGIN,
++	FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
++#endif
++#ifdef CONFIG_PCI_MMCONFIG
++	FIX_PCIE_MCFG,
++#endif
++	FIX_SHARED_INFO,
++#define NR_FIX_ISAMAPS	256
++	FIX_ISAMAP_END,
++	FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
++	__end_of_permanent_fixed_addresses,
++	/* temporary boot-time mappings, used before ioremap() is functional */
++#define NR_FIX_BTMAPS	16
++	FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
++	FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
++	FIX_WP_TEST,
++	__end_of_fixed_addresses
++};
 +
-+static void xenbus_dev_shutdown(struct device *_dev);
++extern void set_fixaddr_top(unsigned long top);
 +
-+/* If something in array of ids matches this device, return it. */
-+static const struct xenbus_device_id *
-+match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
-+{
-+	for (; *arr->devicetype != '\0'; arr++) {
-+		if (!strcmp(arr->devicetype, dev->devicetype))
-+			return arr;
-+	}
-+	return NULL;
-+}
++extern void __set_fixmap(enum fixed_addresses idx,
++					maddr_t phys, pgprot_t flags);
 +
-+int xenbus_match(struct device *_dev, struct device_driver *_drv)
-+{
-+	struct xenbus_driver *drv = to_xenbus_driver(_drv);
++#define set_fixmap(idx, phys) \
++		__set_fixmap(idx, phys, PAGE_KERNEL)
++/*
++ * Some hardware wants to get fixmapped without caching.
++ */
++#define set_fixmap_nocache(idx, phys) \
++		__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
 +
-+	if (!drv->ids)
-+		return 0;
++#define clear_fixmap(idx) \
++		__set_fixmap(idx, 0, __pgprot(0))
 +
-+	return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
-+}
++#define FIXADDR_TOP	((unsigned long)__FIXADDR_TOP)
 +
-+/* device/<type>/<id> => <type>-<id> */
-+static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
-+{
-+	nodename = strchr(nodename, '/');
-+	if (!nodename || strlen(nodename + 1) >= BUS_ID_SIZE) {
-+		printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
-+		return -EINVAL;
-+	}
++#define __FIXADDR_SIZE	(__end_of_permanent_fixed_addresses << PAGE_SHIFT)
++#define __FIXADDR_BOOT_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
++#define FIXADDR_START		(FIXADDR_TOP - __FIXADDR_SIZE)
++#define FIXADDR_BOOT_START	(FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
 +
-+	strlcpy(bus_id, nodename + 1, BUS_ID_SIZE);
-+	if (!strchr(bus_id, '/')) {
-+		printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
-+		return -EINVAL;
-+	}
-+	*strchr(bus_id, '/') = '-';
-+	return 0;
-+}
++#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
++#define __virt_to_fix(x)	((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
 +
++extern void __this_fixmap_does_not_exist(void);
 +
-+static void free_otherend_details(struct xenbus_device *dev)
++/*
++ * 'index to address' translation. If anyone tries to use the idx
++ * directly without translation, we catch the bug with a NULL-dereference
++ * kernel oops. Illegal ranges of incoming indices are caught too.
++ */
++static __always_inline unsigned long fix_to_virt(const unsigned int idx)
 +{
-+	kfree(dev->otherend);
-+	dev->otherend = NULL;
-+}
++	/*
++	 * this branch gets completely eliminated after inlining,
++	 * except when someone tries to use fixaddr indices in an
++	 * illegal way. (such as mixing up address types or using
++	 * out-of-range indices).
++	 *
++	 * If it doesn't get removed, the linker will complain
++	 * loudly with a reasonably clear error message.
++	 */
++	if (idx >= __end_of_fixed_addresses)
++		__this_fixmap_does_not_exist();
 +
++	return __fix_to_virt(idx);
++}
 +
-+static void free_otherend_watch(struct xenbus_device *dev)
++static inline unsigned long virt_to_fix(const unsigned long vaddr)
 +{
-+	if (dev->otherend_watch.node) {
-+		unregister_xenbus_watch(&dev->otherend_watch);
-+		kfree(dev->otherend_watch.node);
-+		dev->otherend_watch.node = NULL;
-+	}
++	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
++	return __virt_to_fix(vaddr);
 +}
 +
++#endif /* !__ASSEMBLY__ */
++#endif
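
A hedged sketch of how the fixmap slots above get used (mfn is a
placeholder machine frame number; shared_info_t comes from the Xen
interface headers):

	/* bind the machine page to the compile-time slot ... */
	set_fixmap(FIX_SHARED_INFO, (maddr_t)mfn << PAGE_SHIFT);
	/* ... then address it through the constant virtual address */
	shared_info_t *si = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
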
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/gnttab_dma.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/gnttab_dma.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,41 @@
++/*
++ * Copyright (c) 2007 Herbert Xu <herbert at gondor.apana.org.au>
++ * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
++ *                    VA Linux Systems Japan K.K.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ */
 +
-+int read_otherend_details(struct xenbus_device *xendev,
-+				 char *id_node, char *path_node)
-+{
-+	int err = xenbus_gather(XBT_NIL, xendev->nodename,
-+				id_node, "%i", &xendev->otherend_id,
-+				path_node, NULL, &xendev->otherend,
-+				NULL);
-+	if (err) {
-+		xenbus_dev_fatal(xendev, err,
-+				 "reading other end details from %s",
-+				 xendev->nodename);
-+		return err;
-+	}
-+	if (strlen(xendev->otherend) == 0 ||
-+	    !xenbus_exists(XBT_NIL, xendev->otherend, "")) {
-+		xenbus_dev_fatal(xendev, -ENOENT,
-+				 "unable to read other end from %s.  "
-+				 "missing or inaccessible.",
-+				 xendev->nodename);
-+		free_otherend_details(xendev);
-+		return -ENOENT;
-+	}
++#ifndef _ASM_I386_GNTTAB_DMA_H
++#define _ASM_I386_GNTTAB_DMA_H
 +
-+	return 0;
++static inline int gnttab_dma_local_pfn(struct page *page)
++{
++	/* Has it become a local MFN? */
++	return pfn_valid(mfn_to_local_pfn(pfn_to_mfn(page_to_pfn(page))));
 +}
 +
-+
-+static int read_backend_details(struct xenbus_device *xendev)
++static inline maddr_t gnttab_dma_map_page(struct page *page)
 +{
-+	return read_otherend_details(xendev, "backend-id", "backend");
++	__gnttab_dma_map_page(page);
++	return ((maddr_t)pfn_to_mfn(page_to_pfn(page)) << PAGE_SHIFT);
 +}
 +
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+static int xenbus_uevent_frontend(struct device *dev, char **envp,
-+				  int num_envp, char *buffer, int buffer_size)
++static inline void gnttab_dma_unmap_page(maddr_t maddr)
 +{
-+	struct xenbus_device *xdev;
-+	int length = 0, i = 0;
-+
-+	if (dev == NULL)
-+		return -ENODEV;
-+	xdev = to_xenbus_device(dev);
-+	if (xdev == NULL)
-+		return -ENODEV;
++	__gnttab_dma_unmap_page(virt_to_page(bus_to_virt(maddr)));
++}
 +
-+	/* stuff we want to pass to /sbin/hotplug */
-+	add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
-+		       "XENBUS_TYPE=%s", xdev->devicetype);
-+	add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
-+		       "XENBUS_PATH=%s", xdev->nodename);
-+	add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
-+		       "MODALIAS=xen:%s", xdev->devicetype);
++#endif /* _ASM_I386_GNTTAB_DMA_H */
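
A hedged usage sketch (page is a placeholder struct page * backed by a
granted frame; the __gnttab_dma_* helpers are provided by the
grant-table code, not by this header):

	maddr_t bus = gnttab_dma_map_page(page);  /* pin, return machine address */
	/* program the device with 'bus' and wait for the DMA to finish */
	gnttab_dma_unmap_page(bus);               /* drop the pin again */
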
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/highmem.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/highmem.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,80 @@
++/*
++ * highmem.h: virtual kernel memory mappings for high memory
++ *
++ * Used in CONFIG_HIGHMEM systems for memory pages which
++ * are not addressable by direct kernel virtual addresses.
++ *
++ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
++ *		      Gerhard.Wichert at pdb.siemens.de
++ *
++ *
++ * Redesigned the x86 32-bit VM architecture to deal with 
++ * up to 16 Terabyte physical memory. With current x86 CPUs
++ * we now support up to 64 Gigabytes physical RAM.
++ *
++ * Copyright (C) 1999 Ingo Molnar <mingo at redhat.com>
++ */
 +
-+	return 0;
-+}
-+#endif
++#ifndef _ASM_HIGHMEM_H
++#define _ASM_HIGHMEM_H
 +
-+/* Bus type for frontend drivers. */
-+static struct xen_bus_type xenbus_frontend = {
-+	.root = "device",
-+	.levels = 2, 		/* device/type/<id> */
-+	.get_bus_id = frontend_bus_id,
-+	.probe = xenbus_probe_frontend,
-+	.error = -ENODEV,
-+	.bus = {
-+		.name     = "xen",
-+		.match    = xenbus_match,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+		.probe    = xenbus_dev_probe,
-+		.remove   = xenbus_dev_remove,
-+		.shutdown = xenbus_dev_shutdown,
-+		.uevent   = xenbus_uevent_frontend,
-+#endif
-+	},
-+	.dev = {
-+		.bus_id = "xen",
-+	},
-+};
++#ifdef __KERNEL__
 +
-+static void otherend_changed(struct xenbus_watch *watch,
-+			     const char **vec, unsigned int len)
-+{
-+	struct xenbus_device *dev =
-+		container_of(watch, struct xenbus_device, otherend_watch);
-+	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
-+	enum xenbus_state state;
++#include <linux/interrupt.h>
++#include <linux/threads.h>
++#include <asm/kmap_types.h>
++#include <asm/tlbflush.h>
 +
-+	/* Protect us against watches firing on old details when the otherend
-+	   details change, say immediately after a resume. */
-+	if (!dev->otherend ||
-+	    strncmp(dev->otherend, vec[XS_WATCH_PATH],
-+		    strlen(dev->otherend))) {
-+		DPRINTK("Ignoring watch at %s", vec[XS_WATCH_PATH]);
-+		return;
-+	}
++/* declarations for highmem.c */
++extern unsigned long highstart_pfn, highend_pfn;
 +
-+	state = xenbus_read_driver_state(dev->otherend);
++extern pte_t *kmap_pte;
++extern pgprot_t kmap_prot;
++extern pte_t *pkmap_page_table;
 +
-+	DPRINTK("state is %d (%s), %s, %s", state, xenbus_strstate(state),
-+		dev->otherend_watch.node, vec[XS_WATCH_PATH]);
++/*
++ * Right now we initialize only a single pte table. It can be extended
++ * easily; subsequent pte tables have to be allocated in one physical
++ * chunk of RAM.
++ */
++#ifdef CONFIG_X86_PAE
++#define LAST_PKMAP 512
++#else
++#define LAST_PKMAP 1024
++#endif
++/*
++ * Ordering is:
++ *
++ * FIXADDR_TOP
++ * 			fixed_addresses
++ * FIXADDR_START
++ * 			temp fixed addresses
++ * FIXADDR_BOOT_START
++ * 			Persistent kmap area
++ * PKMAP_BASE
++ * VMALLOC_END
++ * 			Vmalloc area
++ * VMALLOC_START
++ * high_memory
++ */
++#define PKMAP_BASE ( (FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK )
++#define LAST_PKMAP_MASK (LAST_PKMAP-1)
++#define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
++#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
 +
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+	/*
-+	 * Ignore xenbus transitions during shutdown. This prevents us doing
-+	 * work that can fail e.g., when the rootfs is gone.
-+	 */
-+	if (system_state > SYSTEM_RUNNING) {
-+		struct xen_bus_type *bus = bus;
-+		bus = container_of(dev->dev.bus, struct xen_bus_type, bus);
-+		/* If we're frontend, drive the state machine to Closed. */
-+		/* This should cause the backend to release our resources. */
-+		if ((bus == &xenbus_frontend) && (state == XenbusStateClosing))
-+			xenbus_frontend_closed(dev);
-+		return;
-+	}
-+#endif
++extern void * FASTCALL(kmap_high(struct page *page));
++extern void FASTCALL(kunmap_high(struct page *page));
 +
-+	if (drv->otherend_changed)
-+		drv->otherend_changed(dev, state);
-+}
++void *kmap(struct page *page);
++void kunmap(struct page *page);
++void *kmap_atomic(struct page *page, enum km_type type);
++void *kmap_atomic_pte(struct page *page, enum km_type type);
++void kunmap_atomic(void *kvaddr, enum km_type type);
++void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
++struct page *kmap_atomic_to_page(void *ptr);
 +
++#define flush_cache_kmaps()	do { } while (0)
 +
-+static int talk_to_otherend(struct xenbus_device *dev)
-+{
-+	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
++#endif /* __KERNEL__ */
 +
-+	free_otherend_watch(dev);
-+	free_otherend_details(dev);
++#endif /* _ASM_HIGHMEM_H */
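
The classic calling pattern for the declarations above, as a hedged
sketch (page is a placeholder highmem page):

	void *va = kmap(page);		/* persistent mapping, may sleep */
	memset(va, 0, PAGE_SIZE);	/* page is now kernel-addressable */
	kunmap(page);			/* release the pkmap slot again */
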
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/hw_irq.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/hw_irq.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,72 @@
++#ifndef _ASM_HW_IRQ_H
++#define _ASM_HW_IRQ_H
 +
-+	return drv->read_otherend_details(dev);
-+}
++/*
++ *	linux/include/asm/hw_irq.h
++ *
++ *	(C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
++ *
++ *	moved some of the old arch/i386/kernel/irq.h to here. VY
++ *
++ *	IRQ/IPI changes taken from work by Thomas Radke
++ *	<tomsoft at informatik.tu-chemnitz.de>
++ */
 +
++#include <linux/profile.h>
++#include <asm/atomic.h>
++#include <asm/irq.h>
++#include <asm/sections.h>
 +
-+static int watch_otherend(struct xenbus_device *dev)
-+{
-+	return xenbus_watch_path2(dev, dev->otherend, "state",
-+				  &dev->otherend_watch, otherend_changed);
-+}
++struct hw_interrupt_type;
 +
++#define NMI_VECTOR		0x02
 +
-+int xenbus_dev_probe(struct device *_dev)
-+{
-+	struct xenbus_device *dev = to_xenbus_device(_dev);
-+	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
-+	const struct xenbus_device_id *id;
-+	int err;
++/*
++ * Various low-level irq details needed by irq.c, process.c,
++ * time.c, io_apic.c and smp.c
++ *
++ * Interrupt entry/exit code at both C and assembly level
++ */
 +
-+	DPRINTK("%s", dev->nodename);
++extern u8 irq_vector[NR_IRQ_VECTORS];
++#define IO_APIC_VECTOR(irq)	(irq_vector[irq])
++#define AUTO_ASSIGN		-1
 +
-+	if (!drv->probe) {
-+		err = -ENODEV;
-+		goto fail;
-+	}
++extern void (*interrupt[NR_IRQS])(void);
 +
-+	id = match_device(drv->ids, dev);
-+	if (!id) {
-+		err = -ENODEV;
-+		goto fail;
-+	}
++#ifdef CONFIG_SMP
++fastcall void reschedule_interrupt(void);
++fastcall void invalidate_interrupt(void);
++fastcall void call_function_interrupt(void);
++#endif
 +
-+	err = talk_to_otherend(dev);
-+	if (err) {
-+		printk(KERN_WARNING
-+		       "xenbus_probe: talk_to_otherend on %s failed.\n",
-+		       dev->nodename);
-+		return err;
-+	}
++#ifdef CONFIG_X86_LOCAL_APIC
++fastcall void apic_timer_interrupt(void);
++fastcall void error_interrupt(void);
++fastcall void spurious_interrupt(void);
++fastcall void thermal_interrupt(struct pt_regs *);
++#define platform_legacy_irq(irq)	((irq) < 16)
++#endif
 +
-+	err = drv->probe(dev, id);
-+	if (err)
-+		goto fail;
++void disable_8259A_irq(unsigned int irq);
++void enable_8259A_irq(unsigned int irq);
++int i8259A_irq_pending(unsigned int irq);
++void make_8259A_irq(unsigned int irq);
++void init_8259A(int aeoi);
++void FASTCALL(send_IPI_self(int vector));
++void init_VISWS_APIC_irqs(void);
++void setup_IO_APIC(void);
++void disable_IO_APIC(void);
++#define print_IO_APIC()
++int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
++void send_IPI(int dest, int vector);
++void setup_ioapic_dest(void);
 +
-+	err = watch_otherend(dev);
-+	if (err) {
-+		printk(KERN_WARNING
-+		       "xenbus_probe: watch_otherend on %s failed.\n",
-+		       dev->nodename);
-+		return err;
-+	}
++extern unsigned long io_apic_irqs;
 +
-+	return 0;
-+fail:
-+	xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
-+	xenbus_switch_state(dev, XenbusStateClosed);
-+	return -ENODEV;
-+}
++extern atomic_t irq_err_count;
++extern atomic_t irq_mis_count;
 +
-+int xenbus_dev_remove(struct device *_dev)
-+{
-+	struct xenbus_device *dev = to_xenbus_device(_dev);
-+	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
++#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
 +
-+	DPRINTK("%s", dev->nodename);
++#endif /* _ASM_HW_IRQ_H */
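
Reading aid for the macros above: IO_APIC_IRQ() treats every IRQ from
16 upwards as IO-APIC routed and consults the io_apic_irqs bitmap for
the 16 legacy ISA lines, e.g. (irq and vector are placeholders):

	if (IO_APIC_IRQ(irq))			/* routed through the IO-APIC? */
		vector = IO_APIC_VECTOR(irq);	/* then this is its CPU vector */
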
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/hypercall.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/hypercall.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,416 @@
++/******************************************************************************
++ * hypercall.h
++ * 
++ * Linux-specific hypervisor handling.
++ * 
++ * Copyright (c) 2002-2004, K A Fraser
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
 +
-+	free_otherend_watch(dev);
-+	free_otherend_details(dev);
++#ifndef __HYPERCALL_H__
++#define __HYPERCALL_H__
 +
-+	if (drv->remove)
-+		drv->remove(dev);
++#include <linux/string.h> /* memcpy() */
++#include <linux/stringify.h>
 +
-+	xenbus_switch_state(dev, XenbusStateClosed);
-+	return 0;
-+}
++#ifndef __HYPERVISOR_H__
++# error "please don't include this file directly"
++#endif
 +
-+static void xenbus_dev_shutdown(struct device *_dev)
-+{
-+	struct xenbus_device *dev = to_xenbus_device(_dev);
-+	unsigned long timeout = 5*HZ;
++#ifdef CONFIG_XEN
++#define HYPERCALL_STR(name)					\
++	"call hypercall_page + ("__stringify(__HYPERVISOR_##name)" * 32)"
++#else
++#define HYPERCALL_STR(name)					\
++	"mov hypercall_stubs,%%eax; "				\
++	"add $("__stringify(__HYPERVISOR_##name)" * 32),%%eax; "\
++	"call *%%eax"
++#endif
 +
-+	DPRINTK("%s", dev->nodename);
++#define _hypercall0(type, name)			\
++({						\
++	type __res;				\
++	asm volatile (				\
++		HYPERCALL_STR(name)		\
++		: "=a" (__res)			\
++		:				\
++		: "memory" );			\
++	__res;					\
++})
 +
-+	get_device(&dev->dev);
-+	if (dev->state != XenbusStateConnected) {
-+		printk("%s: %s: %s != Connected, skipping\n", __FUNCTION__,
-+		       dev->nodename, xenbus_strstate(dev->state));
-+		goto out;
-+	}
-+	xenbus_switch_state(dev, XenbusStateClosing);
-+	timeout = wait_for_completion_timeout(&dev->down, timeout);
-+	if (!timeout)
-+		printk("%s: %s timeout closing device\n", __FUNCTION__, dev->nodename);
-+ out:
-+	put_device(&dev->dev);
-+}
++#define _hypercall1(type, name, a1)				\
++({								\
++	type __res;						\
++	long __ign1;						\
++	asm volatile (						\
++		HYPERCALL_STR(name)				\
++		: "=a" (__res), "=b" (__ign1)			\
++		: "1" ((long)(a1))				\
++		: "memory" );					\
++	__res;							\
++})
 +
-+int xenbus_register_driver_common(struct xenbus_driver *drv,
-+				  struct xen_bus_type *bus)
-+{
-+	int ret;
++#define _hypercall2(type, name, a1, a2)				\
++({								\
++	type __res;						\
++	long __ign1, __ign2;					\
++	asm volatile (						\
++		HYPERCALL_STR(name)				\
++		: "=a" (__res), "=b" (__ign1), "=c" (__ign2)	\
++		: "1" ((long)(a1)), "2" ((long)(a2))		\
++		: "memory" );					\
++	__res;							\
++})
 +
-+	if (bus->error)
-+		return bus->error;
++#define _hypercall3(type, name, a1, a2, a3)			\
++({								\
++	type __res;						\
++	long __ign1, __ign2, __ign3;				\
++	asm volatile (						\
++		HYPERCALL_STR(name)				\
++		: "=a" (__res), "=b" (__ign1), "=c" (__ign2), 	\
++		"=d" (__ign3)					\
++		: "1" ((long)(a1)), "2" ((long)(a2)),		\
++		"3" ((long)(a3))				\
++		: "memory" );					\
++	__res;							\
++})
 +
-+	drv->driver.name = drv->name;
-+	drv->driver.bus = &bus->bus;
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
-+	drv->driver.owner = drv->owner;
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
-+	drv->driver.probe = xenbus_dev_probe;
-+	drv->driver.remove = xenbus_dev_remove;
-+	drv->driver.shutdown = xenbus_dev_shutdown;
-+#endif
++#define _hypercall4(type, name, a1, a2, a3, a4)			\
++({								\
++	type __res;						\
++	long __ign1, __ign2, __ign3, __ign4;			\
++	asm volatile (						\
++		HYPERCALL_STR(name)				\
++		: "=a" (__res), "=b" (__ign1), "=c" (__ign2),	\
++		"=d" (__ign3), "=S" (__ign4)			\
++		: "1" ((long)(a1)), "2" ((long)(a2)),		\
++		"3" ((long)(a3)), "4" ((long)(a4))		\
++		: "memory" );					\
++	__res;							\
++})
 +
-+	mutex_lock(&xenwatch_mutex);
-+	ret = driver_register(&drv->driver);
-+	mutex_unlock(&xenwatch_mutex);
-+	return ret;
-+}
++#define _hypercall5(type, name, a1, a2, a3, a4, a5)		\
++({								\
++	type __res;						\
++	long __ign1, __ign2, __ign3, __ign4, __ign5;		\
++	asm volatile (						\
++		HYPERCALL_STR(name)				\
++		: "=a" (__res), "=b" (__ign1), "=c" (__ign2),	\
++		"=d" (__ign3), "=S" (__ign4), "=D" (__ign5)	\
++		: "1" ((long)(a1)), "2" ((long)(a2)),		\
++		"3" ((long)(a3)), "4" ((long)(a4)),		\
++		"5" ((long)(a5))				\
++		: "memory" );					\
++	__res;							\
++})
 +
-+int xenbus_register_frontend(struct xenbus_driver *drv)
++static inline int __must_check
++HYPERVISOR_set_trap_table(
++	const trap_info_t *table)
 +{
-+	int ret;
-+
-+	drv->read_otherend_details = read_backend_details;
-+
-+	ret = xenbus_register_driver_common(drv, &xenbus_frontend);
-+	if (ret)
-+		return ret;
-+
-+	/* If this driver is loaded as a module wait for devices to attach. */
-+	wait_for_devices(drv);
-+
-+	return 0;
++	return _hypercall1(int, set_trap_table, table);
 +}
-+EXPORT_SYMBOL_GPL(xenbus_register_frontend);
 +
-+void xenbus_unregister_driver(struct xenbus_driver *drv)
++static inline int __must_check
++HYPERVISOR_mmu_update(
++	mmu_update_t *req, unsigned int count, unsigned int *success_count,
++	domid_t domid)
 +{
-+	driver_unregister(&drv->driver);
++	return _hypercall4(int, mmu_update, req, count, success_count, domid);
 +}
-+EXPORT_SYMBOL_GPL(xenbus_unregister_driver);
 +
-+struct xb_find_info
++static inline int __must_check
++HYPERVISOR_mmuext_op(
++	struct mmuext_op *op, unsigned int count, unsigned int *success_count,
++	domid_t domid)
 +{
-+	struct xenbus_device *dev;
-+	const char *nodename;
-+};
++	return _hypercall4(int, mmuext_op, op, count, success_count, domid);
++}
 +
-+static int cmp_dev(struct device *dev, void *data)
++static inline int __must_check
++HYPERVISOR_set_gdt(
++	unsigned long *frame_list, unsigned int entries)
 +{
-+	struct xenbus_device *xendev = to_xenbus_device(dev);
-+	struct xb_find_info *info = data;
-+
-+	if (!strcmp(xendev->nodename, info->nodename)) {
-+		info->dev = xendev;
-+		get_device(dev);
-+		return 1;
-+	}
-+	return 0;
++	return _hypercall2(int, set_gdt, frame_list, entries);
 +}
 +
-+struct xenbus_device *xenbus_device_find(const char *nodename,
-+					 struct bus_type *bus)
++static inline int __must_check
++HYPERVISOR_stack_switch(
++	unsigned long ss, unsigned long esp)
 +{
-+	struct xb_find_info info = { .dev = NULL, .nodename = nodename };
-+
-+	bus_for_each_dev(bus, NULL, &info, cmp_dev);
-+	return info.dev;
++	return _hypercall2(int, stack_switch, ss, esp);
 +}
 +
-+static int cleanup_dev(struct device *dev, void *data)
++static inline int __must_check
++HYPERVISOR_set_callbacks(
++	unsigned long event_selector, unsigned long event_address,
++	unsigned long failsafe_selector, unsigned long failsafe_address)
 +{
-+	struct xenbus_device *xendev = to_xenbus_device(dev);
-+	struct xb_find_info *info = data;
-+	int len = strlen(info->nodename);
-+
-+	DPRINTK("%s", info->nodename);
++	return _hypercall4(int, set_callbacks,
++			   event_selector, event_address,
++			   failsafe_selector, failsafe_address);
++}
 +
-+	/* Match the info->nodename path, or any subdirectory of that path. */
-+	if (strncmp(xendev->nodename, info->nodename, len))
-+		return 0;
++static inline int
++HYPERVISOR_fpu_taskswitch(
++	int set)
++{
++	return _hypercall1(int, fpu_taskswitch, set);
++}
 +
-+	/* If the node name is longer, ensure it really is a subdirectory. */
-+	if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/'))
-+		return 0;
++static inline int __must_check
++HYPERVISOR_sched_op_compat(
++	int cmd, unsigned long arg)
++{
++	return _hypercall2(int, sched_op_compat, cmd, arg);
++}
 +
-+	info->dev = xendev;
-+	get_device(dev);
-+	return 1;
++static inline int __must_check
++HYPERVISOR_sched_op(
++	int cmd, void *arg)
++{
++	return _hypercall2(int, sched_op, cmd, arg);
 +}
 +
-+static void xenbus_cleanup_devices(const char *path, struct bus_type *bus)
++static inline long __must_check
++HYPERVISOR_set_timer_op(
++	u64 timeout)
 +{
-+	struct xb_find_info info = { .nodename = path };
++	unsigned long timeout_hi = (unsigned long)(timeout>>32);
++	unsigned long timeout_lo = (unsigned long)timeout;
++	return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
++}
 +
-+	do {
-+		info.dev = NULL;
-+		bus_for_each_dev(bus, NULL, &info, cleanup_dev);
-+		if (info.dev) {
-+			device_unregister(&info.dev->dev);
-+			put_device(&info.dev->dev);
-+		}
-+	} while (info.dev);
++static inline int __must_check
++HYPERVISOR_platform_op(
++	struct xen_platform_op *platform_op)
++{
++	platform_op->interface_version = XENPF_INTERFACE_VERSION;
++	return _hypercall1(int, platform_op, platform_op);
 +}
 +
-+static void xenbus_dev_release(struct device *dev)
++static inline int __must_check
++HYPERVISOR_set_debugreg(
++	unsigned int reg, unsigned long value)
 +{
-+	if (dev)
-+		kfree(to_xenbus_device(dev));
++	return _hypercall2(int, set_debugreg, reg, value);
 +}
 +
-+static ssize_t xendev_show_nodename(struct device *dev,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
-+				    struct device_attribute *attr,
-+#endif
-+				    char *buf)
++static inline unsigned long __must_check
++HYPERVISOR_get_debugreg(
++	unsigned int reg)
 +{
-+	return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
++	return _hypercall1(unsigned long, get_debugreg, reg);
 +}
-+DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL);
 +
-+static ssize_t xendev_show_devtype(struct device *dev,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
-+				   struct device_attribute *attr,
-+#endif
-+				   char *buf)
++static inline int __must_check
++HYPERVISOR_update_descriptor(
++	u64 ma, u64 desc)
 +{
-+	return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
++	return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
 +}
-+DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL);
 +
++static inline int __must_check
++HYPERVISOR_memory_op(
++	unsigned int cmd, void *arg)
++{
++	return _hypercall2(int, memory_op, cmd, arg);
++}
 +
-+int xenbus_probe_node(struct xen_bus_type *bus,
-+		      const char *type,
-+		      const char *nodename)
++static inline int __must_check
++HYPERVISOR_multicall(
++	multicall_entry_t *call_list, unsigned int nr_calls)
 +{
-+	int err;
-+	struct xenbus_device *xendev;
-+	size_t stringlen;
-+	char *tmpstring;
++	return _hypercall2(int, multicall, call_list, nr_calls);
++}
 +
-+	enum xenbus_state state = xenbus_read_driver_state(nodename);
++static inline int __must_check
++HYPERVISOR_update_va_mapping(
++	unsigned long va, pte_t new_val, unsigned long flags)
++{
++	unsigned long pte_hi = 0;
++#ifdef CONFIG_X86_PAE
++	pte_hi = new_val.pte_high;
++#endif
++	return _hypercall4(int, update_va_mapping, va,
++			   new_val.pte_low, pte_hi, flags);
++}
 +
-+	if (bus->error)
-+		return bus->error;
++static inline int __must_check
++HYPERVISOR_event_channel_op(
++	int cmd, void *arg)
++{
++	int rc = _hypercall2(int, event_channel_op, cmd, arg);
 +
-+	if (state != XenbusStateInitialising) {
-+		/* Device is not new, so ignore it.  This can happen if a
-+		   device is going away after switching to Closed.  */
-+		return 0;
++#if CONFIG_XEN_COMPAT <= 0x030002
++	if (unlikely(rc == -ENOSYS)) {
++		struct evtchn_op op;
++		op.cmd = cmd;
++		memcpy(&op.u, arg, sizeof(op.u));
++		rc = _hypercall1(int, event_channel_op_compat, &op);
++		memcpy(arg, &op.u, sizeof(op.u));
 +	}
++#endif
 +
-+	stringlen = strlen(nodename) + 1 + strlen(type) + 1;
-+	xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL);
-+	if (!xendev)
-+		return -ENOMEM;
-+
-+	xendev->state = XenbusStateInitialising;
-+
-+	/* Copy the strings into the extra space. */
-+
-+	tmpstring = (char *)(xendev + 1);
-+	strcpy(tmpstring, nodename);
-+	xendev->nodename = tmpstring;
-+
-+	tmpstring += strlen(tmpstring) + 1;
-+	strcpy(tmpstring, type);
-+	xendev->devicetype = tmpstring;
-+	init_completion(&xendev->down);
-+
-+	xendev->dev.parent = &bus->dev;
-+	xendev->dev.bus = &bus->bus;
-+	xendev->dev.release = xenbus_dev_release;
-+
-+	err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename);
-+	if (err)
-+		goto fail;
-+
-+	/* Register with generic device framework. */
-+	err = device_register(&xendev->dev);
-+	if (err)
-+		goto fail;
++	return rc;
++}
 +
-+	err = device_create_file(&xendev->dev, &dev_attr_nodename);
-+	if (err)
-+		goto unregister;
-+	err = device_create_file(&xendev->dev, &dev_attr_devtype);
-+	if (err)
-+		goto unregister;
++static inline int __must_check
++HYPERVISOR_acm_op(
++	int cmd, void *arg)
++{
++	return _hypercall2(int, acm_op, cmd, arg);
++}
 +
-+	return 0;
-+unregister:
-+	device_remove_file(&xendev->dev, &dev_attr_nodename);
-+	device_remove_file(&xendev->dev, &dev_attr_devtype);
-+	device_unregister(&xendev->dev);
-+fail:
-+	kfree(xendev);
-+	return err;
++static inline int __must_check
++HYPERVISOR_xen_version(
++	int cmd, void *arg)
++{
++	return _hypercall2(int, xen_version, cmd, arg);
 +}
 +
-+/* device/<typename>/<name> */
-+static int xenbus_probe_frontend(const char *type, const char *name)
++static inline int __must_check
++HYPERVISOR_console_io(
++	int cmd, unsigned int count, char *str)
 +{
-+	char *nodename;
-+	int err;
++	return _hypercall3(int, console_io, cmd, count, str);
++}
 +
-+	nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_frontend.root, type, name);
-+	if (!nodename)
-+		return -ENOMEM;
++static inline int __must_check
++HYPERVISOR_physdev_op(
++	int cmd, void *arg)
++{
++	int rc = _hypercall2(int, physdev_op, cmd, arg);
 +
-+	DPRINTK("%s", nodename);
++#if CONFIG_XEN_COMPAT <= 0x030002
++	if (unlikely(rc == -ENOSYS)) {
++		struct physdev_op op;
++		op.cmd = cmd;
++		memcpy(&op.u, arg, sizeof(op.u));
++		rc = _hypercall1(int, physdev_op_compat, &op);
++		memcpy(arg, &op.u, sizeof(op.u));
++	}
++#endif
 +
-+	err = xenbus_probe_node(&xenbus_frontend, type, nodename);
-+	kfree(nodename);
-+	return err;
++	return rc;
 +}
 +
-+static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
++static inline int __must_check
++HYPERVISOR_grant_table_op(
++	unsigned int cmd, void *uop, unsigned int count)
 +{
-+	int err = 0;
-+	char **dir;
-+	unsigned int dir_n = 0;
-+	int i;
++	return _hypercall3(int, grant_table_op, cmd, uop, count);
++}
 +
-+	dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n);
-+	if (IS_ERR(dir))
-+		return PTR_ERR(dir);
++static inline int __must_check
++HYPERVISOR_update_va_mapping_otherdomain(
++	unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
++{
++	unsigned long pte_hi = 0;
++#ifdef CONFIG_X86_PAE
++	pte_hi = new_val.pte_high;
++#endif
++	return _hypercall5(int, update_va_mapping_otherdomain, va,
++			   new_val.pte_low, pte_hi, flags, domid);
++}
 +
-+	for (i = 0; i < dir_n; i++) {
-+		err = bus->probe(type, dir[i]);
-+		if (err)
-+			break;
-+	}
-+	kfree(dir);
-+	return err;
++static inline int __must_check
++HYPERVISOR_vm_assist(
++	unsigned int cmd, unsigned int type)
++{
++	return _hypercall2(int, vm_assist, cmd, type);
 +}
 +
-+int xenbus_probe_devices(struct xen_bus_type *bus)
++static inline int __must_check
++HYPERVISOR_vcpu_op(
++	int cmd, unsigned int vcpuid, void *extra_args)
 +{
-+	int err = 0;
-+	char **dir;
-+	unsigned int i, dir_n;
++	return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
++}
 +
-+	if (bus->error)
-+		return bus->error;
++static inline int __must_check
++HYPERVISOR_suspend(
++	unsigned long srec)
++{
++	struct sched_shutdown sched_shutdown = {
++		.reason = SHUTDOWN_suspend
++	};
 +
-+	dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n);
-+	if (IS_ERR(dir))
-+		return PTR_ERR(dir);
++	int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown,
++			     &sched_shutdown, srec);
 +
-+	for (i = 0; i < dir_n; i++) {
-+		err = xenbus_probe_device_type(bus, dir[i]);
-+		if (err)
-+			break;
-+	}
-+	kfree(dir);
-+	return err;
++#if CONFIG_XEN_COMPAT <= 0x030002
++	if (rc == -ENOSYS)
++		rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown,
++				 SHUTDOWN_suspend, srec);
++#endif
++
++	return rc;
 +}
 +
-+static unsigned int char_count(const char *str, char c)
++#if CONFIG_XEN_COMPAT <= 0x030002
++static inline int
++HYPERVISOR_nmi_op(
++	unsigned long op, void *arg)
 +{
-+	unsigned int i, ret = 0;
++	return _hypercall2(int, nmi_op, op, arg);
++}
++#endif
 +
-+	for (i = 0; str[i]; i++)
-+		if (str[i] == c)
-+			ret++;
-+	return ret;
++#ifndef CONFIG_XEN
++static inline unsigned long __must_check
++HYPERVISOR_hvm_op(
++    int op, void *arg)
++{
++    return _hypercall2(unsigned long, hvm_op, op, arg);
 +}
++#endif
 +
-+static int strsep_len(const char *str, char c, unsigned int len)
++static inline int __must_check
++HYPERVISOR_callback_op(
++	int cmd, const void *arg)
 +{
-+	unsigned int i;
++	return _hypercall2(int, callback_op, cmd, arg);
++}
 +
-+	for (i = 0; str[i]; i++)
-+		if (str[i] == c) {
-+			if (len == 0)
-+				return i;
-+			len--;
-+		}
-+	return (len == 0) ? i : -ERANGE;
++static inline int __must_check
++HYPERVISOR_xenoprof_op(
++	int op, void *arg)
++{
++	return _hypercall2(int, xenoprof_op, op, arg);
 +}
 +
-+void dev_changed(const char *node, struct xen_bus_type *bus)
++static inline int __must_check
++HYPERVISOR_kexec_op(
++	unsigned long op, void *args)
 +{
-+	int exists, rootlen;
-+	struct xenbus_device *dev;
-+	char type[BUS_ID_SIZE];
-+	const char *p, *root;
++	return _hypercall2(int, kexec_op, op, args);
++}
 +
-+	if (bus->error || char_count(node, '/') < 2)
-+ 		return;
 +
-+	exists = xenbus_exists(XBT_NIL, node, "");
-+	if (!exists) {
-+		xenbus_cleanup_devices(node, &bus->bus);
-+		return;
-+	}
 +
-+	/* backend/<type>/... or device/<type>/... */
-+	p = strchr(node, '/') + 1;
-+	snprintf(type, BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p);
-+	type[BUS_ID_SIZE-1] = '\0';
++#endif /* __HYPERCALL_H__ */
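
Two notes on this file. The _hypercallN() macros bind the result to
%eax and the arguments to %ebx, %ecx, %edx, %esi and %edi in order, so
_hypercall2(int, sched_op, cmd, arg) becomes a call into
hypercall_page + __HYPERVISOR_sched_op * 32 with cmd in %ebx and arg
in %ecx. And a hedged sketch of the typed wrappers in use
(XENVER_extraversion and xen_extraversion_t come from the Xen
interface headers):

	xen_extraversion_t extra;

	if (HYPERVISOR_xen_version(XENVER_extraversion, &extra) == 0)
		printk(KERN_INFO "running on Xen%s\n", extra);
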
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/hypervisor.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/hypervisor.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,259 @@
++/******************************************************************************
++ * hypervisor.h
++ * 
++ * Linux-specific hypervisor handling.
++ * 
++ * Copyright (c) 2002-2004, K A Fraser
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
 +
-+	rootlen = strsep_len(node, '/', bus->levels);
-+	if (rootlen < 0)
-+		return;
-+	root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node);
-+	if (!root)
-+		return;
++#ifndef __HYPERVISOR_H__
++#define __HYPERVISOR_H__
 +
-+	dev = xenbus_device_find(root, &bus->bus);
-+	if (!dev)
-+		xenbus_probe_node(bus, type, root);
-+	else
-+		put_device(&dev->dev);
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/version.h>
++#include <linux/errno.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/platform.h>
++#include <xen/interface/event_channel.h>
++#include <xen/interface/physdev.h>
++#include <xen/interface/sched.h>
++#include <xen/interface/nmi.h>
++#include <asm/ptrace.h>
++#include <asm/page.h>
++#if defined(__i386__)
++#  ifdef CONFIG_X86_PAE
++#   include <asm-generic/pgtable-nopud.h>
++#  else
++#   include <asm-generic/pgtable-nopmd.h>
++#  endif
++#elif defined(__x86_64__) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
++#  include <asm-generic/pgtable-nopud.h>
++#endif
 +
-+	kfree(root);
-+}
++extern shared_info_t *HYPERVISOR_shared_info;
 +
-+static void frontend_changed(struct xenbus_watch *watch,
-+			     const char **vec, unsigned int len)
-+{
-+	DPRINTK("");
++#define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu))
++#ifdef CONFIG_SMP
++#define current_vcpu_info() vcpu_info(smp_processor_id())
++#else
++#define current_vcpu_info() vcpu_info(0)
++#endif
 +
-+	dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
-+}
++#ifdef CONFIG_X86_32
++extern unsigned long hypervisor_virt_start;
++#endif
 +
-+/* We watch for devices appearing and vanishing. */
-+static struct xenbus_watch fe_watch = {
-+	.node = "device",
-+	.callback = frontend_changed,
-+};
++/* arch/xen/i386/kernel/setup.c */
++extern start_info_t *xen_start_info;
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++#define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN)
++#else
++#define is_initial_xendomain() 0
++#endif
 +
-+static int suspend_dev(struct device *dev, void *data)
-+{
-+	int err = 0;
-+	struct xenbus_driver *drv;
-+	struct xenbus_device *xdev;
++/* arch/xen/kernel/evtchn.c */
++/* Force a proper event-channel callback from Xen. */
++void force_evtchn_callback(void);
 +
-+	DPRINTK("");
++/* arch/xen/kernel/process.c */
++void xen_cpu_idle(void);
 +
-+	if (dev->driver == NULL)
-+		return 0;
-+	drv = to_xenbus_driver(dev->driver);
-+	xdev = container_of(dev, struct xenbus_device, dev);
-+	if (drv->suspend)
-+		err = drv->suspend(xdev);
-+	if (err)
-+		printk(KERN_WARNING
-+		       "xenbus: suspend %s failed: %i\n", dev->bus_id, err);
-+	return 0;
-+}
++/* arch/xen/i386/kernel/hypervisor.c */
++void do_hypervisor_callback(struct pt_regs *regs);
 +
-+static int suspend_cancel_dev(struct device *dev, void *data)
-+{
-+	int err = 0;
-+	struct xenbus_driver *drv;
-+	struct xenbus_device *xdev;
++/* arch/xen/i386/mm/hypervisor.c */
++/*
++ * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should already
++ * be MACHINE addresses.
++ */
 +
-+	DPRINTK("");
++void xen_pt_switch(unsigned long ptr);
++void xen_new_user_pt(unsigned long ptr); /* x86_64 only */
++void xen_load_gs(unsigned int selector); /* x86_64 only */
++void xen_tlb_flush(void);
++void xen_invlpg(unsigned long ptr);
 +
-+	if (dev->driver == NULL)
-+		return 0;
-+	drv = to_xenbus_driver(dev->driver);
-+	xdev = container_of(dev, struct xenbus_device, dev);
-+	if (drv->suspend_cancel)
-+		err = drv->suspend_cancel(xdev);
-+	if (err)
-+		printk(KERN_WARNING
-+		       "xenbus: suspend_cancel %s failed: %i\n",
-+		       dev->bus_id, err);
-+	return 0;
-+}
++void xen_l1_entry_update(pte_t *ptr, pte_t val);
++void xen_l2_entry_update(pmd_t *ptr, pmd_t val);
++void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */
++void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */
++void xen_pgd_pin(unsigned long ptr);
++void xen_pgd_unpin(unsigned long ptr);
 +
-+static int resume_dev(struct device *dev, void *data)
-+{
-+	int err;
-+	struct xenbus_driver *drv;
-+	struct xenbus_device *xdev;
++void xen_set_ldt(const void *ptr, unsigned int ents);
 +
-+	DPRINTK("");
++#ifdef CONFIG_SMP
++#include <linux/cpumask.h>
++void xen_tlb_flush_all(void);
++void xen_invlpg_all(unsigned long ptr);
++void xen_tlb_flush_mask(cpumask_t *mask);
++void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr);
++#endif
 +
-+	if (dev->driver == NULL)
-+		return 0;
++/* Returns zero on success else negative errno. */
++int xen_create_contiguous_region(
++    unsigned long vstart, unsigned int order, unsigned int address_bits);
++void xen_destroy_contiguous_region(
++    unsigned long vstart, unsigned int order);
 +
-+	drv = to_xenbus_driver(dev->driver);
-+	xdev = container_of(dev, struct xenbus_device, dev);
++struct page;
 +
-+	err = talk_to_otherend(xdev);
-+	if (err) {
-+		printk(KERN_WARNING
-+		       "xenbus: resume (talk_to_otherend) %s failed: %i\n",
-+		       dev->bus_id, err);
-+		return err;
-+	}
++int xen_limit_pages_to_max_mfn(
++	struct page *pages, unsigned int order, unsigned int address_bits);
 +
-+	xdev->state = XenbusStateInitialising;
++/* Turn jiffies into Xen system time. */
++u64 jiffies_to_st(unsigned long jiffies);
 +
-+	if (drv->resume) {
-+		err = drv->resume(xdev);
-+		if (err) { 
-+			printk(KERN_WARNING
-+			       "xenbus: resume %s failed: %i\n", 
-+			       dev->bus_id, err);
-+			return err;
-+		}
-+	}
++#ifdef CONFIG_XEN_SCRUB_PAGES
++void scrub_pages(void *, unsigned int);
++#else
++#define scrub_pages(_p,_n) ((void)0)
++#endif
 +
-+	err = watch_otherend(xdev);
-+	if (err) {
-+		printk(KERN_WARNING
-+		       "xenbus_probe: resume (watch_otherend) %s failed: "
-+		       "%d.\n", dev->bus_id, err);
-+		return err;
-+	}
++#include <xen/hypercall.h>
 +
-+	return 0;
-+}
++#if defined(CONFIG_X86_64)
++#define MULTI_UVMFLAGS_INDEX 2
++#define MULTI_UVMDOMID_INDEX 3
++#else
++#define MULTI_UVMFLAGS_INDEX 3
++#define MULTI_UVMDOMID_INDEX 4
++#endif
 +
-+void xenbus_suspend(void)
++#ifdef CONFIG_XEN
++#define is_running_on_xen() 1
++#else
++extern char *hypercall_stubs;
++#define is_running_on_xen() (!!hypercall_stubs)
++#endif
++
++static inline int
++HYPERVISOR_yield(
++	void)
 +{
-+	DPRINTK("");
++	int rc = HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
 +
-+	if (!xenbus_frontend.error)
-+		bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev);
-+	xenbus_backend_suspend(suspend_dev);
-+	xs_suspend();
++#if CONFIG_XEN_COMPAT <= 0x030002
++	if (rc == -ENOSYS)
++		rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0);
++#endif
++
++	return rc;
 +}
-+EXPORT_SYMBOL_GPL(xenbus_suspend);
 +
-+void xenbus_resume(void)
++static inline int
++HYPERVISOR_block(
++	void)
 +{
-+	xb_init_comms();
-+	xs_resume();
-+	if (!xenbus_frontend.error)
-+		bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev);
-+	xenbus_backend_resume(resume_dev);
++	int rc = HYPERVISOR_sched_op(SCHEDOP_block, NULL);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++	if (rc == -ENOSYS)
++		rc = HYPERVISOR_sched_op_compat(SCHEDOP_block, 0);
++#endif
++
++	return rc;
 +}
-+EXPORT_SYMBOL_GPL(xenbus_resume);
 +
-+void xenbus_suspend_cancel(void)
++static inline void /*__noreturn*/
++HYPERVISOR_shutdown(
++	unsigned int reason)
 +{
-+	xs_suspend_cancel();
-+	if (!xenbus_frontend.error)
-+		bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev);
-+	xenbus_backend_resume(suspend_cancel_dev);
++	struct sched_shutdown sched_shutdown = {
++		.reason = reason
++	};
++
++	VOID(HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown));
++#if CONFIG_XEN_COMPAT <= 0x030002
++	VOID(HYPERVISOR_sched_op_compat(SCHEDOP_shutdown, reason));
++#endif
++	/* Don't recurse needlessly. */
++	BUG_ON(reason != SHUTDOWN_crash);
++	for(;;);
 +}
-+EXPORT_SYMBOL_GPL(xenbus_suspend_cancel);
 +
-+/* A flag to determine if xenstored is 'ready' (i.e. has started) */
-+int xenstored_ready = 0;
++static inline int __must_check
++HYPERVISOR_poll(
++	evtchn_port_t *ports, unsigned int nr_ports, u64 timeout)
++{
++	int rc;
++	struct sched_poll sched_poll = {
++		.nr_ports = nr_ports,
++		.timeout = jiffies_to_st(timeout)
++	};
++	set_xen_guest_handle(sched_poll.ports, ports);
 +
++	rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll);
++#if CONFIG_XEN_COMPAT <= 0x030002
++	if (rc == -ENOSYS)
++		rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0);
++#endif
 +
-+int register_xenstore_notifier(struct notifier_block *nb)
-+{
-+	int ret = 0;
++	return rc;
++}
 +
-+	if (xenstored_ready > 0)
-+		ret = nb->notifier_call(nb, 0, NULL);
-+	else
-+		atomic_notifier_chain_register(&xenstore_chain, nb);
++#ifdef CONFIG_XEN
 +
-+	return ret;
++static inline void
++MULTI_update_va_mapping(
++    multicall_entry_t *mcl, unsigned long va,
++    pte_t new_val, unsigned long flags)
++{
++    mcl->op = __HYPERVISOR_update_va_mapping;
++    mcl->args[0] = va;
++#if defined(CONFIG_X86_64)
++    mcl->args[1] = new_val.pte;
++#elif defined(CONFIG_X86_PAE)
++    mcl->args[1] = new_val.pte_low;
++    mcl->args[2] = new_val.pte_high;
++#else
++    mcl->args[1] = new_val.pte_low;
++    mcl->args[2] = 0;
++#endif
++    mcl->args[MULTI_UVMFLAGS_INDEX] = flags;
 +}
-+EXPORT_SYMBOL_GPL(register_xenstore_notifier);
 +
-+void unregister_xenstore_notifier(struct notifier_block *nb)
++static inline void
++MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd,
++		     void *uop, unsigned int count)
 +{
-+	atomic_notifier_chain_unregister(&xenstore_chain, nb);
++    mcl->op = __HYPERVISOR_grant_table_op;
++    mcl->args[0] = cmd;
++    mcl->args[1] = (unsigned long)uop;
++    mcl->args[2] = count;
 +}
-+EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
 +
++#else /* !defined(CONFIG_XEN) */
++
++/* Multicalls not supported for HVM guests. */
++#define MULTI_update_va_mapping(a,b,c,d) ((void)0)
++#define MULTI_grant_table_op(a,b,c,d) ((void)0)
++
++#endif
++
++#endif /* __HYPERVISOR_H__ */
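
A hedged sketch of batching with the MULTI_* helpers above (the va/pte
pairs are placeholders; UVMF_TLB_FLUSH and UVMF_LOCAL are the
update_va_mapping flags from xen/interface/xen.h):

	multicall_entry_t mcl[2];

	MULTI_update_va_mapping(&mcl[0], va0, pte0, 0);
	MULTI_update_va_mapping(&mcl[1], va1, pte1, UVMF_TLB_FLUSH|UVMF_LOCAL);
	if (unlikely(HYPERVISOR_multicall(mcl, 2)))
		BUG();	/* neither update may fail here */
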
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/io.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/io.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,389 @@
++#ifndef _ASM_IO_H
++#define _ASM_IO_H
++
++#include <linux/string.h>
++#include <linux/compiler.h>
++
++/*
++ * This file contains the definitions for the x86 IO instructions
++ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
++ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
++ * versions of the single-IO instructions (inb_p/inw_p/..).
++ *
++ * This file is not meant to be obfuscating: it's just complicated
++ * to (a) handle it all in a way that makes gcc able to optimize it
++ * as well as possible and (b) avoid writing the same thing
++ * over and over again with slight variations and possibly making a
++ * mistake somewhere.
++ */
 +
-+void xenbus_probe(void *unused)
-+{
-+	BUG_ON((xenstored_ready <= 0));
++/*
++ * Thanks to James van Artsdalen for a better timing-fix than
++ * the two short jumps: using outb's to a nonexistent port seems
++ * to guarantee better timings even on fast machines.
++ *
++ * On the other hand, I'd like to be sure of a non-existent port:
++ * I feel a bit unsafe about using 0x80 (should be safe, though)
++ *
++ *		Linus
++ */
 +
-+	/* Enumerate devices in xenstore and watch for changes. */
-+	xenbus_probe_devices(&xenbus_frontend);
-+	register_xenbus_watch(&fe_watch);
-+	xenbus_backend_probe_and_watch();
++ /*
++  *  Bit simplified and optimized by Jan Hubicka
++  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
++  *
++  *  isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
++  *  isa_read[wl] and isa_write[wl] fixed
++  *  - Arnaldo Carvalho de Melo <acme at conectiva.com.br>
++  */
 +
-+	/* Notify others that xenstore is up */
-+	atomic_notifier_call_chain(&xenstore_chain, 0, NULL);
-+}
++#define IO_SPACE_LIMIT 0xffff
 +
++#define XQUAD_PORTIO_BASE 0xfe400000
++#define XQUAD_PORTIO_QUAD 0x40000  /* 256k per quad. */
 +
-+#if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST)
-+static struct file_operations xsd_kva_fops;
-+static struct proc_dir_entry *xsd_kva_intf;
-+static struct proc_dir_entry *xsd_port_intf;
++#ifdef __KERNEL__
 +
-+static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma)
-+{
-+	size_t size = vma->vm_end - vma->vm_start;
++#include <asm-generic/iomap.h>
 +
-+	if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
-+		return -EINVAL;
++#include <linux/vmalloc.h>
++#include <asm/fixmap.h>
 +
-+	if (remap_pfn_range(vma, vma->vm_start, mfn_to_pfn(xen_store_mfn),
-+			    size, vma->vm_page_prot))
-+		return -EAGAIN;
++/*
++ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
++ * access
++ */
++#define xlate_dev_mem_ptr(p)	__va(p)
 +
-+	return 0;
-+}
++/*
++ * Convert a virtual cached pointer to an uncached pointer
++ */
++#define xlate_dev_kmem_ptr(p)	p
 +
-+static int xsd_kva_read(char *page, char **start, off_t off,
-+			int count, int *eof, void *data)
++/**
++ *	virt_to_phys	-	map virtual addresses to physical
++ *	@address: address to remap
++ *
++ *	The returned physical address is the physical (CPU) mapping for
++ *	the memory address given. It is only valid to use this function on
++ *	addresses directly mapped or allocated via kmalloc. 
++ *
++ *	This function does not give bus mappings for DMA transfers. In
++ *	almost all conceivable cases a device driver should not be using
++ *	this function.
++ */
++ 
++static inline unsigned long virt_to_phys(volatile void * address)
 +{
-+	int len;
-+
-+	len  = sprintf(page, "0x%p", xen_store_interface);
-+	*eof = 1;
-+	return len;
++	return __pa(address);
 +}
 +
-+static int xsd_port_read(char *page, char **start, off_t off,
-+			 int count, int *eof, void *data)
-+{
-+	int len;
-+
-+	len  = sprintf(page, "%d", xen_store_evtchn);
-+	*eof = 1;
-+	return len;
-+}
-+#endif
++/**
++ *	phys_to_virt	-	map physical address to virtual
++ *	@address: address to remap
++ *
++ *	The returned virtual address is a current CPU mapping for
++ *	the memory address given. It is only valid to use this function on
++ *	addresses that have a kernel mapping.
++ *
++ *	This function does not handle bus mappings for DMA transfers. In
++ *	almost all conceivable cases a device driver should not be using
++ *	this function.
++ */
 +
-+static int xenbus_probe_init(void)
++static inline void * phys_to_virt(unsigned long address)
 +{
-+	int err = 0;
-+	unsigned long page = 0;
-+
-+	DPRINTK("");
-+
-+	if (!is_running_on_xen())
-+		return -ENODEV;
-+
-+	/* Register ourselves with the kernel bus subsystem */
-+	xenbus_frontend.error = bus_register(&xenbus_frontend.bus);
-+	if (xenbus_frontend.error)
-+		printk(KERN_WARNING
-+		       "XENBUS: Error registering frontend bus: %i\n",
-+		       xenbus_frontend.error);
-+	xenbus_backend_bus_register();
-+
-+	/*
-+	 * Domain0 doesn't have a store_evtchn or store_mfn yet.
-+	 */
-+	if (is_initial_xendomain()) {
-+		struct evtchn_alloc_unbound alloc_unbound;
-+
-+		/* Allocate page. */
-+		page = get_zeroed_page(GFP_KERNEL);
-+		if (!page)
-+			return -ENOMEM;
++	return __va(address);
++}
 +
-+		xen_store_mfn = xen_start_info->store_mfn =
-+			pfn_to_mfn(virt_to_phys((void *)page) >>
-+				   PAGE_SHIFT);
++/*
++ * Change "struct page" to physical address.
++ */
++#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
++#define page_to_phys(page)	 (phys_to_machine(page_to_pseudophys(page)))
++#define page_to_bus(page)	 (phys_to_machine(page_to_pseudophys(page)))
 +
-+		/* Next allocate a local port which xenstored can bind to */
-+		alloc_unbound.dom        = DOMID_SELF;
-+		alloc_unbound.remote_dom = 0;
++#define bio_to_pseudophys(bio)	 (page_to_pseudophys(bio_page((bio))) + \
++				  (unsigned long) bio_offset((bio)))
++#define bvec_to_pseudophys(bv)	 (page_to_pseudophys((bv)->bv_page) + \
++				  (unsigned long) (bv)->bv_offset)
 +
-+		err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
-+						  &alloc_unbound);
-+		if (err == -ENOSYS)
-+			goto err;
-+		BUG_ON(err);
-+		xen_store_evtchn = xen_start_info->store_evtchn =
-+			alloc_unbound.port;
++#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
++	(((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
++	 ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
++	  bvec_to_pseudophys((vec2))))
 +
-+#if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST)
-+		/* And finally publish the above info in /proc/xen */
-+		xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0600);
-+		if (xsd_kva_intf) {
-+			memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops,
-+			       sizeof(xsd_kva_fops));
-+			xsd_kva_fops.mmap = xsd_kva_mmap;
-+			xsd_kva_intf->proc_fops = &xsd_kva_fops;
-+			xsd_kva_intf->read_proc = xsd_kva_read;
-+		}
-+		xsd_port_intf = create_xen_proc_entry("xsd_port", 0400);
-+		if (xsd_port_intf)
-+			xsd_port_intf->read_proc = xsd_port_read;
-+#endif
-+		xen_store_interface = mfn_to_virt(xen_store_mfn);
-+	} else {
-+		xenstored_ready = 1;
-+#ifdef CONFIG_XEN
-+		xen_store_evtchn = xen_start_info->store_evtchn;
-+		xen_store_mfn = xen_start_info->store_mfn;
-+		xen_store_interface = mfn_to_virt(xen_store_mfn);
-+#else
-+		xen_store_evtchn = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN);
-+		xen_store_mfn = hvm_get_parameter(HVM_PARAM_STORE_PFN);
-+		xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT,
-+					      PAGE_SIZE);
-+#endif
-+	}
++extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
 +
++/**
++ * ioremap     -   map bus memory into CPU space
++ * @offset:    bus address of the memory
++ * @size:      size of the resource to map
++ *
++ * ioremap performs a platform specific sequence of operations to
++ * make bus memory CPU accessible via the readb/readw/readl/writeb/
++ * writew/writel functions and the other mmio helpers. The returned
++ * address is not guaranteed to be usable directly as a virtual
++ * address. 
++ */
 +
-+	xenbus_dev_init();
++static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
++{
++	return __ioremap(offset, size, 0);
++}
 +
-+	/* Initialize the interface to xenstore. */
-+	err = xs_init();
-+	if (err) {
-+		printk(KERN_WARNING
-+		       "XENBUS: Error initializing xenstore comms: %i\n", err);
-+		goto err;
-+	}
++extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size);
++extern void iounmap(volatile void __iomem *addr);
 +
-+	/* Register ourselves with the kernel device subsystem */
-+	if (!xenbus_frontend.error) {
-+		xenbus_frontend.error = device_register(&xenbus_frontend.dev);
-+		if (xenbus_frontend.error) {
-+			bus_unregister(&xenbus_frontend.bus);
-+			printk(KERN_WARNING
-+			       "XENBUS: Error registering frontend device: %i\n",
-+			       xenbus_frontend.error);
-+		}
-+	}
-+	xenbus_backend_device_register();
++/*
++ * bt_ioremap() and bt_iounmap() are for temporary early boot-time
++ * mappings, before the real ioremap() is functional.
++ * A boot-time mapping is currently limited to at most 16 pages.
++ */
++extern void *bt_ioremap(unsigned long offset, unsigned long size);
++extern void bt_iounmap(void *addr, unsigned long size);
 +
-+	if (!is_initial_xendomain())
-+		xenbus_probe(NULL);
++/* Use early IO mappings for DMI because it's initialized early */
++#define dmi_ioremap bt_ioremap
++#define dmi_iounmap bt_iounmap
++#define dmi_alloc alloc_bootmem
 +
-+	return 0;
++/*
++ * ISA I/O bus memory addresses are 1:1 with the physical address.
++ */
++#define isa_virt_to_bus(_x) ({ BUG(); virt_to_bus(_x); })
++#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
++#define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))
 +
-+ err:
-+	if (page)
-+		free_page(page);
++/*
++ * However PCI ones are not necessarily 1:1 and therefore these interfaces
++ * are forbidden in portable PCI drivers.
++ *
++ * Allow them on x86 for legacy drivers, though.
++ */
++#define virt_to_bus(_x) phys_to_machine(__pa(_x))
++#define bus_to_virt(_x) __va(machine_to_phys(_x))
 +
-+	/*
-+	 * Do not unregister the xenbus front/backend buses here. The buses
-+	 * must exist because front/backend drivers will use them when they are
-+	 * registered.
-+	 */
++/*
++ * readX/writeX() are used to access memory mapped devices. On some
++ * architectures the memory mapped IO stuff needs to be accessed
++ * differently. On the x86 architecture, we just read/write the
++ * memory location directly.
++ */
 +
-+	return err;
++static inline unsigned char readb(const volatile void __iomem *addr)
++{
++	return *(volatile unsigned char __force *) addr;
 +}
-+
-+#ifdef CONFIG_XEN
-+postcore_initcall(xenbus_probe_init);
-+MODULE_LICENSE("Dual BSD/GPL");
-+#else
-+int xenbus_init(void)
++static inline unsigned short readw(const volatile void __iomem *addr)
 +{
-+	return xenbus_probe_init();
++	return *(volatile unsigned short __force *) addr;
 +}
-+#endif
-+
-+static int is_disconnected_device(struct device *dev, void *data)
++static inline unsigned int readl(const volatile void __iomem *addr)
 +{
-+	struct xenbus_device *xendev = to_xenbus_device(dev);
-+	struct device_driver *drv = data;
-+	struct xenbus_driver *xendrv;
-+
-+	/*
-+	 * A device with no driver will never connect. We care only about
-+	 * devices which should currently be in the process of connecting.
-+	 */
-+	if (!dev->driver)
-+		return 0;
-+
-+	/* Is this search limited to a particular driver? */
-+	if (drv && (dev->driver != drv))
-+		return 0;
-+
-+	xendrv = to_xenbus_driver(dev->driver);
-+	return (xendev->state < XenbusStateConnected ||
-+		(xendrv->is_ready && !xendrv->is_ready(xendev)));
++	return *(volatile unsigned int __force *) addr;
 +}
++#define readb_relaxed(addr) readb(addr)
++#define readw_relaxed(addr) readw(addr)
++#define readl_relaxed(addr) readl(addr)
++#define __raw_readb readb
++#define __raw_readw readw
++#define __raw_readl readl
 +
-+static int exists_disconnected_device(struct device_driver *drv)
++static inline void writeb(unsigned char b, volatile void __iomem *addr)
 +{
-+	if (xenbus_frontend.error)
-+		return xenbus_frontend.error;
-+	return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
-+				is_disconnected_device);
++	*(volatile unsigned char __force *) addr = b;
 +}
-+
-+static int print_device_status(struct device *dev, void *data)
++static inline void writew(unsigned short b, volatile void __iomem *addr)
 +{
-+	struct xenbus_device *xendev = to_xenbus_device(dev);
-+	struct device_driver *drv = data;
-+
-+	/* Is this operation limited to a particular driver? */
-+	if (drv && (dev->driver != drv))
-+		return 0;
++	*(volatile unsigned short __force *) addr = b;
++}
++static inline void writel(unsigned int b, volatile void __iomem *addr)
++{
++	*(volatile unsigned int __force *) addr = b;
++}
++#define __raw_writeb writeb
++#define __raw_writew writew
++#define __raw_writel writel
 +
-+	if (!dev->driver) {
-+		/* Information only: is this too noisy? */
-+		printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
-+		       xendev->nodename);
-+	} else if (xendev->state < XenbusStateConnected) {
-+		enum xenbus_state rstate = XenbusStateUnknown;
-+		if (xendev->otherend)
-+			rstate = xenbus_read_driver_state(xendev->otherend);
-+		printk(KERN_WARNING "XENBUS: Timeout connecting "
-+		       "to device: %s (local state %d, remote state %d)\n",
-+		       xendev->nodename, xendev->state, rstate);
-+	}
++#define mmiowb()
 +
-+	return 0;
++static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
++{
++	memset((void __force *) addr, val, count);
++}
++static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
++{
++	__memcpy(dst, (void __force *) src, count);
++}
++static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
++{
++	__memcpy((void __force *) dst, src, count);
 +}
-+
-+/* We only wait for device setup after most initcalls have run. */
-+static int ready_to_wait_for_devices;
 +
 +/*
-+ * On a 5-minute timeout, wait for all devices currently configured.  We need
-+ * to do this to guarantee that the filesystems and / or network devices
-+ * needed for boot are available, before we can allow the boot to proceed.
-+ *
-+ * This needs to be on a late_initcall, to happen after the frontend device
-+ * drivers have been initialised, but before the root fs is mounted.
-+ *
-+ * A possible improvement here would be to have the tools add a per-device
-+ * flag to the store entry, indicating whether it is needed at boot time.
-+ * This would allow people who knew what they were doing to accelerate their
-+ * boot slightly, but of course needs tools or manual intervention to set up
-+ * those flags correctly.
++ * ISA space is 'always mapped' on a typical x86 system, no need to
++ * explicitly ioremap() it. The fact that the ISA IO space is mapped
++ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
++ * are physical addresses. The following constant pointer can be
++ * used as the IO-area pointer (it can be iounmapped as well, so the
++ * analogy with PCI is quite close):
 + */
-+static void wait_for_devices(struct xenbus_driver *xendrv)
-+{
-+	unsigned long start = jiffies;
-+	struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
-+	unsigned int seconds_waited = 0;
-+
-+	if (!ready_to_wait_for_devices || !is_running_on_xen())
-+		return;
-+
-+	while (exists_disconnected_device(drv)) {
-+		if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
-+			if (!seconds_waited)
-+				printk(KERN_WARNING "XENBUS: Waiting for "
-+				       "devices to initialise: ");
-+			seconds_waited += 5;
-+			printk("%us...", 300 - seconds_waited);
-+			if (seconds_waited == 300)
-+				break;
-+		}
-+		
-+		schedule_timeout_interruptible(HZ/10);
-+	}
++#define __ISA_IO_base ((char __iomem *)(fix_to_virt(FIX_ISAMAP_BEGIN)))
 +
-+	if (seconds_waited)
-+		printk("\n");
++/*
++ * Again, i386 does not require memory-IO-specific functions.
++ */
 +
-+	bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
-+			 print_device_status);
-+}
++#define eth_io_copy_and_sum(a,b,c,d)		eth_copy_and_sum((a),(void __force *)(b),(c),(d))
 +
-+#ifndef MODULE
-+static int __init boot_wait_for_devices(void)
++/**
++ *	check_signature		-	find BIOS signatures
++ *	@io_addr: mmio address to check 
++ *	@signature:  signature block
++ *	@length: length of signature
++ *
++ *	Perform a signature comparison with the mmio address io_addr. This
++ *	address should have been obtained by ioremap.
++ *	Returns 1 on a match.
++ */
++ 
++static inline int check_signature(volatile void __iomem * io_addr,
++	const unsigned char *signature, int length)
 +{
-+	if (!xenbus_frontend.error) {
-+		ready_to_wait_for_devices = 1;
-+		wait_for_devices(NULL);
-+	}
-+	return 0;
++	int retval = 0;
++	do {
++		if (readb(io_addr) != *signature)
++			goto out;
++		io_addr++;
++		signature++;
++		length--;
++	} while (length);
++	retval = 1;
++out:
++	return retval;
 +}
 +
-+late_initcall(boot_wait_for_devices);
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/xenbus/xenbus_probe.h tmp-linux-2.6-xen.patch/drivers/xen/xenbus/xenbus_probe.h
---- pristine-linux-2.6.18.2/drivers/xen/xenbus/xenbus_probe.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/xenbus/xenbus_probe.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,75 @@
-+/******************************************************************************
-+ * xenbus_probe.h
-+ *
-+ * Talks to Xen Store to figure out what devices we have.
++/*
++ *	Cache management
 + *
-+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
-+ * Copyright (C) 2005 XenSource Ltd.
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
++ *	This is needed for two cases:
++ *	1. Out of order aware processors
++ *	2. Accidentally out of order processors (PPro errata #51)
 + */
++ 
++#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
 +
-+#ifndef _XENBUS_PROBE_H
-+#define _XENBUS_PROBE_H
-+
-+#if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE)
-+extern void xenbus_backend_suspend(int (*fn)(struct device *, void *));
-+extern void xenbus_backend_resume(int (*fn)(struct device *, void *));
-+extern void xenbus_backend_probe_and_watch(void);
-+extern void xenbus_backend_bus_register(void);
-+extern void xenbus_backend_device_register(void);
-+#else
-+static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {}
-+static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {}
-+static inline void xenbus_backend_probe_and_watch(void) {}
-+static inline void xenbus_backend_bus_register(void) {}
-+static inline void xenbus_backend_device_register(void) {}
-+#endif
-+
-+struct xen_bus_type
++static inline void flush_write_buffers(void)
 +{
-+	char *root;
-+	int error;
-+	unsigned int levels;
-+	int (*get_bus_id)(char bus_id[BUS_ID_SIZE], const char *nodename);
-+	int (*probe)(const char *type, const char *dir);
-+	struct bus_type bus;
-+	struct device dev;
-+};
++	__asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
++}
++
++#define dma_cache_inv(_start,_size)		flush_write_buffers()
++#define dma_cache_wback(_start,_size)		flush_write_buffers()
++#define dma_cache_wback_inv(_start,_size)	flush_write_buffers()
 +
-+extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
-+extern int xenbus_dev_probe(struct device *_dev);
-+extern int xenbus_dev_remove(struct device *_dev);
-+extern int xenbus_register_driver_common(struct xenbus_driver *drv,
-+					 struct xen_bus_type *bus);
-+extern int xenbus_probe_node(struct xen_bus_type *bus,
-+			     const char *type,
-+			     const char *nodename);
-+extern int xenbus_probe_devices(struct xen_bus_type *bus);
++#else
 +
-+extern void dev_changed(const char *node, struct xen_bus_type *bus);
++/* Nothing to do */
++
++#define dma_cache_inv(_start,_size)		do { } while (0)
++#define dma_cache_wback(_start,_size)		do { } while (0)
++#define dma_cache_wback_inv(_start,_size)	do { } while (0)
++#define flush_write_buffers()
 +
 +#endif
 +
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/xenbus/xenbus_xs.c tmp-linux-2.6-xen.patch/drivers/xen/xenbus/xenbus_xs.c
---- pristine-linux-2.6.18.2/drivers/xen/xenbus/xenbus_xs.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/xenbus/xenbus_xs.c	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,886 @@
-+/******************************************************************************
-+ * xenbus_xs.c
-+ *
-+ * This is the kernel equivalent of the "xs" library.  We don't need everything
-+ * and we use xenbus_comms for communication.
-+ *
-+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
++#endif /* __KERNEL__ */
 +
-+#include <linux/unistd.h>
-+#include <linux/errno.h>
-+#include <linux/types.h>
-+#include <linux/uio.h>
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+#include <linux/err.h>
-+#include <linux/slab.h>
-+#include <linux/fcntl.h>
-+#include <linux/kthread.h>
-+#include <linux/rwsem.h>
-+#include <linux/module.h>
-+#include <linux/mutex.h>
-+#include <xen/xenbus.h>
-+#include "xenbus_comms.h"
++#ifdef SLOW_IO_BY_JUMPING
++#define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:"
++#else
++#define __SLOW_DOWN_IO "outb %%al,$0x80;"
++#endif
 +
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
++static inline void slow_down_io(void) {
++	__asm__ __volatile__(
++		__SLOW_DOWN_IO
++#ifdef REALLY_SLOW_IO
++		__SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
 +#endif
++		: : );
++}
 +
-+#ifndef PF_NOFREEZE /* Old kernel (pre-2.6.6). */
-+#define PF_NOFREEZE	0
++#ifdef CONFIG_X86_NUMAQ
++extern void *xquad_portio;    /* Where the IO area was mapped */
++#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
++#define __BUILDIO(bwl,bw,type) \
++static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \
++	if (xquad_portio) \
++		write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \
++	else \
++		out##bwl##_local(value, port); \
++} \
++static inline void out##bwl(unsigned type value, int port) { \
++	out##bwl##_quad(value, port, 0); \
++} \
++static inline unsigned type in##bwl##_quad(int port, int quad) { \
++	if (xquad_portio) \
++		return read##bwl(XQUAD_PORT_ADDR(port, quad)); \
++	else \
++		return in##bwl##_local(port); \
++} \
++static inline unsigned type in##bwl(int port) { \
++	return in##bwl##_quad(port, 0); \
++}
++#else
++#define __BUILDIO(bwl,bw,type) \
++static inline void out##bwl(unsigned type value, int port) { \
++	out##bwl##_local(value, port); \
++} \
++static inline unsigned type in##bwl(int port) { \
++	return in##bwl##_local(port); \
++}
 +#endif
 +
-+struct xs_stored_msg {
-+	struct list_head list;
 +
-+	struct xsd_sockmsg hdr;
++#define BUILDIO(bwl,bw,type) \
++static inline void out##bwl##_local(unsigned type value, int port) { \
++	__asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \
++} \
++static inline unsigned type in##bwl##_local(int port) { \
++	unsigned type value; \
++	__asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \
++	return value; \
++} \
++static inline void out##bwl##_local_p(unsigned type value, int port) { \
++	out##bwl##_local(value, port); \
++	slow_down_io(); \
++} \
++static inline unsigned type in##bwl##_local_p(int port) { \
++	unsigned type value = in##bwl##_local(port); \
++	slow_down_io(); \
++	return value; \
++} \
++__BUILDIO(bwl,bw,type) \
++static inline void out##bwl##_p(unsigned type value, int port) { \
++	out##bwl(value, port); \
++	slow_down_io(); \
++} \
++static inline unsigned type in##bwl##_p(int port) { \
++	unsigned type value = in##bwl(port); \
++	slow_down_io(); \
++	return value; \
++} \
++static inline void outs##bwl(int port, const void *addr, unsigned long count) { \
++	__asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \
++} \
++static inline void ins##bwl(int port, void *addr, unsigned long count) { \
++	__asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \
++}
 +
-+	union {
-+		/* Queued replies. */
-+		struct {
-+			char *body;
-+		} reply;
++BUILDIO(b,b,char)
++BUILDIO(w,w,short)
++BUILDIO(l,,int)
 +
-+		/* Queued watch events. */
-+		struct {
-+			struct xenbus_watch *handle;
-+			char **vec;
-+			unsigned int vec_size;
-+		} watch;
-+	} u;
-+};
++/* We will be supplying our own /dev/mem implementation */
++#define ARCH_HAS_DEV_MEM
 +
-+struct xs_handle {
-+	/* A list of replies. Currently only one will ever be outstanding. */
-+	struct list_head reply_list;
-+	spinlock_t reply_lock;
-+	wait_queue_head_t reply_waitq;
++#endif
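
For readers tracing the port-I/O macros above, this is a hand expansion
of what BUILDIO(b,b,char) generates in the non-NUMAQ case (a sketch for
illustration, not code from the patch; the _p variants additionally call
slow_down_io() after each access):

    static inline void outb_local(unsigned char value, int port)
    {
        __asm__ __volatile__("outb %b0, %w1" : : "a"(value), "Nd"(port));
    }
    static inline unsigned char inb_local(int port)
    {
        unsigned char value;
        __asm__ __volatile__("inb %w1, %b0" : "=a"(value) : "Nd"(port));
        return value;
    }
    /* __BUILDIO() then wraps these as the public outb()/inb(): */
    static inline void outb(unsigned char value, int port)
    {
        outb_local(value, port);
    }
    static inline unsigned char inb(int port)
    {
        return inb_local(port);
    }
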
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/irqflags.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/irqflags.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,127 @@
++/*
++ * include/asm-i386/irqflags.h
++ *
++ * IRQ flags handling
++ *
++ * This file gets included from lowlevel asm headers too, to provide
++ * wrapped versions of the local_irq_*() APIs, based on the
++ * raw_local_irq_*() functions from the lowlevel headers.
++ */
++#ifndef _ASM_IRQFLAGS_H
++#define _ASM_IRQFLAGS_H
 +
-+	/*
-+	 * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex.
-+	 * response_mutex is never taken simultaneously with the other three.
-+	 */
++#ifndef __ASSEMBLY__
 +
-+	/* One request at a time. */
-+	struct mutex request_mutex;
++/*
++ * The use of 'barrier' in the following reflects these macros' role as
++ * local-lock operations. Reentrancy must be prevented (e.g., by __cli())
++ * /before/ the following critical operations are executed. All critical
++ * operations must complete /before/ reentrancy is permitted again (e.g.,
++ * by __sti()). The Alpha architecture, for example, also includes these
++ * barriers.
++ */
 +
-+	/* Protect xenbus reader thread against save/restore. */
-+	struct mutex response_mutex;
++#define __raw_local_save_flags() (current_vcpu_info()->evtchn_upcall_mask)
 +
-+	/* Protect transactions against save/restore. */
-+	struct rw_semaphore transaction_mutex;
++#define raw_local_save_flags(flags) \
++		do { (flags) = __raw_local_save_flags(); } while (0)
 +
-+	/* Protect watch (de)register against save/restore. */
-+	struct rw_semaphore watch_mutex;
-+};
++#define raw_local_irq_restore(x)					\
++do {									\
++	vcpu_info_t *_vcpu;						\
++	barrier();							\
++	_vcpu = current_vcpu_info();					\
++	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {			\
++		barrier(); /* unmask then check (avoid races) */	\
++		if (unlikely(_vcpu->evtchn_upcall_pending))		\
++			force_evtchn_callback();			\
++	}								\
++} while (0)
 +
-+static struct xs_handle xs_state;
++#define raw_local_irq_disable()						\
++do {									\
++	current_vcpu_info()->evtchn_upcall_mask = 1;			\
++	barrier();							\
++} while (0)
 +
-+/* List of registered watches, and a lock to protect it. */
-+static LIST_HEAD(watches);
-+static DEFINE_SPINLOCK(watches_lock);
++#define raw_local_irq_enable()						\
++do {									\
++	vcpu_info_t *_vcpu;						\
++	barrier();							\
++	_vcpu = current_vcpu_info();					\
++	_vcpu->evtchn_upcall_mask = 0;					\
++	barrier(); /* unmask then check (avoid races) */		\
++	if (unlikely(_vcpu->evtchn_upcall_pending))			\
++		force_evtchn_callback();				\
++} while (0)
 +
-+/* List of pending watch callback events, and a lock to protect it. */
-+static LIST_HEAD(watch_events);
-+static DEFINE_SPINLOCK(watch_events_lock);
++/*
++ * Used in the idle loop; sti takes one instruction cycle
++ * to complete:
++ */
++void raw_safe_halt(void);
 +
 +/*
-+ * Details of the xenwatch callback kernel thread. The thread waits on the
-+ * watch_events_waitq for work to do (queued on watch_events list). When it
-+ * wakes up it acquires the xenwatch_mutex before reading the list and
-+ * carrying out work.
++ * Used when interrupts are already enabled or to
++ * shutdown the processor:
 + */
-+static pid_t xenwatch_pid;
-+/* static */ DEFINE_MUTEX(xenwatch_mutex);
-+static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq);
++void halt(void);
 +
-+static int get_error(const char *errorstring)
++static inline int raw_irqs_disabled_flags(unsigned long flags)
 +{
-+	unsigned int i;
-+
-+	for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) {
-+		if (i == ARRAY_SIZE(xsd_errors) - 1) {
-+			printk(KERN_WARNING
-+			       "XENBUS xen store gave: unknown error %s",
-+			       errorstring);
-+			return EINVAL;
-+		}
-+	}
-+	return xsd_errors[i].errnum;
++	return (flags != 0);
 +}
 +
-+static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len)
-+{
-+	struct xs_stored_msg *msg;
-+	char *body;
++#define raw_irqs_disabled()						\
++({									\
++	unsigned long flags = __raw_local_save_flags();			\
++									\
++	raw_irqs_disabled_flags(flags);					\
++})
 +
-+	spin_lock(&xs_state.reply_lock);
++/*
++ * For spinlocks, etc:
++ */
++#define __raw_local_irq_save()						\
++({									\
++	unsigned long flags = __raw_local_save_flags();			\
++									\
++	raw_local_irq_disable();					\
++									\
++	flags;								\
++})
 +
-+	while (list_empty(&xs_state.reply_list)) {
-+		spin_unlock(&xs_state.reply_lock);
-+		/* XXX FIXME: Avoid synchronous wait for response here. */
-+		wait_event(xs_state.reply_waitq,
-+			   !list_empty(&xs_state.reply_list));
-+		spin_lock(&xs_state.reply_lock);
-+	}
++#define raw_local_irq_save(flags) \
++		do { (flags) = __raw_local_irq_save(); } while (0)
 +
-+	msg = list_entry(xs_state.reply_list.next,
-+			 struct xs_stored_msg, list);
-+	list_del(&msg->list);
++#endif /* __ASSEMBLY__ */
 +
-+	spin_unlock(&xs_state.reply_lock);
++/*
++ * Do the CPU's IRQ-state tracing from assembly code. We call a
++ * C function, so save all the C-clobbered registers:
++ */
++#ifdef CONFIG_TRACE_IRQFLAGS
 +
-+	*type = msg->hdr.type;
-+	if (len)
-+		*len = msg->hdr.len;
-+	body = msg->u.reply.body;
++# define TRACE_IRQS_ON				\
++	pushl %eax;				\
++	pushl %ecx;				\
++	pushl %edx;				\
++	call trace_hardirqs_on;			\
++	popl %edx;				\
++	popl %ecx;				\
++	popl %eax;
 +
-+	kfree(msg);
++# define TRACE_IRQS_OFF				\
++	pushl %eax;				\
++	pushl %ecx;				\
++	pushl %edx;				\
++	call trace_hardirqs_off;		\
++	popl %edx;				\
++	popl %ecx;				\
++	popl %eax;
 +
-+	return body;
-+}
++#else
++# define TRACE_IRQS_ON
++# define TRACE_IRQS_OFF
++#endif
 +
-+void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
-+{
-+	void *ret;
-+	struct xsd_sockmsg req_msg = *msg;
-+	int err;
++#endif
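
In practice these raw_* wrappers are reached through the generic
local_irq_save()/local_irq_restore() macros, so a critical section masks
event-channel upcalls on the current vCPU instead of executing cli/sti.
A minimal sketch of the resulting behaviour (assuming the standard
<linux/irqflags.h> mapping onto the raw_* macros):

    unsigned long flags;

    local_irq_save(flags);      /* evtchn_upcall_mask = 1; no hypercall */
    /* ... critical section: no event upcalls delivered to this vCPU ... */
    local_irq_restore(flags);   /* unmask; if an event became pending
                                 * while masked, force_evtchn_callback()
                                 * delivers it now */
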
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/maddr.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/maddr.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,193 @@
++#ifndef _I386_MADDR_H
++#define _I386_MADDR_H
 +
-+	if (req_msg.type == XS_TRANSACTION_START)
-+		down_read(&xs_state.transaction_mutex);
++#include <xen/features.h>
++#include <xen/interface/xen.h>
 +
-+	mutex_lock(&xs_state.request_mutex);
++/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
++#define INVALID_P2M_ENTRY	(~0UL)
++#define FOREIGN_FRAME_BIT	(1UL<<31)
++#define FOREIGN_FRAME(m)	((m) | FOREIGN_FRAME_BIT)
 +
-+	err = xb_write(msg, sizeof(*msg) + msg->len);
-+	if (err) {
-+		msg->type = XS_ERROR;
-+		ret = ERR_PTR(err);
-+	} else
-+		ret = read_reply(&msg->type, &msg->len);
++/* Definitions for machine and pseudophysical addresses. */
++#ifdef CONFIG_X86_PAE
++typedef unsigned long long paddr_t;
++typedef unsigned long long maddr_t;
++#else
++typedef unsigned long paddr_t;
++typedef unsigned long maddr_t;
++#endif
 +
-+	mutex_unlock(&xs_state.request_mutex);
++#ifdef CONFIG_XEN
 +
-+	if ((req_msg.type == XS_TRANSACTION_END) ||
-+	    ((req_msg.type == XS_TRANSACTION_START) &&
-+	     (msg->type == XS_ERROR)))
-+		up_read(&xs_state.transaction_mutex);
++extern unsigned long *phys_to_machine_mapping;
++extern unsigned long  max_mapnr;
 +
-+	return ret;
-+}
++#undef machine_to_phys_mapping
++extern unsigned long *machine_to_phys_mapping;
++extern unsigned int   machine_to_phys_order;
 +
-+/* Send message to xs, get kmalloc'ed reply.  ERR_PTR() on error. */
-+static void *xs_talkv(struct xenbus_transaction t,
-+		      enum xsd_sockmsg_type type,
-+		      const struct kvec *iovec,
-+		      unsigned int num_vecs,
-+		      unsigned int *len)
++static inline unsigned long pfn_to_mfn(unsigned long pfn)
 +{
-+	struct xsd_sockmsg msg;
-+	void *ret = NULL;
-+	unsigned int i;
-+	int err;
-+
-+	msg.tx_id = t.id;
-+	msg.req_id = 0;
-+	msg.type = type;
-+	msg.len = 0;
-+	for (i = 0; i < num_vecs; i++)
-+		msg.len += iovec[i].iov_len;
-+
-+	mutex_lock(&xs_state.request_mutex);
-+
-+	err = xb_write(&msg, sizeof(msg));
-+	if (err) {
-+		mutex_unlock(&xs_state.request_mutex);
-+		return ERR_PTR(err);
-+	}
++	if (xen_feature(XENFEAT_auto_translated_physmap))
++		return pfn;
++	BUG_ON(max_mapnr && pfn >= max_mapnr);
++	return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT;
++}
 +
-+	for (i = 0; i < num_vecs; i++) {
-+		err = xb_write(iovec[i].iov_base, iovec[i].iov_len);;
-+		if (err) {
-+			mutex_unlock(&xs_state.request_mutex);
-+			return ERR_PTR(err);
-+		}
-+	}
++static inline int phys_to_machine_mapping_valid(unsigned long pfn)
++{
++	if (xen_feature(XENFEAT_auto_translated_physmap))
++		return 1;
++	BUG_ON(max_mapnr && pfn >= max_mapnr);
++	return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
++}
 +
-+	ret = read_reply(&msg.type, len);
++static inline unsigned long mfn_to_pfn(unsigned long mfn)
++{
++	unsigned long pfn;
 +
-+	mutex_unlock(&xs_state.request_mutex);
++	if (xen_feature(XENFEAT_auto_translated_physmap))
++		return mfn;
 +
-+	if (IS_ERR(ret))
-+		return ret;
++	if (unlikely((mfn >> machine_to_phys_order) != 0))
++		return max_mapnr;
 +
-+	if (msg.type == XS_ERROR) {
-+		err = get_error(ret);
-+		kfree(ret);
-+		return ERR_PTR(-err);
-+	}
++	/* The array access can fail (e.g., device space beyond end of RAM). */
++	asm (
++		"1:	movl %1,%0\n"
++		"2:\n"
++		".section .fixup,\"ax\"\n"
++		"3:	movl %2,%0\n"
++		"	jmp  2b\n"
++		".previous\n"
++		".section __ex_table,\"a\"\n"
++		"	.align 4\n"
++		"	.long 1b,3b\n"
++		".previous"
++		: "=r" (pfn)
++		: "m" (machine_to_phys_mapping[mfn]), "m" (max_mapnr) );
 +
-+	if (msg.type != type) {
-+		if (printk_ratelimit())
-+			printk(KERN_WARNING
-+			       "XENBUS unexpected type [%d], expected [%d]\n",
-+			       msg.type, type);
-+		kfree(ret);
-+		return ERR_PTR(-EINVAL);
-+	}
-+	return ret;
++	return pfn;
 +}
 +
-+/* Simplified version of xs_talkv: single message. */
-+static void *xs_single(struct xenbus_transaction t,
-+		       enum xsd_sockmsg_type type,
-+		       const char *string,
-+		       unsigned int *len)
++/*
++ * We detect special mappings in one of two ways:
++ *  1. If the MFN is an I/O page then Xen will set the m2p entry
++ *     to be outside our maximum possible pseudophys range.
++ *  2. If the MFN belongs to a different domain then we will certainly
++ *     not have MFN in our p2m table. Conversely, if the page is ours,
++ *     then we'll have p2m(m2p(MFN))==MFN.
++ * If we detect a special mapping then it doesn't have a 'struct page'.
++ * We force !pfn_valid() by returning an out-of-range pointer.
++ *
++ * NB. These checks require that, for any MFN that is not in our reservation,
++ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
++ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
++ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
++ *
++ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
++ *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
++ *      require. In all the cases we care about, the FOREIGN_FRAME bit is
++ *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
++ */
++static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
 +{
-+	struct kvec iovec;
++	unsigned long pfn = mfn_to_pfn(mfn);
++	if ((pfn < max_mapnr)
++	    && !xen_feature(XENFEAT_auto_translated_physmap)
++	    && (phys_to_machine_mapping[pfn] != mfn))
++		return max_mapnr; /* force !pfn_valid() */
++	return pfn;
++}
 +
-+	iovec.iov_base = (void *)string;
-+	iovec.iov_len = strlen(string) + 1;
-+	return xs_talkv(t, type, &iovec, 1, len);
++static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
++{
++	BUG_ON(max_mapnr && pfn >= max_mapnr);
++	if (xen_feature(XENFEAT_auto_translated_physmap)) {
++		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
++		return;
++	}
++	phys_to_machine_mapping[pfn] = mfn;
 +}
 +
-+/* Many commands only need an ack, don't care what it says. */
-+static int xs_error(char *reply)
++static inline maddr_t phys_to_machine(paddr_t phys)
 +{
-+	if (IS_ERR(reply))
-+		return PTR_ERR(reply);
-+	kfree(reply);
-+	return 0;
++	maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
++	machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
++	return machine;
 +}
 +
-+static unsigned int count_strings(const char *strings, unsigned int len)
++static inline paddr_t machine_to_phys(maddr_t machine)
 +{
-+	unsigned int num;
-+	const char *p;
-+
-+	for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1)
-+		num++;
-+
-+	return num;
++	paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
++	phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
++	return phys;
 +}
 +
-+/* Return the path to dir with /name appended. Buffer must be kfree()'ed. */
-+static char *join(const char *dir, const char *name)
++#ifdef CONFIG_X86_PAE
++static inline paddr_t pte_phys_to_machine(paddr_t phys)
 +{
-+	char *buffer;
-+
-+	if (strlen(name) == 0)
-+		buffer = kasprintf(GFP_KERNEL, "%s", dir);
-+	else
-+		buffer = kasprintf(GFP_KERNEL, "%s/%s", dir, name);
-+	return (!buffer) ? ERR_PTR(-ENOMEM) : buffer;
++	/*
++	 * In PAE mode, the NX bit needs to be dealt with in the value
++	 * passed to pfn_to_mfn(). On x86_64, we need to mask it off,
++	 * but for i386 the conversion to ulong for the argument will
++	 * clip it off.
++	 */
++	maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
++	machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK);
++	return machine;
 +}
 +
-+static char **split(char *strings, unsigned int len, unsigned int *num)
++static inline paddr_t pte_machine_to_phys(maddr_t machine)
 +{
-+	char *p, **ret;
-+
-+	/* Count the strings. */
-+	*num = count_strings(strings, len);
-+
-+	/* Transfer to one big alloc for easy freeing. */
-+	ret = kmalloc(*num * sizeof(char *) + len, GFP_KERNEL);
-+	if (!ret) {
-+		kfree(strings);
-+		return ERR_PTR(-ENOMEM);
-+	}
-+	memcpy(&ret[*num], strings, len);
-+	kfree(strings);
-+
-+	strings = (char *)&ret[*num];
-+	for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1)
-+		ret[(*num)++] = p;
-+
-+	return ret;
++	/*
++	 * In PAE mode, the NX bit needs to be dealt with in the value
++	 * passed to mfn_to_pfn(). On x86_64, we need to mask it off,
++	 * but for i386 the conversion to ulong for the argument will
++	 * clip it off.
++	 */
++	paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
++	phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK);
++	return phys;
 +}
++#endif
 +
-+char **xenbus_directory(struct xenbus_transaction t,
-+			const char *dir, const char *node, unsigned int *num)
++#ifdef CONFIG_X86_PAE
++#define __pte_ma(x)	((pte_t) { (x), (maddr_t)(x) >> 32 } )
++static inline pte_t pfn_pte_ma(unsigned long page_nr, pgprot_t pgprot)
 +{
-+	char *strings, *path;
-+	unsigned int len;
++	pte_t pte;
 +
-+	path = join(dir, node);
-+	if (IS_ERR(path))
-+		return (char **)path;
++	pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \
++					(pgprot_val(pgprot) >> 32);
++	pte.pte_high &= (__supported_pte_mask >> 32);
++	pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \
++							__supported_pte_mask;
++	return pte;
++}
++#else
++#define __pte_ma(x)	((pte_t) { (x) } )
++#define pfn_pte_ma(pfn, prot)	__pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
++#endif
 +
-+	strings = xs_single(t, XS_DIRECTORY, path, &len);
-+	kfree(path);
-+	if (IS_ERR(strings))
-+		return (char **)strings;
++#else /* !CONFIG_XEN */
 +
-+	return split(strings, len, num);
-+}
-+EXPORT_SYMBOL_GPL(xenbus_directory);
++#define pfn_to_mfn(pfn) (pfn)
++#define mfn_to_pfn(mfn) (mfn)
++#define mfn_to_local_pfn(mfn) (mfn)
++#define set_phys_to_machine(pfn, mfn) ((void)0)
++#define phys_to_machine_mapping_valid(pfn) (1)
++#define phys_to_machine(phys) ((maddr_t)(phys))
++#define machine_to_phys(mach) ((paddr_t)(mach))
++#define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot)
++#define __pte_ma(x) __pte(x)
 +
-+/* Check if a path exists. Return 1 if it does. */
-+int xenbus_exists(struct xenbus_transaction t,
-+		  const char *dir, const char *node)
-+{
-+	char **d;
-+	int dir_n;
++#endif /* !CONFIG_XEN */
 +
-+	d = xenbus_directory(t, dir, node, &dir_n);
-+	if (IS_ERR(d))
-+		return 0;
-+	kfree(d);
-+	return 1;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_exists);
++/* VIRT <-> MACHINE conversion */
++#define virt_to_machine(v)	(phys_to_machine(__pa(v)))
++#define virt_to_mfn(v)		(pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
++#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
 +
-+/* Get the value of a single file.
-+ * Returns a kmalloced value: call free() on it after use.
-+ * len indicates length in bytes.
++#endif /* _I386_MADDR_H */
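
To make the pseudophysical/machine split concrete, here is an
illustrative round trip on a non-autotranslated PV guest (a sketch only;
allocation failure handling is elided):

    char *buf = kmalloc(64, GFP_KERNEL); /* directly mapped: __pa() valid */
    maddr_t ma = virt_to_machine(buf);   /* pseudophys -> machine via p2m */
    paddr_t pa = machine_to_phys(ma);    /* machine -> pseudophys via m2p */

    /* For pages in our own reservation, p2m(m2p(MFN)) == MFN, so the
     * round trip is exact. */
    BUG_ON(pa != __pa(buf));
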
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/mmu.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/mmu.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,29 @@
++#ifndef __i386_MMU_H
++#define __i386_MMU_H
++
++#include <asm/semaphore.h>
++/*
++ * The i386 doesn't have an MMU context, but
++ * we put the segment information here.
++ *
++ * cpu_vm_mask is used to optimize ldt flushing.
 + */
-+void *xenbus_read(struct xenbus_transaction t,
-+		  const char *dir, const char *node, unsigned int *len)
-+{
-+	char *path;
-+	void *ret;
++typedef struct { 
++	int size;
++	struct semaphore sem;
++	void *ldt;
++	void *vdso;
++#ifdef CONFIG_XEN
++	int has_foreign_mappings;
++#endif
++} mm_context_t;
 +
-+	path = join(dir, node);
-+	if (IS_ERR(path))
-+		return (void *)path;
++/* mm/memory.c:exit_mmap hook */
++extern void _arch_exit_mmap(struct mm_struct *mm);
++#define arch_exit_mmap(_mm) _arch_exit_mmap(_mm)
 +
-+	ret = xs_single(t, XS_READ, path, len);
-+	kfree(path);
-+	return ret;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_read);
++/* kernel/fork.c:dup_mmap hook */
++extern void _arch_dup_mmap(struct mm_struct *mm);
++#define arch_dup_mmap(mm, oldmm) ((void)(oldmm), _arch_dup_mmap(mm))
 +
-+/* Write the value of a single file.
-+ * Returns -err on failure.
-+ */
-+int xenbus_write(struct xenbus_transaction t,
-+		 const char *dir, const char *node, const char *string)
-+{
-+	const char *path;
-+	struct kvec iovec[2];
-+	int ret;
++#endif
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/mmu_context.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/mmu_context.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,108 @@
++#ifndef __I386_SCHED_H
++#define __I386_SCHED_H
 +
-+	path = join(dir, node);
-+	if (IS_ERR(path))
-+		return PTR_ERR(path);
++#include <asm/desc.h>
++#include <asm/atomic.h>
++#include <asm/pgalloc.h>
++#include <asm/tlbflush.h>
 +
-+	iovec[0].iov_base = (void *)path;
-+	iovec[0].iov_len = strlen(path) + 1;
-+	iovec[1].iov_base = (void *)string;
-+	iovec[1].iov_len = strlen(string);
++/*
++ * Used for LDT copy/destruction.
++ */
++int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
++void destroy_context(struct mm_struct *mm);
 +
-+	ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL));
-+	kfree(path);
-+	return ret;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_write);
 +
-+/* Create a new directory. */
-+int xenbus_mkdir(struct xenbus_transaction t,
-+		 const char *dir, const char *node)
++static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 +{
-+	char *path;
-+	int ret;
++#if 0 /* XEN: no lazy tlb */
++	unsigned cpu = smp_processor_id();
++	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
++		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
++#endif
++}
 +
-+	path = join(dir, node);
-+	if (IS_ERR(path))
-+		return PTR_ERR(path);
++#define prepare_arch_switch(next)	__prepare_arch_switch()
 +
-+	ret = xs_error(xs_single(t, XS_MKDIR, path, NULL));
-+	kfree(path);
-+	return ret;
++static inline void __prepare_arch_switch(void)
++{
++	/*
++	 * Save away %fs and %gs. No need to save %es and %ds, as those
++	 * are always kernel segments while inside the kernel. Must
++	 * happen before reload of cr3/ldt (i.e., not in __switch_to).
++	 */
++	asm volatile ( "mov %%fs,%0 ; mov %%gs,%1"
++		: "=m" (current->thread.fs),
++		  "=m" (current->thread.gs));
++	asm volatile ( "movl %0,%%fs ; movl %0,%%gs"
++		: : "r" (0) );
 +}
-+EXPORT_SYMBOL_GPL(xenbus_mkdir);
 +
-+/* Destroy a file or directory (directories must be empty). */
-+int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node)
++extern void mm_pin(struct mm_struct *mm);
++extern void mm_unpin(struct mm_struct *mm);
++void mm_pin_all(void);
++
++static inline void switch_mm(struct mm_struct *prev,
++			     struct mm_struct *next,
++			     struct task_struct *tsk)
 +{
-+	char *path;
-+	int ret;
++	int cpu = smp_processor_id();
++	struct mmuext_op _op[2], *op = _op;
 +
-+	path = join(dir, node);
-+	if (IS_ERR(path))
-+		return PTR_ERR(path);
++	if (likely(prev != next)) {
++		BUG_ON(!xen_feature(XENFEAT_writable_page_tables) &&
++		       !test_bit(PG_pinned, &virt_to_page(next->pgd)->flags));
 +
-+	ret = xs_error(xs_single(t, XS_RM, path, NULL));
-+	kfree(path);
-+	return ret;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_rm);
++		/* stop flush ipis for the previous mm */
++		cpu_clear(cpu, prev->cpu_vm_mask);
++#if 0 /* XEN: no lazy tlb */
++		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
++		per_cpu(cpu_tlbstate, cpu).active_mm = next;
++#endif
++		cpu_set(cpu, next->cpu_vm_mask);
 +
-+/* Start a transaction: changes by others will not be seen during this
-+ * transaction, and changes will not be visible to others until end.
-+ */
-+int xenbus_transaction_start(struct xenbus_transaction *t)
-+{
-+	char *id_str;
++		/* Re-load page tables: load_cr3(next->pgd) */
++		op->cmd = MMUEXT_NEW_BASEPTR;
++		op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
++		op++;
 +
-+	down_read(&xs_state.transaction_mutex);
++		/*
++		 * load the LDT, if the LDT is different:
++		 */
++		if (unlikely(prev->context.ldt != next->context.ldt)) {
++			/* load_LDT_nolock(&next->context, cpu) */
++			op->cmd = MMUEXT_SET_LDT;
++			op->arg1.linear_addr = (unsigned long)next->context.ldt;
++			op->arg2.nr_ents     = next->context.size;
++			op++;
++		}
 +
-+	id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL);
-+	if (IS_ERR(id_str)) {
-+		up_read(&xs_state.transaction_mutex);
-+		return PTR_ERR(id_str);
++		BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
 +	}
++#if 0 /* XEN: no lazy tlb */
++	else {
++		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
++		BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
 +
-+	t->id = simple_strtoul(id_str, NULL, 0);
-+	kfree(id_str);
-+	return 0;
++		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
++			/* We were in lazy tlb mode and leave_mm disabled 
++			 * tlb flush IPI delivery. We must reload %cr3.
++			 */
++			load_cr3(next->pgd);
++			load_LDT_nolock(&next->context, cpu);
++		}
++	}
++#endif
 +}
-+EXPORT_SYMBOL_GPL(xenbus_transaction_start);
 +
-+/* End a transaction.
-+ * If abandon is true, transaction is discarded instead of committed.
-+ */
-+int xenbus_transaction_end(struct xenbus_transaction t, int abort)
++#define deactivate_mm(tsk, mm) \
++	asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
++
++static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
 +{
-+	char abortstr[2];
-+	int err;
++	if (!test_bit(PG_pinned, &virt_to_page(next->pgd)->flags))
++		mm_pin(next);
++	switch_mm(prev, next, NULL);
++}
 +
-+	if (abort)
-+		strcpy(abortstr, "F");
-+	else
-+		strcpy(abortstr, "T");
++#endif
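
For contrast with the batched hypercall in switch_mm() above, the native
i386 code performs the same two steps as separate privileged operations
(a sketch of the upstream logic, shown only to highlight what the single
HYPERVISOR_mmuext_op() call replaces):

    /* Native equivalent of the MMUEXT_NEW_BASEPTR + MMUEXT_SET_LDT batch: */
    load_cr3(next->pgd);
    if (unlikely(prev->context.ldt != next->context.ldt))
        load_LDT_nolock(&next->context, cpu);
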
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/page.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/page.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,233 @@
++#ifndef _I386_PAGE_H
++#define _I386_PAGE_H
 +
-+	err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL));
++/* PAGE_SHIFT determines the page size */
++#define PAGE_SHIFT	12
++#define PAGE_SIZE	(1UL << PAGE_SHIFT)
++#define PAGE_MASK	(~(PAGE_SIZE-1))
 +
-+	up_read(&xs_state.transaction_mutex);
++#ifdef CONFIG_X86_PAE
++#define __PHYSICAL_MASK_SHIFT	40
++#define __PHYSICAL_MASK		((1ULL << __PHYSICAL_MASK_SHIFT) - 1)
++#define PHYSICAL_PAGE_MASK	(~((1ULL << PAGE_SHIFT) - 1) & __PHYSICAL_MASK)
++#else
++#define __PHYSICAL_MASK_SHIFT	32
++#define __PHYSICAL_MASK		(~0UL)
++#define PHYSICAL_PAGE_MASK	(PAGE_MASK & __PHYSICAL_MASK)
++#endif
 +
-+	return err;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_transaction_end);
++#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
++#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
 +
-+/* Single read and scanf: returns -errno or num scanned. */
-+int xenbus_scanf(struct xenbus_transaction t,
-+		 const char *dir, const char *node, const char *fmt, ...)
-+{
-+	va_list ap;
-+	int ret;
-+	char *val;
++#ifdef __KERNEL__
 +
-+	val = xenbus_read(t, dir, node, NULL);
-+	if (IS_ERR(val))
-+		return PTR_ERR(val);
++/*
++ * Need to repeat this here in order to not include pgtable.h (which in turn
++ * depends on definitions made here), but to be able to use the symbolic
++ * constants below. The preprocessor will warn if the two definitions
++ * aren't identical.
++ */
++#define _PAGE_PRESENT	0x001
++#define _PAGE_IO	0x200
 +
-+	va_start(ap, fmt);
-+	ret = vsscanf(val, fmt, ap);
-+	va_end(ap);
-+	kfree(val);
-+	/* Distinctive errno. */
-+	if (ret == 0)
-+		return -ERANGE;
-+	return ret;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_scanf);
++#ifndef __ASSEMBLY__
 +
-+/* Single printf and write: returns -errno or 0. */
-+int xenbus_printf(struct xenbus_transaction t,
-+		  const char *dir, const char *node, const char *fmt, ...)
-+{
-+	va_list ap;
-+	int ret;
-+#define PRINTF_BUFFER_SIZE 4096
-+	char *printf_buffer;
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <asm/bug.h>
++#include <xen/interface/xen.h>
++#include <xen/features.h>
 +
-+	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
-+	if (printf_buffer == NULL)
-+		return -ENOMEM;
++#ifdef CONFIG_X86_USE_3DNOW
 +
-+	va_start(ap, fmt);
-+	ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap);
-+	va_end(ap);
++#include <asm/mmx.h>
 +
-+	BUG_ON(ret > PRINTF_BUFFER_SIZE-1);
-+	ret = xenbus_write(t, dir, node, printf_buffer);
++#define clear_page(page)	mmx_clear_page((void *)(page))
++#define copy_page(to,from)	mmx_copy_page(to,from)
 +
-+	kfree(printf_buffer);
++#else
 +
-+	return ret;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_printf);
++#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
++#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 +
-+/* Takes tuples of names, scanf-style args, and void **, NULL terminated. */
-+int xenbus_gather(struct xenbus_transaction t, const char *dir, ...)
-+{
-+	va_list ap;
-+	const char *name;
-+	int ret = 0;
++/*
++ *	On older X86 processors it doesn't seem to be a win to use MMX here.
++ *	Maybe the K6-III?
++ */
++ 
++#define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
++#define copy_page(to,from)	memcpy((void *)(to), (void *)(from), PAGE_SIZE)
 +
-+	va_start(ap, dir);
-+	while (ret == 0 && (name = va_arg(ap, char *)) != NULL) {
-+		const char *fmt = va_arg(ap, char *);
-+		void *result = va_arg(ap, void *);
-+		char *p;
++#endif
 +
-+		p = xenbus_read(t, dir, name, NULL);
-+		if (IS_ERR(p)) {
-+			ret = PTR_ERR(p);
-+			break;
-+		}
-+		if (fmt) {
-+			if (sscanf(p, fmt, result) == 0)
-+				ret = -EINVAL;
-+			kfree(p);
-+		} else
-+			*(char **)result = p;
-+	}
-+	va_end(ap);
++#define clear_user_page(page, vaddr, pg)	clear_page(page)
++#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
++
++/*
++ * These are used to make use of C type-checking..
++ */
++extern int nx_enabled;
++#ifdef CONFIG_X86_PAE
++extern unsigned long long __supported_pte_mask;
++typedef struct { unsigned long pte_low, pte_high; } pte_t;
++typedef struct { unsigned long long pmd; } pmd_t;
++typedef struct { unsigned long long pgd; } pgd_t;
++typedef struct { unsigned long long pgprot; } pgprot_t;
++#define pgprot_val(x)	((x).pgprot)
++#include <asm/maddr.h>
++#define __pte(x) ({ unsigned long long _x = (x);			\
++    if ((_x & (_PAGE_PRESENT|_PAGE_IO)) == _PAGE_PRESENT)		\
++        _x = pte_phys_to_machine(_x);					\
++    ((pte_t) {(unsigned long)(_x), (unsigned long)(_x>>32)}); })
++#define __pgd(x) ({ unsigned long long _x = (x); \
++    (pgd_t) {((_x) & _PAGE_PRESENT) ? pte_phys_to_machine(_x) : (_x)}; })
++#define __pmd(x) ({ unsigned long long _x = (x); \
++    (pmd_t) {((_x) & _PAGE_PRESENT) ? pte_phys_to_machine(_x) : (_x)}; })
++static inline unsigned long long __pte_val(pte_t x)
++{
++	return ((unsigned long long)x.pte_high << 32) | x.pte_low;
++}
++static inline unsigned long long pte_val(pte_t x)
++{
++	unsigned long long ret = __pte_val(x);
++	if ((x.pte_low & (_PAGE_PRESENT|_PAGE_IO)) == _PAGE_PRESENT)
++		ret = pte_machine_to_phys(ret);
++	return ret;
++}
++#define __pmd_val(x) ((x).pmd)
++static inline unsigned long long pmd_val(pmd_t x)
++{
++	unsigned long long ret = __pmd_val(x);
++#if CONFIG_XEN_COMPAT <= 0x030002
++	if (ret) ret = pte_machine_to_phys(ret) | _PAGE_PRESENT;
++#else
++	if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
++#endif
++	return ret;
++}
++#define __pud_val(x) __pgd_val((x).pgd)
++#define __pgd_val(x) ((x).pgd)
++static inline unsigned long long pgd_val(pgd_t x)
++{
++	unsigned long long ret = __pgd_val(x);
++	if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
++	return ret;
++}
++#define HPAGE_SHIFT	21
++#else
++typedef struct { unsigned long pte_low; } pte_t;
++typedef struct { unsigned long pgd; } pgd_t;
++typedef struct { unsigned long pgprot; } pgprot_t;
++#define pgprot_val(x)	((x).pgprot)
++#include <asm/maddr.h>
++#define boot_pte_t pte_t /* or would you rather have a typedef */
++#define __pte_val(x) ((x).pte_low)
++#define pte_val(x) ((__pte_val(x) & (_PAGE_PRESENT|_PAGE_IO))	\
++		    == _PAGE_PRESENT ?				\
++		    machine_to_phys(__pte_val(x)) :		\
++		    __pte_val(x))
++#define __pte(x) ({ unsigned long _x = (x);			\
++    if ((_x & (_PAGE_PRESENT|_PAGE_IO)) == _PAGE_PRESENT)	\
++        _x = phys_to_machine(_x);				\
++    ((pte_t) { _x }); })
++#define __pmd_val(x) __pud_val((x).pud)
++#define __pud_val(x) __pgd_val((x).pgd)
++#define __pgd(x) ({ unsigned long _x = (x); \
++    (pgd_t) {((_x) & _PAGE_PRESENT) ? phys_to_machine(_x) : (_x)}; })
++#define __pgd_val(x) ((x).pgd)
++static inline unsigned long pgd_val(pgd_t x)
++{
++	unsigned long ret = __pgd_val(x);
++#if CONFIG_XEN_COMPAT <= 0x030002
++	if (ret) ret = machine_to_phys(ret) | _PAGE_PRESENT;
++#else
++	if (ret & _PAGE_PRESENT) ret = machine_to_phys(ret);
++#endif
 +	return ret;
 +}
-+EXPORT_SYMBOL_GPL(xenbus_gather);
-+
-+static int xs_watch(const char *path, const char *token)
-+{
-+	struct kvec iov[2];
-+
-+	iov[0].iov_base = (void *)path;
-+	iov[0].iov_len = strlen(path) + 1;
-+	iov[1].iov_base = (void *)token;
-+	iov[1].iov_len = strlen(token) + 1;
-+
-+	return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov,
-+				 ARRAY_SIZE(iov), NULL));
-+}
-+
-+static int xs_unwatch(const char *path, const char *token)
-+{
-+	struct kvec iov[2];
++#define HPAGE_SHIFT	22
++#endif
++#define PTE_MASK	PHYSICAL_PAGE_MASK
 +
-+	iov[0].iov_base = (char *)path;
-+	iov[0].iov_len = strlen(path) + 1;
-+	iov[1].iov_base = (char *)token;
-+	iov[1].iov_len = strlen(token) + 1;
++#ifdef CONFIG_HUGETLB_PAGE
++#define HPAGE_SIZE	((1UL) << HPAGE_SHIFT)
++#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
++#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
++#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
++#endif
 +
-+	return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov,
-+				 ARRAY_SIZE(iov), NULL));
-+}
++#define __pgprot(x)	((pgprot_t) { (x) } )
 +
-+static struct xenbus_watch *find_watch(const char *token)
-+{
-+	struct xenbus_watch *i, *cmp;
++#endif /* !__ASSEMBLY__ */
 +
-+	cmp = (void *)simple_strtoul(token, NULL, 16);
++/* to align the pointer to the (next) page boundary */
++#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
 +
-+	list_for_each_entry(i, &watches, list)
-+		if (i == cmp)
-+			return i;
++/*
++ * This handles the memory map. We could make this a config
++ * option, but too many people screw it up, and too few need
++ * it.
++ *
++ * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
++ * a virtual address space of one gigabyte, which limits the
++ * amount of physical memory you can use to about 950MB. 
++ *
++ * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
++ * and CONFIG_HIGHMEM64G options in the kernel configuration.
++ */
 +
-+	return NULL;
-+}
++#ifndef __ASSEMBLY__
 +
-+/* Register callback to watch this node. */
-+int register_xenbus_watch(struct xenbus_watch *watch)
-+{
-+	/* Pointer in ascii is the token. */
-+	char token[sizeof(watch) * 2 + 1];
-+	int err;
++struct vm_area_struct;
 +
-+	sprintf(token, "%lX", (long)watch);
++/*
++ * This much address space is reserved for vmalloc() and iomap()
++ * as well as fixmap mappings.
++ */
++extern unsigned int __VMALLOC_RESERVE;
 +
-+	down_read(&xs_state.watch_mutex);
++extern int sysctl_legacy_va_layout;
 +
-+	spin_lock(&watches_lock);
-+	BUG_ON(find_watch(token));
-+	list_add(&watch->list, &watches);
-+	spin_unlock(&watches_lock);
++extern int page_is_ram(unsigned long pagenr);
 +
-+	err = xs_watch(watch->node, token);
++#endif /* __ASSEMBLY__ */
 +
-+	/* Ignore errors due to multiple registration. */
-+	if ((err != 0) && (err != -EEXIST)) {
-+		spin_lock(&watches_lock);
-+		list_del(&watch->list);
-+		spin_unlock(&watches_lock);
-+	}
++#ifdef __ASSEMBLY__
++#define __PAGE_OFFSET		CONFIG_PAGE_OFFSET
++#define __PHYSICAL_START	CONFIG_PHYSICAL_START
++#else
++#define __PAGE_OFFSET		((unsigned long)CONFIG_PAGE_OFFSET)
++#define __PHYSICAL_START	((unsigned long)CONFIG_PHYSICAL_START)
++#endif
++#define __KERNEL_START		(__PAGE_OFFSET + __PHYSICAL_START)
 +
-+	up_read(&xs_state.watch_mutex);
++#if CONFIG_XEN_COMPAT <= 0x030002
++#undef LOAD_OFFSET
++#define LOAD_OFFSET		0
++#endif
 +
-+	return err;
-+}
-+EXPORT_SYMBOL_GPL(register_xenbus_watch);
++#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)
++#define VMALLOC_RESERVE		((unsigned long)__VMALLOC_RESERVE)
++#define MAXMEM			(__FIXADDR_TOP-__PAGE_OFFSET-__VMALLOC_RESERVE)
++#define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
++#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
++#define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
++#ifdef CONFIG_FLATMEM
++#define pfn_valid(pfn)		((pfn) < max_mapnr)
++#endif /* CONFIG_FLATMEM */
++#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 +
-+void unregister_xenbus_watch(struct xenbus_watch *watch)
-+{
-+	struct xs_stored_msg *msg, *tmp;
-+	char token[sizeof(watch) * 2 + 1];
-+	int err;
++#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 +
-+	sprintf(token, "%lX", (long)watch);
++#define VM_DATA_DEFAULT_FLAGS \
++	(VM_READ | VM_WRITE | \
++	((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
++		 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 +
-+	down_read(&xs_state.watch_mutex);
++#include <asm-generic/memory_model.h>
++#include <asm-generic/page.h>
 +
-+	spin_lock(&watches_lock);
-+	BUG_ON(!find_watch(token));
-+	list_del(&watch->list);
-+	spin_unlock(&watches_lock);
++#define __HAVE_ARCH_GATE_AREA 1
++#endif /* __KERNEL__ */
 +
-+	err = xs_unwatch(watch->node, token);
-+	if (err)
-+		printk(KERN_WARNING
-+		       "XENBUS Failed to release watch %s: %i\n",
-+		       watch->node, err);
++#endif /* _I386_PAGE_H */
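
The pgd_val()/__pgd() pairs in the header above translate page-table entries between the kernel's pseudo-physical view and Xen's machine addresses, keyed on _PAGE_PRESENT (with the pre-3.0.3 compatibility branch treating any non-zero entry as translated). A minimal illustrative sketch of that round trip, not part of the patch, assuming phys_to_machine()/machine_to_phys() preserve the low flag bits the way the maddr.h helpers do:

    /* Illustrative only: mirrors the __pgd()/pgd_val() logic above. */
    static unsigned long demo_pgd_roundtrip(unsigned long phys_entry)
    {
        unsigned long mach, back;

        /* __pgd(): present entries carry machine addresses */
        mach = (phys_entry & _PAGE_PRESENT)
                ? phys_to_machine(phys_entry) : phys_entry;

        /* pgd_val(): undo the translation on the way out */
        back = (mach & _PAGE_PRESENT)
                ? machine_to_phys(mach) : mach;

        return back;    /* equals phys_entry again */
    }
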
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/pci.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/pci.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,148 @@
++#ifndef __i386_PCI_H
++#define __i386_PCI_H
 +
-+	up_read(&xs_state.watch_mutex);
 +
-+	/* Cancel pending watch events. */
-+	spin_lock(&watch_events_lock);
-+	list_for_each_entry_safe(msg, tmp, &watch_events, list) {
-+		if (msg->u.watch.handle != watch)
-+			continue;
-+		list_del(&msg->list);
-+		kfree(msg->u.watch.vec);
-+		kfree(msg);
-+	}
-+	spin_unlock(&watch_events_lock);
++#ifdef __KERNEL__
++#include <linux/mm.h>		/* for struct page */
 +
-+	/* Flush any currently-executing callback, unless we are it. :-) */
-+	if (current->pid != xenwatch_pid) {
-+		mutex_lock(&xenwatch_mutex);
-+		mutex_unlock(&xenwatch_mutex);
-+	}
-+}
-+EXPORT_SYMBOL_GPL(unregister_xenbus_watch);
++/* Can be used to override the logic in pci_scan_bus for skipping
++   already-configured bus numbers - to be used for buggy BIOSes
++   or architectures with incomplete PCI setup by the loader */
 +
-+void xs_suspend(void)
-+{
-+	down_write(&xs_state.transaction_mutex);
-+	down_write(&xs_state.watch_mutex);
-+	mutex_lock(&xs_state.request_mutex);
-+	mutex_lock(&xs_state.response_mutex);
-+}
++#ifdef CONFIG_PCI
++extern unsigned int pcibios_assign_all_busses(void);
++#else
++#define pcibios_assign_all_busses()	0
++#endif
 +
-+void xs_resume(void)
-+{
-+	struct xenbus_watch *watch;
-+	char token[sizeof(watch) * 2 + 1];
++#include <asm/hypervisor.h>
++#define pcibios_scan_all_fns(a, b)	(!is_initial_xendomain())
 +
-+	mutex_unlock(&xs_state.response_mutex);
-+	mutex_unlock(&xs_state.request_mutex);
-+	up_write(&xs_state.transaction_mutex);
++extern unsigned long pci_mem_start;
++#define PCIBIOS_MIN_IO		0x1000
++#define PCIBIOS_MIN_MEM		(pci_mem_start)
 +
-+	/* No need for watches_lock: the watch_mutex is sufficient. */
-+	list_for_each_entry(watch, &watches, list) {
-+		sprintf(token, "%lX", (long)watch);
-+		xs_watch(watch->node, token);
-+	}
++#define PCIBIOS_MIN_CARDBUS_IO	0x4000
 +
-+	up_write(&xs_state.watch_mutex);
-+}
++void pcibios_config_init(void);
++struct pci_bus * pcibios_scan_root(int bus);
 +
-+void xs_suspend_cancel(void)
-+{
-+	mutex_unlock(&xs_state.response_mutex);
-+	mutex_unlock(&xs_state.request_mutex);
-+	up_write(&xs_state.watch_mutex);
-+	up_write(&xs_state.transaction_mutex);
-+}
++void pcibios_set_master(struct pci_dev *dev);
++void pcibios_penalize_isa_irq(int irq, int active);
++struct irq_routing_table *pcibios_get_irq_routing_table(void);
++int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
 +
-+static int xenwatch_handle_callback(void *data)
-+{
-+	struct xs_stored_msg *msg = data;
++/* Dynamic DMA mapping stuff.
++ * i386 has everything mapped statically.
++ */
 +
-+	msg->u.watch.handle->callback(msg->u.watch.handle,
-+				      (const char **)msg->u.watch.vec,
-+				      msg->u.watch.vec_size);
++#include <linux/types.h>
++#include <linux/slab.h>
++#include <asm/scatterlist.h>
++#include <linux/string.h>
++#include <asm/io.h>
 +
-+	kfree(msg->u.watch.vec);
-+	kfree(msg);
++struct pci_dev;
 +
-+	/* Kill this kthread if we were spawned just for this callback. */
-+	if (current->pid != xenwatch_pid)
-+		do_exit(0);
++#ifdef CONFIG_SWIOTLB
 +
-+	return 0;
-+}
 +
-+static int xenwatch_thread(void *unused)
-+{
-+	struct list_head *ent;
-+	struct xs_stored_msg *msg;
++/* On Xen we use SWIOTLB instead of blk-specific bounce buffers. */
++#define PCI_DMA_BUS_IS_PHYS	(0)
 +
-+	current->flags |= PF_NOFREEZE;
-+	for (;;) {
-+		wait_event_interruptible(watch_events_waitq,
-+					 !list_empty(&watch_events));
++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
++	dma_addr_t ADDR_NAME;
++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
++	__u32 LEN_NAME;
++#define pci_unmap_addr(PTR, ADDR_NAME)			\
++	((PTR)->ADDR_NAME)
++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)		\
++	(((PTR)->ADDR_NAME) = (VAL))
++#define pci_unmap_len(PTR, LEN_NAME)			\
++	((PTR)->LEN_NAME)
++#define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
++	(((PTR)->LEN_NAME) = (VAL))
 +
-+		if (kthread_should_stop())
-+			break;
++#else
 +
-+		mutex_lock(&xenwatch_mutex);
++/* The PCI address space does equal the physical memory
++ * address space.  The networking and block device layers use
++ * this boolean for bounce buffer decisions.
++ */
++#define PCI_DMA_BUS_IS_PHYS	(1)
 +
-+		spin_lock(&watch_events_lock);
-+		ent = watch_events.next;
-+		if (ent != &watch_events)
-+			list_del(ent);
-+		spin_unlock(&watch_events_lock);
++/* pci_unmap_{page,single} is a nop so... */
++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
++#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
++#define pci_unmap_len(PTR, LEN_NAME)		(0)
++#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
 +
-+		if (ent != &watch_events) {
-+			msg = list_entry(ent, struct xs_stored_msg, list);
-+			if (msg->u.watch.handle->flags & XBWF_new_thread)
-+				kthread_run(xenwatch_handle_callback,
-+					    msg, "xenwatch_cb");
-+			else
-+				xenwatch_handle_callback(msg);
-+		}
++#endif
 +
-+		mutex_unlock(&xenwatch_mutex);
-+	}
++/* This is always fine. */
++#define pci_dac_dma_supported(pci_dev, mask)	(1)
 +
-+	return 0;
++static inline dma64_addr_t
++pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
++{
++	return ((dma64_addr_t) page_to_phys(page) +
++		(dma64_addr_t) offset);
 +}
 +
-+static int process_msg(void)
++static inline struct page *
++pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
 +{
-+	struct xs_stored_msg *msg;
-+	char *body;
-+	int err;
-+
-+	/*
-+	 * We must disallow save/restore while reading a xenstore message.
-+	 * A partial read across s/r leaves us out of sync with xenstored.
-+	 */
-+	for (;;) {
-+		err = xb_wait_for_data_to_read();
-+		if (err)
-+			return err;
-+		mutex_lock(&xs_state.response_mutex);
-+		if (xb_data_to_read())
-+			break;
-+		/* We raced with save/restore: pending data 'disappeared'. */
-+		mutex_unlock(&xs_state.response_mutex);
-+	}
-+
-+
-+	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
-+	if (msg == NULL) {
-+		err = -ENOMEM;
-+		goto out;
-+	}
-+
-+	err = xb_read(&msg->hdr, sizeof(msg->hdr));
-+	if (err) {
-+		kfree(msg);
-+		goto out;
-+	}
-+
-+	body = kmalloc(msg->hdr.len + 1, GFP_KERNEL);
-+	if (body == NULL) {
-+		kfree(msg);
-+		err = -ENOMEM;
-+		goto out;
-+	}
-+
-+	err = xb_read(body, msg->hdr.len);
-+	if (err) {
-+		kfree(body);
-+		kfree(msg);
-+		goto out;
-+	}
-+	body[msg->hdr.len] = '\0';
-+
-+	if (msg->hdr.type == XS_WATCH_EVENT) {
-+		msg->u.watch.vec = split(body, msg->hdr.len,
-+					 &msg->u.watch.vec_size);
-+		if (IS_ERR(msg->u.watch.vec)) {
-+			err = PTR_ERR(msg->u.watch.vec);
-+			kfree(msg);
-+			goto out;
-+		}
-+
-+		spin_lock(&watches_lock);
-+		msg->u.watch.handle = find_watch(
-+			msg->u.watch.vec[XS_WATCH_TOKEN]);
-+		if (msg->u.watch.handle != NULL) {
-+			spin_lock(&watch_events_lock);
-+			list_add_tail(&msg->list, &watch_events);
-+			wake_up(&watch_events_waitq);
-+			spin_unlock(&watch_events_lock);
-+		} else {
-+			kfree(msg->u.watch.vec);
-+			kfree(msg);
-+		}
-+		spin_unlock(&watches_lock);
-+	} else {
-+		msg->u.reply.body = body;
-+		spin_lock(&xs_state.reply_lock);
-+		list_add_tail(&msg->list, &xs_state.reply_list);
-+		spin_unlock(&xs_state.reply_lock);
-+		wake_up(&xs_state.reply_waitq);
-+	}
-+
-+ out:
-+	mutex_unlock(&xs_state.response_mutex);
-+	return err;
++	return pfn_to_page(dma_addr >> PAGE_SHIFT);
 +}
 +
-+static int xenbus_thread(void *unused)
++static inline unsigned long
++pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
 +{
-+	int err;
-+
-+	current->flags |= PF_NOFREEZE;
-+	for (;;) {
-+		err = process_msg();
-+		if (err)
-+			printk(KERN_WARNING "XENBUS error %d while reading "
-+			       "message\n", err);
-+		if (kthread_should_stop())
-+			break;
-+	}
-+
-+	return 0;
++	return (dma_addr & ~PAGE_MASK);
 +}
 +
-+int xs_init(void)
++static inline void
++pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
 +{
-+	int err;
-+	struct task_struct *task;
-+
-+	INIT_LIST_HEAD(&xs_state.reply_list);
-+	spin_lock_init(&xs_state.reply_lock);
-+	init_waitqueue_head(&xs_state.reply_waitq);
++}
 +
-+	mutex_init(&xs_state.request_mutex);
-+	mutex_init(&xs_state.response_mutex);
-+	init_rwsem(&xs_state.transaction_mutex);
-+	init_rwsem(&xs_state.watch_mutex);
++static inline void
++pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
++{
++	flush_write_buffers();
++}
 +
-+	/* Initialize the shared memory rings to talk to xenstored */
-+	err = xb_init_comms();
-+	if (err)
-+		return err;
++#define HAVE_PCI_MMAP
++extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
++			       enum pci_mmap_state mmap_state, int write_combine);
 +
-+	task = kthread_run(xenwatch_thread, NULL, "xenwatch");
-+	if (IS_ERR(task))
-+		return PTR_ERR(task);
-+	xenwatch_pid = task->pid;
 +
-+	task = kthread_run(xenbus_thread, NULL, "xenbus");
-+	if (IS_ERR(task))
-+		return PTR_ERR(task);
++static inline void pcibios_add_platform_entries(struct pci_dev *dev)
++{
++}
 +
-+	return 0;
++#ifdef CONFIG_PCI
++static inline void pci_dma_burst_advice(struct pci_dev *pdev,
++					enum pci_dma_burst_strategy *strat,
++					unsigned long *strategy_parameter)
++{
++	*strat = PCI_DMA_BURST_INFINITY;
++	*strategy_parameter = ~0UL;
 +}
-diff -Nurp pristine-linux-2.6.18.2/drivers/xen/xenoprof/xenoprofile.c tmp-linux-2.6-xen.patch/drivers/xen/xenoprof/xenoprofile.c
---- pristine-linux-2.6.18.2/drivers/xen/xenoprof/xenoprofile.c	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/drivers/xen/xenoprof/xenoprofile.c	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,500 @@
-+/**
-+ * @file xenoprofile.c
-+ *
-+ * @remark Copyright 2002 OProfile authors
-+ * @remark Read the file COPYING
-+ *
-+ * @author John Levon <levon at movementarian.org>
-+ *
-+ * Modified by Aravind Menon and Jose Renato Santos for Xen
-+ * These modifications are:
-+ * Copyright (C) 2005 Hewlett-Packard Co.
-+ *
-+ * Separated out arch-generic part
-+ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
-+ *                    VA Linux Systems Japan K.K.
-+ */
++#endif
 +
-+#include <linux/init.h>
-+#include <linux/notifier.h>
-+#include <linux/smp.h>
-+#include <linux/oprofile.h>
-+#include <linux/sysdev.h>
-+#include <linux/slab.h>
-+#include <linux/interrupt.h>
-+#include <linux/vmalloc.h>
-+#include <asm/pgtable.h>
-+#include <xen/evtchn.h>
-+#include <xen/xenoprof.h>
-+#include <xen/driver_util.h>
-+#include <xen/interface/xen.h>
-+#include <xen/interface/xenoprof.h>
-+#include "../../../drivers/oprofile/cpu_buffer.h"
-+#include "../../../drivers/oprofile/event_buffer.h"
++#endif /* __KERNEL__ */
 +
-+#define MAX_XENOPROF_SAMPLES 16
++#ifdef CONFIG_XEN_PCIDEV_FRONTEND
++#include <xen/pcifront.h>
++#endif /* CONFIG_XEN_PCIDEV_FRONTEND */
 +
-+/* sample buffers shared with Xen */
-+xenoprof_buf_t * xenoprof_buf[MAX_VIRT_CPUS];
-+/* Shared buffer area */
-+struct xenoprof_shared_buffer shared_buffer;
++/* implement the pci_ DMA API in terms of the generic device dma_ one */
++#include <asm-generic/pci-dma-compat.h>
 +
-+/* Passive sample buffers shared with Xen */
-+xenoprof_buf_t *p_xenoprof_buf[MAX_OPROF_DOMAINS][MAX_VIRT_CPUS];
-+/* Passive shared buffer area */
-+struct xenoprof_shared_buffer p_shared_buffer[MAX_OPROF_DOMAINS];
++/* generic pci stuff */
++#include <asm-generic/pci.h>
 +
-+static int xenoprof_start(void);
-+static void xenoprof_stop(void);
++#endif /* __i386_PCI_H */
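
The DECLARE_PCI_UNMAP_* macros above let a driver keep the dma_addr_t and length it needs for unmapping while compiling to nothing in the branch where pci_unmap_{single,page} is a nop. A hedged sketch of the conventional driver-side pattern (the slot structure and names are hypothetical, not from this patch; kernel context with <linux/pci.h> is assumed):

    struct demo_tx_slot {
        void *buf;
        DECLARE_PCI_UNMAP_ADDR(mapping) /* occupies no space if unmap is a nop */
        DECLARE_PCI_UNMAP_LEN(len)
    };

    static void demo_map(struct pci_dev *pdev, struct demo_tx_slot *s,
                         size_t size)
    {
        dma_addr_t dma = pci_map_single(pdev, s->buf, size,
                                        PCI_DMA_TODEVICE);
        pci_unmap_addr_set(s, mapping, dma);  /* expands away when unused */
        pci_unmap_len_set(s, len, size);
    }

    static void demo_unmap(struct pci_dev *pdev, struct demo_tx_slot *s)
    {
        pci_unmap_single(pdev, pci_unmap_addr(s, mapping),
                         pci_unmap_len(s, len), PCI_DMA_TODEVICE);
    }
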
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/pgalloc.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/pgalloc.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,59 @@
++#ifndef _I386_PGALLOC_H
++#define _I386_PGALLOC_H
 +
-+static int xenoprof_enabled = 0;
-+static int xenoprof_is_primary = 0;
-+static int active_defined;
++#include <asm/fixmap.h>
++#include <linux/threads.h>
++#include <linux/mm.h>		/* for struct page */
++#include <asm/io.h>		/* for phys_to_virt and page_to_pseudophys */
 +
-+/* Number of buffers in shared area (one per VCPU) */
-+int nbuf;
-+/* Mappings of VIRQ_XENOPROF to irq number (per cpu) */
-+int ovf_irq[NR_CPUS];
-+/* cpu model type string - copied from Xen memory space on XENOPROF_init command */
-+char cpu_type[XENOPROF_CPU_TYPE_SIZE];
++#define pmd_populate_kernel(mm, pmd, pte) \
++		set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
 +
-+#ifdef CONFIG_PM
++#define pmd_populate(mm, pmd, pte) 					\
++do {									\
++	unsigned long pfn = page_to_pfn(pte);				\
++	if (test_bit(PG_pinned, &virt_to_page((mm)->pgd)->flags)) {	\
++		if (!PageHighMem(pte))					\
++			BUG_ON(HYPERVISOR_update_va_mapping(		\
++			  (unsigned long)__va(pfn << PAGE_SHIFT),	\
++			  pfn_pte(pfn, PAGE_KERNEL_RO), 0));		\
++		else if (!test_and_set_bit(PG_pinned, &pte->flags))	\
++			kmap_flush_unused();				\
++		set_pmd(pmd,						\
++		        __pmd(_PAGE_TABLE + ((paddr_t)pfn << PAGE_SHIFT))); \
++	} else							\
++		*(pmd) = __pmd(_PAGE_TABLE + ((paddr_t)pfn << PAGE_SHIFT)); \
++} while (0)
 +
-+static int xenoprof_suspend(struct sys_device * dev, pm_message_t state)
-+{
-+	if (xenoprof_enabled == 1)
-+		xenoprof_stop();
-+	return 0;
-+}
++/*
++ * Allocate and free page tables.
++ */
++extern pgd_t *pgd_alloc(struct mm_struct *);
++extern void pgd_free(pgd_t *pgd);
 +
++extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
++extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
 +
-+static int xenoprof_resume(struct sys_device * dev)
++static inline void pte_free_kernel(pte_t *pte)
 +{
-+	if (xenoprof_enabled == 1)
-+		xenoprof_start();
-+	return 0;
++	make_lowmem_page_writable(pte, XENFEAT_writable_page_tables);
++	free_page((unsigned long)pte);
 +}
 +
++extern void pte_free(struct page *pte);
 +
-+static struct sysdev_class oprofile_sysclass = {
-+	set_kset_name("oprofile"),
-+	.resume		= xenoprof_resume,
-+	.suspend	= xenoprof_suspend
-+};
-+
-+
-+static struct sys_device device_oprofile = {
-+	.id	= 0,
-+	.cls	= &oprofile_sysclass,
-+};
++#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
 +
++#ifdef CONFIG_X86_PAE
++/*
++ * In the PAE case we free the pmds as part of the pgd.
++ */
++#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
++#define pmd_free(x)			do { } while (0)
++#define __pmd_free_tlb(tlb,x)		do { } while (0)
++#define pud_populate(mm, pmd, pte)	BUG()
++#endif
 +
-+static int __init init_driverfs(void)
-+{
-+	int error;
-+	if (!(error = sysdev_class_register(&oprofile_sysclass)))
-+		error = sysdev_register(&device_oprofile);
-+	return error;
-+}
++#define check_pgt_cache()	do { } while (0)
 +
++#endif /* _I386_PGALLOC_H */
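
The pmd_populate() macro above encodes the ordering Xen requires for pinned page tables: once a pgd is pinned, a pte page hooked beneath it must first lose its writable kernel mapping, because the hypervisor refuses writable aliases of live page-table frames. A rough sketch of that ordering, not part of the patch; lowmem_make_ro() and install_pmd() are hypothetical stand-ins for the HYPERVISOR_update_va_mapping(..., PAGE_KERNEL_RO, 0) and set_pmd() steps:

    extern int pagetable_pinned;            /* models PG_pinned on the pgd */
    extern void lowmem_make_ro(void *va);   /* hypothetical helper */
    extern void install_pmd(unsigned long pte_phys);

    static void demo_populate(void *pte_va, unsigned long pte_phys)
    {
        if (pagetable_pinned)
            lowmem_make_ro(pte_va); /* Xen: RO before it becomes a page table */
        install_pmd(pte_phys);      /* only then hook it into the tree */
    }
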
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/pgtable-2level-defs.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/pgtable-2level-defs.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,20 @@
++#ifndef _I386_PGTABLE_2LEVEL_DEFS_H
++#define _I386_PGTABLE_2LEVEL_DEFS_H
 +
-+static void exit_driverfs(void)
-+{
-+	sysdev_unregister(&device_oprofile);
-+	sysdev_class_unregister(&oprofile_sysclass);
-+}
++#define HAVE_SHARED_KERNEL_PMD 0
 +
-+#else
-+#define init_driverfs() do { } while (0)
-+#define exit_driverfs() do { } while (0)
-+#endif /* CONFIG_PM */
++/*
++ * traditional i386 two-level paging structure:
++ */
 +
-+unsigned long long oprofile_samples = 0;
-+unsigned long long p_oprofile_samples = 0;
++#define PGDIR_SHIFT	22
++#define PTRS_PER_PGD	1024
 +
-+unsigned int pdomains;
-+struct xenoprof_passive passive_domains[MAX_OPROF_DOMAINS];
++/*
++ * the i386 is two-level, so we don't really have any
++ * PMD directory physically.
++ */
 +
-+static void xenoprof_add_pc(xenoprof_buf_t *buf, int is_passive)
-+{
-+	int head, tail, size;
++#define PTRS_PER_PTE	1024
 +
-+	head = buf->event_head;
-+	tail = buf->event_tail;
-+	size = buf->event_size;
++#endif /* _I386_PGTABLE_2LEVEL_DEFS_H */
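
With PGDIR_SHIFT at 22 and 1024 entries in each of the two levels, a 32-bit linear address splits 10/10/12. A standalone arithmetic check of that decomposition (plain userspace C for illustration, not kernel code):

    #include <stdio.h>

    int main(void)
    {
        unsigned long addr = 0xc0123456UL;  /* arbitrary example address */

        unsigned long pgd_i = addr >> 22;               /* 0x300 */
        unsigned long pte_i = (addr >> 12) & 0x3ff;     /* 0x123 */
        unsigned long off   = addr & 0xfff;             /* 0x456 */

        printf("pgd %#lx pte %#lx off %#lx\n", pgd_i, pte_i, off);
        return 0;
    }
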
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/pgtable-2level.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/pgtable-2level.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,121 @@
++#ifndef _I386_PGTABLE_2LEVEL_H
++#define _I386_PGTABLE_2LEVEL_H
 +
-+	if (tail > head) {
-+		while (tail < size) {
-+			oprofile_add_pc(buf->event_log[tail].eip,
-+					buf->event_log[tail].mode,
-+					buf->event_log[tail].event);
-+			if (!is_passive)
-+				oprofile_samples++;
-+			else
-+				p_oprofile_samples++;
-+			tail++;
-+		}
-+		tail = 0;
-+	}
-+	while (tail < head) {
-+		oprofile_add_pc(buf->event_log[tail].eip,
-+				buf->event_log[tail].mode,
-+				buf->event_log[tail].event);
-+		if (!is_passive)
-+			oprofile_samples++;
-+		else
-+			p_oprofile_samples++;
-+		tail++;
-+	}
++#include <asm-generic/pgtable-nopmd.h>
 +
-+	buf->event_tail = tail;
-+}
++#define pte_ERROR(e) \
++	printk("%s:%d: bad pte %08lx (pfn %05lx).\n", __FILE__, __LINE__, \
++	       __pte_val(e), pte_pfn(e))
++#define pgd_ERROR(e) \
++	printk("%s:%d: bad pgd %08lx (pfn %05lx).\n", __FILE__, __LINE__, \
++	       __pgd_val(e), pgd_val(e) >> PAGE_SHIFT)
 +
-+static void xenoprof_handle_passive(void)
-+{
-+	int i, j;
-+	int flag_domain, flag_switch = 0;
-+	
-+	for (i = 0; i < pdomains; i++) {
-+		flag_domain = 0;
-+		for (j = 0; j < passive_domains[i].nbuf; j++) {
-+			xenoprof_buf_t *buf = p_xenoprof_buf[i][j];
-+			if (buf->event_head == buf->event_tail)
-+				continue;
-+			if (!flag_domain) {
-+				if (!oprofile_add_domain_switch(passive_domains[i].
-+								domain_id))
-+					goto done;
-+				flag_domain = 1;
-+			}
-+			xenoprof_add_pc(buf, 1);
-+			flag_switch = 1;
-+		}
-+	}
-+done:
-+	if (flag_switch)
-+		oprofile_add_domain_switch(COORDINATOR_DOMAIN);
-+}
++/*
++ * Certain architectures need to do special things when PTEs
++ * within a page table are directly modified.  Thus, the following
++ * hook is made available.
++ */
++#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
 +
-+static irqreturn_t 
-+xenoprof_ovf_interrupt(int irq, void * dev_id, struct pt_regs * regs)
-+{
-+	struct xenoprof_buf * buf;
-+	int cpu;
-+	static unsigned long flag;
++#define set_pte_at(_mm,addr,ptep,pteval) do {				\
++	if (((_mm) != current->mm && (_mm) != &init_mm) ||		\
++	    HYPERVISOR_update_va_mapping((addr), (pteval), 0))		\
++		set_pte((ptep), (pteval));				\
++} while (0)
 +
-+	cpu = smp_processor_id();
-+	buf = xenoprof_buf[cpu];
++#define set_pte_at_sync(_mm,addr,ptep,pteval) do {			\
++	if (((_mm) != current->mm && (_mm) != &init_mm) ||		\
++	    HYPERVISOR_update_va_mapping((addr), (pteval), UVMF_INVLPG)) { \
++		set_pte((ptep), (pteval));				\
++		xen_invlpg((addr));					\
++	}								\
++} while (0)
 +
-+	xenoprof_add_pc(buf, 0);
++#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
 +
-+	if (xenoprof_is_primary && !test_and_set_bit(0, &flag)) {
-+		xenoprof_handle_passive();
-+		smp_mb__before_clear_bit();
-+		clear_bit(0, &flag);
-+	}
++#define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval))
 +
-+	return IRQ_HANDLED;
-+}
++#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
++#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
 +
++#define pte_none(x) (!(x).pte_low)
 +
-+static void unbind_virq(void)
++static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 +{
-+	int i;
-+
-+	for_each_online_cpu(i) {
-+		if (ovf_irq[i] >= 0) {
-+			unbind_from_irqhandler(ovf_irq[i], NULL);
-+			ovf_irq[i] = -1;
-+		}
++	pte_t pte = *ptep;
++	if (!pte_none(pte)) {
++		if ((mm != &init_mm) ||
++		    HYPERVISOR_update_va_mapping(addr, __pte(0), 0))
++			pte = __pte_ma(xchg(&ptep->pte_low, 0));
 +	}
++	return pte;
 +}
 +
++#define ptep_clear_flush(vma, addr, ptep)			\
++({								\
++	pte_t *__ptep = (ptep);					\
++	pte_t __res = *__ptep;					\
++	if (!pte_none(__res) &&					\
++	    ((vma)->vm_mm != current->mm ||			\
++	     HYPERVISOR_update_va_mapping(addr, __pte(0),	\
++			(unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
++				UVMF_INVLPG|UVMF_MULTI))) {	\
++		__ptep->pte_low = 0;				\
++		flush_tlb_page(vma, addr);			\
++	}							\
++	__res;							\
++})
 +
-+static int bind_virq(void)
-+{
-+	int i, result;
-+
-+	for_each_online_cpu(i) {
-+		result = bind_virq_to_irqhandler(VIRQ_XENOPROF,
-+						 i,
-+						 xenoprof_ovf_interrupt,
-+						 SA_INTERRUPT,
-+						 "xenoprof",
-+						 NULL);
++#define pte_same(a, b)		((a).pte_low == (b).pte_low)
 +
-+		if (result < 0) {
-+			unbind_virq();
-+			return result;
-+		}
++#define __pte_mfn(_pte) ((_pte).pte_low >> PAGE_SHIFT)
++#define pte_mfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \
++	__pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte)))
++#define pte_pfn(_pte) ((_pte).pte_low & _PAGE_IO ? max_mapnr :	\
++		       (_pte).pte_low & _PAGE_PRESENT ?		\
++		       mfn_to_local_pfn(__pte_mfn(_pte)) :	\
++		       __pte_mfn(_pte))
 +
-+		ovf_irq[i] = result;
-+	}
-+		
-+	return 0;
-+}
++#define pte_page(_pte) pfn_to_page(pte_pfn(_pte))
 +
++#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
++#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 +
-+static void unmap_passive_list(void)
++/*
++ * All present user pages are user-executable:
++ */
++static inline int pte_exec(pte_t pte)
 +{
-+	int i;
-+	for (i = 0; i < pdomains; i++)
-+		xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[i]);
-+	pdomains = 0;
++	return pte_user(pte);
 +}
 +
-+
-+static int map_xenoprof_buffer(int max_samples)
++/*
++ * All present pages are kernel-executable:
++ */
++static inline int pte_exec_kernel(pte_t pte)
 +{
-+	struct xenoprof_get_buffer get_buffer;
-+	struct xenoprof_buf *buf;
-+	int ret, i;
-+
-+	if ( shared_buffer.buffer )
-+		return 0;
-+
-+	get_buffer.max_samples = max_samples;
-+	ret = xenoprof_arch_map_shared_buffer(&get_buffer, &shared_buffer);
-+	if (ret)
-+		return ret;
-+	nbuf = get_buffer.nbuf;
-+
-+	for (i=0; i< nbuf; i++) {
-+		buf = (struct xenoprof_buf*) 
-+			&shared_buffer.buffer[i * get_buffer.bufsize];
-+		BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
-+		xenoprof_buf[buf->vcpu_id] = buf;
-+	}
-+
-+	return 0;
++	return 1;
 +}
 +
++/*
++ * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
++ * into this range:
++ */
++#define PTE_FILE_MAX_BITS	29
 +
-+static int xenoprof_setup(void)
-+{
-+	int ret;
-+
-+	if ( (ret = map_xenoprof_buffer(MAX_XENOPROF_SAMPLES)) )
-+		return ret;
++#define pte_to_pgoff(pte) \
++	((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 ))
 +
-+	if ( (ret = bind_virq()) )
-+		return ret;
++#define pgoff_to_pte(off) \
++	((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE })
 +
-+	if (xenoprof_is_primary) {
-+		/* Define dom0 as an active domain if not done yet */
-+		if (!active_defined) {
-+			domid_t domid;
-+			ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
-+			if (ret)
-+				goto err;
-+			domid = 0;
-+			ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
-+			if (ret)
-+				goto err;
-+			active_defined = 1;
-+		}
++/* Encode and de-code a swap entry */
++#define __swp_type(x)			(((x).val >> 1) & 0x1f)
++#define __swp_offset(x)			((x).val >> 8)
++#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
++#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_low })
++#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
 +
-+		ret = HYPERVISOR_xenoprof_op(XENOPROF_reserve_counters, NULL);
-+		if (ret)
-+			goto err;
-+		xenoprof_arch_counter();
-+		ret = HYPERVISOR_xenoprof_op(XENOPROF_setup_events, NULL);
++void vmalloc_sync_all(void);
 +
-+		if (ret)
-+			goto err;
-+	}
++#endif /* _I386_PGTABLE_2LEVEL_H */
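
In the two-level layout a non-present pte must keep bits 0, 6 and 7 clear (_PAGE_PRESENT, _PAGE_DIRTY/_PAGE_FILE and _PAGE_PROTNONE), which is why the __swp_* macros above put the 5-bit swap type at bit 1 and the offset at bit 8. A standalone round-trip check of that encoding (plain C mirroring the macros, not the patch itself):

    #include <assert.h>

    int main(void)
    {
        unsigned long type = 0x13, offset = 0xabcde;

        /* __swp_entry(): type in bits 1-5, offset from bit 8 up */
        unsigned long val = (type << 1) | (offset << 8);

        assert(((val >> 1) & 0x1f) == type);    /* __swp_type()   */
        assert((val >> 8) == offset);           /* __swp_offset() */
        assert((val & 0xc1) == 0);              /* bits 0, 6, 7 stay clear */
        return 0;
    }
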
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/pgtable-3level-defs.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/pgtable-3level-defs.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,24 @@
++#ifndef _I386_PGTABLE_3LEVEL_DEFS_H
++#define _I386_PGTABLE_3LEVEL_DEFS_H
 +
-+	ret = HYPERVISOR_xenoprof_op(XENOPROF_enable_virq, NULL);
-+	if (ret)
-+		goto err;
++#define HAVE_SHARED_KERNEL_PMD 0
 +
-+	xenoprof_enabled = 1;
-+	return 0;
-+ err:
-+	unbind_virq();
-+	return ret;
-+}
++/*
++ * PGDIR_SHIFT determines what a top-level page table entry can map
++ */
++#define PGDIR_SHIFT	30
++#define PTRS_PER_PGD	4
 +
++/*
++ * PMD_SHIFT determines the size of the area a middle-level
++ * page table can map
++ */
++#define PMD_SHIFT	21
++#define PTRS_PER_PMD	512
 +
-+static void xenoprof_shutdown(void)
-+{
-+	xenoprof_enabled = 0;
++/*
++ * entries per page directory level
++ */
++#define PTRS_PER_PTE	512
 +
-+	HYPERVISOR_xenoprof_op(XENOPROF_disable_virq, NULL);
++#endif /* _I386_PGTABLE_3LEVEL_DEFS_H */
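
These constants give the PAE split of a 32-bit linear address: 2 bits of pgd index (four entries), 9 bits each for the pmd and pte levels, and 12 bits of page offset. A standalone check of the decomposition, analogous to the two-level example earlier (plain userspace C):

    #include <stdio.h>

    int main(void)
    {
        unsigned long addr = 0xc0123456UL;

        unsigned long pgd_i = addr >> 30;               /*  2 bits */
        unsigned long pmd_i = (addr >> 21) & 0x1ff;     /*  9 bits */
        unsigned long pte_i = (addr >> 12) & 0x1ff;     /*  9 bits */
        unsigned long off   = addr & 0xfff;             /* 12 bits */

        printf("pgd %lu pmd %lu pte %lu off %#lx\n",
               pgd_i, pmd_i, pte_i, off);
        return 0;
    }
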
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/pgtable-3level.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/pgtable-3level.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,211 @@
++#ifndef _I386_PGTABLE_3LEVEL_H
++#define _I386_PGTABLE_3LEVEL_H
 +
-+	if (xenoprof_is_primary) {
-+		HYPERVISOR_xenoprof_op(XENOPROF_release_counters, NULL);
-+		active_defined = 0;
-+	}
++#include <asm-generic/pgtable-nopud.h>
 +
-+	unbind_virq();
++/*
++ * Intel Physical Address Extension (PAE) Mode - three-level page
++ * tables on PPro+ CPUs.
++ *
++ * Copyright (C) 1999 Ingo Molnar <mingo at redhat.com>
++ */
 +
-+	xenoprof_arch_unmap_shared_buffer(&shared_buffer);
-+	if (xenoprof_is_primary)
-+		unmap_passive_list();
-+}
++#define pte_ERROR(e) \
++	printk("%s:%d: bad pte %p(%016Lx pfn %08lx).\n", __FILE__, __LINE__, \
++	       &(e), __pte_val(e), pte_pfn(e))
++#define pmd_ERROR(e) \
++	printk("%s:%d: bad pmd %p(%016Lx pfn %08Lx).\n", __FILE__, __LINE__, \
++	       &(e), __pmd_val(e), (pmd_val(e) & PTE_MASK) >> PAGE_SHIFT)
++#define pgd_ERROR(e) \
++	printk("%s:%d: bad pgd %p(%016Lx pfn %08Lx).\n", __FILE__, __LINE__, \
++	       &(e), __pgd_val(e), (pgd_val(e) & PTE_MASK) >> PAGE_SHIFT)
 +
++#define pud_none(pud)				0
++#define pud_bad(pud)				0
++#define pud_present(pud)			1
 +
-+static int xenoprof_start(void)
++/*
++ * Is the pte executable?
++ */
++static inline int pte_x(pte_t pte)
 +{
-+	int ret = 0;
-+
-+	if (xenoprof_is_primary)
-+		ret = HYPERVISOR_xenoprof_op(XENOPROF_start, NULL);
-+	if (!ret)
-+		xenoprof_arch_start();
-+	return ret;
++	return !(__pte_val(pte) & _PAGE_NX);
 +}
 +
-+
-+static void xenoprof_stop(void)
++/*
++ * All present user-pages with !NX bit are user-executable:
++ */
++static inline int pte_exec(pte_t pte)
 +{
-+	if (xenoprof_is_primary)
-+		HYPERVISOR_xenoprof_op(XENOPROF_stop, NULL);
-+	xenoprof_arch_stop();
++	return pte_user(pte) && pte_x(pte);
++}
++/*
++ * All present pages with !NX bit are kernel-executable:
++ */
++static inline int pte_exec_kernel(pte_t pte)
++{
++	return pte_x(pte);
 +}
 +
++/* Rules for using set_pte: the pte being assigned *must* be
++ * either not present or in a state where the hardware will
++ * not attempt to update the pte.  In places where this is
++ * not possible, use pte_get_and_clear to obtain the old pte
++ * value and then use set_pte to update it.  -ben
++ */
++#define __HAVE_ARCH_SET_PTE_ATOMIC
 +
-+static int xenoprof_set_active(int * active_domains,
-+			       unsigned int adomains)
++static inline void set_pte(pte_t *ptep, pte_t pte)
 +{
-+	int ret = 0;
-+	int i;
-+	int set_dom0 = 0;
-+	domid_t domid;
++	ptep->pte_high = pte.pte_high;
++	smp_wmb();
++	ptep->pte_low = pte.pte_low;
++}
++#define set_pte_atomic(pteptr,pteval) \
++		set_64bit((unsigned long long *)(pteptr),__pte_val(pteval))
 +
-+	if (!xenoprof_is_primary)
-+		return 0;
++#define set_pte_at(_mm,addr,ptep,pteval) do {				\
++	if (((_mm) != current->mm && (_mm) != &init_mm) ||		\
++	    HYPERVISOR_update_va_mapping((addr), (pteval), 0))		\
++		set_pte((ptep), (pteval));				\
++} while (0)
 +
-+	if (adomains > MAX_OPROF_DOMAINS)
-+		return -E2BIG;
++#define set_pte_at_sync(_mm,addr,ptep,pteval) do {			\
++	if (((_mm) != current->mm && (_mm) != &init_mm) ||		\
++	    HYPERVISOR_update_va_mapping((addr), (pteval), UVMF_INVLPG)) { \
++		set_pte((ptep), (pteval));				\
++		xen_invlpg((addr));					\
++	}								\
++} while (0)
 +
-+	ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
-+	if (ret)
-+		return ret;
++#define set_pmd(pmdptr,pmdval)				\
++		xen_l2_entry_update((pmdptr), (pmdval))
++#define set_pud(pudptr,pudval) \
++		xen_l3_entry_update((pudptr), (pudval))
 +
-+	for (i=0; i<adomains; i++) {
-+		domid = active_domains[i];
-+		if (domid != active_domains[i]) {
-+			ret = -EINVAL;
-+			goto out;
-+		}
-+		ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
-+		if (ret)
-+			goto out;
-+		if (active_domains[i] == 0)
-+			set_dom0 = 1;
-+	}
-+	/* dom0 must always be active but may not be in the list */ 
-+	if (!set_dom0) {
-+		domid = 0;
-+		ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
-+	}
++/*
++ * Pentium-II erratum A13: in PAE mode we explicitly have to flush
++ * the TLB via cr3 if the top-level pgd is changed...
++ * We do not let the generic code free and clear pgd entries due to
++ * this erratum.
++ */
++static inline void pud_clear (pud_t * pud) { }
 +
-+out:
-+	if (ret)
-+		HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
-+	active_defined = !ret;
-+	return ret;
-+}
++#define pud_page(pud) \
++((struct page *) __va(pud_val(pud) & PAGE_MASK))
 +
-+static int xenoprof_set_passive(int * p_domains,
-+                                unsigned int pdoms)
-+{
-+	int ret;
-+	int i, j;
-+	struct xenoprof_buf *buf;
++#define pud_page_kernel(pud) \
++((unsigned long) __va(pud_val(pud) & PAGE_MASK))
 +
-+	if (!xenoprof_is_primary)
-+        	return 0;
 +
-+	if (pdoms > MAX_OPROF_DOMAINS)
-+		return -E2BIG;
++/* Find an entry in the second-level page table.. */
++#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
++			pmd_index(address))
 +
-+	ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_passive_list, NULL);
-+	if (ret)
-+		return ret;
-+	unmap_passive_list();
++static inline int pte_none(pte_t pte)
++{
++	return !(pte.pte_low | pte.pte_high);
++}
 +
-+	for (i = 0; i < pdoms; i++) {
-+		passive_domains[i].domain_id = p_domains[i];
-+		passive_domains[i].max_samples = 2048;
-+		ret = xenoprof_arch_set_passive(&passive_domains[i],
-+						&p_shared_buffer[i]);
-+		if (ret)
-+			goto out;
-+		for (j = 0; j < passive_domains[i].nbuf; j++) {
-+			buf = (struct xenoprof_buf *)
-+				&p_shared_buffer[i].buffer[j * passive_domains[i].bufsize];
-+			BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
-+			p_xenoprof_buf[i][buf->vcpu_id] = buf;
-+		}
++/*
++ * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
++ * entry, so clear the bottom half first and enforce ordering with a compiler
++ * barrier.
++ */
++static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++	if ((mm != current->mm && mm != &init_mm)
++	    || HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) {
++		ptep->pte_low = 0;
++		smp_wmb();
++		ptep->pte_high = 0;
 +	}
++}
 +
-+	pdomains = pdoms;
-+	return 0;
++#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
 +
-+out:
-+	for (j = 0; j < i; j++)
-+		xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[i]);
++static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++	pte_t pte = *ptep;
++	if (!pte_none(pte)) {
++		if ((mm != &init_mm) ||
++		    HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) {
++			uint64_t val = __pte_val(pte);
++			if (__cmpxchg64(ptep, val, 0) != val) {
++				/* xchg acts as a barrier before the setting of the high bits */
++				pte.pte_low = xchg(&ptep->pte_low, 0);
++				pte.pte_high = ptep->pte_high;
++				ptep->pte_high = 0;
++			}
++		}
++	}
++	return pte;
++}
++
++#define ptep_clear_flush(vma, addr, ptep)			\
++({								\
++	pte_t *__ptep = (ptep);					\
++	pte_t __res = *__ptep;					\
++	if (!pte_none(__res) &&					\
++	    ((vma)->vm_mm != current->mm ||			\
++	     HYPERVISOR_update_va_mapping(addr,	__pte(0),	\
++			(unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
++				UVMF_INVLPG|UVMF_MULTI))) {	\
++		__ptep->pte_low = 0;				\
++		smp_wmb();					\
++		__ptep->pte_high = 0;				\
++		flush_tlb_page(vma, addr);			\
++	}							\
++	__res;							\
++})
 +
-+ 	return ret;
++static inline int pte_same(pte_t a, pte_t b)
++{
++	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
 +}
 +
-+struct oprofile_operations xenoprof_ops = {
-+#ifdef HAVE_XENOPROF_CREATE_FILES
-+	.create_files 	= xenoprof_create_files,
-+#endif
-+	.set_active	= xenoprof_set_active,
-+	.set_passive    = xenoprof_set_passive,
-+	.setup 		= xenoprof_setup,
-+	.shutdown	= xenoprof_shutdown,
-+	.start		= xenoprof_start,
-+	.stop		= xenoprof_stop
-+};
++#define pte_page(x)	pfn_to_page(pte_pfn(x))
 +
++#define __pte_mfn(_pte) (((_pte).pte_low >> PAGE_SHIFT) | \
++			 ((_pte).pte_high << (32-PAGE_SHIFT)))
++#define pte_mfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \
++	__pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte)))
++#define pte_pfn(_pte) ((_pte).pte_low & _PAGE_IO ? max_mapnr :	\
++		       (_pte).pte_low & _PAGE_PRESENT ?		\
++		       mfn_to_local_pfn(__pte_mfn(_pte)) :	\
++		       __pte_mfn(_pte))
 +
-+/* in order to get driverfs right */
-+static int using_xenoprof;
++extern unsigned long long __supported_pte_mask;
 +
-+int __init xenoprofile_init(struct oprofile_operations * ops)
++static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
 +{
-+	struct xenoprof_init init;
-+	int ret, i;
++	return __pte((((unsigned long long)page_nr << PAGE_SHIFT) |
++			pgprot_val(pgprot)) & __supported_pte_mask);
++}
 +
-+	ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
-+	if (!ret) {
-+		xenoprof_arch_init_counter(&init);
-+		xenoprof_is_primary = init.is_primary;
++static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
++{
++	return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) |
++			pgprot_val(pgprot)) & __supported_pte_mask);
++}
 +
-+		/*  cpu_type is detected by Xen */
-+		cpu_type[XENOPROF_CPU_TYPE_SIZE-1] = 0;
-+		strncpy(cpu_type, init.cpu_type, XENOPROF_CPU_TYPE_SIZE - 1);
-+		xenoprof_ops.cpu_type = cpu_type;
++/*
++ * Bits 0, 6 and 7 are taken in the low part of the pte,
++ * put the 32 bits of offset into the high part.
++ */
++#define pte_to_pgoff(pte) ((pte).pte_high)
++#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
++#define PTE_FILE_MAX_BITS       32
 +
-+		init_driverfs();
-+		using_xenoprof = 1;
-+		*ops = xenoprof_ops;
++/* Encode and de-code a swap entry */
++#define __swp_type(x)			(((x).val) & 0x1f)
++#define __swp_offset(x)			((x).val >> 5)
++#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
++#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
++#define __swp_entry_to_pte(x)		((pte_t){ 0, (x).val })
 +
-+		for (i=0; i<NR_CPUS; i++)
-+			ovf_irq[i] = -1;
++#define __pmd_free_tlb(tlb, x)		do { } while (0)
 +
-+		active_defined = 0;
-+	}
-+	printk(KERN_INFO "%s: ret %d, events %d, xenoprof_is_primary %d\n",
-+	       __func__, ret, init.num_events, xenoprof_is_primary);
-+	return ret;
-+}
++void vmalloc_sync_all(void);
 +
++#endif /* _I386_PGTABLE_3LEVEL_H */
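
A PAE pte is 64 bits wide and cannot be stored with one 32-bit write, so set_pte() above writes pte_high first and pte_low, which carries _PAGE_PRESENT, last, with smp_wmb() in between; pte_clear() inverts the order. A commented sketch of that store discipline, illustrative only and not a replacement for the code above:

    /* Mirrors the ordering used by set_pte()/pte_clear() above. */
    static void demo_set(pte_t *ptep, pte_t pte)
    {
        ptep->pte_high = pte.pte_high;  /* upper frame bits first */
        smp_wmb();                      /* order high before low */
        ptep->pte_low = pte.pte_low;    /* present bit lands last */
    }

    static void demo_clear(pte_t *ptep)
    {
        ptep->pte_low = 0;              /* drop the present bit first */
        smp_wmb();
        ptep->pte_high = 0;             /* a stale high half is never seen
                                           under a set present bit */
    }
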
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/pgtable.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/pgtable.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,537 @@
++#ifndef _I386_PGTABLE_H
++#define _I386_PGTABLE_H
 +
-+void xenoprofile_exit(void)
-+{
-+	if (using_xenoprof)
-+		exit_driverfs();
++#include <asm/hypervisor.h>
 +
-+	xenoprof_arch_unmap_shared_buffer(&shared_buffer);
-+	if (xenoprof_is_primary) {
-+		unmap_passive_list();
-+		HYPERVISOR_xenoprof_op(XENOPROF_shutdown, NULL);
-+        }
-+}
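
xenoprof_add_pc() in the block above drains the shared sample ring with the classic head/tail discipline: if the producer has wrapped (tail > head), consume from tail to the end of the buffer first, then restart at 0 and continue up to head. A self-contained model of that loop; the ring structure and callback here are hypothetical, for illustration only:

    struct demo_ring { int head, tail, size; };

    static void demo_drain(struct demo_ring *r, void (*handle)(int idx))
    {
        int head = r->head, tail = r->tail;

        if (tail > head) {              /* producer wrapped around */
            while (tail < r->size)
                handle(tail++);
            tail = 0;
        }
        while (tail < head)             /* then the unwrapped part */
            handle(tail++);

        r->tail = tail;                 /* publish what was consumed */
    }
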
-diff -Nurp pristine-linux-2.6.18.2/fs/aio.c tmp-linux-2.6-xen.patch/fs/aio.c
---- pristine-linux-2.6.18.2/fs/aio.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/fs/aio.c	2007-10-14 01:51:15.000000000 +0200
-@@ -34,6 +34,11 @@
- #include <asm/uaccess.h>
- #include <asm/mmu_context.h>
- 
-+#ifdef CONFIG_EPOLL
-+#include <linux/poll.h>
-+#include <linux/eventpoll.h>
-+#endif
++/*
++ * The Linux memory management assumes a three-level page table setup. On
++ * the i386, we use that, but "fold" the mid level into the top-level page
++ * table, so that we physically have the same two-level page table as the
++ * i386 mmu expects.
++ *
++ * This file contains the functions and defines necessary to modify and use
++ * the i386 page table tree.
++ */
++#ifndef __ASSEMBLY__
++#include <asm/processor.h>
++#include <asm/fixmap.h>
++#include <linux/threads.h>
 +
- #if DEBUG > 1
- #define dprintk		printk
- #else
-@@ -1015,6 +1020,10 @@ put_rq:
- 	if (waitqueue_active(&ctx->wait))
- 		wake_up(&ctx->wait);
- 
-+#ifdef CONFIG_EPOLL
-+	if (ctx->file && waitqueue_active(&ctx->poll_wait))
-+		wake_up(&ctx->poll_wait);
-+#endif
- 	if (ret)
- 		put_ioctx(ctx);
- 
-@@ -1024,6 +1033,8 @@ put_rq:
- /* aio_read_evt
-  *	Pull an event off of the ioctx's event ring.  Returns the number of 
-  *	events fetched (0 or 1 ;-)
-+ *	If ent parameter is 0, just returns the number of events that would
-+ *	be fetched.
-  *	FIXME: make this use cmpxchg.
-  *	TODO: make the ringbuffer user mmap()able (requires FIXME).
-  */
-@@ -1046,13 +1057,18 @@ static int aio_read_evt(struct kioctx *i
- 
- 	head = ring->head % info->nr;
- 	if (head != ring->tail) {
--		struct io_event *evp = aio_ring_event(info, head, KM_USER1);
--		*ent = *evp;
--		head = (head + 1) % info->nr;
--		smp_mb(); /* finish reading the event before updatng the head */
--		ring->head = head;
--		ret = 1;
--		put_aio_ring_event(evp, KM_USER1);
-+		if (ent) { /* event requested */
-+			struct io_event *evp =
-+				aio_ring_event(info, head, KM_USER1);
-+			*ent = *evp;
-+			head = (head + 1) % info->nr;
-+			/* finish reading the event before updatng the head */
-+			smp_mb();
-+			ring->head = head;
-+			ret = 1;
-+			put_aio_ring_event(evp, KM_USER1);
-+		} else /* only need to know availability */
-+			ret = 1;
- 	}
- 	spin_unlock(&info->ring_lock);
- 
-@@ -1235,9 +1251,78 @@ static void io_destroy(struct kioctx *io
- 
- 	aio_cancel_all(ioctx);
- 	wait_for_all_aios(ioctx);
-+#ifdef CONFIG_EPOLL
-+	/* forget the poll file, but it's up to the user to close it */
-+	if (ioctx->file) {
-+		ioctx->file->private_data = 0;
-+		ioctx->file = 0;
-+	}
++#ifndef _I386_BITOPS_H
++#include <asm/bitops.h>
 +#endif
- 	put_ioctx(ioctx);	/* once for the lookup */
- }
- 
-+#ifdef CONFIG_EPOLL
-+
-+static int aio_queue_fd_close(struct inode *inode, struct file *file)
-+{
-+	struct kioctx *ioctx = file->private_data;
-+	if (ioctx) {
-+		file->private_data = 0;
-+		spin_lock_irq(&ioctx->ctx_lock);
-+		ioctx->file = 0;
-+		spin_unlock_irq(&ioctx->ctx_lock);
-+	}
-+	return 0;
-+}
-+
-+static unsigned int aio_queue_fd_poll(struct file *file, poll_table *wait)
-+{	unsigned int pollflags = 0;
-+	struct kioctx *ioctx = file->private_data;
 +
-+	if (ioctx) {
++#include <linux/slab.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
 +
-+		spin_lock_irq(&ioctx->ctx_lock);
-+		/* Insert inside our poll wait queue */
-+		poll_wait(file, &ioctx->poll_wait, wait);
++/* Is this pagetable pinned? */
++#define PG_pinned	PG_arch_1
 +
-+		/* Check our condition */
-+		if (aio_read_evt(ioctx, 0))
-+			pollflags = POLLIN | POLLRDNORM;
-+		spin_unlock_irq(&ioctx->ctx_lock);
-+	}
++struct mm_struct;
++struct vm_area_struct;
 +
-+	return pollflags;
-+}
++/*
++ * ZERO_PAGE is a global shared page that is always zero: used
++ * for zero-mapped memory areas etc.
++ */
++#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
++extern unsigned long empty_zero_page[1024];
++extern pgd_t *swapper_pg_dir;
++extern kmem_cache_t *pgd_cache;
++extern kmem_cache_t *pmd_cache;
++extern spinlock_t pgd_lock;
++extern struct page *pgd_list;
 +
-+static const struct file_operations aioq_fops = {
-+	.release	= aio_queue_fd_close,
-+	.poll		= aio_queue_fd_poll
-+};
++void pmd_ctor(void *, kmem_cache_t *, unsigned long);
++void pgd_ctor(void *, kmem_cache_t *, unsigned long);
++void pgd_dtor(void *, kmem_cache_t *, unsigned long);
++void pgtable_cache_init(void);
++void paging_init(void);
 +
-+/* make_aio_fd:
-+ *  Create a file descriptor that can be used to poll the event queue.
-+ *  Based and piggybacked on the excellent epoll code.
++/*
++ * The Linux x86 paging architecture is 'compile-time dual-mode', it
++ * implements both the traditional 2-level x86 page tables and the
++ * newer 3-level PAE-mode page tables.
 + */
++#ifdef CONFIG_X86_PAE
++# include <asm/pgtable-3level-defs.h>
++# define PMD_SIZE	(1UL << PMD_SHIFT)
++# define PMD_MASK	(~(PMD_SIZE-1))
++#else
++# include <asm/pgtable-2level-defs.h>
++#endif
 +
-+static int make_aio_fd(struct kioctx *ioctx)
-+{
-+	int error, fd;
-+	struct inode *inode;
-+	struct file *file;
++#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
++#define PGDIR_MASK	(~(PGDIR_SIZE-1))
 +
-+	error = ep_getfd(&fd, &inode, &file, NULL, &aioq_fops);
-+	if (error)
-+		return error;
++#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
++#define FIRST_USER_ADDRESS	0
 +
-+	/* associate the file with the IO context */
-+	file->private_data = ioctx;
-+	ioctx->file = file;
-+	init_waitqueue_head(&ioctx->poll_wait);
-+	return fd;
-+}
-+#endif
++#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
++#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
 +
++#define TWOLEVEL_PGDIR_SHIFT	22
++#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
++#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
 +
- /* sys_io_setup:
-  *	Create an aio_context capable of receiving at least nr_events.
-  *	ctxp must not point to an aio_context that already exists, and
-@@ -1250,18 +1335,30 @@ static void io_destroy(struct kioctx *io
-  *	resources are available.  May fail with -EFAULT if an invalid
-  *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
-  *	implemented.
-+ *
-+ *	To request a selectable fd, the user context has to be initialized
-+ *	to 1, instead of 0, and the return value is the fd.
-+ *	This keeps the system call compatible, since a non-zero value
-+ *	was not allowed so far.
-  */
- asmlinkage long sys_io_setup(unsigned nr_events, aio_context_t __user *ctxp)
- {
- 	struct kioctx *ioctx = NULL;
- 	unsigned long ctx;
- 	long ret;
-+	int make_fd = 0;
- 
- 	ret = get_user(ctx, ctxp);
- 	if (unlikely(ret))
- 		goto out;
- 
- 	ret = -EINVAL;
-+#ifdef CONFIG_EPOLL
-+	if (ctx == 1) {
-+		make_fd = 1;
-+		ctx = 0;
-+	}
-+#endif
- 	if (unlikely(ctx || nr_events == 0)) {
- 		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
- 		         ctx, nr_events);
-@@ -1272,8 +1369,12 @@ asmlinkage long sys_io_setup(unsigned nr
- 	ret = PTR_ERR(ioctx);
- 	if (!IS_ERR(ioctx)) {
- 		ret = put_user(ioctx->user_id, ctxp);
--		if (!ret)
--			return 0;
-+#ifdef CONFIG_EPOLL
-+		if (make_fd && ret >= 0)
-+			ret = make_aio_fd(ioctx);
-+#endif
-+		if (ret >= 0)
-+			return ret;
- 
- 		get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
- 		io_destroy(ioctx);
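
The sys_io_setup() comment above documents the userspace contract of this (now dropped) CONFIG_EPOLL extension: initialize the aio_context_t to 1 instead of 0, and the syscall returns a pollable descriptor rather than 0. A hedged userspace sketch of that contract; it assumes a kernel that still carries this patch and uses the raw syscall since libaio knows nothing of the extension:

    #include <poll.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/aio_abi.h>

    static int demo_pollable_aio(void)
    {
        aio_context_t ctx = 1;  /* 1, not 0: request a pollable fd */
        long fd = syscall(SYS_io_setup, 128, &ctx);
        if (fd < 0)
            return -1;

        struct pollfd pfd = { .fd = (int)fd, .events = POLLIN };
        poll(&pfd, 1, -1);      /* wakes once completions are queued */
        /* ...then reap them with io_getevents(ctx, ...) as usual */
        return 0;
    }
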
-diff -Nurp pristine-linux-2.6.18.2/fs/eventpoll.c tmp-linux-2.6-xen.patch/fs/eventpoll.c
---- pristine-linux-2.6.18.2/fs/eventpoll.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/fs/eventpoll.c	2007-10-14 01:51:15.000000000 +0200
-@@ -236,8 +236,6 @@ struct ep_pqueue {
- 
- static void ep_poll_safewake_init(struct poll_safewake *psw);
- static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq);
--static int ep_getfd(int *efd, struct inode **einode, struct file **efile,
--		    struct eventpoll *ep);
- static int ep_alloc(struct eventpoll **pep);
- static void ep_free(struct eventpoll *ep);
- static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd);
-@@ -267,7 +265,7 @@ static int ep_events_transfer(struct eve
- static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
- 		   int maxevents, long timeout);
- static int eventpollfs_delete_dentry(struct dentry *dentry);
--static struct inode *ep_eventpoll_inode(void);
-+static struct inode *ep_eventpoll_inode(const struct file_operations *fops);
- static int eventpollfs_get_sb(struct file_system_type *fs_type,
- 			      int flags, const char *dev_name,
- 			      void *data, struct vfsmount *mnt);
-@@ -517,7 +515,7 @@ asmlinkage long sys_epoll_create(int siz
- 	 * Creates all the items needed to setup an eventpoll file. That is,
- 	 * a file structure, and inode and a free file descriptor.
- 	 */
--	error = ep_getfd(&fd, &inode, &file, ep);
-+	error = ep_getfd(&fd, &inode, &file, ep, &eventpoll_fops);
- 	if (error)
- 		goto eexit_2;
- 
-@@ -702,8 +700,8 @@ eexit_1:
- /*
-  * Creates the file descriptor to be used by the epoll interface.
-  */
--static int ep_getfd(int *efd, struct inode **einode, struct file **efile,
--		    struct eventpoll *ep)
-+int ep_getfd(int *efd, struct inode **einode, struct file **efile,
-+		    struct eventpoll *ep, const struct file_operations *fops)
- {
- 	struct qstr this;
- 	char name[32];
-@@ -719,7 +717,7 @@ static int ep_getfd(int *efd, struct ino
- 		goto eexit_1;
- 
- 	/* Allocates an inode from the eventpoll file system */
--	inode = ep_eventpoll_inode();
-+	inode = ep_eventpoll_inode(fops);
- 	error = PTR_ERR(inode);
- 	if (IS_ERR(inode))
- 		goto eexit_2;
-@@ -750,7 +748,7 @@ static int ep_getfd(int *efd, struct ino
- 
- 	file->f_pos = 0;
- 	file->f_flags = O_RDONLY;
--	file->f_op = &eventpoll_fops;
-+	file->f_op = fops;
- 	file->f_mode = FMODE_READ;
- 	file->f_version = 0;
- 	file->private_data = ep;
-@@ -1569,7 +1567,7 @@ static int eventpollfs_delete_dentry(str
- }
- 
- 
--static struct inode *ep_eventpoll_inode(void)
-+static struct inode *ep_eventpoll_inode(const struct file_operations *fops)
- {
- 	int error = -ENOMEM;
- 	struct inode *inode = new_inode(eventpoll_mnt->mnt_sb);
-@@ -1577,7 +1575,7 @@ static struct inode *ep_eventpoll_inode(
- 	if (!inode)
- 		goto eexit_1;
- 
--	inode->i_fop = &eventpoll_fops;
-+	inode->i_fop = fops;
- 
- 	/*
- 	 * Mark the inode dirty from the very beginning,
-diff -Nurp pristine-linux-2.6.18.2/fs/Kconfig tmp-linux-2.6-xen.patch/fs/Kconfig
---- pristine-linux-2.6.18.2/fs/Kconfig	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/fs/Kconfig	2007-07-30 16:35:12.000000000 +0200
-@@ -865,6 +865,7 @@ config TMPFS
- config HUGETLBFS
- 	bool "HugeTLB file system support"
- 	depends X86 || IA64 || PPC64 || SPARC64 || SUPERH || BROKEN
-+	depends !XEN
- 	help
- 	  hugetlbfs is a filesystem backing for HugeTLB pages, based on
- 	  ramfs. For architectures that support it, say Y here and read
-diff -Nurp pristine-linux-2.6.18.2/fs/proc/proc_misc.c tmp-linux-2.6-xen.patch/fs/proc/proc_misc.c
---- pristine-linux-2.6.18.2/fs/proc/proc_misc.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/fs/proc/proc_misc.c	2007-10-14 01:51:15.000000000 +0200
-@@ -471,7 +471,7 @@ static int show_stat(struct seq_file *p,
- 		(unsigned long long)cputime64_to_clock_t(irq),
- 		(unsigned long long)cputime64_to_clock_t(softirq),
- 		(unsigned long long)cputime64_to_clock_t(steal));
--	for_each_online_cpu(i) {
-+	for_each_possible_cpu(i) {
- 
- 		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
- 		user = kstat_cpu(i).cpustat.user;
-diff -Nurp pristine-linux-2.6.18.2/fs/proc/vmcore.c tmp-linux-2.6-xen.patch/fs/proc/vmcore.c
---- pristine-linux-2.6.18.2/fs/proc/vmcore.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/fs/proc/vmcore.c	2007-10-14 01:51:15.000000000 +0200
-@@ -514,7 +514,7 @@ static int __init parse_crash_elf64_head
- 	/* Do some basic Verification. */
- 	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
- 		(ehdr.e_type != ET_CORE) ||
--		!elf_check_arch(&ehdr) ||
-+		!vmcore_elf_check_arch(&ehdr) ||
- 		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
- 		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
- 		ehdr.e_version != EV_CURRENT ||
-diff -Nurp pristine-linux-2.6.18.2/include/asm-generic/pci.h tmp-linux-2.6-xen.patch/include/asm-generic/pci.h
---- pristine-linux-2.6.18.2/include/asm-generic/pci.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-generic/pci.h	2007-09-30 18:06:18.000000000 +0200
-@@ -43,7 +43,9 @@ pcibios_select_root(struct pci_dev *pdev
- 	return root;
- }
- 
-+#ifndef pcibios_scan_all_fns
- #define pcibios_scan_all_fns(a, b)	0
++/* Just any arbitrary offset to the start of the vmalloc VM area: the
++ * current 8MB value just means that there will be an 8MB "hole" after the
++ * physical memory until the kernel virtual memory starts.  That means that
++ * any out-of-bounds memory accesses will hopefully be caught.
++ * The vmalloc() routines leave a hole of 4kB between each vmalloced
++ * area for the same reason. ;)
++ */
++#define VMALLOC_OFFSET	(8*1024*1024)
++#define VMALLOC_START	(((unsigned long) high_memory + vmalloc_earlyreserve + \
++			2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
++#ifdef CONFIG_HIGHMEM
++# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
++#else
++# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
 +#endif
- 
- #ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
- static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-diff -Nurp pristine-linux-2.6.18.2/include/asm-generic/vmlinux.lds.h tmp-linux-2.6-xen.patch/include/asm-generic/vmlinux.lds.h
---- pristine-linux-2.6.18.2/include/asm-generic/vmlinux.lds.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-generic/vmlinux.lds.h	2007-10-14 01:51:15.000000000 +0200
-@@ -194,3 +194,6 @@
- 		.stab.index 0 : { *(.stab.index) }			\
- 		.stab.indexstr 0 : { *(.stab.indexstr) }		\
- 		.comment 0 : { *(.comment) }
 +
-+#define NOTES								\
-+		.notes : { *(.note.*) } :note
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/apic.h tmp-linux-2.6-xen.patch/include/asm-i386/apic.h
---- pristine-linux-2.6.18.2/include/asm-i386/apic.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/apic.h	2007-07-30 16:35:12.000000000 +0200
-@@ -119,10 +119,12 @@ extern void enable_NMI_through_LVT0 (voi
- 
- extern int disable_timer_pin_1;
- 
-+#ifndef CONFIG_XEN
- void smp_send_timer_broadcast_ipi(struct pt_regs *regs);
- void switch_APIC_timer_to_ipi(void *cpumask);
- void switch_ipi_to_APIC_timer(void *cpumask);
- #define ARCH_APICTIMER_STOPS_ON_C3	1
-+#endif
- 
- extern int timer_over_8254;
- 
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/fixmap.h tmp-linux-2.6-xen.patch/include/asm-i386/fixmap.h
---- pristine-linux-2.6.18.2/include/asm-i386/fixmap.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/fixmap.h	2007-10-14 01:51:15.000000000 +0200
-@@ -19,7 +19,7 @@
-  * Leave one empty page between vmalloc'ed areas and
-  * the start of the fixmap.
-  */
--#define __FIXADDR_TOP	0xfffff000
-+extern unsigned long __FIXADDR_TOP;
- 
- #ifndef __ASSEMBLY__
- #include <linux/kernel.h>
-@@ -94,6 +94,8 @@ enum fixed_addresses {
- extern void __set_fixmap (enum fixed_addresses idx,
- 					unsigned long phys, pgprot_t flags);
- 
-+extern void set_fixaddr_top(unsigned long top);
++/*
++ * _PAGE_PSE set in the page directory entry just means that
++ * the page directory entry points directly to a 4MB-aligned block of
++ * memory. 
++ */
++#define _PAGE_BIT_PRESENT	0
++#define _PAGE_BIT_RW		1
++#define _PAGE_BIT_USER		2
++#define _PAGE_BIT_PWT		3
++#define _PAGE_BIT_PCD		4
++#define _PAGE_BIT_ACCESSED	5
++#define _PAGE_BIT_DIRTY		6
++#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page, Pentium+, if present.. */
++#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
++/*#define _PAGE_BIT_UNUSED1	9*/	/* available for programmer */
++#define _PAGE_BIT_UNUSED2	10
++#define _PAGE_BIT_UNUSED3	11
++#define _PAGE_BIT_NX		63
 +
- #define set_fixmap(idx, phys) \
- 		__set_fixmap(idx, phys, PAGE_KERNEL)
- /*
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/kexec.h tmp-linux-2.6-xen.patch/include/asm-i386/kexec.h
---- pristine-linux-2.6.18.2/include/asm-i386/kexec.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/kexec.h	2007-07-30 16:35:12.000000000 +0200
-@@ -1,6 +1,26 @@
- #ifndef _I386_KEXEC_H
- #define _I386_KEXEC_H
- 
-+#define PA_CONTROL_PAGE  0
-+#define VA_CONTROL_PAGE  1
-+#define PA_PGD           2
-+#define VA_PGD           3
-+#define PA_PTE_0         4
-+#define VA_PTE_0         5
-+#define PA_PTE_1         6
-+#define VA_PTE_1         7
++#define _PAGE_PRESENT	0x001
++#define _PAGE_RW	0x002
++#define _PAGE_USER	0x004
++#define _PAGE_PWT	0x008
++#define _PAGE_PCD	0x010
++#define _PAGE_ACCESSED	0x020
++#define _PAGE_DIRTY	0x040
++#define _PAGE_PSE	0x080	/* 4 MB (or 2MB) page, Pentium+, if present.. */
++#define _PAGE_GLOBAL	0x100	/* Global TLB entry PPro+ */
++/*#define _PAGE_UNUSED1	0x200*/	/* available for programmer */
++#define _PAGE_UNUSED2	0x400
++#define _PAGE_UNUSED3	0x800
++
++/* If _PAGE_PRESENT is clear, we use these: */
++#define _PAGE_FILE	0x040	/* nonlinear file mapping, saved PTE; unset:swap */
++#define _PAGE_PROTNONE	0x080	/* if the user mapped it with PROT_NONE;
++				   pte_present gives true */
 +#ifdef CONFIG_X86_PAE
-+#define PA_PMD_0         8
-+#define VA_PMD_0         9
-+#define PA_PMD_1         10
-+#define VA_PMD_1         11
-+#define PAGES_NR         12
++#define _PAGE_NX	(1ULL<<_PAGE_BIT_NX)
 +#else
-+#define PAGES_NR         8
++#define _PAGE_NX	0
 +#endif
 +
-+#ifndef __ASSEMBLY__
-+
- #include <asm/fixmap.h>
- #include <asm/ptrace.h>
- #include <asm/string.h>
-@@ -27,6 +47,9 @@
- /* The native architecture */
- #define KEXEC_ARCH KEXEC_ARCH_386
- 
-+/* We can also handle crash dumps from 64 bit kernel. */
-+#define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
++/* Mapped page is I/O or foreign and has no associated page struct. */
++#define _PAGE_IO	0x200
 +
- #define MAX_NOTE_BYTES 1024
- 
- /* CPU does not save ss and esp on stack if execution is already
-@@ -72,5 +95,26 @@ static inline void crash_setup_regs(stru
-                newregs->eip = (unsigned long)current_text_addr();
-        }
- }
-+asmlinkage NORET_TYPE void
-+relocate_kernel(unsigned long indirection_page,
-+		unsigned long control_page,
-+		unsigned long start_address,
-+		unsigned int has_pae) ATTRIB_NORET;
++#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
++#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
++#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_IO)
 +
++#define PAGE_NONE \
++	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
++#define PAGE_SHARED \
++	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
 +
-+/* Under Xen we need to work with machine addresses. These macros give the
-+ * machine address of a certain page to the generic kexec code instead of 
-+ * the pseudo physical address which would be given by the default macros.
-+ */
++#define PAGE_SHARED_EXEC \
++	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
++#define PAGE_COPY_NOEXEC \
++	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
++#define PAGE_COPY_EXEC \
++	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++#define PAGE_COPY \
++	PAGE_COPY_NOEXEC
++#define PAGE_READONLY \
++	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
++#define PAGE_READONLY_EXEC \
++	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
 +
-+#ifdef CONFIG_XEN
-+#define KEXEC_ARCH_HAS_PAGE_MACROS
-+#define kexec_page_to_pfn(page)  pfn_to_mfn(page_to_pfn(page))
-+#define kexec_pfn_to_page(pfn)   pfn_to_page(mfn_to_pfn(pfn))
-+#define kexec_virt_to_phys(addr) virt_to_machine(addr)
-+#define kexec_phys_to_virt(addr) phys_to_virt(machine_to_phys(addr))
-+#endif
++#define _PAGE_KERNEL \
++	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
++#define _PAGE_KERNEL_EXEC \
++	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
 +
-+#endif /* __ASSEMBLY__ */
- 
- #endif /* _I386_KEXEC_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-default/mach_traps.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-default/mach_traps.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-default/mach_traps.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-default/mach_traps.h	2007-07-30 16:35:12.000000000 +0200
-@@ -15,6 +15,18 @@ static inline void clear_mem_error(unsig
- 	outb(reason, 0x61);
- }
- 
-+static inline void clear_io_check_error(unsigned char reason)
-+{
-+	unsigned long i;
++extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
++#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
++#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD)
++#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
++#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)
 +
-+	reason = (reason & 0xf) | 8;
-+	outb(reason, 0x61);
-+	i = 2000;
-+	while (--i) udelay(1000);
-+	reason &= ~8;
-+	outb(reason, 0x61);
-+}
++#define PAGE_KERNEL		__pgprot(__PAGE_KERNEL)
++#define PAGE_KERNEL_RO		__pgprot(__PAGE_KERNEL_RO)
++#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)
++#define PAGE_KERNEL_NOCACHE	__pgprot(__PAGE_KERNEL_NOCACHE)
++#define PAGE_KERNEL_LARGE	__pgprot(__PAGE_KERNEL_LARGE)
++#define PAGE_KERNEL_LARGE_EXEC	__pgprot(__PAGE_KERNEL_LARGE_EXEC)
 +
- static inline unsigned char get_nmi_reason(void)
- {
- 	return inb(0x61);
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/agp.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/agp.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/agp.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/agp.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,37 @@
-+#ifndef AGP_H
-+#define AGP_H 1
++/*
++ * The i386 can't do page protection for execute, and treats it the
++ * same as read. Also, write permissions imply read permissions.
++ * This is the closest we can get.
++ */
++#define __P000	PAGE_NONE
++#define __P001	PAGE_READONLY
++#define __P010	PAGE_COPY
++#define __P011	PAGE_COPY
++#define __P100	PAGE_READONLY_EXEC
++#define __P101	PAGE_READONLY_EXEC
++#define __P110	PAGE_COPY_EXEC
++#define __P111	PAGE_COPY_EXEC
 +
-+#include <asm/pgtable.h>
-+#include <asm/cacheflush.h>
-+#include <asm/system.h>
++#define __S000	PAGE_NONE
++#define __S001	PAGE_READONLY
++#define __S010	PAGE_SHARED
++#define __S011	PAGE_SHARED
++#define __S100	PAGE_READONLY_EXEC
++#define __S101	PAGE_READONLY_EXEC
++#define __S110	PAGE_SHARED_EXEC
++#define __S111	PAGE_SHARED_EXEC
 +
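
/*
 * How the __P/__S tables above get consumed: the generic mm code indexes
 * a 16-entry protection_map[] with the PROT_READ/WRITE/EXEC bits of an
 * mmap() request plus a "shared" bit. Kernel-context sketch mirroring
 * mm/mmap.c; not part of this patch.
 */
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};

/* index bits: 1 = read, 2 = write, 4 = exec, 8 = shared mapping */
#define vm_prot_index(vm_flags)	((vm_flags) & 0x0f)
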
-+/* 
-+ * Functions to keep the agpgart mappings coherent with the MMU.
-+ * The GART gives the CPU a physical alias of pages in memory. The alias region is
-+ * mapped uncacheable. Make sure there are no conflicting mappings
-+ * with different cacheability attributes for the same page. This avoids
-+ * data corruption on some CPUs.
++/*
++ * Define this if things work differently on an i386 and an i486:
++ * it will (on an i486) warn about kernel memory accesses that are
++ * done without a 'access_ok(VERIFY_WRITE,..)'
 + */
++#undef TEST_ACCESS_OK
 +
-+int map_page_into_agp(struct page *page);
-+int unmap_page_from_agp(struct page *page);
-+#define flush_agp_mappings() global_flush_tlb()
++/* The boot page tables (all created as a single array) */
++extern unsigned long pg0[];
 +
-+/* Could use CLFLUSH here if the cpu supports it. But then it would
-+   need to be called for each cacheline of the whole page so it may not be 
-+   worth it. Would need a page for it. */
-+#define flush_agp_cache() wbinvd()
++#define pte_present(x)	((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
 +
-+/* Convert a physical address to an address suitable for the GART. */
-+#define phys_to_gart(x) phys_to_machine(x)
-+#define gart_to_phys(x) machine_to_phys(x)
++/* To avoid harmful races, pmd_none(x) should check only the lower word when PAE is enabled */
++#define pmd_none(x)	(!(unsigned long)__pmd_val(x))
++#if CONFIG_XEN_COMPAT <= 0x030002
++/* pmd_present doesn't just test the _PAGE_PRESENT bit, since writable
++   page tables (wr.p.t.) can temporarily clear it. */
++#define pmd_present(x)	(__pmd_val(x))
++#define pmd_bad(x)	((__pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
++#else
++#define pmd_present(x)	(__pmd_val(x) & _PAGE_PRESENT)
++#define pmd_bad(x)	((__pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
++#endif
 +
-+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
-+#define alloc_gatt_pages(order)	({                                          \
-+	char *_t; dma_addr_t _d;                                            \
-+	_t = dma_alloc_coherent(NULL,PAGE_SIZE<<(order),&_d,GFP_KERNEL);    \
-+	_t; })
-+#define free_gatt_pages(table, order)	\
-+	dma_free_coherent(NULL,PAGE_SIZE<<(order),(table),virt_to_bus(table))
 +
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/desc.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/desc.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/desc.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/desc.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,164 @@
-+#ifndef __ARCH_DESC_H
-+#define __ARCH_DESC_H
++#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
 +
-+#include <asm/ldt.h>
-+#include <asm/segment.h>
++/*
++ * The following only work if pte_present() is true.
++ * Undefined behaviour if not.
++ */
++static inline int pte_user(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
++static inline int pte_read(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
++static inline int pte_dirty(pte_t pte)		{ return (pte).pte_low & _PAGE_DIRTY; }
++static inline int pte_young(pte_t pte)		{ return (pte).pte_low & _PAGE_ACCESSED; }
++static inline int pte_write(pte_t pte)		{ return (pte).pte_low & _PAGE_RW; }
++static inline int pte_huge(pte_t pte)		{ return (pte).pte_low & _PAGE_PSE; }
 +
-+#define CPU_16BIT_STACK_SIZE 1024
++/*
++ * The following only works if pte_present() is not true.
++ */
++static inline int pte_file(pte_t pte)		{ return (pte).pte_low & _PAGE_FILE; }
 +
-+#ifndef __ASSEMBLY__
++static inline pte_t pte_rdprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
++static inline pte_t pte_exprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
++static inline pte_t pte_mkclean(pte_t pte)	{ (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
++static inline pte_t pte_mkold(pte_t pte)	{ (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
++static inline pte_t pte_wrprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_RW; return pte; }
++static inline pte_t pte_mkread(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return pte; }
++static inline pte_t pte_mkexec(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return pte; }
++static inline pte_t pte_mkdirty(pte_t pte)	{ (pte).pte_low |= _PAGE_DIRTY; return pte; }
++static inline pte_t pte_mkyoung(pte_t pte)	{ (pte).pte_low |= _PAGE_ACCESSED; return pte; }
++static inline pte_t pte_mkwrite(pte_t pte)	{ (pte).pte_low |= _PAGE_RW; return pte; }
++static inline pte_t pte_mkhuge(pte_t pte)	{ (pte).pte_low |= _PAGE_PSE; return pte; }
 +
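
/*
 * Each pte_mk*() helper above returns a modified copy rather than
 * touching the page table, so calls compose. A standalone model with
 * pte_t reduced to its low word (constants copied from this file):
 */
#include <stdio.h>

#define _PAGE_RW	0x002
#define _PAGE_DIRTY	0x040

typedef struct { unsigned long pte_low; } pte_t;

static pte_t pte_mkwrite(pte_t pte) { pte.pte_low |= _PAGE_RW; return pte; }
static pte_t pte_mkdirty(pte_t pte) { pte.pte_low |= _PAGE_DIRTY; return pte; }

int main(void)
{
	pte_t pte = { 0x001 };			/* present, read-only, clean */

	pte = pte_mkdirty(pte_mkwrite(pte));	/* composes: now 0x043 */
	printf("pte_low = %#lx\n", pte.pte_low);
	return 0;
}
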
-+#include <linux/preempt.h>
-+#include <linux/smp.h>
++#ifdef CONFIG_X86_PAE
++# include <asm/pgtable-3level.h>
++#else
++# include <asm/pgtable-2level.h>
++#endif
 +
-+#include <asm/mmu.h>
++#define ptep_test_and_clear_dirty(vma, addr, ptep)			\
++({									\
++	pte_t __pte = *(ptep);						\
++	int __ret = pte_dirty(__pte);					\
++	if (__ret) {							\
++		__pte = pte_mkclean(__pte);				\
++		if ((vma)->vm_mm != current->mm ||			\
++		    HYPERVISOR_update_va_mapping(addr, __pte, 0))	\
++			(ptep)->pte_low = __pte.pte_low;		\
++	}								\
++	__ret;								\
++})
 +
-+extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
++#define ptep_test_and_clear_young(vma, addr, ptep)			\
++({									\
++	pte_t __pte = *(ptep);						\
++	int __ret = pte_young(__pte);					\
++	if (__ret) {							\
++		__pte = pte_mkold(__pte);				\
++		if ((vma)->vm_mm != current->mm ||			\
++		    HYPERVISOR_update_va_mapping(addr, __pte, 0))	\
++			(ptep)->pte_low = __pte.pte_low;		\
++	}								\
++	__ret;								\
++})
 +
-+DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
++#define ptep_get_and_clear_full(mm, addr, ptep, full)			\
++	((full) ? ({							\
++		pte_t __res = *(ptep);					\
++		if (test_bit(PG_pinned, &virt_to_page((mm)->pgd)->flags)) \
++			xen_l1_entry_update(ptep, __pte(0));		\
++		else							\
++			*(ptep) = __pte(0);				\
++		__res;							\
++	 }) :								\
++	 ptep_get_and_clear(mm, addr, ptep))
 +
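
/*
 * Sketch of where the "full" variant above is reached (kernel context,
 * as in mm/memory.c's zap_pte_range): 'full' is non-zero only while an
 * entire mm is being torn down, which lets pinned Xen page tables take
 * the direct xen_l1_entry_update() path instead of a trapped write.
 */
pte_t ptent = ptep_get_and_clear_full(mm, addr, ptep, tlb->fullmm);
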
-+struct Xgt_desc_struct {
-+	unsigned short size;
-+	unsigned long address __attribute__((packed));
-+	unsigned short pad;
-+} __attribute__ ((packed));
++static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++	pte_t pte = *ptep;
++	if (pte_write(pte))
++		set_pte_at(mm, addr, ptep, pte_wrprotect(pte));
++}
 +
-+extern struct Xgt_desc_struct idt_descr;
-+DECLARE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
++/*
++ * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
++ *
++ *  dst - pointer to pgd range anywhere on a pgd page
++ *  src - ""
++ *  count - the number of pgds to copy.
++ *
++ * dst and src can be on the same page, but the range must not overlap,
++ * and must not cross a page boundary.
++ */
++static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
++{
++       memcpy(dst, src, count * sizeof(pgd_t));
++}
++
++/*
++ * Macro to mark a page protection value as "uncacheable".  On processors which do not support
++ * it, this is a no-op.
++ */
++#define pgprot_noncached(prot)	((boot_cpu_data.x86 > 3)					  \
++				 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
 +
++/*
++ * Conversion functions: convert a page and protection to a page entry,
++ * and a page entry and page directory to the page they refer to.
++ */
 +
-+static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
++#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
++
++static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 +{
-+	return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
++	/*
++	 * Since this might change the present bit (which controls whether
++	 * a pte_t object has undergone p2m translation), we must use
++	 * pte_val() on the input pte and __pte() for the return value.
++	 */
++	paddr_t pteval = pte_val(pte);
++
++	pteval &= _PAGE_CHG_MASK;
++	pteval |= pgprot_val(newprot);
++#ifdef CONFIG_X86_PAE
++	pteval &= __supported_pte_mask;
++#endif
++	return __pte(pteval);
 +}
 +
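
/*
 * Intended use of pte_modify() above, as in the generic mprotect path:
 * the pfn and the _PAGE_CHG_MASK bits survive, only the protection bits
 * are swapped. Kernel-context sketch; set_pte_at() comes from the
 * 2/3-level headers included earlier.
 */
pte_t pte = *ptep;
pte = pte_modify(pte, newprot);		/* pfn, accessed, dirty, IO kept */
set_pte_at(mm, addr, ptep, pte);
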
-+#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
-+#define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
++#define pmd_large(pmd) \
++((__pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
 +
-+#define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
-+#define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
-+#define load_tr(tr) __asm__ __volatile("ltr %0"::"mr" (tr))
-+#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"mr" (ldt))
++/*
++ * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
++ *
++ * this macro returns the index of the entry in the pgd page which would
++ * control the given virtual address
++ */
++#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
++#define pgd_index_k(addr) pgd_index(addr)
 +
-+#define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
-+#define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
-+#define store_tr(tr) __asm__ ("str %0":"=mr" (tr))
-+#define store_ldt(ldt) __asm__ ("sldt %0":"=mr" (ldt))
++/*
++ * pgd_offset() returns a (pgd_t *)
++ * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
++ */
++#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
 +
 +/*
-+ * This is the ldt that every process will get unless we need
-+ * something other than this.
++ * a shortcut which implies the use of the kernel's pgd, instead
++ * of a process's
 + */
-+extern struct desc_struct default_ldt[];
-+extern void set_intr_gate(unsigned int irq, void * addr);
++#define pgd_offset_k(address) pgd_offset(&init_mm, address)
 +
-+#define _set_tssldt_desc(n,addr,limit,type) \
-+__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
-+	"movw %w1,2(%2)\n\t" \
-+	"rorl $16,%1\n\t" \
-+	"movb %b1,4(%2)\n\t" \
-+	"movb %4,5(%2)\n\t" \
-+	"movb $0,6(%2)\n\t" \
-+	"movb %h1,7(%2)\n\t" \
-+	"rorl $16,%1" \
-+	: "=m"(*(n)) : "q" (addr), "r"(n), "ir"(limit), "i"(type))
++/*
++ * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
++ *
++ * this macro returns the index of the entry in the pmd page which would
++ * control the given virtual address
++ */
++#define pmd_index(address) \
++		(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
 +
-+#ifndef CONFIG_X86_NO_TSS
-+static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
-+{
-+	_set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
-+		offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
-+}
++/*
++ * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
++ *
++ * this macro returns the index of the entry in the pte page which would
++ * control the given virtual address
++ */
++#define pte_index(address) \
++		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
++#define pte_offset_kernel(dir, address) \
++	((pte_t *) pmd_page_kernel(*(dir)) +  pte_index(address))
 +
-+#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
++#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
++
++#define pmd_page_kernel(pmd) \
++		((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
++
++/*
++ * Helper function that returns the kernel pagetable entry controlling
++ * the virtual address 'address'. NULL means no pagetable entry present.
++ * NOTE: the return type is pte_t but if the pmd is PSE then we return it
++ * as a pte too.
++ */
++extern pte_t *lookup_address(unsigned long address);
++
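
/*
 * The index/offset macros above combine into a software page-table
 * walk, roughly what lookup_address() implements. Kernel-context
 * sketch; pgd_none(), pud_offset() and pmd_offset() are assumed from
 * the generic and 2/3-level headers pulled in by this file.
 */
static pte_t *walk_kernel_address(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pmd = pmd_offset(pud_offset(pgd, address), address);
	if (pmd_none(*pmd) || pmd_large(*pmd))
		return NULL;		/* hole, or a 2M/4M mapping */
	return pte_offset_kernel(pmd, address);
}
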
++/*
++ * Make a given kernel text page executable/non-executable.
++ * Returns the previous executability setting of that page (which
++ * is used to restore the previous state). Used by the SMP bootup code.
++ * NOTE: this is an __init function for security reasons.
++ */
++#ifdef CONFIG_X86_PAE
++ extern int set_kernel_exec(unsigned long vaddr, int enable);
++#else
++ static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
 +#endif
 +
-+static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
-+{
-+	_set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
-+}
++extern void noexec_setup(const char *str);
 +
-+#define LDT_entry_a(info) \
-+	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
++#if defined(CONFIG_HIGHPTE)
++#define pte_offset_map(dir, address) \
++	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + \
++	 pte_index(address))
++#define pte_offset_map_nested(dir, address) \
++	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + \
++	 pte_index(address))
++#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
++#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
++#else
++#define pte_offset_map(dir, address) \
++	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
++#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
++#define pte_unmap(pte) do { } while (0)
++#define pte_unmap_nested(pte) do { } while (0)
++#endif
 +
-+#define LDT_entry_b(info) \
-+	(((info)->base_addr & 0xff000000) | \
-+	(((info)->base_addr & 0x00ff0000) >> 16) | \
-+	((info)->limit & 0xf0000) | \
-+	(((info)->read_exec_only ^ 1) << 9) | \
-+	((info)->contents << 10) | \
-+	(((info)->seg_not_present ^ 1) << 15) | \
-+	((info)->seg_32bit << 22) | \
-+	((info)->limit_in_pages << 23) | \
-+	((info)->useable << 20) | \
-+	0x7000)
++#define __HAVE_ARCH_PTEP_ESTABLISH
++#define ptep_establish(vma, address, ptep, pteval)			\
++	do {								\
++		if ( likely((vma)->vm_mm == current->mm) ) {		\
++			BUG_ON(HYPERVISOR_update_va_mapping(address,	\
++				pteval,					\
++				(unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
++					UVMF_INVLPG|UVMF_MULTI));	\
++		} else {						\
++			xen_l1_entry_update(ptep, pteval);		\
++			flush_tlb_page(vma, address);			\
++		}							\
++	} while (0)
 +
-+#define LDT_empty(info) (\
-+	(info)->base_addr	== 0	&& \
-+	(info)->limit		== 0	&& \
-+	(info)->contents	== 0	&& \
-+	(info)->read_exec_only	== 1	&& \
-+	(info)->seg_32bit	== 0	&& \
-+	(info)->limit_in_pages	== 0	&& \
-+	(info)->seg_not_present	== 1	&& \
-+	(info)->useable		== 0	)
++/*
++ * The i386 doesn't have any external MMU info: the kernel page
++ * tables contain all the necessary information.
++ *
++ * Also, we only update the dirty/accessed state if we set
++ * the dirty bit by hand in the kernel, since the hardware
++ * will do the accessed bit for us, and we don't want to
++ * race with other CPUs that might be updating the dirty
++ * bit at the same time.
++ */
++#define update_mmu_cache(vma,address,pte) do { } while (0)
++#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
++#define ptep_set_access_flags(vma, address, ptep, entry, dirty)		\
++	do {								\
++		if (dirty)						\
++			ptep_establish(vma, address, ptep, entry);	\
++	} while (0)
 +
-+extern int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b);
++#include <xen/features.h>
++void make_lowmem_page_readonly(void *va, unsigned int feature);
++void make_lowmem_page_writable(void *va, unsigned int feature);
++void make_page_readonly(void *va, unsigned int feature);
++void make_page_writable(void *va, unsigned int feature);
++void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
++void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
 +
-+#if TLS_SIZE != 24
-+# error update this code.
-+#endif
++#define virt_to_ptep(va)						\
++({									\
++	pte_t *__ptep = lookup_address((unsigned long)(va));		\
++	BUG_ON(!__ptep || !pte_present(*__ptep));			\
++	__ptep;								\
++})
 +
-+static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
-+{
-+#define C(i) HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), *(u64 *)&t->tls_array[i])
-+	C(0); C(1); C(2);
-+#undef C
-+}
++#define arbitrary_virt_to_machine(va)					\
++	(((maddr_t)pte_mfn(*virt_to_ptep(va)) << PAGE_SHIFT)		\
++	 | ((unsigned long)(va) & (PAGE_SIZE - 1)))
++
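
/*
 * Sketch of the helper above in use (kernel context): hypercalls take
 * machine addresses, so a kernel VA is translated first. The variable
 * names are illustrative only; HYPERVISOR_update_descriptor() is the
 * same call used by load_TLS() elsewhere in this series.
 */
maddr_t ma = arbitrary_virt_to_machine(gdt_entry);
if (HYPERVISOR_update_descriptor(ma, new_descriptor))
	BUG();
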
++#endif /* !__ASSEMBLY__ */
++
++#ifdef CONFIG_FLATMEM
++#define kern_addr_valid(addr)	(1)
++#endif /* CONFIG_FLATMEM */
++
++int direct_remap_pfn_range(struct vm_area_struct *vma,
++                           unsigned long address, 
++                           unsigned long mfn,
++                           unsigned long size, 
++                           pgprot_t prot,
++                           domid_t  domid);
++int direct_kernel_remap_pfn_range(unsigned long address, 
++				  unsigned long mfn,
++				  unsigned long size, 
++				  pgprot_t prot,
++				  domid_t  domid);
++int create_lookup_pte_addr(struct mm_struct *mm,
++                           unsigned long address,
++                           uint64_t *ptep);
++int touch_pte_range(struct mm_struct *mm,
++                    unsigned long address,
++                    unsigned long size);
++
++int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd,
++		unsigned long addr, unsigned long end, pgprot_t newprot);
++
++#define arch_change_pte_range(mm, pmd, addr, end, newprot)	\
++		xen_change_pte_range(mm, pmd, addr, end, newprot)
++
++#define io_remap_pfn_range(vma,from,pfn,size,prot) \
++direct_remap_pfn_range(vma,from,pfn,size,prot,DOMID_IO)
 +
-+static inline void clear_LDT(void)
-+{
-+	int cpu = get_cpu();
++#define MK_IOSPACE_PFN(space, pfn)	(pfn)
++#define GET_IOSPACE(pfn)		0
++#define GET_PFN(pfn)			(pfn)
 +
-+	/*
-+	 * NB. We load the default_ldt for lcall7/27 handling on demand, as
-+	 * it slows down context switching. No one uses it anyway.
-+	 */
-+	cpu = cpu;		/* XXX avoid compiler warning */
-+	xen_set_ldt(0UL, 0);
-+	put_cpu();
-+}
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
++#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
++#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
++#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
++#define __HAVE_ARCH_PTEP_SET_WRPROTECT
++#define __HAVE_ARCH_PTE_SAME
++#include <asm-generic/pgtable.h>
 +
++#endif /* _I386_PGTABLE_H */
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/processor.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/processor.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,743 @@
 +/*
-+ * load one particular LDT into the current CPU
++ * include/asm-i386/processor.h
++ *
++ * Copyright (C) 1994 Linus Torvalds
 + */
-+static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
-+{
-+	void *segments = pc->ldt;
-+	int count = pc->size;
-+
-+	if (likely(!count))
-+		segments = NULL;
 +
-+	xen_set_ldt((unsigned long)segments, count);
-+}
++#ifndef __ASM_I386_PROCESSOR_H
++#define __ASM_I386_PROCESSOR_H
 +
-+static inline void load_LDT(mm_context_t *pc)
-+{
-+	int cpu = get_cpu();
-+	load_LDT_nolock(pc, cpu);
-+	put_cpu();
-+}
++#include <asm/vm86.h>
++#include <asm/math_emu.h>
++#include <asm/segment.h>
++#include <asm/page.h>
++#include <asm/types.h>
++#include <asm/sigcontext.h>
++#include <asm/cpufeature.h>
++#include <asm/msr.h>
++#include <asm/system.h>
++#include <linux/cache.h>
++#include <linux/threads.h>
++#include <asm/percpu.h>
++#include <linux/cpumask.h>
++#include <xen/interface/physdev.h>
 +
-+static inline unsigned long get_desc_base(unsigned long *desc)
-+{
-+	unsigned long base;
-+	base = ((desc[0] >> 16)  & 0x0000ffff) |
-+		((desc[1] << 16) & 0x00ff0000) |
-+		(desc[1] & 0xff000000);
-+	return base;
-+}
++/* flag for disabling the tsc */
++extern int tsc_disable;
 +
-+#endif /* !__ASSEMBLY__ */
++struct desc_struct {
++	unsigned long a,b;
++};
 +
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/dma-mapping.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/dma-mapping.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/dma-mapping.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/dma-mapping.h	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,157 @@
-+#ifndef _ASM_I386_DMA_MAPPING_H
-+#define _ASM_I386_DMA_MAPPING_H
++#define desc_empty(desc) \
++		(!((desc)->a | (desc)->b))
 +
++#define desc_equal(desc1, desc2) \
++		(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
 +/*
-+ * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
-+ * documentation.
++ * Default implementation of macro that returns current
++ * instruction pointer ("program counter").
 + */
++#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
 +
-+#include <linux/mm.h>
-+#include <asm/cache.h>
-+#include <asm/io.h>
-+#include <asm/scatterlist.h>
-+#include <asm/swiotlb.h>
-+
-+static inline int
-+address_needs_mapping(struct device *hwdev, dma_addr_t addr)
-+{
-+	dma_addr_t mask = 0xffffffff;
-+	/* If the device has a mask, use it, otherwise default to 32 bits */
-+	if (hwdev && hwdev->dma_mask)
-+		mask = *hwdev->dma_mask;
-+	return (addr & ~mask) != 0;
-+}
-+
-+static inline int
-+range_straddles_page_boundary(paddr_t p, size_t size)
-+{
-+	extern unsigned long *contiguous_bitmap;
-+	return ((((p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
-+		!test_bit(p >> PAGE_SHIFT, contiguous_bitmap));
-+}
++/*
++ *  CPU type and hardware bug flags. Kept separately for each CPU.
++ *  Members of this structure are referenced in head.S, so think twice
++ *  before touching them. [mj]
++ */
 +
-+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
++struct cpuinfo_x86 {
++	__u8	x86;		/* CPU family */
++	__u8	x86_vendor;	/* CPU vendor */
++	__u8	x86_model;
++	__u8	x86_mask;
++	char	wp_works_ok;	/* It doesn't on 386's */
++	char	hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
++	char	hard_math;
++	char	rfu;
++       	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
++	unsigned long	x86_capability[NCAPINTS];
++	char	x86_vendor_id[16];
++	char	x86_model_id[64];
++	int 	x86_cache_size;  /* in KB - valid for CPUs which support this
++				    call  */
++	int 	x86_cache_alignment;	/* In bytes */
++	char	fdiv_bug;
++	char	f00f_bug;
++	char	coma_bug;
++	char	pad0;
++	int	x86_power;
++	unsigned long loops_per_jiffy;
++#ifdef CONFIG_SMP
++	cpumask_t llc_shared_map;	/* cpus sharing the last level cache */
++#endif
++	unsigned char x86_max_cores;	/* cpuid returned max cores value */
++	unsigned char apicid;
++#ifdef CONFIG_SMP
++	unsigned char booted_cores;	/* number of cores as seen by OS */
++	__u8 phys_proc_id; 		/* Physical processor id. */
++	__u8 cpu_core_id;  		/* Core id */
++#endif
++} __attribute__((__aligned__(SMP_CACHE_BYTES)));
 +
-+void *dma_alloc_coherent(struct device *dev, size_t size,
-+			   dma_addr_t *dma_handle, gfp_t flag);
++#define X86_VENDOR_INTEL 0
++#define X86_VENDOR_CYRIX 1
++#define X86_VENDOR_AMD 2
++#define X86_VENDOR_UMC 3
++#define X86_VENDOR_NEXGEN 4
++#define X86_VENDOR_CENTAUR 5
++#define X86_VENDOR_RISE 6
++#define X86_VENDOR_TRANSMETA 7
++#define X86_VENDOR_NSC 8
++#define X86_VENDOR_NUM 9
++#define X86_VENDOR_UNKNOWN 0xff
 +
-+void dma_free_coherent(struct device *dev, size_t size,
-+			 void *vaddr, dma_addr_t dma_handle);
++/*
++ * capabilities of CPUs
++ */
 +
-+extern dma_addr_t
-+dma_map_single(struct device *dev, void *ptr, size_t size,
-+	       enum dma_data_direction direction);
++extern struct cpuinfo_x86 boot_cpu_data;
++extern struct cpuinfo_x86 new_cpu_data;
++#ifndef CONFIG_X86_NO_TSS
++extern struct tss_struct doublefault_tss;
++DECLARE_PER_CPU(struct tss_struct, init_tss);
++#endif
 +
-+extern void
-+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-+		 enum dma_data_direction direction);
++#ifdef CONFIG_SMP
++extern struct cpuinfo_x86 cpu_data[];
++#define current_cpu_data cpu_data[smp_processor_id()]
++#else
++#define cpu_data (&boot_cpu_data)
++#define current_cpu_data boot_cpu_data
++#endif
 +
-+extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
-+		      int nents, enum dma_data_direction direction);
-+extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
-+			 int nents, enum dma_data_direction direction);
++extern	int cpu_llc_id[NR_CPUS];
++extern char ignore_fpu_irq;
 +
-+#ifdef CONFIG_HIGHMEM
-+extern dma_addr_t
-+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-+	     size_t size, enum dma_data_direction direction);
++extern void identify_cpu(struct cpuinfo_x86 *);
++extern void print_cpu_info(struct cpuinfo_x86 *);
++extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
++extern unsigned short num_cache_leaves;
 +
-+extern void
-+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-+	       enum dma_data_direction direction);
++#ifdef CONFIG_X86_HT
++extern void detect_ht(struct cpuinfo_x86 *c);
 +#else
-+#define dma_map_page(dev, page, offset, size, dir) \
-+	dma_map_single(dev, page_address(page) + (offset), (size), (dir))
-+#define dma_unmap_page dma_unmap_single
++static inline void detect_ht(struct cpuinfo_x86 *c) {}
 +#endif
 +
-+extern void
-+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-+			enum dma_data_direction direction);
-+
-+extern void
-+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-+                           enum dma_data_direction direction);
++/*
++ * EFLAGS bits
++ */
++#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
++#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
++#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
++#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
++#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
++#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
++#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
++#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
++#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
++#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
++#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
++#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
++#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
++#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
++#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
++#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
++#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
 +
-+static inline void
-+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-+			      unsigned long offset, size_t size,
-+			      enum dma_data_direction direction)
++/*
++ * Generic CPUID function
++ * Clear %ecx, since some CPUs (Cyrix MII) do not set or clear %ecx,
++ * resulting in stale register contents being returned.
++ */
++static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
 +{
-+	dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
++	__asm__(XEN_CPUID
++		: "=a" (*eax),
++		  "=b" (*ebx),
++		  "=c" (*ecx),
++		  "=d" (*edx)
++		: "0" (op), "c"(0));
 +}
 +
-+static inline void
-+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-+				 unsigned long offset, size_t size,
-+				 enum dma_data_direction direction)
++/* Some CPUID calls want 'count' to be placed in ecx */
++static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
++	       	int *edx)
 +{
-+	dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
++	__asm__(XEN_CPUID
++		: "=a" (*eax),
++		  "=b" (*ebx),
++		  "=c" (*ecx),
++		  "=d" (*edx)
++		: "0" (op), "c" (count));
 +}
 +
-+static inline void
-+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-+		    enum dma_data_direction direction)
++/*
++ * CPUID functions returning a single datum
++ */
++static inline unsigned int cpuid_eax(unsigned int op)
 +{
-+	if (swiotlb)
-+		swiotlb_sync_sg_for_cpu(dev,sg,nelems,direction);
-+	flush_write_buffers();
-+}
++	unsigned int eax;
 +
-+static inline void
-+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-+		    enum dma_data_direction direction)
-+{
-+	if (swiotlb)
-+		swiotlb_sync_sg_for_device(dev,sg,nelems,direction);
-+	flush_write_buffers();
++	__asm__(XEN_CPUID
++		: "=a" (eax)
++		: "0" (op)
++		: "bx", "cx", "dx");
++	return eax;
 +}
-+
-+extern int
-+dma_mapping_error(dma_addr_t dma_addr);
-+
-+extern int
-+dma_supported(struct device *dev, u64 mask);
-+
-+static inline int
-+dma_set_mask(struct device *dev, u64 mask)
++static inline unsigned int cpuid_ebx(unsigned int op)
 +{
-+	if(!dev->dma_mask || !dma_supported(dev, mask))
-+		return -EIO;
-+
-+	*dev->dma_mask = mask;
++	unsigned int eax, ebx;
 +
-+	return 0;
++	__asm__(XEN_CPUID
++		: "=a" (eax), "=b" (ebx)
++		: "0" (op)
++		: "cx", "dx" );
++	return ebx;
 +}
-+
-+static inline int
-+dma_get_cache_alignment(void)
++static inline unsigned int cpuid_ecx(unsigned int op)
 +{
-+	/* no easy way to get cache size on all x86, so return the
-+	 * maximum possible, to be safe */
-+	return (1 << INTERNODE_CACHE_SHIFT);
-+}
-+
-+#define dma_is_consistent(d)	(1)
++	unsigned int eax, ecx;
 +
-+static inline void
-+dma_cache_sync(void *vaddr, size_t size,
-+	       enum dma_data_direction direction)
-+{
-+	flush_write_buffers();
++	__asm__(XEN_CPUID
++		: "=a" (eax), "=c" (ecx)
++		: "0" (op)
++		: "bx", "dx" );
++	return ecx;
 +}
++static inline unsigned int cpuid_edx(unsigned int op)
++{
++	unsigned int eax, edx;
 +
-+#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-+extern int
-+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
-+			    dma_addr_t device_addr, size_t size, int flags);
-+
-+extern void
-+dma_release_declared_memory(struct device *dev);
-+
-+extern void *
-+dma_mark_declared_memory_occupied(struct device *dev,
-+				  dma_addr_t device_addr, size_t size);
-+
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/fixmap.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/fixmap.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/fixmap.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/fixmap.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,155 @@
-+/*
-+ * fixmap.h: compile-time virtual memory allocation
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ *
-+ * Copyright (C) 1998 Ingo Molnar
-+ *
-+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
-+ */
-+
-+#ifndef _ASM_FIXMAP_H
-+#define _ASM_FIXMAP_H
-+
-+
-+/* used by vmalloc.c, vsyscall.lds.S.
-+ *
-+ * Leave one empty page between vmalloc'ed areas and
-+ * the start of the fixmap.
-+ */
-+extern unsigned long __FIXADDR_TOP;
++	__asm__(XEN_CPUID
++		: "=a" (eax), "=d" (edx)
++		: "0" (op)
++		: "bx", "cx");
++	return edx;
++}
 +
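
/*
 * A standalone analogue of the helpers above, using the native cpuid
 * instruction in place of the XEN_CPUID paravirtual sequence; leaf 0
 * returns the vendor string in ebx/edx/ecx. Runnable on x86 userspace.
 */
#include <stdio.h>
#include <string.h>

static void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
		  unsigned int *ecx, unsigned int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
		: "0" (op), "c" (0));
}

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[13];

	cpuid(0, &eax, &ebx, &ecx, &edx);
	memcpy(vendor, &ebx, 4);	/* e.g. "Genu" */
	memcpy(vendor + 4, &edx, 4);	/* e.g. "ineI" */
	memcpy(vendor + 8, &ecx, 4);	/* e.g. "ntel" */
	vendor[12] = '\0';
	printf("max leaf %u, vendor %s\n", eax, vendor);
	return 0;
}
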
-+#ifndef __ASSEMBLY__
-+#include <linux/kernel.h>
-+#include <asm/acpi.h>
-+#include <asm/apicdef.h>
-+#include <asm/page.h>
-+#ifdef CONFIG_HIGHMEM
-+#include <linux/threads.h>
-+#include <asm/kmap_types.h>
-+#endif
++#define load_cr3(pgdir) write_cr3(__pa(pgdir))
 +
 +/*
-+ * Here we define all the compile-time 'special' virtual
-+ * addresses. The point is to have a constant address at
-+ * compile time, but to set the physical address only
-+ * in the boot process. We allocate these special addresses
-+ * from the end of virtual memory (0xfffff000) backwards.
-+ * Also this lets us do fail-safe vmalloc(), we
-+ * can guarantee that these special addresses and
-+ * vmalloc()-ed addresses never overlap.
-+ *
-+ * these 'compile-time allocated' memory buffers are
-+ * fixed-size 4k pages. (or larger if used with an increment
-+ * higher than 1) use fixmap_set(idx,phys) to associate
-+ * physical memory with fixmap indices.
-+ *
-+ * TLB entries of such buffers will not be flushed across
-+ * task switches.
++ * Intel CPU features in CR4
 + */
-+enum fixed_addresses {
-+	FIX_HOLE,
-+	FIX_VDSO,
-+#ifdef CONFIG_X86_LOCAL_APIC
-+	FIX_APIC_BASE,	/* local (CPU) APIC) -- required for SMP or not */
-+#endif
-+#ifdef CONFIG_X86_IO_APIC
-+	FIX_IO_APIC_BASE_0,
-+	FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
-+#endif
-+#ifdef CONFIG_X86_VISWS_APIC
-+	FIX_CO_CPU,	/* Cobalt timer */
-+	FIX_CO_APIC,	/* Cobalt APIC Redirection Table */ 
-+	FIX_LI_PCIA,	/* Lithium PCI Bridge A */
-+	FIX_LI_PCIB,	/* Lithium PCI Bridge B */
-+#endif
-+#ifdef CONFIG_X86_F00F_BUG
-+	FIX_F00F_IDT,	/* Virtual mapping for IDT */
-+#endif
-+#ifdef CONFIG_X86_CYCLONE_TIMER
-+	FIX_CYCLONE_TIMER, /*cyclone timer register*/
-+#endif 
-+#ifdef CONFIG_HIGHMEM
-+	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
-+	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
-+#endif
-+#ifdef CONFIG_ACPI
-+	FIX_ACPI_BEGIN,
-+	FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
-+#endif
-+#ifdef CONFIG_PCI_MMCONFIG
-+	FIX_PCIE_MCFG,
-+#endif
-+	FIX_SHARED_INFO,
-+#define NR_FIX_ISAMAPS	256
-+	FIX_ISAMAP_END,
-+	FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
-+	__end_of_permanent_fixed_addresses,
-+	/* temporary boot-time mappings, used before ioremap() is functional */
-+#define NR_FIX_BTMAPS	16
-+	FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
-+	FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
-+	FIX_WP_TEST,
-+	__end_of_fixed_addresses
-+};
-+
-+extern void set_fixaddr_top(unsigned long top);
-+
-+extern void __set_fixmap(enum fixed_addresses idx,
-+					maddr_t phys, pgprot_t flags);
++#define X86_CR4_VME		0x0001	/* enable vm86 extensions */
++#define X86_CR4_PVI		0x0002	/* virtual interrupts flag enable */
++#define X86_CR4_TSD		0x0004	/* disable time stamp at ipl 3 */
++#define X86_CR4_DE		0x0008	/* enable debugging extensions */
++#define X86_CR4_PSE		0x0010	/* enable page size extensions */
++#define X86_CR4_PAE		0x0020	/* enable physical address extensions */
++#define X86_CR4_MCE		0x0040	/* Machine check enable */
++#define X86_CR4_PGE		0x0080	/* enable global pages */
++#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
++#define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
++#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */
 +
-+#define set_fixmap(idx, phys) \
-+		__set_fixmap(idx, phys, PAGE_KERNEL)
 +/*
-+ * Some hardware wants to get fixmapped without caching.
++ * Save the cr4 feature set we're using (i.e.
++ * Pentium 4MB enable and PPro Global page
++ * enable), so that any CPUs that boot up
++ * after us can get the correct flags.
 + */
-+#define set_fixmap_nocache(idx, phys) \
-+		__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
-+
-+#define clear_fixmap(idx) \
-+		__set_fixmap(idx, 0, __pgprot(0))
-+
-+#define FIXADDR_TOP	((unsigned long)__FIXADDR_TOP)
-+
-+#define __FIXADDR_SIZE	(__end_of_permanent_fixed_addresses << PAGE_SHIFT)
-+#define __FIXADDR_BOOT_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
-+#define FIXADDR_START		(FIXADDR_TOP - __FIXADDR_SIZE)
-+#define FIXADDR_BOOT_START	(FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
-+
-+#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
-+#define __virt_to_fix(x)	((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
-+
-+extern void __this_fixmap_does_not_exist(void);
++extern unsigned long mmu_cr4_features;
 +
-+/*
-+ * 'index to address' translation. If anyone tries to use the idx
-+ * directly without translation, we catch the bug with a NULL-dereference
-+ * kernel oops. Illegal ranges of incoming indices are caught too.
-+ */
-+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
++static inline void set_in_cr4 (unsigned long mask)
 +{
-+	/*
-+	 * this branch gets completely eliminated after inlining,
-+	 * except when someone tries to use fixaddr indices in an
-+	 * illegal way. (such as mixing up address types or using
-+	 * out-of-range indices).
-+	 *
-+	 * If it doesn't get removed, the linker will complain
-+	 * loudly with a reasonably clear error message..
-+	 */
-+	if (idx >= __end_of_fixed_addresses)
-+		__this_fixmap_does_not_exist();
-+
-+        return __fix_to_virt(idx);
++	unsigned cr4;
++	mmu_cr4_features |= mask;
++	cr4 = read_cr4();
++	cr4 |= mask;
++	write_cr4(cr4);
 +}
 +
-+static inline unsigned long virt_to_fix(const unsigned long vaddr)
++static inline void clear_in_cr4 (unsigned long mask)
 +{
-+	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
-+	return __virt_to_fix(vaddr);
++	unsigned cr4;
++	mmu_cr4_features &= ~mask;
++	cr4 = read_cr4();
++	cr4 &= ~mask;
++	write_cr4(cr4);
 +}
 +
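
/*
 * Typical use of the pair above during CPU bring-up (kernel-context
 * sketch, mirroring the FXSR enable in the cpu initialisation code):
 */
if (cpu_has_fxsr)
	set_in_cr4(X86_CR4_OSFXSR);	/* also recorded in mmu_cr4_features */
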
-+#endif /* !__ASSEMBLY__ */
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/floppy.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/floppy.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/floppy.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/floppy.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,147 @@
 +/*
-+ * Architecture specific parts of the Floppy driver
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ *
-+ * Copyright (C) 1995
-+ *
-+ * Modifications for Xen are Copyright (c) 2004, Keir Fraser.
++ *      NSC/Cyrix CPU configuration register indexes
 + */
-+#ifndef __ASM_XEN_I386_FLOPPY_H
-+#define __ASM_XEN_I386_FLOPPY_H
 +
-+#include <linux/vmalloc.h>
++#define CX86_PCR0 0x20
++#define CX86_GCR  0xb8
++#define CX86_CCR0 0xc0
++#define CX86_CCR1 0xc1
++#define CX86_CCR2 0xc2
++#define CX86_CCR3 0xc3
++#define CX86_CCR4 0xe8
++#define CX86_CCR5 0xe9
++#define CX86_CCR6 0xea
++#define CX86_CCR7 0xeb
++#define CX86_PCR1 0xf0
++#define CX86_DIR0 0xfe
++#define CX86_DIR1 0xff
++#define CX86_ARR_BASE 0xc4
++#define CX86_RCR_BASE 0xdc
 +
-+/* XEN: Hit DMA paths on the head. This trick from asm-m68k/floppy.h. */
-+#include <asm/dma.h>
-+#undef MAX_DMA_ADDRESS
-+#define MAX_DMA_ADDRESS 0
-+#define CROSS_64KB(a,s) (0)
-+
-+#define fd_inb(port)			inb_p(port)
-+#define fd_outb(value,port)		outb_p(value,port)
-+
-+#define fd_request_dma()        (0)
-+#define fd_free_dma()           ((void)0)
-+#define fd_enable_irq()         enable_irq(FLOPPY_IRQ)
-+#define fd_disable_irq()        disable_irq(FLOPPY_IRQ)
-+#define fd_free_irq()		free_irq(FLOPPY_IRQ, NULL)
-+#define fd_get_dma_residue()    (virtual_dma_count + virtual_dma_residue)
-+#define fd_dma_setup(addr, size, mode, io) vdma_dma_setup(addr, size, mode, io)
-+/*
-+ * Do not use vmalloc/vfree: floppy_release_irq_and_dma() gets called from
-+ * softirq context via motor_off_callback. A generic bug we happen to trigger.
-+ */
-+#define fd_dma_mem_alloc(size)	__get_free_pages(GFP_KERNEL, get_order(size))
-+#define fd_dma_mem_free(addr, size) free_pages(addr, get_order(size))
-+
-+static int virtual_dma_count;
-+static int virtual_dma_residue;
-+static char *virtual_dma_addr;
-+static int virtual_dma_mode;
-+static int doing_pdma;
-+
-+static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
-+{
-+	register unsigned char st;
-+	register int lcount;
-+	register char *lptr;
-+
-+	if (!doing_pdma)
-+		return floppy_interrupt(irq, dev_id, regs);
-+
-+	st = 1;
-+	for(lcount=virtual_dma_count, lptr=virtual_dma_addr; 
-+	    lcount; lcount--, lptr++) {
-+		st=inb(virtual_dma_port+4) & 0xa0 ;
-+		if(st != 0xa0) 
-+			break;
-+		if(virtual_dma_mode)
-+			outb_p(*lptr, virtual_dma_port+5);
-+		else
-+			*lptr = inb_p(virtual_dma_port+5);
-+	}
-+	virtual_dma_count = lcount;
-+	virtual_dma_addr = lptr;
-+	st = inb(virtual_dma_port+4);
++/*
++ *      NSC/Cyrix CPU indexed register access macros
++ */
 +
-+	if(st == 0x20)
-+		return IRQ_HANDLED;
-+	if(!(st & 0x20)) {
-+		virtual_dma_residue += virtual_dma_count;
-+		virtual_dma_count=0;
-+		doing_pdma = 0;
-+		floppy_interrupt(irq, dev_id, regs);
-+		return IRQ_HANDLED;
-+	}
-+	return IRQ_HANDLED;
-+}
++#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
++
++#define setCx86(reg, data) do { \
++	outb((reg), 0x22); \
++	outb((data), 0x23); \
++} while (0)
 +
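
/*
 * The indexed-access pair above is used read-modify-write style; a
 * kernel-context sketch (register and bit value chosen for illustration
 * only):
 */
unsigned char ccr2 = getCx86(CX86_CCR2);
setCx86(CX86_CCR2, ccr2 | 0x88);
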
-+static void fd_disable_dma(void)
++/* Stop speculative execution */
++static inline void sync_core(void)
 +{
-+	doing_pdma = 0;
-+	virtual_dma_residue += virtual_dma_count;
-+	virtual_dma_count=0;
++	int tmp;
++	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
 +}
 +
-+static int fd_request_irq(void)
++static inline void __monitor(const void *eax, unsigned long ecx,
++		unsigned long edx)
 +{
-+	return request_irq(FLOPPY_IRQ, floppy_hardint,
-+			   IRQF_DISABLED, "floppy", NULL);
++	/* "monitor %eax,%ecx,%edx;" */
++	asm volatile(
++		".byte 0x0f,0x01,0xc8;"
++		: :"a" (eax), "c" (ecx), "d"(edx));
 +}
 +
-+static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
++static inline void __mwait(unsigned long eax, unsigned long ecx)
 +{
-+	doing_pdma = 1;
-+	virtual_dma_port = io;
-+	virtual_dma_mode = (mode  == DMA_MODE_WRITE);
-+	virtual_dma_addr = addr;
-+	virtual_dma_count = size;
-+	virtual_dma_residue = 0;
-+	return 0;
++	/* "mwait %eax,%ecx;" */
++	asm volatile(
++		".byte 0x0f,0x01,0xc9;"
++		: :"a" (eax), "c" (ecx));
 +}
 +
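
/*
 * Canonical pairing of the two helpers above, as in the mwait idle
 * loop: arm the monitor on the thread flags word, re-check, then mwait
 * until that cache line is written. Kernel-context sketch.
 */
__monitor((void *)&current_thread_info()->flags, 0, 0);
smp_mb();
if (!need_resched())
	__mwait(0, 0);
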
-+/* XEN: This trick to force 'virtual DMA' is from include/asm-m68k/floppy.h. */
-+#define FDC1 xen_floppy_init()
-+static int FDC2 = -1;
++/* From system description table in BIOS.  Mostly for MCA use, but
++   others may find it useful. */
++extern unsigned int machine_id;
++extern unsigned int machine_submodel_id;
++extern unsigned int BIOS_revision;
++extern unsigned int mca_pentium_flag;
 +
-+static int xen_floppy_init(void)
-+{
-+	use_virtual_dma = 1;
-+	can_use_virtual_dma = 1;
-+	return 0x3f0;
-+}
++/* Boot loader type from the setup header */
++extern int bootloader_type;
 +
 +/*
-+ * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
-+ * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
-+ * coincides with another rtc CMOS user.		Paul G.
++ * User space process size: 3GB (default).
 + */
-+#define FLOPPY0_TYPE	({				\
-+	unsigned long flags;				\
-+	unsigned char val;				\
-+	spin_lock_irqsave(&rtc_lock, flags);		\
-+	val = (CMOS_READ(0x10) >> 4) & 15;		\
-+	spin_unlock_irqrestore(&rtc_lock, flags);	\
-+	val;						\
-+})
-+
-+#define FLOPPY1_TYPE	({				\
-+	unsigned long flags;				\
-+	unsigned char val;				\
-+	spin_lock_irqsave(&rtc_lock, flags);		\
-+	val = CMOS_READ(0x10) & 15;			\
-+	spin_unlock_irqrestore(&rtc_lock, flags);	\
-+	val;						\
-+})
-+
-+#define N_FDC 2
-+#define N_DRIVE 8
++#define TASK_SIZE	(PAGE_OFFSET)
 +
-+#define FLOPPY_MOTOR_MASK 0xf0
++/* This decides where the kernel will search for a free chunk of vm
++ * space during mmap's.
++ */
++#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
 +
-+#define EXTRA_FLOPPY_PARAMS
++#define HAVE_ARCH_PICK_MMAP_LAYOUT
 +
-+#endif /* __ASM_XEN_I386_FLOPPY_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/highmem.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/highmem.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/highmem.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/highmem.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,80 @@
 +/*
-+ * highmem.h: virtual kernel memory mappings for high memory
-+ *
-+ * Used in CONFIG_HIGHMEM systems for memory pages which
-+ * are not addressable by direct kernel virtual addresses.
-+ *
-+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
-+ *		      Gerhard.Wichert at pdb.siemens.de
-+ *
-+ *
-+ * Redesigned the x86 32-bit VM architecture to deal with 
-+ * up to 16 Terabyte physical memory. With current x86 CPUs
-+ * we now support up to 64 Gigabytes physical RAM.
-+ *
-+ * Copyright (C) 1999 Ingo Molnar <mingo at redhat.com>
++ * Size of io_bitmap.
 + */
++#define IO_BITMAP_BITS  65536
++#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
++#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
++#ifndef CONFIG_X86_NO_TSS
++#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
++#endif
++#define INVALID_IO_BITMAP_OFFSET 0x8000
++#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
 +
-+#ifndef _ASM_HIGHMEM_H
-+#define _ASM_HIGHMEM_H
-+
-+#ifdef __KERNEL__
++struct i387_fsave_struct {
++	long	cwd;
++	long	swd;
++	long	twd;
++	long	fip;
++	long	fcs;
++	long	foo;
++	long	fos;
++	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
++	long	status;		/* software status information */
++};
 +
-+#include <linux/interrupt.h>
-+#include <linux/threads.h>
-+#include <asm/kmap_types.h>
-+#include <asm/tlbflush.h>
++struct i387_fxsave_struct {
++	unsigned short	cwd;
++	unsigned short	swd;
++	unsigned short	twd;
++	unsigned short	fop;
++	long	fip;
++	long	fcs;
++	long	foo;
++	long	fos;
++	long	mxcsr;
++	long	mxcsr_mask;
++	long	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
++	long	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
++	long	padding[56];
++} __attribute__ ((aligned (16)));
 +
-+/* declarations for highmem.c */
-+extern unsigned long highstart_pfn, highend_pfn;
++struct i387_soft_struct {
++	long	cwd;
++	long	swd;
++	long	twd;
++	long	fip;
++	long	fcs;
++	long	foo;
++	long	fos;
++	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
++	unsigned char	ftop, changed, lookahead, no_update, rm, alimit;
++	struct info	*info;
++	unsigned long	entry_eip;
++};
 +
-+extern pte_t *kmap_pte;
-+extern pgprot_t kmap_prot;
-+extern pte_t *pkmap_page_table;
++union i387_union {
++	struct i387_fsave_struct	fsave;
++	struct i387_fxsave_struct	fxsave;
++	struct i387_soft_struct soft;
++};
 +
-+/*
-+ * Right now we initialize only a single pte table. It can be extended
-+ * easily, subsequent pte tables have to be allocated in one physical
-+ * chunk of RAM.
-+ */
-+#ifdef CONFIG_X86_PAE
-+#define LAST_PKMAP 512
-+#else
-+#define LAST_PKMAP 1024
-+#endif
-+/*
-+ * Ordering is:
-+ *
-+ * FIXADDR_TOP
-+ * 			fixed_addresses
-+ * FIXADDR_START
-+ * 			temp fixed addresses
-+ * FIXADDR_BOOT_START
-+ * 			Persistent kmap area
-+ * PKMAP_BASE
-+ * VMALLOC_END
-+ * 			Vmalloc area
-+ * VMALLOC_START
-+ * high_memory
-+ */
-+#define PKMAP_BASE ( (FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK )
-+#define LAST_PKMAP_MASK (LAST_PKMAP-1)
-+#define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
-+#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
++typedef struct {
++	unsigned long seg;
++} mm_segment_t;
 +
-+extern void * FASTCALL(kmap_high(struct page *page));
-+extern void FASTCALL(kunmap_high(struct page *page));
++struct thread_struct;
 +
-+void *kmap(struct page *page);
-+void kunmap(struct page *page);
-+void *kmap_atomic(struct page *page, enum km_type type);
-+void *kmap_atomic_pte(struct page *page, enum km_type type);
-+void kunmap_atomic(void *kvaddr, enum km_type type);
-+void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-+struct page *kmap_atomic_to_page(void *ptr);
++#ifndef CONFIG_X86_NO_TSS
++struct tss_struct {
++	unsigned short	back_link,__blh;
++	unsigned long	esp0;
++	unsigned short	ss0,__ss0h;
++	unsigned long	esp1;
++	unsigned short	ss1,__ss1h;	/* ss1 is used to cache MSR_IA32_SYSENTER_CS */
++	unsigned long	esp2;
++	unsigned short	ss2,__ss2h;
++	unsigned long	__cr3;
++	unsigned long	eip;
++	unsigned long	eflags;
++	unsigned long	eax,ecx,edx,ebx;
++	unsigned long	esp;
++	unsigned long	ebp;
++	unsigned long	esi;
++	unsigned long	edi;
++	unsigned short	es, __esh;
++	unsigned short	cs, __csh;
++	unsigned short	ss, __ssh;
++	unsigned short	ds, __dsh;
++	unsigned short	fs, __fsh;
++	unsigned short	gs, __gsh;
++	unsigned short	ldt, __ldth;
++	unsigned short	trace, io_bitmap_base;
++	/*
++	 * The extra 1 is there because the CPU will access an
++	 * additional byte beyond the end of the IO permission
++	 * bitmap. The extra byte must be all 1 bits, and must
++	 * be within the limit.
++	 */
++	unsigned long	io_bitmap[IO_BITMAP_LONGS + 1];
++	/*
++	 * Cache the current maximum and the last task that used the bitmap:
++	 */
++	unsigned long io_bitmap_max;
++	struct thread_struct *io_bitmap_owner;
++	/*
++	 * pads the TSS to be cacheline-aligned (size is 0x100)
++	 */
++	unsigned long __cacheline_filler[35];
++	/*
++	 * .. and then another 0x100 bytes for emergency kernel stack
++	 */
++	unsigned long stack[64];
++} __attribute__((packed));
++#endif
 +
-+#define flush_cache_kmaps()	do { } while (0)
++#define ARCH_MIN_TASKALIGN	16
 +
-+#endif /* __KERNEL__ */
++struct thread_struct {
++/* cached TLS descriptors. */
++	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
++	unsigned long	esp0;
++	unsigned long	sysenter_cs;
++	unsigned long	eip;
++	unsigned long	esp;
++	unsigned long	fs;
++	unsigned long	gs;
++/* Hardware debugging registers */
++	unsigned long	debugreg[8];  /* %%db0-7 debug registers */
++/* fault info */
++	unsigned long	cr2, trap_no, error_code;
++/* floating point info */
++	union i387_union	i387;
++/* virtual 86 mode info */
++	struct vm86_struct __user * vm86_info;
++	unsigned long		screen_bitmap;
++	unsigned long		v86flags, v86mask, saved_esp0;
++	unsigned int		saved_fs, saved_gs;
++/* IO permissions */
++	unsigned long	*io_bitmap_ptr;
++ 	unsigned long	iopl;
++/* max allowed port in the bitmap, in bytes: */
++	unsigned long	io_bitmap_max;
++};
 +
-+#endif /* _ASM_HIGHMEM_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/hw_irq.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/hw_irq.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/hw_irq.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/hw_irq.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,72 @@
-+#ifndef _ASM_HW_IRQ_H
-+#define _ASM_HW_IRQ_H
++#define INIT_THREAD  {							\
++	.vm86_info = NULL,						\
++	.sysenter_cs = __KERNEL_CS,					\
++	.io_bitmap_ptr = NULL,						\
++}
 +
++#ifndef CONFIG_X86_NO_TSS
 +/*
-+ *	linux/include/asm/hw_irq.h
-+ *
-+ *	(C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
-+ *
-+ *	moved some of the old arch/i386/kernel/irq.h to here. VY
-+ *
-+ *	IRQ/IPI changes taken from work by Thomas Radke
-+ *	<tomsoft at informatik.tu-chemnitz.de>
++ * Note that the .io_bitmap member must be extra-big. This is because
++ * the CPU will access an additional byte beyond the end of the IO
++ * permission bitmap. The extra byte must be all 1 bits, and must
++ * be within the limit.
 + */
++#define INIT_TSS  {							\
++	.esp0		= sizeof(init_stack) + (long)&init_stack,	\
++	.ss0		= __KERNEL_DS,					\
++	.ss1		= __KERNEL_CS,					\
++	.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,			\
++	.io_bitmap	= { [ 0 ... IO_BITMAP_LONGS] = ~0 },		\
++}
 +
-+#include <linux/profile.h>
-+#include <asm/atomic.h>
-+#include <asm/irq.h>
-+#include <asm/sections.h>
-+
-+struct hw_interrupt_type;
++static inline void __load_esp0(struct tss_struct *tss, struct thread_struct *thread)
++{
++	tss->esp0 = thread->esp0;
++	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
++	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
++		tss->ss1 = thread->sysenter_cs;
++		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
++	}
++}
++#define load_esp0(tss, thread) \
++	__load_esp0(tss, thread)
++#else
++#define load_esp0(tss, thread) do { \
++	if (HYPERVISOR_stack_switch(__KERNEL_DS, (thread)->esp0)) \
++		BUG(); \
++} while (0)
++#endif
 +
-+#define NMI_VECTOR		0x02
++#define start_thread(regs, new_eip, new_esp) do {		\
++	__asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0));	\
++	set_fs(USER_DS);					\
++	regs->xds = __USER_DS;					\
++	regs->xes = __USER_DS;					\
++	regs->xss = __USER_DS;					\
++	regs->xcs = __USER_CS;					\
++	regs->eip = new_eip;					\
++	regs->esp = new_esp;					\
++} while (0)
 +
 +/*
-+ * Various low-level irq details needed by irq.c, process.c,
-+ * time.c, io_apic.c and smp.c
-+ *
-+ * Interrupt entry/exit code at both C and assembly level
++ * These special macros can be used to get or set a debugging register
 + */
++#define get_debugreg(var, register)				\
++		(var) = HYPERVISOR_get_debugreg((register))
++#define set_debugreg(value, register)			\
++		WARN_ON(HYPERVISOR_set_debugreg((register), (value)))
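A minimal usage sketch for the two accessors above: under Xen the kernel cannot touch %db0-%db7 directly, so both directions go through hypercalls. The function below and its use of breakpoint 0 are hypothetical:

/* Hypothetical sketch: read %db7 and set its L0 (breakpoint-0 enable)
 * bit via the paravirtualized accessors defined above. */
static void enable_breakpoint0(void)
{
	unsigned long dr7;

	get_debugreg(dr7, 7);		/* HYPERVISOR_get_debugreg(7) */
	set_debugreg(dr7 | 1, 7);	/* WARN_ON() fires if the hypercall fails */
}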
 +
-+extern u8 irq_vector[NR_IRQ_VECTORS];
-+#define IO_APIC_VECTOR(irq)	(irq_vector[irq])
-+#define AUTO_ASSIGN		-1
-+
-+extern void (*interrupt[NR_IRQS])(void);
-+
-+#ifdef CONFIG_SMP
-+fastcall void reschedule_interrupt(void);
-+fastcall void invalidate_interrupt(void);
-+fastcall void call_function_interrupt(void);
-+#endif
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+fastcall void apic_timer_interrupt(void);
-+fastcall void error_interrupt(void);
-+fastcall void spurious_interrupt(void);
-+fastcall void thermal_interrupt(struct pt_regs *);
-+#define platform_legacy_irq(irq)	((irq) < 16)
-+#endif
++/*
++ * Set IOPL bits in EFLAGS from given mask
++ */
++static inline void set_iopl_mask(unsigned mask)
++{
++	struct physdev_set_iopl set_iopl;
 +
-+void disable_8259A_irq(unsigned int irq);
-+void enable_8259A_irq(unsigned int irq);
-+int i8259A_irq_pending(unsigned int irq);
-+void make_8259A_irq(unsigned int irq);
-+void init_8259A(int aeoi);
-+void FASTCALL(send_IPI_self(int vector));
-+void init_VISWS_APIC_irqs(void);
-+void setup_IO_APIC(void);
-+void disable_IO_APIC(void);
-+void print_IO_APIC(void);
-+int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
-+void send_IPI(int dest, int vector);
-+void setup_ioapic_dest(void);
++	/* Force the change at ring 0. */
++	set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
++	WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl));
++}
 +
-+extern unsigned long io_apic_irqs;
++/* Forward declaration, a strange C thing */
++struct task_struct;
++struct mm_struct;
 +
-+extern atomic_t irq_err_count;
-+extern atomic_t irq_mis_count;
++/* Free all resources held by a thread. */
++extern void release_thread(struct task_struct *);
 +
-+#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
++/* Prepare to copy thread state - unlazy all lazy status */
++extern void prepare_to_copy(struct task_struct *tsk);
 +
-+#endif /* _ASM_HW_IRQ_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/hypercall.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/hypercall.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/hypercall.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/hypercall.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,407 @@
-+/******************************************************************************
-+ * hypercall.h
-+ * 
-+ * Linux-specific hypervisor handling.
-+ * 
-+ * Copyright (c) 2002-2004, K A Fraser
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
++/*
++ * create a kernel thread without removing it from tasklists
 + */
++extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 +
-+#ifndef __HYPERCALL_H__
-+#define __HYPERCALL_H__
++extern unsigned long thread_saved_pc(struct task_struct *tsk);
++void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack);
 +
-+#include <linux/string.h> /* memcpy() */
++unsigned long get_wchan(struct task_struct *p);
 +
-+#ifndef __HYPERVISOR_H__
-+# error "please don't include this file directly"
-+#endif
++#define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
++#define KSTK_TOP(info)                                                 \
++({                                                                     \
++       unsigned long *__ptr = (unsigned long *)(info);                 \
++       (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \
++})
 +
-+#define __STR(x) #x
-+#define STR(x) __STR(x)
++/*
++ * The -8 below reserves 8 bytes on top of the ring0 stack.
++ * This is necessary to guarantee that the entire "struct pt_regs"
++ * is accessible even if the CPU hasn't stored the SS/ESP registers
++ * on the stack (an interrupt gate does not save these registers
++ * when switching to the same privilege ring).
++ * Therefore beware: accessing the xss/esp fields of
++ * "struct pt_regs" is possible, but they may contain
++ * completely wrong values.
++ */
++#define task_pt_regs(task)                                             \
++({                                                                     \
++       struct pt_regs *__regs__;                                       \
++       __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
++       __regs__ - 1;                                                   \
++})
 +
-+#ifdef CONFIG_XEN
-+#define HYPERCALL_STR(name)					\
-+	"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"
-+#else
-+#define HYPERCALL_STR(name)					\
-+	"mov hypercall_stubs,%%eax; "				\
-+	"add $("STR(__HYPERVISOR_##name)" * 32),%%eax; "	\
-+	"call *%%eax"
-+#endif
++#define KSTK_EIP(task) (task_pt_regs(task)->eip)
++#define KSTK_ESP(task) (task_pt_regs(task)->esp)
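To make the arithmetic above concrete, a small self-contained check, assuming THREAD_SIZE is 8192 and the 17-word struct pt_regs from ptrace.h later in this patch; the stack address is invented:

#include <assert.h>

int main(void)
{
	unsigned long stack = 0xc0000000UL;	/* hypothetical stack page */
	unsigned long top = stack + 8192;	/* KSTK_TOP with THREAD_SIZE == 8192 */
	unsigned long regs = top - 8 - 17 * 4;	/* task_pt_regs(): 8 reserved bytes,
						   then the 17-word pt_regs frame */

	assert(regs == stack + 0x1fb4);
	return 0;
}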
 +
-+#define _hypercall0(type, name)			\
-+({						\
-+	long __res;				\
-+	asm volatile (				\
-+		HYPERCALL_STR(name)		\
-+		: "=a" (__res)			\
-+		:				\
-+		: "memory" );			\
-+	(type)__res;				\
-+})
 +
-+#define _hypercall1(type, name, a1)				\
-+({								\
-+	long __res, __ign1;					\
-+	asm volatile (						\
-+		HYPERCALL_STR(name)				\
-+		: "=a" (__res), "=b" (__ign1)			\
-+		: "1" ((long)(a1))				\
-+		: "memory" );					\
-+	(type)__res;						\
-+})
++struct microcode_header {
++	unsigned int hdrver;
++	unsigned int rev;
++	unsigned int date;
++	unsigned int sig;
++	unsigned int cksum;
++	unsigned int ldrver;
++	unsigned int pf;
++	unsigned int datasize;
++	unsigned int totalsize;
++	unsigned int reserved[3];
++};
 +
-+#define _hypercall2(type, name, a1, a2)				\
-+({								\
-+	long __res, __ign1, __ign2;				\
-+	asm volatile (						\
-+		HYPERCALL_STR(name)				\
-+		: "=a" (__res), "=b" (__ign1), "=c" (__ign2)	\
-+		: "1" ((long)(a1)), "2" ((long)(a2))		\
-+		: "memory" );					\
-+	(type)__res;						\
-+})
++struct microcode {
++	struct microcode_header hdr;
++	unsigned int bits[0];
++};
 +
-+#define _hypercall3(type, name, a1, a2, a3)			\
-+({								\
-+	long __res, __ign1, __ign2, __ign3;			\
-+	asm volatile (						\
-+		HYPERCALL_STR(name)				\
-+		: "=a" (__res), "=b" (__ign1), "=c" (__ign2), 	\
-+		"=d" (__ign3)					\
-+		: "1" ((long)(a1)), "2" ((long)(a2)),		\
-+		"3" ((long)(a3))				\
-+		: "memory" );					\
-+	(type)__res;						\
-+})
++typedef struct microcode microcode_t;
++typedef struct microcode_header microcode_header_t;
 +
-+#define _hypercall4(type, name, a1, a2, a3, a4)			\
-+({								\
-+	long __res, __ign1, __ign2, __ign3, __ign4;		\
-+	asm volatile (						\
-+		HYPERCALL_STR(name)				\
-+		: "=a" (__res), "=b" (__ign1), "=c" (__ign2),	\
-+		"=d" (__ign3), "=S" (__ign4)			\
-+		: "1" ((long)(a1)), "2" ((long)(a2)),		\
-+		"3" ((long)(a3)), "4" ((long)(a4))		\
-+		: "memory" );					\
-+	(type)__res;						\
-+})
++/* microcode format is extended from prescott processors */
++struct extended_signature {
++	unsigned int sig;
++	unsigned int pf;
++	unsigned int cksum;
++};
 +
-+#define _hypercall5(type, name, a1, a2, a3, a4, a5)		\
-+({								\
-+	long __res, __ign1, __ign2, __ign3, __ign4, __ign5;	\
-+	asm volatile (						\
-+		HYPERCALL_STR(name)				\
-+		: "=a" (__res), "=b" (__ign1), "=c" (__ign2),	\
-+		"=d" (__ign3), "=S" (__ign4), "=D" (__ign5)	\
-+		: "1" ((long)(a1)), "2" ((long)(a2)),		\
-+		"3" ((long)(a3)), "4" ((long)(a4)),		\
-+		"5" ((long)(a5))				\
-+		: "memory" );					\
-+	(type)__res;						\
-+})
++struct extended_sigtable {
++	unsigned int count;
++	unsigned int cksum;
++	unsigned int reserved[3];
++	struct extended_signature sigs[0];
++};
 +
-+static inline int
-+HYPERVISOR_set_trap_table(
-+	trap_info_t *table)
++/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
++static inline void rep_nop(void)
 +{
-+	return _hypercall1(int, set_trap_table, table);
++	__asm__ __volatile__("rep;nop": : :"memory");
 +}
 +
-+static inline int
-+HYPERVISOR_mmu_update(
-+	mmu_update_t *req, int count, int *success_count, domid_t domid)
-+{
-+	return _hypercall4(int, mmu_update, req, count, success_count, domid);
-+}
++#define cpu_relax()	rep_nop()
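The intended use of cpu_relax() is in busy-wait loops; a minimal sketch (the flag is hypothetical):

/* Poll a shared flag; "rep;nop" (PAUSE) eases pressure on the
 * pipeline and on a hyperthreaded sibling while spinning. */
static void wait_for_flag(volatile int *flag)
{
	while (!*flag)
		cpu_relax();
}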
 +
-+static inline int
-+HYPERVISOR_mmuext_op(
-+	struct mmuext_op *op, int count, int *success_count, domid_t domid)
-+{
-+	return _hypercall4(int, mmuext_op, op, count, success_count, domid);
-+}
++/* generic versions from gas */
++#define GENERIC_NOP1	".byte 0x90\n"
++#define GENERIC_NOP2    	".byte 0x89,0xf6\n"
++#define GENERIC_NOP3        ".byte 0x8d,0x76,0x00\n"
++#define GENERIC_NOP4        ".byte 0x8d,0x74,0x26,0x00\n"
++#define GENERIC_NOP5        GENERIC_NOP1 GENERIC_NOP4
++#define GENERIC_NOP6	".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
++#define GENERIC_NOP7	".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
++#define GENERIC_NOP8	GENERIC_NOP1 GENERIC_NOP7
 +
-+static inline int
-+HYPERVISOR_set_gdt(
-+	unsigned long *frame_list, int entries)
-+{
-+	return _hypercall2(int, set_gdt, frame_list, entries);
-+}
++/* Opteron nops */
++#define K8_NOP1 GENERIC_NOP1
++#define K8_NOP2	".byte 0x66,0x90\n" 
++#define K8_NOP3	".byte 0x66,0x66,0x90\n" 
++#define K8_NOP4	".byte 0x66,0x66,0x66,0x90\n" 
++#define K8_NOP5	K8_NOP3 K8_NOP2 
++#define K8_NOP6	K8_NOP3 K8_NOP3
++#define K8_NOP7	K8_NOP4 K8_NOP3
++#define K8_NOP8	K8_NOP4 K8_NOP4
 +
-+static inline int
-+HYPERVISOR_stack_switch(
-+	unsigned long ss, unsigned long esp)
-+{
-+	return _hypercall2(int, stack_switch, ss, esp);
-+}
++/* K7 nops */
++/* uses eax dependencies (arbitrary choice) */
++#define K7_NOP1  GENERIC_NOP1
++#define K7_NOP2	".byte 0x8b,0xc0\n" 
++#define K7_NOP3	".byte 0x8d,0x04,0x20\n"
++#define K7_NOP4	".byte 0x8d,0x44,0x20,0x00\n"
++#define K7_NOP5	K7_NOP4 ASM_NOP1
++#define K7_NOP6	".byte 0x8d,0x80,0,0,0,0\n"
++#define K7_NOP7        ".byte 0x8D,0x04,0x05,0,0,0,0\n"
++#define K7_NOP8        K7_NOP7 ASM_NOP1
 +
-+static inline int
-+HYPERVISOR_set_callbacks(
-+	unsigned long event_selector, unsigned long event_address,
-+	unsigned long failsafe_selector, unsigned long failsafe_address)
-+{
-+	return _hypercall4(int, set_callbacks,
-+			   event_selector, event_address,
-+			   failsafe_selector, failsafe_address);
-+}
++#ifdef CONFIG_MK8
++#define ASM_NOP1 K8_NOP1
++#define ASM_NOP2 K8_NOP2
++#define ASM_NOP3 K8_NOP3
++#define ASM_NOP4 K8_NOP4
++#define ASM_NOP5 K8_NOP5
++#define ASM_NOP6 K8_NOP6
++#define ASM_NOP7 K8_NOP7
++#define ASM_NOP8 K8_NOP8
++#elif defined(CONFIG_MK7)
++#define ASM_NOP1 K7_NOP1
++#define ASM_NOP2 K7_NOP2
++#define ASM_NOP3 K7_NOP3
++#define ASM_NOP4 K7_NOP4
++#define ASM_NOP5 K7_NOP5
++#define ASM_NOP6 K7_NOP6
++#define ASM_NOP7 K7_NOP7
++#define ASM_NOP8 K7_NOP8
++#else
++#define ASM_NOP1 GENERIC_NOP1
++#define ASM_NOP2 GENERIC_NOP2
++#define ASM_NOP3 GENERIC_NOP3
++#define ASM_NOP4 GENERIC_NOP4
++#define ASM_NOP5 GENERIC_NOP5
++#define ASM_NOP6 GENERIC_NOP6
++#define ASM_NOP7 GENERIC_NOP7
++#define ASM_NOP8 GENERIC_NOP8
++#endif
 +
-+static inline int
-+HYPERVISOR_fpu_taskswitch(
-+	int set)
-+{
-+	return _hypercall1(int, fpu_taskswitch, set);
-+}
++#define ASM_NOP_MAX 8
 +
-+static inline int
-+HYPERVISOR_sched_op_compat(
-+	int cmd, unsigned long arg)
++/* Prefetch instructions for Pentium III and AMD Athlon */
++/* It's not worth caring about 3dnow! prefetches for the K6,
++   because they are microcoded there and very slow.
++   However, we currently don't do prefetches for pre-XP Athlons;
++   that should be fixed. */
++#define ARCH_HAS_PREFETCH
++static inline void prefetch(const void *x)
 +{
-+	return _hypercall2(int, sched_op_compat, cmd, arg);
++	alternative_input(ASM_NOP4,
++			  "prefetchnta (%1)",
++			  X86_FEATURE_XMM,
++			  "r" (x));
 +}
 +
-+static inline int
-+HYPERVISOR_sched_op(
-+	int cmd, void *arg)
-+{
-+	return _hypercall2(int, sched_op, cmd, arg);
-+}
++#define ARCH_HAS_PREFETCH
++#define ARCH_HAS_PREFETCHW
++#define ARCH_HAS_SPINLOCK_PREFETCH
 +
-+static inline long
-+HYPERVISOR_set_timer_op(
-+	u64 timeout)
++/* 3dnow! prefetch to get an exclusive cache line. Useful for 
++   spinlocks to avoid one state transition in the cache coherency protocol. */
++static inline void prefetchw(const void *x)
 +{
-+	unsigned long timeout_hi = (unsigned long)(timeout>>32);
-+	unsigned long timeout_lo = (unsigned long)timeout;
-+	return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
++	alternative_input(ASM_NOP4,
++			  "prefetchw (%1)",
++			  X86_FEATURE_3DNOW,
++			  "r" (x));
 +}
++#define spin_lock_prefetch(x)	prefetchw(x)
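A sketch of where spin_lock_prefetch() pays off, with hypothetical surrounding code: fetching the lock's cache line in exclusive state before the locked decrement saves one coherency transition when the lock is taken shortly afterwards.

static void toy_lock_path(spinlock_t *lock)
{
	spin_lock_prefetch(lock);	/* prefetchw, or a NOP without 3dnow! */
	/* ... unrelated setup work hides the prefetch latency ... */
	spin_lock(lock);
}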
 +
-+static inline int
-+HYPERVISOR_platform_op(
-+	struct xen_platform_op *platform_op)
-+{
-+	platform_op->interface_version = XENPF_INTERFACE_VERSION;
-+	return _hypercall1(int, platform_op, platform_op);
-+}
++extern void select_idle_routine(const struct cpuinfo_x86 *c);
 +
-+static inline int
-+HYPERVISOR_set_debugreg(
-+	int reg, unsigned long value)
-+{
-+	return _hypercall2(int, set_debugreg, reg, value);
-+}
++#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
 +
-+static inline unsigned long
-+HYPERVISOR_get_debugreg(
-+	int reg)
-+{
-+	return _hypercall1(unsigned long, get_debugreg, reg);
-+}
++extern unsigned long boot_option_idle_override;
++extern void enable_sep_cpu(void);
++extern int sysenter_setup(void);
 +
-+static inline int
-+HYPERVISOR_update_descriptor(
-+	u64 ma, u64 desc)
-+{
-+	return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
-+}
++#endif /* __ASM_I386_PROCESSOR_H */
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/ptrace.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/ptrace.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,90 @@
++#ifndef _I386_PTRACE_H
++#define _I386_PTRACE_H
 +
-+static inline int
-+HYPERVISOR_memory_op(
-+	unsigned int cmd, void *arg)
-+{
-+	return _hypercall2(int, memory_op, cmd, arg);
-+}
++#define EBX 0
++#define ECX 1
++#define EDX 2
++#define ESI 3
++#define EDI 4
++#define EBP 5
++#define EAX 6
++#define DS 7
++#define ES 8
++#define FS 9
++#define GS 10
++#define ORIG_EAX 11
++#define EIP 12
++#define CS  13
++#define EFL 14
++#define UESP 15
++#define SS   16
++#define FRAME_SIZE 17
 +
-+static inline int
-+HYPERVISOR_multicall(
-+	multicall_entry_t *call_list, int nr_calls)
-+{
-+	return _hypercall2(int, multicall, call_list, nr_calls);
-+}
++/* This struct defines the way the registers are stored on the
++   stack during a system call. */
 +
-+static inline int
-+HYPERVISOR_update_va_mapping(
-+	unsigned long va, pte_t new_val, unsigned long flags)
-+{
-+	unsigned long pte_hi = 0;
-+#ifdef CONFIG_X86_PAE
-+	pte_hi = new_val.pte_high;
-+#endif
-+	return _hypercall4(int, update_va_mapping, va,
-+			   new_val.pte_low, pte_hi, flags);
-+}
++struct pt_regs {
++	long ebx;
++	long ecx;
++	long edx;
++	long esi;
++	long edi;
++	long ebp;
++	long eax;
++	int  xds;
++	int  xes;
++	long orig_eax;
++	long eip;
++	int  xcs;
++	long eflags;
++	long esp;
++	int  xss;
++};
 +
-+static inline int
-+HYPERVISOR_event_channel_op(
-+	int cmd, void *arg)
-+{
-+	int rc = _hypercall2(int, event_channel_op, cmd, arg);
++/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
++#define PTRACE_GETREGS            12
++#define PTRACE_SETREGS            13
++#define PTRACE_GETFPREGS          14
++#define PTRACE_SETFPREGS          15
++#define PTRACE_GETFPXREGS         18
++#define PTRACE_SETFPXREGS         19
 +
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+	if (unlikely(rc == -ENOSYS)) {
-+		struct evtchn_op op;
-+		op.cmd = cmd;
-+		memcpy(&op.u, arg, sizeof(op.u));
-+		rc = _hypercall1(int, event_channel_op_compat, &op);
-+		memcpy(arg, &op.u, sizeof(op.u));
-+	}
-+#endif
++#define PTRACE_OLDSETOPTIONS         21
 +
-+	return rc;
-+}
++#define PTRACE_GET_THREAD_AREA    25
++#define PTRACE_SET_THREAD_AREA    26
 +
-+static inline int
-+HYPERVISOR_acm_op(
-+	int cmd, void *arg)
-+{
-+	return _hypercall2(int, acm_op, cmd, arg);
-+}
++#define PTRACE_SYSEMU		  31
++#define PTRACE_SYSEMU_SINGLESTEP  32
 +
-+static inline int
-+HYPERVISOR_xen_version(
-+	int cmd, void *arg)
-+{
-+	return _hypercall2(int, xen_version, cmd, arg);
-+}
++#ifdef __KERNEL__
 +
-+static inline int
-+HYPERVISOR_console_io(
-+	int cmd, int count, char *str)
++#include <asm/vm86.h>
++
++struct task_struct;
++extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
++
++/*
++ * user_mode_vm(regs) determines whether a register set came from user mode.
++ * This is true if V8086 mode was enabled OR if the register set was from
++ * protected mode with RPL-3 CS value.  This tricky test checks that with
++ * one comparison.  Many places in the kernel can bypass this full check
++ * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
++ */
++static inline int user_mode(struct pt_regs *regs)
 +{
-+	return _hypercall3(int, console_io, cmd, count, str);
++	return (regs->xcs & 2) != 0;
 +}
-+
-+static inline int
-+HYPERVISOR_physdev_op(
-+	int cmd, void *arg)
++static inline int user_mode_vm(struct pt_regs *regs)
 +{
-+	int rc = _hypercall2(int, physdev_op, cmd, arg);
++	return ((regs->xcs & 2) | (regs->eflags & VM_MASK)) != 0;
++}
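The mask of 2 (rather than the usual 3) is because the kernel runs in ring 1 under Xen, so kernel CS carries RPL 1 while user CS carries RPL 3; bit 1 alone separates them. A toy check using the selector values from segment.h in this patch:

#include <assert.h>

int main(void)
{
	unsigned int kernel_cs = 0x61;	/* GDT index 12, RPL 1 (GET_KERNEL_CS()) */
	unsigned int user_cs = 0x73;	/* GDT index 14, RPL 3 (__USER_CS) */

	assert((kernel_cs & 2) == 0);	/* treated as kernel mode */
	assert((user_cs & 2) != 0);	/* treated as user mode */
	return 0;
}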
++#define instruction_pointer(regs) ((regs)->eip)
++#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
++extern unsigned long profile_pc(struct pt_regs *regs);
++#else
++#define profile_pc(regs) instruction_pointer(regs)
++#endif
++#endif /* __KERNEL__ */
 +
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+	if (unlikely(rc == -ENOSYS)) {
-+		struct physdev_op op;
-+		op.cmd = cmd;
-+		memcpy(&op.u, arg, sizeof(op.u));
-+		rc = _hypercall1(int, physdev_op_compat, &op);
-+		memcpy(arg, &op.u, sizeof(op.u));
-+	}
 +#endif
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/scatterlist.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/scatterlist.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,22 @@
++#ifndef _I386_SCATTERLIST_H
++#define _I386_SCATTERLIST_H
 +
-+	return rc;
-+}
++struct scatterlist {
++    struct page		*page;
++    unsigned int	offset;
++    unsigned int	length;
++    dma_addr_t		dma_address;
++    unsigned int	dma_length;
++};
 +
-+static inline int
-+HYPERVISOR_grant_table_op(
-+	unsigned int cmd, void *uop, unsigned int count)
-+{
-+	return _hypercall3(int, grant_table_op, cmd, uop, count);
-+}
++/* These macros should be used after a pci_map_sg call has been done
++ * to get bus addresses of each of the SG entries and their lengths.
++ * You should only work with the number of sg entries pci_map_sg
++ * returns.
++ */
++#define sg_dma_address(sg)	((sg)->dma_address)
++#define sg_dma_len(sg)		((sg)->dma_length)
 +
-+static inline int
-+HYPERVISOR_update_va_mapping_otherdomain(
-+	unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
-+{
-+	unsigned long pte_hi = 0;
-+#ifdef CONFIG_X86_PAE
-+	pte_hi = new_val.pte_high;
-+#endif
-+	return _hypercall5(int, update_va_mapping_otherdomain, va,
-+			   new_val.pte_low, pte_hi, flags, domid);
-+}
++#define ISA_DMA_THRESHOLD (0x00ffffff)
 +
-+static inline int
-+HYPERVISOR_vm_assist(
-+	unsigned int cmd, unsigned int type)
-+{
-+	return _hypercall2(int, vm_assist, cmd, type);
-+}
++#endif /* !(_I386_SCATTERLIST_H) */
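A hedged driver-side sketch of how the two accessors above are meant to be used; program_entry() is a hypothetical descriptor writer:

extern void program_entry(dma_addr_t addr, unsigned int len);	/* hypothetical */

/* Walk only the nents entries that pci_map_sg() returned and feed
 * each bus address/length pair to the hardware. */
static void program_sg(struct scatterlist *sg, int nents)
{
	int i;

	for (i = 0; i < nents; i++)
		program_entry(sg_dma_address(&sg[i]), sg_dma_len(&sg[i]));
}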
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/segment.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/segment.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,117 @@
++#ifndef _ASM_SEGMENT_H
++#define _ASM_SEGMENT_H
 +
-+static inline int
-+HYPERVISOR_vcpu_op(
-+	int cmd, int vcpuid, void *extra_args)
-+{
-+	return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
-+}
++/*
++ * The layout of the per-CPU GDT under Linux:
++ *
++ *   0 - null
++ *   1 - reserved
++ *   2 - reserved
++ *   3 - reserved
++ *
++ *   4 - unused			<==== new cacheline
++ *   5 - unused
++ *
++ *  ------- start of TLS (Thread-Local Storage) segments:
++ *
++ *   6 - TLS segment #1			[ glibc's TLS segment ]
++ *   7 - TLS segment #2			[ Wine's %fs Win32 segment ]
++ *   8 - TLS segment #3
++ *   9 - reserved
++ *  10 - reserved
++ *  11 - reserved
++ *
++ *  ------- start of kernel segments:
++ *
++ *  12 - kernel code segment		<==== new cacheline
++ *  13 - kernel data segment
++ *  14 - default user CS
++ *  15 - default user DS
++ *  16 - TSS
++ *  17 - LDT
++ *  18 - PNPBIOS support (16->32 gate)
++ *  19 - PNPBIOS support
++ *  20 - PNPBIOS support
++ *  21 - PNPBIOS support
++ *  22 - PNPBIOS support
++ *  23 - APM BIOS support
++ *  24 - APM BIOS support
++ *  25 - APM BIOS support 
++ *
++ *  26 - ESPFIX small SS
++ *  27 - unused
++ *  28 - unused
++ *  29 - unused
++ *  30 - unused
++ *  31 - TSS for double fault handler
++ */
++#define GDT_ENTRY_TLS_ENTRIES	3
++#define GDT_ENTRY_TLS_MIN	6
++#define GDT_ENTRY_TLS_MAX 	(GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
 +
-+static inline int
-+HYPERVISOR_suspend(
-+	unsigned long srec)
-+{
-+	struct sched_shutdown sched_shutdown = {
-+		.reason = SHUTDOWN_suspend
-+	};
++#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
 +
-+	int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown,
-+			     &sched_shutdown, srec);
++#define GDT_ENTRY_DEFAULT_USER_CS	14
++#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3)
 +
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+	if (rc == -ENOSYS)
-+		rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown,
-+				 SHUTDOWN_suspend, srec);
-+#endif
++#define GDT_ENTRY_DEFAULT_USER_DS	15
++#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3)
++
++#define GDT_ENTRY_KERNEL_BASE	12
++
++#define GDT_ENTRY_KERNEL_CS		(GDT_ENTRY_KERNEL_BASE + 0)
++#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
++#define GET_KERNEL_CS() (__KERNEL_CS | (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1) )
++
++#define GDT_ENTRY_KERNEL_DS		(GDT_ENTRY_KERNEL_BASE + 1)
++#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
++#define GET_KERNEL_DS() (__KERNEL_DS | (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1) )
++
++#define GDT_ENTRY_TSS			(GDT_ENTRY_KERNEL_BASE + 4)
++#define GDT_ENTRY_LDT			(GDT_ENTRY_KERNEL_BASE + 5)
++
++#define GDT_ENTRY_PNPBIOS_BASE		(GDT_ENTRY_KERNEL_BASE + 6)
++#define GDT_ENTRY_APMBIOS_BASE		(GDT_ENTRY_KERNEL_BASE + 11)
++
++#define GDT_ENTRY_ESPFIX_SS		(GDT_ENTRY_KERNEL_BASE + 14)
++#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
 +
-+	return rc;
-+}
++#define GDT_ENTRY_DOUBLEFAULT_TSS	31
 +
-+static inline int
-+HYPERVISOR_nmi_op(
-+	unsigned long op, void *arg)
-+{
-+	return _hypercall2(int, nmi_op, op, arg);
-+}
++/*
++ * The GDT has 32 entries
++ */
++#define GDT_ENTRIES 32
 +
-+static inline unsigned long
-+HYPERVISOR_hvm_op(
-+    int op, void *arg)
-+{
-+    return _hypercall2(unsigned long, hvm_op, op, arg);
-+}
++#define GDT_SIZE (GDT_ENTRIES * 8)
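A quick self-contained check of the selector arithmetic implied by the definitions above (selector = index * 8, ORed with the requested RPL):

#include <assert.h>

int main(void)
{
	assert(14 * 8 + 3 == 0x73);	/* __USER_CS: index 14, RPL 3 */
	assert(15 * 8 + 3 == 0x7b);	/* __USER_DS: index 15, RPL 3 */
	assert(12 * 8 == 0x60);		/* __KERNEL_CS: index 12, RPL 0 */
	return 0;
}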
 +
-+static inline int
-+HYPERVISOR_callback_op(
-+	int cmd, void *arg)
-+{
-+	return _hypercall2(int, callback_op, cmd, arg);
-+}
++/* Simple and small GDT entries for booting only */
 +
-+static inline int
-+HYPERVISOR_xenoprof_op(
-+	int op, void *arg)
-+{
-+	return _hypercall2(int, xenoprof_op, op, arg);
-+}
++#define GDT_ENTRY_BOOT_CS		2
++#define __BOOT_CS	(GDT_ENTRY_BOOT_CS * 8)
 +
-+static inline int
-+HYPERVISOR_kexec_op(
-+	unsigned long op, void *args)
-+{
-+	return _hypercall2(int, kexec_op, op, args);
-+}
++#define GDT_ENTRY_BOOT_DS		(GDT_ENTRY_BOOT_CS + 1)
++#define __BOOT_DS	(GDT_ENTRY_BOOT_DS * 8)
 +
++/* The PnP BIOS entries in the GDT */
++#define GDT_ENTRY_PNPBIOS_CS32		(GDT_ENTRY_PNPBIOS_BASE + 0)
++#define GDT_ENTRY_PNPBIOS_CS16		(GDT_ENTRY_PNPBIOS_BASE + 1)
++#define GDT_ENTRY_PNPBIOS_DS		(GDT_ENTRY_PNPBIOS_BASE + 2)
++#define GDT_ENTRY_PNPBIOS_TS1		(GDT_ENTRY_PNPBIOS_BASE + 3)
++#define GDT_ENTRY_PNPBIOS_TS2		(GDT_ENTRY_PNPBIOS_BASE + 4)
 +
++/* The PnP BIOS selectors */
++#define PNP_CS32   (GDT_ENTRY_PNPBIOS_CS32 * 8)	/* segment for calling fn */
++#define PNP_CS16   (GDT_ENTRY_PNPBIOS_CS16 * 8)	/* code segment for BIOS */
++#define PNP_DS     (GDT_ENTRY_PNPBIOS_DS * 8)	/* data segment for BIOS */
++#define PNP_TS1    (GDT_ENTRY_PNPBIOS_TS1 * 8)	/* transfer data segment */
++#define PNP_TS2    (GDT_ENTRY_PNPBIOS_TS2 * 8)	/* another data segment */
 +
-+#endif /* __HYPERCALL_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/hypervisor.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/hypervisor.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/hypervisor.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/hypervisor.h	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,265 @@
-+/******************************************************************************
-+ * hypervisor.h
-+ * 
-+ * Linux-specific hypervisor handling.
-+ * 
-+ * Copyright (c) 2002-2004, K A Fraser
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
++/*
++ * The interrupt descriptor table has room for 256 entries;
++ * the global descriptor table is dependent on the number
++ * of tasks we can have.
 + */
++#define IDT_ENTRIES 256
 +
-+#ifndef __HYPERVISOR_H__
-+#define __HYPERVISOR_H__
-+
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <linux/version.h>
-+#include <linux/errno.h>
-+#include <xen/interface/xen.h>
-+#include <xen/interface/platform.h>
-+#include <xen/interface/event_channel.h>
-+#include <xen/interface/physdev.h>
-+#include <xen/interface/sched.h>
-+#include <xen/interface/nmi.h>
-+#include <asm/ptrace.h>
-+#include <asm/page.h>
-+#if defined(__i386__)
-+#  ifdef CONFIG_X86_PAE
-+#   include <asm-generic/pgtable-nopud.h>
-+#  else
-+#   include <asm-generic/pgtable-nopmd.h>
-+#  endif
-+#elif defined(__x86_64__) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
-+#  include <asm-generic/pgtable-nopud.h>
 +#endif
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/setup.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/setup.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,81 @@
++/*
++ *	Just a placeholder. We don't want to have to test for x86
++ *	before we include things.
++ */
 +
-+extern shared_info_t *HYPERVISOR_shared_info;
++#ifndef _i386_SETUP_H
++#define _i386_SETUP_H
 +
-+#define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu))
-+#ifdef CONFIG_SMP
-+#define current_vcpu_info() vcpu_info(smp_processor_id())
-+#else
-+#define current_vcpu_info() vcpu_info(0)
-+#endif
++#ifdef __KERNEL__
++#include <linux/pfn.h>
 +
-+#ifdef CONFIG_X86_32
-+extern unsigned long hypervisor_virt_start;
++/*
++ * Reserved space for vmalloc and iomap - defined in asm/page.h
++ */
++#define MAXMEM_PFN	PFN_DOWN(MAXMEM)
++#define MAX_NONPAE_PFN	(1 << 20)
 +#endif
 +
-+/* arch/xen/i386/kernel/setup.c */
-+extern start_info_t *xen_start_info;
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+#define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN)
-+#else
-+#define is_initial_xendomain() 0
-+#endif
++#define PARAM_SIZE 4096
++#define COMMAND_LINE_SIZE 256
 +
-+/* arch/xen/kernel/evtchn.c */
-+/* Force a proper event-channel callback from Xen. */
-+void force_evtchn_callback(void);
++#define OLD_CL_MAGIC_ADDR	0x90020
++#define OLD_CL_MAGIC		0xA33F
++#define OLD_CL_BASE_ADDR	0x90000
++#define OLD_CL_OFFSET		0x90022
++#define NEW_CL_POINTER		0x228	/* Relative to real mode data */
 +
-+/* arch/xen/kernel/process.c */
-+void xen_cpu_idle (void);
++#ifndef __ASSEMBLY__
++/*
++ * This is set up by the setup-routine at boot-time
++ */
++extern unsigned char boot_params[PARAM_SIZE];
 +
-+/* arch/xen/i386/kernel/hypervisor.c */
-+void do_hypervisor_callback(struct pt_regs *regs);
++#define PARAM	(boot_params)
++#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
++#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
++#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))
++#define E820_MAP_NR (*(char*) (PARAM+E820NR))
++#define E820_MAP    ((struct e820entry *) (PARAM+E820MAP))
++#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
++#define IST_INFO   (*(struct ist_info *) (PARAM+0x60))
++#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
++#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
++#define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4)))
++#define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8)))
++#define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc)))
++#define EFI_MEMMAP ((void *) *((unsigned long *)(PARAM+0x1d0)))
++#define EFI_MEMMAP_SIZE (*((unsigned long *) (PARAM+0x1d4)))
++#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
++#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
++#define VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
++#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
++#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
++#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
++#define KERNEL_START (*(unsigned long *) (PARAM+0x214))
++#define INITRD_START (__pa(xen_start_info->mod_start))
++#define INITRD_SIZE (xen_start_info->mod_len)
++#define EDID_INFO   (*(struct edid_info *) (PARAM+0x440))
++#define EDD_NR     (*(unsigned char *) (PARAM+EDDNR))
++#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
++#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
++#define EDD_BUF     ((struct edd_info *) (PARAM+EDDBUF))
 +
-+/* arch/xen/i386/mm/hypervisor.c */
 +/*
-+ * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should be already
-+ * be MACHINE addresses.
++ * Do NOT EVER look at the BIOS memory size location.
++ * It does not work on many machines.
 + */
++#define LOWMEMSIZE()	(0x9f000)
 +
-+void xen_pt_switch(unsigned long ptr);
-+void xen_new_user_pt(unsigned long ptr); /* x86_64 only */
-+void xen_load_gs(unsigned int selector); /* x86_64 only */
-+void xen_tlb_flush(void);
-+void xen_invlpg(unsigned long ptr);
++struct e820entry;
 +
-+void xen_l1_entry_update(pte_t *ptr, pte_t val);
-+void xen_l2_entry_update(pmd_t *ptr, pmd_t val);
-+void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */
-+void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */
-+void xen_pgd_pin(unsigned long ptr);
-+void xen_pgd_unpin(unsigned long ptr);
++char * __init machine_specific_memory_setup(void);
 +
-+void xen_set_ldt(unsigned long ptr, unsigned long bytes);
++int __init copy_e820_map(struct e820entry * biosmap, int nr_map);
++int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map);
++void __init add_memory_region(unsigned long long start,
++			      unsigned long long size, int type);
 +
-+#ifdef CONFIG_SMP
++#endif /* __ASSEMBLY__ */
++
++#endif /* _i386_SETUP_H */
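One difference from native i386 worth noting: INITRD_START and INITRD_SIZE above come from the Xen start-info page rather than the boot_params block. A hypothetical consumer:

/* Sketch: pick up the initrd location under Xen; both macros expand
 * to xen_start_info fields rather than boot_params offsets. */
static void locate_initrd(unsigned long *start, unsigned long *size)
{
	*start = INITRD_START;	/* __pa(xen_start_info->mod_start) */
	*size = INITRD_SIZE;	/* xen_start_info->mod_len */
}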
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/smp.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/smp.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,103 @@
++#ifndef __ASM_SMP_H
++#define __ASM_SMP_H
++
++/*
++ * We need the APIC definitions automatically as part of 'smp.h'
++ */
++#ifndef __ASSEMBLY__
++#include <linux/kernel.h>
++#include <linux/threads.h>
 +#include <linux/cpumask.h>
-+void xen_tlb_flush_all(void);
-+void xen_invlpg_all(unsigned long ptr);
-+void xen_tlb_flush_mask(cpumask_t *mask);
-+void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr);
 +#endif
 +
-+/* Returns zero on success else negative errno. */
-+int xen_create_contiguous_region(
-+    unsigned long vstart, unsigned int order, unsigned int address_bits);
-+void xen_destroy_contiguous_region(
-+    unsigned long vstart, unsigned int order);
++#ifdef CONFIG_X86_LOCAL_APIC
++#ifndef __ASSEMBLY__
++#include <asm/fixmap.h>
++#include <asm/bitops.h>
++#include <asm/mpspec.h>
++#ifdef CONFIG_X86_IO_APIC
++#include <asm/io_apic.h>
++#endif
++#include <asm/apic.h>
++#endif
++#endif
 +
-+struct page;
++#define BAD_APICID 0xFFu
++#ifdef CONFIG_SMP
++#ifndef __ASSEMBLY__
 +
-+int xen_limit_pages_to_max_mfn(
-+	struct page *pages, unsigned int order, unsigned int address_bits);
++/*
++ * Private routines/data
++ */
++ 
++extern void smp_alloc_memory(void);
++extern int pic_mode;
++extern int smp_num_siblings;
++extern cpumask_t cpu_sibling_map[];
++extern cpumask_t cpu_core_map[];
 +
-+/* Turn jiffies into Xen system time. */
-+u64 jiffies_to_st(unsigned long jiffies);
++extern void (*mtrr_hook) (void);
++extern void zap_low_mappings (void);
++extern void lock_ipi_call_lock(void);
++extern void unlock_ipi_call_lock(void);
 +
-+#ifdef CONFIG_XEN_SCRUB_PAGES
-+#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
-+#else
-+#define scrub_pages(_p,_n) ((void)0)
-+#endif
++#define MAX_APICID 256
++extern u8 x86_cpu_to_apicid[];
 +
-+#include <xen/hypercall.h>
++#define cpu_physical_id(cpu)	x86_cpu_to_apicid[cpu]
 +
-+#if defined(CONFIG_X86_64)
-+#define MULTI_UVMFLAGS_INDEX 2
-+#define MULTI_UVMDOMID_INDEX 3
-+#else
-+#define MULTI_UVMFLAGS_INDEX 3
-+#define MULTI_UVMDOMID_INDEX 4
++#ifdef CONFIG_HOTPLUG_CPU
++extern void cpu_exit_clear(void);
++extern void cpu_uninit(void);
 +#endif
 +
-+#define is_running_on_xen() 1
-+
-+static inline int
-+HYPERVISOR_yield(
-+	void)
-+{
-+	int rc = HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
-+
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+	if (rc == -ENOSYS)
-+		rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0);
-+#endif
++/*
++ * This function is needed by all SMP systems. It must _always_ be valid
++ * from the initial startup. We map APIC_BASE very early in page_setup(),
++ * so this is correct in the x86 case.
++ */
++#define raw_smp_processor_id() (current_thread_info()->cpu)
 +
-+	return rc;
-+}
++extern cpumask_t cpu_possible_map;
++#define cpu_callin_map cpu_possible_map
 +
-+static inline int
-+HYPERVISOR_block(
-+	void)
++/* We don't mark CPUs online until __cpu_up(), so we need another measure */
++static inline int num_booting_cpus(void)
 +{
-+	int rc = HYPERVISOR_sched_op(SCHEDOP_block, NULL);
++	return cpus_weight(cpu_possible_map);
++}
 +
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+	if (rc == -ENOSYS)
-+		rc = HYPERVISOR_sched_op_compat(SCHEDOP_block, 0);
-+#endif
++#ifdef CONFIG_X86_LOCAL_APIC
 +
-+	return rc;
++#ifdef APIC_DEFINITION
++extern int hard_smp_processor_id(void);
++#else
++#include <mach_apicdef.h>
++static inline int hard_smp_processor_id(void)
++{
++	/* we don't want to mark this access volatile - bad code generation */
++	return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
 +}
++#endif
 +
-+static inline int
-+HYPERVISOR_shutdown(
-+	unsigned int reason)
++static __inline int logical_smp_processor_id(void)
 +{
-+	struct sched_shutdown sched_shutdown = {
-+		.reason = reason
-+	};
-+
-+	int rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
++	/* we don't want to mark this access volatile - bad code generation */
++	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
++}
 +
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+	if (rc == -ENOSYS)
-+		rc = HYPERVISOR_sched_op_compat(SCHEDOP_shutdown, reason);
 +#endif
 +
-+	return rc;
-+}
++extern int __cpu_disable(void);
++extern void __cpu_die(unsigned int cpu);
++extern void prefill_possible_map(void);
++#endif /* !__ASSEMBLY__ */
 +
-+static inline int
-+HYPERVISOR_poll(
-+	evtchn_port_t *ports, unsigned int nr_ports, u64 timeout)
-+{
-+	int rc;
-+	struct sched_poll sched_poll = {
-+		.nr_ports = nr_ports,
-+		.timeout = jiffies_to_st(timeout)
-+	};
-+	set_xen_guest_handle(sched_poll.ports, ports);
++#else /* CONFIG_SMP */
 +
-+	rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll);
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+	if (rc == -ENOSYS)
-+		rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0);
-+#endif
++#define cpu_physical_id(cpu)		boot_cpu_physical_apicid
 +
-+	return rc;
-+}
++#define NO_PROC_ID		0xFF		/* No processor magic marker */
 +
-+static inline void
-+MULTI_update_va_mapping(
-+    multicall_entry_t *mcl, unsigned long va,
-+    pte_t new_val, unsigned long flags)
-+{
-+    mcl->op = __HYPERVISOR_update_va_mapping;
-+    mcl->args[0] = va;
-+#if defined(CONFIG_X86_64)
-+    mcl->args[1] = new_val.pte;
-+#elif defined(CONFIG_X86_PAE)
-+    mcl->args[1] = new_val.pte_low;
-+    mcl->args[2] = new_val.pte_high;
-+#else
-+    mcl->args[1] = new_val.pte_low;
-+    mcl->args[2] = 0;
 +#endif
-+    mcl->args[MULTI_UVMFLAGS_INDEX] = flags;
-+}
-+
-+static inline void
-+MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd,
-+		     void *uop, unsigned int count)
-+{
-+    mcl->op = __HYPERVISOR_grant_table_op;
-+    mcl->args[0] = cmd;
-+    mcl->args[1] = (unsigned long)uop;
-+    mcl->args[2] = count;
-+}
-+
-+static inline void
-+MULTI_update_va_mapping_otherdomain(
-+    multicall_entry_t *mcl, unsigned long va,
-+    pte_t new_val, unsigned long flags, domid_t domid)
-+{
-+    mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
-+    mcl->args[0] = va;
-+#if defined(CONFIG_X86_64)
-+    mcl->args[1] = new_val.pte;
-+#elif defined(CONFIG_X86_PAE)
-+    mcl->args[1] = new_val.pte_low;
-+    mcl->args[2] = new_val.pte_high;
-+#else
-+    mcl->args[1] = new_val.pte_low;
-+    mcl->args[2] = 0;
 +#endif
-+    mcl->args[MULTI_UVMFLAGS_INDEX] = flags;
-+    mcl->args[MULTI_UVMDOMID_INDEX] = domid;
-+}
-+
-+#endif /* __HYPERVISOR_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/io.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/io.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/io.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/io.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,390 @@
-+#ifndef _ASM_IO_H
-+#define _ASM_IO_H
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/spinlock.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/spinlock.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,202 @@
++#ifndef __ASM_SPINLOCK_H
++#define __ASM_SPINLOCK_H
 +
-+#include <linux/string.h>
++#include <asm/atomic.h>
++#include <asm/rwlock.h>
++#include <asm/page.h>
 +#include <linux/compiler.h>
 +
 +/*
-+ * This file contains the definitions for the x86 IO instructions
-+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
-+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
-+ * versions of the single-IO instructions (inb_p/inw_p/..).
++ * Your basic SMP spinlocks, allowing only a single CPU anywhere
 + *
-+ * This file is not meant to be obfuscating: it's just complicated
-+ * to (a) handle it all in a way that makes gcc able to optimize it
-+ * as well as possible and (b) trying to avoid writing the same thing
-+ * over and over again with slight variations and possibly making a
-+ * mistake somewhere.
-+ */
-+
-+/*
-+ * Thanks to James van Artsdalen for a better timing-fix than
-+ * the two short jumps: using outb's to a nonexistent port seems
-+ * to guarantee better timings even on fast machines.
++ * Simple spin lock operations.  There are two variants, one clears IRQ's
++ * on the local processor, one does not.
 + *
-+ * On the other hand, I'd like to be sure of a non-existent port:
-+ * I feel a bit unsafe about using 0x80 (should be safe, though)
++ * We make no fairness assumptions. They have a cost.
 + *
-+ *		Linus
++ * (the type definitions are in asm/spinlock_types.h)
 + */
 +
-+ /*
-+  *  Bit simplified and optimized by Jan Hubicka
-+  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
-+  *
-+  *  isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
-+  *  isa_read[wl] and isa_write[wl] fixed
-+  *  - Arnaldo Carvalho de Melo <acme at conectiva.com.br>
-+  */
-+
-+#define IO_SPACE_LIMIT 0xffff
-+
-+#define XQUAD_PORTIO_BASE 0xfe400000
-+#define XQUAD_PORTIO_QUAD 0x40000  /* 256k per quad. */
-+
-+#ifdef __KERNEL__
-+
-+#include <asm-generic/iomap.h>
-+
-+#include <linux/vmalloc.h>
-+#include <asm/fixmap.h>
++#define __raw_spin_is_locked(x) \
++		(*(volatile signed char *)(&(x)->slock) <= 0)
 +
-+/*
-+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
-+ * access
-+ */
-+#define xlate_dev_mem_ptr(p, sz)	ioremap(p, sz)
-+#define xlate_dev_mem_ptr_unmap(p)	iounmap(p)
++#define __raw_spin_lock_string \
++	"\n1:\t" \
++	LOCK_PREFIX " ; decb %0\n\t" \
++	"jns 3f\n" \
++	"2:\t" \
++	"rep;nop\n\t" \
++	"cmpb $0,%0\n\t" \
++	"jle 2b\n\t" \
++	"jmp 1b\n" \
++	"3:\n\t"
 +
 +/*
-+ * Convert a virtual cached pointer to an uncached pointer
++ * NOTE: there's an irqs-on section here, which normally would have to be
++ * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use
++ * __raw_spin_lock_string_flags().
 + */
-+#define xlate_dev_kmem_ptr(p)	p
++#define __raw_spin_lock_string_flags \
++	"\n1:\t" \
++	LOCK_PREFIX " ; decb %0\n\t" \
++	"jns 5f\n" \
++	"2:\t" \
++	"testl $0x200, %1\n\t" \
++	"jz 4f\n\t" \
++	"#sti\n" \
++	"3:\t" \
++	"rep;nop\n\t" \
++	"cmpb $0, %0\n\t" \
++	"jle 3b\n\t" \
++	"#cli\n\t" \
++	"jmp 1b\n" \
++	"4:\t" \
++	"rep;nop\n\t" \
++	"cmpb $0, %0\n\t" \
++	"jg 1b\n\t" \
++	"jmp 4b\n" \
++	"5:\n\t"
 +
-+/**
-+ *	virt_to_phys	-	map virtual addresses to physical
-+ *	@address: address to remap
-+ *
-+ *	The returned physical address is the physical (CPU) mapping for
-+ *	the memory address given. It is only valid to use this function on
-+ *	addresses directly mapped or allocated via kmalloc. 
-+ *
-+ *	This function does not give bus mappings for DMA transfers. In
-+ *	almost all conceivable cases a device driver should not be using
-+ *	this function
-+ */
-+ 
-+static inline unsigned long virt_to_phys(volatile void * address)
++static inline void __raw_spin_lock(raw_spinlock_t *lock)
 +{
-+	return __pa(address);
++	asm(__raw_spin_lock_string : "+m" (lock->slock) : : "memory");
 +}
 +
-+/**
-+ *	phys_to_virt	-	map physical address to virtual
-+ *	@address: address to remap
-+ *
-+ *	The returned virtual address is a current CPU mapping for
-+ *	the memory address given. It is only valid to use this function on
-+ *	addresses that have a kernel mapping
-+ *
-+ *	This function does not handle bus mappings for DMA transfers. In
-+ *	almost all conceivable cases a device driver should not be using
-+ *	this function
++/*
++ * It is easier for the lock validator if interrupts are not re-enabled
++ * in the middle of a lock-acquire. This is a performance feature anyway
++ * so we turn it off:
 + */
++#ifndef CONFIG_PROVE_LOCKING
++static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
++{
++	asm(__raw_spin_lock_string_flags : "+m" (lock->slock) : "r" (flags) : "memory");
++}
++#endif
 +
-+static inline void * phys_to_virt(unsigned long address)
++static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 +{
-+	return __va(address);
++	char oldval;
++	__asm__ __volatile__(
++		"xchgb %b0,%1"
++		:"=q" (oldval), "+m" (lock->slock)
++		:"0" (0) : "memory");
++	return oldval > 0;
 +}
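The xchgb trick above reads as: swap 0 into the lock byte, and the attempt succeeded only if the old value was positive (1 means free). A user-space analogue under that assumption, using C11 atomics:

#include <stdatomic.h>

typedef struct { atomic_char slock; } toy_spinlock_t;	/* 1 = free, <= 0 = held */

static int toy_trylock(toy_spinlock_t *l)
{
	/* mirrors the xchgb: claim by storing 0, success if old > 0 */
	return atomic_exchange(&l->slock, 0) > 0;
}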
 +
 +/*
-+ * Change "struct page" to physical address.
++ * __raw_spin_unlock based on writing $1 to the low byte.
++ * This method works. Despite all the confusion.
++ * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there)
++ * (PPro errata 66, 92)
 + */
-+#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-+#define page_to_phys(page)	 (phys_to_machine(page_to_pseudophys(page)))
-+#define page_to_bus(page)	 (phys_to_machine(page_to_pseudophys(page)))
-+
-+#define bio_to_pseudophys(bio)	 (page_to_pseudophys(bio_page((bio))) + \
-+				  (unsigned long) bio_offset((bio)))
-+#define bvec_to_pseudophys(bv)	 (page_to_pseudophys((bv)->bv_page) + \
-+				  (unsigned long) (bv)->bv_offset)
 +
-+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
-+	(((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
-+	 ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
-+	  bvec_to_pseudophys((vec2))))
++#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
 +
-+extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
++#define __raw_spin_unlock_string \
++	"movb $1,%0" \
++		:"+m" (lock->slock) : : "memory"
 +
-+/**
-+ * ioremap     -   map bus memory into CPU space
-+ * @offset:    bus address of the memory
-+ * @size:      size of the resource to map
-+ *
-+ * ioremap performs a platform specific sequence of operations to
-+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
-+ * writew/writel functions and the other mmio helpers. The returned
-+ * address is not guaranteed to be usable directly as a virtual
-+ * address. 
-+ */
 +
-+static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
++static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 +{
-+	return __ioremap(offset, size, 0);
++	__asm__ __volatile__(
++		__raw_spin_unlock_string
++	);
 +}
 +
-+extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size);
-+extern void iounmap(volatile void __iomem *addr);
++#else
 +
-+/*
-+ * bt_ioremap() and bt_iounmap() are for temporary early boot-time
-+ * mappings, before the real ioremap() is functional.
-+ * A boot-time mapping is currently limited to at most 16 pages.
-+ */
-+extern void *bt_ioremap(unsigned long offset, unsigned long size);
-+extern void bt_iounmap(void *addr, unsigned long size);
++#define __raw_spin_unlock_string \
++	"xchgb %b0, %1" \
++		:"=q" (oldval), "+m" (lock->slock) \
++		:"0" (oldval) : "memory"
 +
-+/* Use early IO mappings for DMI because it's initialized early */
-+#define dmi_ioremap bt_ioremap
-+#define dmi_iounmap bt_iounmap
-+#define dmi_alloc alloc_bootmem
++static inline void __raw_spin_unlock(raw_spinlock_t *lock)
++{
++	char oldval = 1;
 +
-+/*
-+ * ISA I/O bus memory addresses are 1:1 with the physical address.
-+ */
-+#define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
-+#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
-+#define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))
++	__asm__ __volatile__(
++		__raw_spin_unlock_string
++	);
++}
++
++#endif
++
++#define __raw_spin_unlock_wait(lock) \
++	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 +
 +/*
-+ * However PCI ones are not necessarily 1:1 and therefore these interfaces
-+ * are forbidden in portable PCI drivers.
++ * Read-write spinlocks, allowing multiple readers
++ * but only one writer.
 + *
-+ * Allow them on x86 for legacy drivers, though.
++ * NOTE! it is quite common to have readers in interrupts
++ * but no interrupt writers. For those circumstances we
++ * can "mix" irq-safe locks - any writer needs to get a
++ * irq-safe write-lock, but readers can get non-irqsafe
++ * read-locks.
++ *
++ * On x86, we implement read-write locks as a 32-bit counter
++ * with the high bit (sign) being the "contended" bit.
++ *
++ * The inline assembly is non-obvious. Think about it.
++ *
++ * Changed to use the same technique as rw semaphores.  See
++ * semaphore.h for details.  -ben
++ *
++ * the helpers are in arch/i386/kernel/semaphore.c
 + */
-+#define virt_to_bus(_x) phys_to_machine(__pa(_x))
-+#define bus_to_virt(_x) __va(machine_to_phys(_x))
 +
-+/*
-+ * readX/writeX() are used to access memory mapped devices. On some
-+ * architectures the memory mapped IO stuff needs to be accessed
-+ * differently. On the x86 architecture, we just read/write the
-+ * memory location directly.
++/**
++ * read_can_lock - would read_trylock() succeed?
++ * @lock: the rwlock in question.
 + */
++#define __raw_read_can_lock(x)		((int)(x)->lock > 0)
 +
-+static inline unsigned char readb(const volatile void __iomem *addr)
-+{
-+	return *(volatile unsigned char __force *) addr;
-+}
-+static inline unsigned short readw(const volatile void __iomem *addr)
-+{
-+	return *(volatile unsigned short __force *) addr;
-+}
-+static inline unsigned int readl(const volatile void __iomem *addr)
-+{
-+	return *(volatile unsigned int __force *) addr;
-+}
-+#define readb_relaxed(addr) readb(addr)
-+#define readw_relaxed(addr) readw(addr)
-+#define readl_relaxed(addr) readl(addr)
-+#define __raw_readb readb
-+#define __raw_readw readw
-+#define __raw_readl readl
++/**
++ * write_can_lock - would write_trylock() succeed?
++ * @lock: the rwlock in question.
++ */
++#define __raw_write_can_lock(x)		((x)->lock == RW_LOCK_BIAS)
 +
-+static inline void writeb(unsigned char b, volatile void __iomem *addr)
-+{
-+	*(volatile unsigned char __force *) addr = b;
-+}
-+static inline void writew(unsigned short b, volatile void __iomem *addr)
++static inline void __raw_read_lock(raw_rwlock_t *rw)
 +{
-+	*(volatile unsigned short __force *) addr = b;
++	__build_read_lock(rw, "__read_lock_failed");
 +}
-+static inline void writel(unsigned int b, volatile void __iomem *addr)
++
++static inline void __raw_write_lock(raw_rwlock_t *rw)
 +{
-+	*(volatile unsigned int __force *) addr = b;
++	__build_write_lock(rw, "__write_lock_failed");
 +}
-+#define __raw_writeb writeb
-+#define __raw_writew writew
-+#define __raw_writel writel
-+
-+#define mmiowb()
 +
-+static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
++static inline int __raw_read_trylock(raw_rwlock_t *lock)
 +{
-+	memset((void __force *) addr, val, count);
++	atomic_t *count = (atomic_t *)lock;
++	atomic_dec(count);
++	if (atomic_read(count) >= 0)
++		return 1;
++	atomic_inc(count);
++	return 0;
 +}
-+static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
++
++static inline int __raw_write_trylock(raw_rwlock_t *lock)
 +{
-+	__memcpy(dst, (void __force *) src, count);
++	atomic_t *count = (atomic_t *)lock;
++	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
++		return 1;
++	atomic_add(RW_LOCK_BIAS, count);
++	return 0;
 +}
-+static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
++
++static inline void __raw_read_unlock(raw_rwlock_t *rw)
 +{
-+	__memcpy((void __force *) dst, src, count);
++	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
 +}
 +
-+/*
-+ * ISA space is 'always mapped' on a typical x86 system, no need to
-+ * explicitly ioremap() it. The fact that the ISA IO space is mapped
-+ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
-+ * are physical addresses. The following constant pointer can be
-+ * used as the IO-area pointer (it can be iounmapped as well, so the
-+ * analogy with PCI is quite large):
-+ */
-+#define __ISA_IO_base ((char __iomem *)(fix_to_virt(FIX_ISAMAP_BEGIN)))
-+
-+/*
-+ * Again, i386 does not require mem IO specific function.
-+ */
-+
-+#define eth_io_copy_and_sum(a,b,c,d)		eth_copy_and_sum((a),(void __force *)(b),(c),(d))
-+
-+/**
-+ *	check_signature		-	find BIOS signatures
-+ *	@io_addr: mmio address to check 
-+ *	@signature:  signature block
-+ *	@length: length of signature
-+ *
-+ *	Perform a signature comparison with the mmio address io_addr. This
-+ *	address should have been obtained by ioremap.
-+ *	Returns 1 on a match.
-+ */
-+ 
-+static inline int check_signature(volatile void __iomem * io_addr,
-+	const unsigned char *signature, int length)
++static inline void __raw_write_unlock(raw_rwlock_t *rw)
 +{
-+	int retval = 0;
-+	do {
-+		if (readb(io_addr) != *signature)
-+			goto out;
-+		io_addr++;
-+		signature++;
-+		length--;
-+	} while (length);
-+	retval = 1;
-+out:
-+	return retval;
++	asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
++				 : "+m" (rw->lock) : : "memory");
 +}
 +
-+/*
-+ *	Cache management
-+ *
-+ *	This needed for two cases
-+ *	1. Out of order aware processors
-+ *	2. Accidentally out of order processors (PPro errata #51)
-+ */
-+ 
-+#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
++#endif /* __ASM_SPINLOCK_H */
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/swiotlb.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/swiotlb.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,43 @@
++#ifndef _ASM_SWIOTLB_H
++#define _ASM_SWIOTLB_H 1
 +
-+static inline void flush_write_buffers(void)
-+{
-+	__asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
-+}
++/* SWIOTLB interface */
 +
-+#define dma_cache_inv(_start,_size)		flush_write_buffers()
-+#define dma_cache_wback(_start,_size)		flush_write_buffers()
-+#define dma_cache_wback_inv(_start,_size)	flush_write_buffers()
++extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, size_t size,
++				      int dir);
++extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
++				  size_t size, int dir);
++extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
++					 dma_addr_t dev_addr,
++					 size_t size, int dir);
++extern void swiotlb_sync_single_for_device(struct device *hwdev,
++					    dma_addr_t dev_addr,
++					    size_t size, int dir);
++extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
++				     struct scatterlist *sg, int nelems,
++				     int dir);
++extern void swiotlb_sync_sg_for_device(struct device *hwdev,
++					struct scatterlist *sg, int nelems,
++					int dir);
++extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
++		      int nents, int direction);
++extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
++			 int nents, int direction);
++extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
++#ifdef CONFIG_HIGHMEM
++extern dma_addr_t swiotlb_map_page(struct device *hwdev, struct page *page,
++                                   unsigned long offset, size_t size,
++                                   enum dma_data_direction direction);
++extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
++                               size_t size, enum dma_data_direction direction);
++#endif
++extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
++extern void swiotlb_init(void);
 +
++#ifdef CONFIG_SWIOTLB
++extern int swiotlb;
 +#else
++#define swiotlb 0
++#endif
 +
-+/* Nothing to do */
++#endif
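A hedged sketch of the declared interface in use; hwdev, buf and len belong to a hypothetical driver, and 1 is DMA_TO_DEVICE:

static int send_buffer(struct device *hwdev, void *buf, size_t len)
{
	dma_addr_t bus = swiotlb_map_single(hwdev, buf, len, 1 /* DMA_TO_DEVICE */);

	if (swiotlb_dma_mapping_error(bus))
		return -1;
	/* ... hand "bus" to the device and wait for the DMA to finish ... */
	swiotlb_unmap_single(hwdev, bus, len, 1 /* DMA_TO_DEVICE */);
	return 0;
}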
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/synch_bitops.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/synch_bitops.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,126 @@
++#ifndef __XEN_SYNCH_BITOPS_H__
++#define __XEN_SYNCH_BITOPS_H__
 +
-+#define dma_cache_inv(_start,_size)		do { } while (0)
-+#define dma_cache_wback(_start,_size)		do { } while (0)
-+#define dma_cache_wback_inv(_start,_size)	do { } while (0)
-+#define flush_write_buffers()
++/*
++ * Copyright 1992, Linus Torvalds.
++ * Heavily modified to provide guaranteed strong synchronisation
++ * when communicating with Xen or other guest OSes running on other CPUs.
++ */
 +
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
 +#endif
 +
-+#endif /* __KERNEL__ */
++#define ADDR (*(volatile long *) addr)
 +
-+#ifdef SLOW_IO_BY_JUMPING
-+#define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:"
-+#else
-+#define __SLOW_DOWN_IO "outb %%al,$0x80;"
-+#endif
++static __inline__ void synch_set_bit(int nr, volatile void * addr)
++{
++    __asm__ __volatile__ ( 
++        "lock btsl %1,%0"
++        : "+m" (ADDR) : "Ir" (nr) : "memory" );
++}
 +
-+static inline void slow_down_io(void) {
-+	__asm__ __volatile__(
-+		__SLOW_DOWN_IO
-+#ifdef REALLY_SLOW_IO
-+		__SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
-+#endif
-+		: : );
++static __inline__ void synch_clear_bit(int nr, volatile void * addr)
++{
++    __asm__ __volatile__ (
++        "lock btrl %1,%0"
++        : "+m" (ADDR) : "Ir" (nr) : "memory" );
 +}
 +
-+#ifdef CONFIG_X86_NUMAQ
-+extern void *xquad_portio;    /* Where the IO area was mapped */
-+#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
-+#define __BUILDIO(bwl,bw,type) \
-+static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \
-+	if (xquad_portio) \
-+		write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \
-+	else \
-+		out##bwl##_local(value, port); \
-+} \
-+static inline void out##bwl(unsigned type value, int port) { \
-+	out##bwl##_quad(value, port, 0); \
-+} \
-+static inline unsigned type in##bwl##_quad(int port, int quad) { \
-+	if (xquad_portio) \
-+		return read##bwl(XQUAD_PORT_ADDR(port, quad)); \
-+	else \
-+		return in##bwl##_local(port); \
-+} \
-+static inline unsigned type in##bwl(int port) { \
-+	return in##bwl##_quad(port, 0); \
++static __inline__ void synch_change_bit(int nr, volatile void * addr)
++{
++    __asm__ __volatile__ (
++        "lock btcl %1,%0"
++        : "+m" (ADDR) : "Ir" (nr) : "memory" );
 +}
-+#else
-+#define __BUILDIO(bwl,bw,type) \
-+static inline void out##bwl(unsigned type value, int port) { \
-+	out##bwl##_local(value, port); \
-+} \
-+static inline unsigned type in##bwl(int port) { \
-+	return in##bwl##_local(port); \
++
++static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
++{
++    int oldbit;
++    __asm__ __volatile__ (
++        "lock btsl %2,%1\n\tsbbl %0,%0"
++        : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
++    return oldbit;
 +}
-+#endif
 +
++static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
++{
++    int oldbit;
++    __asm__ __volatile__ (
++        "lock btrl %2,%1\n\tsbbl %0,%0"
++        : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
++    return oldbit;
++}
 +
-+#define BUILDIO(bwl,bw,type) \
-+static inline void out##bwl##_local(unsigned type value, int port) { \
-+	__asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \
-+} \
-+static inline unsigned type in##bwl##_local(int port) { \
-+	unsigned type value; \
-+	__asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \
-+	return value; \
-+} \
-+static inline void out##bwl##_local_p(unsigned type value, int port) { \
-+	out##bwl##_local(value, port); \
-+	slow_down_io(); \
-+} \
-+static inline unsigned type in##bwl##_local_p(int port) { \
-+	unsigned type value = in##bwl##_local(port); \
-+	slow_down_io(); \
-+	return value; \
-+} \
-+__BUILDIO(bwl,bw,type) \
-+static inline void out##bwl##_p(unsigned type value, int port) { \
-+	out##bwl(value, port); \
-+	slow_down_io(); \
-+} \
-+static inline unsigned type in##bwl##_p(int port) { \
-+	unsigned type value = in##bwl(port); \
-+	slow_down_io(); \
-+	return value; \
-+} \
-+static inline void outs##bwl(int port, const void *addr, unsigned long count) { \
-+	__asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \
-+} \
-+static inline void ins##bwl(int port, void *addr, unsigned long count) { \
-+	__asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \
++static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
++{
++    int oldbit;
++
++    __asm__ __volatile__ (
++        "lock btcl %2,%1\n\tsbbl %0,%0"
++        : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
++    return oldbit;
 +}
 +
-+BUILDIO(b,b,char)
-+BUILDIO(w,w,short)
-+BUILDIO(l,,int)
++struct __synch_xchg_dummy { unsigned long a[100]; };
++#define __synch_xg(x) ((struct __synch_xchg_dummy *)(x))
 +
-+/* We will be supplying our own /dev/mem implementation */
-+#define ARCH_HAS_DEV_MEM
++#define synch_cmpxchg(ptr, old, new) \
++((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
++                                     (unsigned long)(old), \
++                                     (unsigned long)(new), \
++                                     sizeof(*(ptr))))
 +
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/irqflags.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/irqflags.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/irqflags.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/irqflags.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,127 @@
-+/*
-+ * include/asm-i386/irqflags.h
-+ *
-+ * IRQ flags handling
-+ *
-+ * This file gets included from lowlevel asm headers too, to provide
-+ * wrapped versions of the local_irq_*() APIs, based on the
-+ * raw_local_irq_*() functions from the lowlevel headers.
-+ */
-+#ifndef _ASM_IRQFLAGS_H
-+#define _ASM_IRQFLAGS_H
++static inline unsigned long __synch_cmpxchg(volatile void *ptr,
++					    unsigned long old,
++					    unsigned long new, int size)
++{
++	unsigned long prev;
++	switch (size) {
++	case 1:
++		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
++				     : "=a"(prev)
++				     : "q"(new), "m"(*__synch_xg(ptr)),
++				       "0"(old)
++				     : "memory");
++		return prev;
++	case 2:
++		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
++				     : "=a"(prev)
++				     : "r"(new), "m"(*__synch_xg(ptr)),
++				       "0"(old)
++				     : "memory");
++		return prev;
++#ifdef CONFIG_X86_64
++	case 4:
++		__asm__ __volatile__("lock; cmpxchgl %k1,%2"
++				     : "=a"(prev)
++				     : "r"(new), "m"(*__synch_xg(ptr)),
++				       "0"(old)
++				     : "memory");
++		return prev;
++	case 8:
++		__asm__ __volatile__("lock; cmpxchgq %1,%2"
++				     : "=a"(prev)
++				     : "r"(new), "m"(*__synch_xg(ptr)),
++				       "0"(old)
++				     : "memory");
++		return prev;
++#else
++	case 4:
++		__asm__ __volatile__("lock; cmpxchgl %1,%2"
++				     : "=a"(prev)
++				     : "r"(new), "m"(*__synch_xg(ptr)),
++				       "0"(old)
++				     : "memory");
++		return prev;
++#endif
++	}
++	return old;
++}
 +
-+#ifndef __ASSEMBLY__
++#define synch_test_bit test_bit
 +
-+/* 
-+ * The use of 'barrier' in the following reflects their use as local-lock
-+ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
-+ * critical operations are executed. All critical operations must complete
-+ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
-+ * includes these barriers, for example.
-+ */
++#define synch_cmpxchg_subword synch_cmpxchg
 +
-+#define __raw_local_save_flags() (current_vcpu_info()->evtchn_upcall_mask)
++#endif /* __XEN_SYNCH_BITOPS_H__ */
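
The synch_ primitives above always emit a lock prefix, even on UP kernels,
because the peer is the hypervisor or another domain rather than a local
CPU. A short sketch of both styles of use (the shared structure and its
fields are hypothetical):

/* Sketch: fields in memory shared with Xen need the synch_ ops. */
if (synch_test_and_clear_bit(0, &shared->flags))	/* 'shared' assumed */
	handle_request();				/* hypothetical */

/* Sketch: the usual compare-exchange retry loop with synch_cmpxchg(). */
unsigned long old, cur = shared->seq;
do {
	old = cur;
	cur = synch_cmpxchg(&shared->seq, old, old + 1);
} while (cur != old);		/* retry if Xen or a peer raced with us */
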
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/system.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/system.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,488 @@
++#ifndef __ASM_SYSTEM_H
++#define __ASM_SYSTEM_H
 +
-+#define raw_local_save_flags(flags) \
-+		do { (flags) = __raw_local_save_flags(); } while (0)
++#include <linux/kernel.h>
++#include <asm/segment.h>
++#include <asm/cpufeature.h>
++#include <linux/bitops.h> /* for LOCK_PREFIX */
++#include <asm/synch_bitops.h>
++#include <asm/hypervisor.h>
 +
-+#define raw_local_irq_restore(x)					\
-+do {									\
-+	vcpu_info_t *_vcpu;						\
-+	barrier();							\
-+	_vcpu = current_vcpu_info();					\
-+	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {			\
-+		barrier(); /* unmask then check (avoid races) */	\
-+		if (unlikely(_vcpu->evtchn_upcall_pending))		\
-+			force_evtchn_callback();			\
-+	}								\
-+} while (0)
++#ifdef __KERNEL__
 +
-+#define raw_local_irq_disable()						\
-+do {									\
-+	current_vcpu_info()->evtchn_upcall_mask = 1;			\
-+	barrier();							\
-+} while (0)
++struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
++extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
 +
-+#define raw_local_irq_enable()						\
-+do {									\
-+	vcpu_info_t *_vcpu;						\
-+	barrier();							\
-+	_vcpu = current_vcpu_info();					\
-+	_vcpu->evtchn_upcall_mask = 0;					\
-+	barrier(); /* unmask then check (avoid races) */		\
-+	if (unlikely(_vcpu->evtchn_upcall_pending))			\
-+		force_evtchn_callback();				\
++/*
++ * Saving eflags is important. It not only switches IOPL between tasks,
++ * but also protects other tasks from NT leaking through sysenter etc.
++ */
++#define switch_to(prev,next,last) do {					\
++	unsigned long esi,edi;						\
++	asm volatile("pushfl\n\t"		/* Save flags */	\
++		     "pushl %%ebp\n\t"					\
++		     "movl %%esp,%0\n\t"	/* save ESP */		\
++		     "movl %5,%%esp\n\t"	/* restore ESP */	\
++		     "movl $1f,%1\n\t"		/* save EIP */		\
++		     "pushl %6\n\t"		/* restore EIP */	\
++		     "jmp __switch_to\n"				\
++		     "1:\t"						\
++		     "popl %%ebp\n\t"					\
++		     "popfl"						\
++		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
++		      "=a" (last),"=S" (esi),"=D" (edi)			\
++		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
++		      "2" (prev), "d" (next));				\
 +} while (0)
 +
++#define _set_base(addr,base) do { unsigned long __pr; \
++__asm__ __volatile__ ("movw %%dx,%1\n\t" \
++	"rorl $16,%%edx\n\t" \
++	"movb %%dl,%2\n\t" \
++	"movb %%dh,%3" \
++	:"=&d" (__pr) \
++	:"m" (*((addr)+2)), \
++	 "m" (*((addr)+4)), \
++	 "m" (*((addr)+7)), \
++         "0" (base) \
++        ); } while(0)
++
++#define _set_limit(addr,limit) do { unsigned long __lr; \
++__asm__ __volatile__ ("movw %%dx,%1\n\t" \
++	"rorl $16,%%edx\n\t" \
++	"movb %2,%%dh\n\t" \
++	"andb $0xf0,%%dh\n\t" \
++	"orb %%dh,%%dl\n\t" \
++	"movb %%dl,%2" \
++	:"=&d" (__lr) \
++	:"m" (*(addr)), \
++	 "m" (*((addr)+6)), \
++	 "0" (limit) \
++        ); } while(0)
++
++#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
++#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )
++
 +/*
-+ * Used in the idle loop; sti takes one instruction cycle
-+ * to complete:
++ * Load a segment. Fall back on loading the zero
++ * segment if something goes wrong..
 + */
-+void raw_safe_halt(void);
++#define loadsegment(seg,value)			\
++	asm volatile("\n"			\
++		"1:\t"				\
++		"mov %0,%%" #seg "\n"		\
++		"2:\n"				\
++		".section .fixup,\"ax\"\n"	\
++		"3:\t"				\
++		"pushl $0\n\t"			\
++		"popl %%" #seg "\n\t"		\
++		"jmp 2b\n"			\
++		".previous\n"			\
++		".section __ex_table,\"a\"\n\t"	\
++		".align 4\n\t"			\
++		".long 1b,3b\n"			\
++		".previous"			\
++		: :"rm" (value))
 +
 +/*
-+ * Used when interrupts are already enabled or to
-+ * shutdown the processor:
++ * Save a segment register away
 + */
-+void halt(void);
-+
-+static inline int raw_irqs_disabled_flags(unsigned long flags)
-+{
-+	return (flags != 0);
-+}
++#define savesegment(seg, value) \
++	asm volatile("mov %%" #seg ",%0":"=rm" (value))
 +
-+#define raw_irqs_disabled()						\
-+({									\
-+	unsigned long flags = __raw_local_save_flags();			\
-+									\
-+	raw_irqs_disabled_flags(flags);					\
++#define read_cr0() ({ \
++	unsigned int __dummy; \
++	__asm__ __volatile__( \
++		"movl %%cr0,%0\n\t" \
++		:"=r" (__dummy)); \
++	__dummy; \
 +})
++#define write_cr0(x) \
++	__asm__ __volatile__("movl %0,%%cr0": :"r" (x))
 +
-+/*
-+ * For spinlocks, etc:
-+ */
-+#define __raw_local_irq_save()						\
-+({									\
-+	unsigned long flags = __raw_local_save_flags();			\
-+									\
-+	raw_local_irq_disable();					\
-+									\
-+	flags;								\
-+})
++#define read_cr2() (current_vcpu_info()->arch.cr2)
++#define write_cr2(x) \
++	__asm__ __volatile__("movl %0,%%cr2": :"r" (x))
 +
-+#define raw_local_irq_save(flags) \
-+		do { (flags) = __raw_local_irq_save(); } while (0)
++#define read_cr3() ({ \
++	unsigned int __dummy; \
++	__asm__ ( \
++		"movl %%cr3,%0\n\t" \
++		:"=r" (__dummy)); \
++	__dummy = xen_cr3_to_pfn(__dummy); \
++	mfn_to_pfn(__dummy) << PAGE_SHIFT; \
++})
++#define write_cr3(x) ({						\
++	unsigned int __dummy = pfn_to_mfn((x) >> PAGE_SHIFT);	\
++	__dummy = xen_pfn_to_cr3(__dummy);			\
++	__asm__ __volatile__("movl %0,%%cr3": :"r" (__dummy));	\
++})
++#define read_cr4() ({ \
++	unsigned int __dummy; \
++	__asm__( \
++		"movl %%cr4,%0\n\t" \
++		:"=r" (__dummy)); \
++	__dummy; \
++})
++#define read_cr4_safe() ({			      \
++	unsigned int __dummy;			      \
++	/* This could fault if %cr4 does not exist */ \
++	__asm__("1: movl %%cr4, %0		\n"   \
++		"2:				\n"   \
++		".section __ex_table,\"a\"	\n"   \
++		".long 1b,2b			\n"   \
++		".previous			\n"   \
++		: "=r" (__dummy): "0" (0));	      \
++	__dummy;				      \
++})
 +
-+#endif /* __ASSEMBLY__ */
++#define write_cr4(x) \
++	__asm__ __volatile__("movl %0,%%cr4": :"r" (x))
 +
 +/*
-+ * Do the CPU's IRQ-state tracing from assembly code. We call a
-+ * C function, so save all the C-clobbered registers:
++ * Clear and set 'TS' bit respectively
 + */
-+#ifdef CONFIG_TRACE_IRQFLAGS
-+
-+# define TRACE_IRQS_ON				\
-+	pushl %eax;				\
-+	pushl %ecx;				\
-+	pushl %edx;				\
-+	call trace_hardirqs_on;			\
-+	popl %edx;				\
-+	popl %ecx;				\
-+	popl %eax;
++#define clts() (HYPERVISOR_fpu_taskswitch(0))
++#define stts() (HYPERVISOR_fpu_taskswitch(1))
 +
-+# define TRACE_IRQS_OFF				\
-+	pushl %eax;				\
-+	pushl %ecx;				\
-+	pushl %edx;				\
-+	call trace_hardirqs_off;		\
-+	popl %edx;				\
-+	popl %ecx;				\
-+	popl %eax;
++#endif	/* __KERNEL__ */
 +
-+#else
-+# define TRACE_IRQS_ON
-+# define TRACE_IRQS_OFF
-+#endif
++#define wbinvd() \
++	__asm__ __volatile__ ("wbinvd": : :"memory")
 +
-+#endif
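
The irqflags.h removed above is the core of the paravirtualised interrupt
model: instead of the hardware IF flag it toggles the per-VCPU
evtchn_upcall_mask, and re-enabling must re-check evtchn_upcall_pending.
Callers keep the usual shape (a sketch of the caller-visible pattern):

/* Sketch: 'flags' holds the saved upcall mask, not EFLAGS, here. */
unsigned long flags;

raw_local_irq_save(flags);	/* sets evtchn_upcall_mask = 1 */
/* ... critical section: no event-channel upcalls delivered ... */
raw_local_irq_restore(flags);	/* may call force_evtchn_callback() */
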
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/maddr.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/maddr.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/maddr.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/maddr.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,193 @@
-+#ifndef _I386_MADDR_H
-+#define _I386_MADDR_H
++static inline unsigned long get_limit(unsigned long segment)
++{
++	unsigned long __limit;
++	__asm__("lsll %1,%0"
++		:"=r" (__limit):"r" (segment));
++	return __limit+1;
++}
 +
-+#include <xen/features.h>
-+#include <xen/interface/xen.h>
++#define nop() __asm__ __volatile__ ("nop")
 +
-+/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
-+#define INVALID_P2M_ENTRY	(~0UL)
-+#define FOREIGN_FRAME_BIT	(1UL<<31)
-+#define FOREIGN_FRAME(m)	((m) | FOREIGN_FRAME_BIT)
++#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
 +
-+/* Definitions for machine and pseudophysical addresses. */
-+#ifdef CONFIG_X86_PAE
-+typedef unsigned long long paddr_t;
-+typedef unsigned long long maddr_t;
-+#else
-+typedef unsigned long paddr_t;
-+typedef unsigned long maddr_t;
-+#endif
++#define tas(ptr) (xchg((ptr),1))
 +
-+#ifdef CONFIG_XEN
++struct __xchg_dummy { unsigned long a[100]; };
++#define __xg(x) ((struct __xchg_dummy *)(x))
 +
-+extern unsigned long *phys_to_machine_mapping;
-+extern unsigned long  max_mapnr;
 +
-+#undef machine_to_phys_mapping
-+extern unsigned long *machine_to_phys_mapping;
-+extern unsigned int   machine_to_phys_order;
++#ifdef CONFIG_X86_CMPXCHG64
 +
-+static inline unsigned long pfn_to_mfn(unsigned long pfn)
++/*
++ * The semantics of CMPXCHG8B are a bit strange; this is why
++ * there is a loop and the loading of %%eax and %%edx has to
++ * be inside. This inlines well in most cases; the cached
++ * cost is around 38 cycles. (In the future we might want
++ * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
++ * might have an implicit FPU-save as a cost, so it's not
++ * clear which path to take.)
++ *
++ * cmpxchg8b must be used with the lock prefix here to allow
++ * the instruction to be executed atomically, see page 3-102
++ * of the instruction set reference 24319102.pdf. We need
++ * the reader side to see the coherent 64bit value.
++ */
++static inline void __set_64bit (unsigned long long * ptr,
++		unsigned int low, unsigned int high)
 +{
-+	if (xen_feature(XENFEAT_auto_translated_physmap))
-+		return pfn;
-+	BUG_ON(max_mapnr && pfn >= max_mapnr);
-+	return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT;
++	__asm__ __volatile__ (
++		"\n1:\t"
++		"movl (%0), %%eax\n\t"
++		"movl 4(%0), %%edx\n\t"
++		"lock cmpxchg8b (%0)\n\t"
++		"jnz 1b"
++		: /* no outputs */
++		:	"D"(ptr),
++			"b"(low),
++			"c"(high)
++		:	"ax","dx","memory");
 +}
 +
-+static inline int phys_to_machine_mapping_valid(unsigned long pfn)
++static inline void __set_64bit_constant (unsigned long long *ptr,
++						 unsigned long long value)
 +{
-+	if (xen_feature(XENFEAT_auto_translated_physmap))
-+		return 1;
-+	BUG_ON(max_mapnr && pfn >= max_mapnr);
-+	return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
++	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
 +}
++#define ll_low(x)	*(((unsigned int*)&(x))+0)
++#define ll_high(x)	*(((unsigned int*)&(x))+1)
 +
-+static inline unsigned long mfn_to_pfn(unsigned long mfn)
++static inline void __set_64bit_var (unsigned long long *ptr,
++			 unsigned long long value)
 +{
-+	unsigned long pfn;
-+
-+	if (xen_feature(XENFEAT_auto_translated_physmap))
-+		return mfn;
++	__set_64bit(ptr,ll_low(value), ll_high(value));
++}
 +
-+	if (unlikely((mfn >> machine_to_phys_order) != 0))
-+		return max_mapnr;
++#define set_64bit(ptr,value) \
++(__builtin_constant_p(value) ? \
++ __set_64bit_constant(ptr, value) : \
++ __set_64bit_var(ptr, value) )
 +
-+	/* The array access can fail (e.g., device space beyond end of RAM). */
-+	asm (
-+		"1:	movl %1,%0\n"
-+		"2:\n"
-+		".section .fixup,\"ax\"\n"
-+		"3:	movl %2,%0\n"
-+		"	jmp  2b\n"
-+		".previous\n"
-+		".section __ex_table,\"a\"\n"
-+		"	.align 4\n"
-+		"	.long 1b,3b\n"
-+		".previous"
-+		: "=r" (pfn)
-+		: "m" (machine_to_phys_mapping[mfn]), "m" (max_mapnr) );
++#define _set_64bit(ptr,value) \
++(__builtin_constant_p(value) ? \
++ __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
++ __set_64bit(ptr, ll_low(value), ll_high(value)) )
 +
-+	return pfn;
-+}
++#endif
 +
 +/*
-+ * We detect special mappings in one of two ways:
-+ *  1. If the MFN is an I/O page then Xen will set the m2p entry
-+ *     to be outside our maximum possible pseudophys range.
-+ *  2. If the MFN belongs to a different domain then we will certainly
-+ *     not have MFN in our p2m table. Conversely, if the page is ours,
-+ *     then we'll have p2m(m2p(MFN))==MFN.
-+ * If we detect a special mapping then it doesn't have a 'struct page'.
-+ * We force !pfn_valid() by returning an out-of-range pointer.
-+ *
-+ * NB. These checks require that, for any MFN that is not in our reservation,
-+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
-+ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
-+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
-+ *
-+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
-+ *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
-+ *      require. In all the cases we care about, the FOREIGN_FRAME bit is
-+ *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
++ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
++ * Note 2: xchg has a side effect, so the volatile qualifier is necessary;
++ *	  without it the primitive would be invalid, as *ptr is an output argument. --ANK
 + */
-+static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
++static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
 +{
-+	unsigned long pfn = mfn_to_pfn(mfn);
-+	if ((pfn < max_mapnr)
-+	    && !xen_feature(XENFEAT_auto_translated_physmap)
-+	    && (phys_to_machine_mapping[pfn] != mfn))
-+		return max_mapnr; /* force !pfn_valid() */
-+	return pfn;
++	switch (size) {
++		case 1:
++			__asm__ __volatile__("xchgb %b0,%1"
++				:"=q" (x)
++				:"m" (*__xg(ptr)), "0" (x)
++				:"memory");
++			break;
++		case 2:
++			__asm__ __volatile__("xchgw %w0,%1"
++				:"=r" (x)
++				:"m" (*__xg(ptr)), "0" (x)
++				:"memory");
++			break;
++		case 4:
++			__asm__ __volatile__("xchgl %0,%1"
++				:"=r" (x)
++				:"m" (*__xg(ptr)), "0" (x)
++				:"memory");
++			break;
++	}
++	return x;
 +}
 +
-+static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
++/*
++ * Atomic compare and exchange.  Compare OLD with MEM; if identical,
++ * store NEW in MEM.  Return the initial value in MEM.  Success is
++ * indicated by comparing RETURN with OLD.
++ */
++
++#ifdef CONFIG_X86_CMPXCHG
++#define __HAVE_ARCH_CMPXCHG 1
++#define cmpxchg(ptr,o,n)\
++	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
++					(unsigned long)(n),sizeof(*(ptr))))
++#endif
++
++static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
++				      unsigned long new, int size)
 +{
-+	BUG_ON(max_mapnr && pfn >= max_mapnr);
-+	if (xen_feature(XENFEAT_auto_translated_physmap)) {
-+		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
-+		return;
++	unsigned long prev;
++	switch (size) {
++	case 1:
++		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
++				     : "=a"(prev)
++				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
++				     : "memory");
++		return prev;
++	case 2:
++		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
++				     : "=a"(prev)
++				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
++				     : "memory");
++		return prev;
++	case 4:
++		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
++				     : "=a"(prev)
++				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
++				     : "memory");
++		return prev;
 +	}
-+	phys_to_machine_mapping[pfn] = mfn;
++	return old;
 +}
 +
-+static inline maddr_t phys_to_machine(paddr_t phys)
-+{
-+	maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
-+	machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
-+	return machine;
-+}
++#ifndef CONFIG_X86_CMPXCHG
++/*
++ * When building a kernel capable of running on an 80386, it may be
++ * necessary to simulate the cmpxchg instruction. For that purpose we define
++ * a function for each of the sizes we support.
++ */
 +
-+static inline paddr_t machine_to_phys(maddr_t machine)
-+{
-+	paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
-+	phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
-+	return phys;
-+}
++extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
++extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
++extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
 +
-+#ifdef CONFIG_X86_PAE
-+static inline paddr_t pte_phys_to_machine(paddr_t phys)
++static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
++				      unsigned long new, int size)
 +{
-+	/*
-+	 * In PAE mode, the NX bit needs to be dealt with in the value
-+	 * passed to pfn_to_mfn(). On x86_64, we need to mask it off,
-+	 * but for i386 the conversion to ulong for the argument will
-+	 * clip it off.
-+	 */
-+	maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
-+	machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK);
-+	return machine;
++	switch (size) {
++	case 1:
++		return cmpxchg_386_u8(ptr, old, new);
++	case 2:
++		return cmpxchg_386_u16(ptr, old, new);
++	case 4:
++		return cmpxchg_386_u32(ptr, old, new);
++	}
++	return old;
 +}
 +
-+static inline paddr_t pte_machine_to_phys(maddr_t machine)
++#define cmpxchg(ptr,o,n)						\
++({									\
++	__typeof__(*(ptr)) __ret;					\
++	if (likely(boot_cpu_data.x86 > 3))				\
++		__ret = __cmpxchg((ptr), (unsigned long)(o),		\
++					(unsigned long)(n), sizeof(*(ptr))); \
++	else								\
++		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
++					(unsigned long)(n), sizeof(*(ptr))); \
++	__ret;								\
++})
++#endif
++
++#ifdef CONFIG_X86_CMPXCHG64
++
++static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
++				      unsigned long long new)
 +{
-+	/*
-+	 * In PAE mode, the NX bit needs to be dealt with in the value
-+	 * passed to mfn_to_pfn(). On x86_64, we need to mask it off,
-+	 * but for i386 the conversion to ulong for the argument will
-+	 * clip it off.
-+	 */
-+	paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
-+	phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK);
-+	return phys;
++	unsigned long long prev;
++	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
++			     : "=A"(prev)
++			     : "b"((unsigned long)new),
++			       "c"((unsigned long)(new >> 32)),
++			       "m"(*__xg(ptr)),
++			       "0"(old)
++			     : "memory");
++	return prev;
 +}
-+#endif
 +
-+#ifdef CONFIG_X86_PAE
-+static inline pte_t pfn_pte_ma(unsigned long page_nr, pgprot_t pgprot)
-+{
-+	pte_t pte;
++#define cmpxchg64(ptr,o,n)\
++	((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
++					(unsigned long long)(n)))
 +
-+	pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \
-+					(pgprot_val(pgprot) >> 32);
-+	pte.pte_high &= (__supported_pte_mask >> 32);
-+	pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \
-+							__supported_pte_mask;
-+	return pte;
-+}
-+#else
-+#define pfn_pte_ma(pfn, prot)	__pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 +#endif
++    
++/*
++ * Force strict CPU ordering.
++ * And yes, this is required on UP too when we're talking
++ * to devices.
++ *
++ * For now, "wmb()" doesn't actually do anything, as all
++ * Intel CPUs follow what Intel calls a *Processor Order*,
++ * in which all writes are seen in program order even
++ * outside the CPU.
++ *
++ * I expect future Intel CPUs to have a weaker ordering,
++ * but I'd also expect them to finally get their act together
++ * and add some real memory barriers if so.
++ *
++ * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
++ * no-op for these.
++ */
++ 
 +
-+#define __pte_ma(x)	((pte_t) { (x) } )
++/*
++ * Actually only lfence would be needed for mb() because all stores done
++ * by the kernel should already be ordered. But keep a full barrier for now.
++ */
 +
-+#else /* !CONFIG_XEN */
++#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
++#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
 +
-+#define pfn_to_mfn(pfn) (pfn)
-+#define mfn_to_pfn(mfn) (mfn)
-+#define mfn_to_local_pfn(mfn) (mfn)
-+#define set_phys_to_machine(pfn, mfn) ((void)0)
-+#define phys_to_machine_mapping_valid(pfn) (1)
-+#define phys_to_machine(phys) ((maddr_t)(phys))
-+#define machine_to_phys(mach) ((paddr_t)(mach))
-+#define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot)
-+#define __pte_ma(x) __pte(x)
++/**
++ * read_barrier_depends - Flush all pending reads that subsequent reads
++ * depend on.
++ *
++ * No data-dependent reads from memory-like regions are ever reordered
++ * over this barrier.  All reads preceding this primitive are guaranteed
++ * to access memory (but not necessarily other CPUs' caches) before any
++ * reads following this primitive that depend on the data returned by
++ * any of the preceding reads.  This primitive is much lighter weight than
++ * rmb() on most CPUs, and is never heavier weight than is
++ * rmb().
++ *
++ * These ordering constraints are respected by both the local CPU
++ * and the compiler.
++ *
++ * Ordering is not guaranteed by anything other than these primitives,
++ * not even by data dependencies.  See the documentation for
++ * memory_barrier() for examples and URLs to more information.
++ *
++ * For example, the following code would force ordering (the initial
++ * value of "a" is zero, "b" is one, and "p" is "&a"):
++ *
++ * <programlisting>
++ *	CPU 0				CPU 1
++ *
++ *	b = 2;
++ *	memory_barrier();
++ *	p = &b;				q = p;
++ *					read_barrier_depends();
++ *					d = *q;
++ * </programlisting>
++ *
++ * because the read of "*q" depends on the read of "p" and these
++ * two reads are separated by a read_barrier_depends().  However,
++ * the following code, with the same initial values for "a" and "b":
++ *
++ * <programlisting>
++ *	CPU 0				CPU 1
++ *
++ *	a = 2;
++ *	memory_barrier();
++ *	b = 3;				y = b;
++ *					read_barrier_depends();
++ *					x = a;
++ * </programlisting>
++ *
++ * does not enforce ordering, since there is no data dependency between
++ * the read of "a" and the read of "b".  Therefore, on some CPUs, such
++ * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
++ * in cases like this where there are no data dependencies.
++ **/
 +
-+#endif /* !CONFIG_XEN */
++#define read_barrier_depends()	do { } while(0)
 +
-+/* VIRT <-> MACHINE conversion */
-+#define virt_to_machine(v)	(phys_to_machine(__pa(v)))
-+#define virt_to_mfn(v)		(pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
-+#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
++#ifdef CONFIG_X86_OOSTORE
++/* Actually there are no OOO-store-capable CPUs for now that do SSE,
++   but make it a possibility already. */
++#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
++#else
++#define wmb()	__asm__ __volatile__ ("": : :"memory")
++#endif
 +
-+#endif /* _I386_MADDR_H */
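
The conversion helpers at the end of the maddr.h removed above simply
compose the p2m/m2p lookups with the usual __pa()/__va() arithmetic. A
sketch of typical use, assuming a directly mapped kernel allocation:

/* Sketch: derive machine addresses for one of our own kernel pages. */
void *buf = (void *)__get_free_page(GFP_KERNEL);
unsigned long mfn = virt_to_mfn(buf);	/* pfn_to_mfn(__pa(buf) >> PAGE_SHIFT) */
maddr_t ma = virt_to_machine(buf);	/* the same, as a full byte address */
BUG_ON(mfn_to_virt(mfn) != buf);	/* round-trips only for our own pages */

For foreign or I/O frames the round-trip deliberately fails, which is what
mfn_to_local_pfn() above relies on to force !pfn_valid().
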
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/mmu_context.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/mmu_context.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/mmu_context.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/mmu_context.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,108 @@
-+#ifndef __I386_SCHED_H
-+#define __I386_SCHED_H
++#ifdef CONFIG_SMP
++#define smp_mb()	mb()
++#define smp_rmb()	rmb()
++#define smp_wmb()	wmb()
++#define smp_read_barrier_depends()	read_barrier_depends()
++#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
++#else
++#define smp_mb()	barrier()
++#define smp_rmb()	barrier()
++#define smp_wmb()	barrier()
++#define smp_read_barrier_depends()	do { } while(0)
++#define set_mb(var, value) do { var = value; barrier(); } while (0)
++#endif
 +
-+#include <asm/desc.h>
-+#include <asm/atomic.h>
-+#include <asm/pgalloc.h>
-+#include <asm/tlbflush.h>
++#include <linux/irqflags.h>
 +
 +/*
-+ * Used for LDT copy/destruction.
++ * disable hlt during certain critical i/o operations
 + */
-+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-+void destroy_context(struct mm_struct *mm);
-+
-+
-+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-+{
-+#if 0 /* XEN: no lazy tlb */
-+	unsigned cpu = smp_processor_id();
-+	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
-+		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
-+#endif
-+}
++#define HAVE_DISABLE_HLT
++void disable_hlt(void);
++void enable_hlt(void);
 +
-+#define prepare_arch_switch(next)	__prepare_arch_switch()
++extern int es7000_plat;
++void cpu_idle_wait(void);
 +
-+static inline void __prepare_arch_switch(void)
++/*
++ * On SMP systems, when the scheduler does migration-cost autodetection,
++ * it needs a way to flush as much of the CPU's caches as possible:
++ */
++static inline void sched_cacheflush(void)
 +{
-+	/*
-+	 * Save away %fs and %gs. No need to save %es and %ds, as those
-+	 * are always kernel segments while inside the kernel. Must
-+	 * happen before reload of cr3/ldt (i.e., not in __switch_to).
-+	 */
-+	asm volatile ( "mov %%fs,%0 ; mov %%gs,%1"
-+		: "=m" (current->thread.fs),
-+		  "=m" (current->thread.gs));
-+	asm volatile ( "movl %0,%%fs ; movl %0,%%gs"
-+		: : "r" (0) );
++	wbinvd();
 +}
 +
-+extern void mm_pin(struct mm_struct *mm);
-+extern void mm_unpin(struct mm_struct *mm);
-+void mm_pin_all(void);
-+
-+static inline void switch_mm(struct mm_struct *prev,
-+			     struct mm_struct *next,
-+			     struct task_struct *tsk)
-+{
-+	int cpu = smp_processor_id();
-+	struct mmuext_op _op[2], *op = _op;
++extern unsigned long arch_align_stack(unsigned long sp);
++extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 +
-+	if (likely(prev != next)) {
-+		BUG_ON(!xen_feature(XENFEAT_writable_page_tables) &&
-+		       !test_bit(PG_pinned, &virt_to_page(next->pgd)->flags));
++void default_idle(void);
 +
-+		/* stop flush ipis for the previous mm */
-+		cpu_clear(cpu, prev->cpu_vm_mask);
-+#if 0 /* XEN: no lazy tlb */
-+		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
-+		per_cpu(cpu_tlbstate, cpu).active_mm = next;
 +#endif
-+		cpu_set(cpu, next->cpu_vm_mask);
-+
-+		/* Re-load page tables: load_cr3(next->pgd) */
-+		op->cmd = MMUEXT_NEW_BASEPTR;
-+		op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
-+		op++;
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/tlbflush.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/tlbflush.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,101 @@
++#ifndef _I386_TLBFLUSH_H
++#define _I386_TLBFLUSH_H
 +
-+		/*
-+		 * load the LDT, if the LDT is different:
-+		 */
-+		if (unlikely(prev->context.ldt != next->context.ldt)) {
-+			/* load_LDT_nolock(&next->context, cpu) */
-+			op->cmd = MMUEXT_SET_LDT;
-+			op->arg1.linear_addr = (unsigned long)next->context.ldt;
-+			op->arg2.nr_ents     = next->context.size;
-+			op++;
-+		}
++#include <linux/mm.h>
++#include <asm/processor.h>
 +
-+		BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
-+	}
-+#if 0 /* XEN: no lazy tlb */
-+	else {
-+		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
-+		BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
++#define __flush_tlb() xen_tlb_flush()
++#define __flush_tlb_global() xen_tlb_flush()
++#define __flush_tlb_all() xen_tlb_flush()
 +
-+		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
-+			/* We were in lazy tlb mode and leave_mm disabled 
-+			 * tlb flush IPI delivery. We must reload %cr3.
-+			 */
-+			load_cr3(next->pgd);
-+			load_LDT_nolock(&next->context, cpu);
-+		}
-+	}
-+#endif
-+}
++extern unsigned long pgkern_mask;
 +
-+#define deactivate_mm(tsk, mm) \
-+	asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
++#define cpu_has_invlpg	(boot_cpu_data.x86 > 3)
 +
-+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
-+{
-+	if (!test_bit(PG_pinned, &virt_to_page(next->pgd)->flags))
-+		mm_pin(next);
-+	switch_mm(prev, next, NULL);
-+}
++#define __flush_tlb_single(addr) xen_invlpg(addr)
 +
-+#endif
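
switch_mm() in the mmu_context.h removed above batches the cr3 reload and
the LDT switch into a single mmuext hypercall instead of touching the
registers directly. A sketch of the batching idiom (the second op is
chosen only for illustration):

/* Sketch: queue several MMU extended ops, then issue one hypercall. */
struct mmuext_op ops[2], *op = ops;

op->cmd = MMUEXT_NEW_BASEPTR;
op->arg1.mfn = pfn_to_mfn(__pa(pgd) >> PAGE_SHIFT);	/* 'pgd' assumed */
op++;
op->cmd = MMUEXT_TLB_FLUSH_LOCAL;	/* illustrative second op */
op++;
BUG_ON(HYPERVISOR_mmuext_op(ops, op - ops, NULL, DOMID_SELF));
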
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/mmu.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/mmu.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/mmu.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/mmu.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,29 @@
-+#ifndef __i386_MMU_H
-+#define __i386_MMU_H
++#define __flush_tlb_one(addr) __flush_tlb_single(addr)
 +
-+#include <asm/semaphore.h>
 +/*
-+ * The i386 doesn't have a mmu context, but
-+ * we put the segment information here.
++ * TLB flushing:
 + *
-+ * cpu_vm_mask is used to optimize ldt flushing.
++ *  - flush_tlb() flushes the current mm struct TLBs
++ *  - flush_tlb_all() flushes all processes TLBs
++ *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
++ *  - flush_tlb_page(vma, vmaddr) flushes one page
++ *  - flush_tlb_range(vma, start, end) flushes a range of pages
++ *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
++ *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
++ *
++ * ..but the i386 has somewhat limited tlb flushing capabilities,
++ * and page-granular flushes are available only on i486 and up.
 + */
-+typedef struct { 
-+	int size;
-+	struct semaphore sem;
-+	void *ldt;
-+	void *vdso;
-+#ifdef CONFIG_XEN
-+	int has_foreign_mappings;
-+#endif
-+} mm_context_t;
 +
-+/* mm/memory.c:exit_mmap hook */
-+extern void _arch_exit_mmap(struct mm_struct *mm);
-+#define arch_exit_mmap(_mm) _arch_exit_mmap(_mm)
++#ifndef CONFIG_SMP
 +
-+/* kernel/fork.c:dup_mmap hook */
-+extern void _arch_dup_mmap(struct mm_struct *mm);
-+#define arch_dup_mmap(mm, oldmm) ((void)(oldmm), _arch_dup_mmap(mm))
++#define flush_tlb() __flush_tlb()
++#define flush_tlb_all() __flush_tlb_all()
++#define local_flush_tlb() __flush_tlb()
 +
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/page.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/page.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/page.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/page.h	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,235 @@
-+#ifndef _I386_PAGE_H
-+#define _I386_PAGE_H
++static inline void flush_tlb_mm(struct mm_struct *mm)
++{
++	if (mm == current->active_mm)
++		__flush_tlb();
++}
 +
-+/* PAGE_SHIFT determines the page size */
-+#define PAGE_SHIFT	12
-+#define PAGE_SIZE	(1UL << PAGE_SHIFT)
-+#define PAGE_MASK	(~(PAGE_SIZE-1))
++static inline void flush_tlb_page(struct vm_area_struct *vma,
++	unsigned long addr)
++{
++	if (vma->vm_mm == current->active_mm)
++		__flush_tlb_one(addr);
++}
++
++static inline void flush_tlb_range(struct vm_area_struct *vma,
++	unsigned long start, unsigned long end)
++{
++	if (vma->vm_mm == current->active_mm)
++		__flush_tlb();
++}
 +
-+#ifdef CONFIG_X86_PAE
-+#define __PHYSICAL_MASK_SHIFT	40
-+#define __PHYSICAL_MASK		((1ULL << __PHYSICAL_MASK_SHIFT) - 1)
-+#define PHYSICAL_PAGE_MASK	(~((1ULL << PAGE_SHIFT) - 1) & __PHYSICAL_MASK)
 +#else
-+#define __PHYSICAL_MASK_SHIFT	32
-+#define __PHYSICAL_MASK		(~0UL)
-+#define PHYSICAL_PAGE_MASK	(PAGE_MASK & __PHYSICAL_MASK)
-+#endif
 +
-+#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
-+#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
++#include <asm/smp.h>
 +
-+#ifdef __KERNEL__
++#define local_flush_tlb() \
++	__flush_tlb()
 +
-+/*
-+ * Need to repeat this here in order to not include pgtable.h (which in turn
-+ * depends on definitions made here), but to be able to use the symbolic
-+ * below. The preprocessor will warn if the two definitions aren't identical.
-+ */
-+#define _PAGE_PRESENT	0x001
++#define flush_tlb_all xen_tlb_flush_all
++#define flush_tlb_current_task() xen_tlb_flush_mask(&current->mm->cpu_vm_mask)
++#define flush_tlb_mm(mm) xen_tlb_flush_mask(&(mm)->cpu_vm_mask)
++#define flush_tlb_page(vma, va) xen_invlpg_mask(&(vma)->vm_mm->cpu_vm_mask, va)
 +
-+#ifndef __ASSEMBLY__
++#define flush_tlb()	flush_tlb_current_task()
 +
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <asm/bug.h>
-+#include <xen/interface/xen.h>
-+#include <xen/features.h>
++static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
++{
++	flush_tlb_mm(vma->vm_mm);
++}
 +
-+#define arch_free_page(_page,_order)		\
-+({	int foreign = PageForeign(_page);	\
-+	if (foreign)				\
-+		PageForeignDestructor(_page);	\
-+	foreign;				\
-+})
-+#define HAVE_ARCH_FREE_PAGE
++#define TLBSTATE_OK	1
++#define TLBSTATE_LAZY	2
 +
-+#ifdef CONFIG_X86_USE_3DNOW
++struct tlb_state
++{
++	struct mm_struct *active_mm;
++	int state;
++	char __cacheline_padding[L1_CACHE_BYTES-8];
++};
++DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
 +
-+#include <asm/mmx.h>
 +
-+#define clear_page(page)	mmx_clear_page((void *)(page))
-+#define copy_page(to,from)	mmx_copy_page(to,from)
++#endif
 +
-+#else
++#define flush_tlb_kernel_range(start, end) flush_tlb_all()
 +
-+#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
-+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
++static inline void flush_tlb_pgtables(struct mm_struct *mm,
++				      unsigned long start, unsigned long end)
++{
++	/* i386 does not keep any page table caches in TLB */
++}
 +
++#endif /* _I386_TLBFLUSH_H */
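
On SMP the flush macros above replace IPI-based shootdown with hypervisor
calls over cpu_vm_mask; call sites look the same either way. A sketch, with
the PTE manipulation schematic and the names assumed from the caller:

/* Sketch: invalidate one stale user mapping after a PTE update. */
set_pte(ptep, pte);			/* 'ptep' and 'pte' assumed */
flush_tlb_page(vma, address);		/* xen_invlpg_mask() on SMP */
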
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/vga.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/vga.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,20 @@
 +/*
-+ *	On older X86 processors it's not a win to use MMX here it seems.
-+ *	Maybe the K6-III ?
++ *	Access to VGA videoram
++ *
++ *	(c) 1998 Martin Mares <mj at ucw.cz>
 + */
-+ 
-+#define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
-+#define copy_page(to,from)	memcpy((void *)(to), (void *)(from), PAGE_SIZE)
-+
-+#endif
 +
-+#define clear_user_page(page, vaddr, pg)	clear_page(page)
-+#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
++#ifndef _LINUX_ASM_VGA_H_
++#define _LINUX_ASM_VGA_H_
 +
 +/*
-+ * These are used to make use of C type-checking..
++ *	On the PC, we can just recalculate addresses and then
++ *	access the videoram directly without any black magic.
 + */
-+extern int nx_enabled;
-+#ifdef CONFIG_X86_PAE
-+extern unsigned long long __supported_pte_mask;
-+typedef struct { unsigned long pte_low, pte_high; } pte_t;
-+typedef struct { unsigned long long pmd; } pmd_t;
-+typedef struct { unsigned long long pgd; } pgd_t;
-+typedef struct { unsigned long long pgprot; } pgprot_t;
-+#define pgprot_val(x)	((x).pgprot)
-+#include <asm/maddr.h>
-+#define __pte(x) ({ unsigned long long _x = (x);        \
-+    if (_x & _PAGE_PRESENT) _x = pte_phys_to_machine(_x);   \
-+    ((pte_t) {(unsigned long)(_x), (unsigned long)(_x>>32)}); })
-+#define __pgd(x) ({ unsigned long long _x = (x); \
-+    (pgd_t) {((_x) & _PAGE_PRESENT) ? pte_phys_to_machine(_x) : (_x)}; })
-+#define __pmd(x) ({ unsigned long long _x = (x); \
-+    (pmd_t) {((_x) & _PAGE_PRESENT) ? pte_phys_to_machine(_x) : (_x)}; })
-+static inline unsigned long long __pte_val(pte_t x)
-+{
-+	return ((unsigned long long)x.pte_high << 32) | x.pte_low;
-+}
-+static inline unsigned long long pte_val(pte_t x)
-+{
-+	unsigned long long ret = __pte_val(x);
-+	if (x.pte_low & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
-+	return ret;
-+}
-+#define __pmd_val(x) ((x).pmd)
-+static inline unsigned long long pmd_val(pmd_t x)
-+{
-+	unsigned long long ret = __pmd_val(x);
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+	if (ret) ret = pte_machine_to_phys(ret) | _PAGE_PRESENT;
-+#else
-+	if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
-+#endif
-+	return ret;
-+}
-+#define __pud_val(x) __pgd_val((x).pgd)
-+#define __pgd_val(x) ((x).pgd)
-+static inline unsigned long long pgd_val(pgd_t x)
-+{
-+	unsigned long long ret = __pgd_val(x);
-+	if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
-+	return ret;
-+}
-+#define HPAGE_SHIFT	21
-+#else
-+typedef struct { unsigned long pte_low; } pte_t;
-+typedef struct { unsigned long pgd; } pgd_t;
-+typedef struct { unsigned long pgprot; } pgprot_t;
-+#define pgprot_val(x)	((x).pgprot)
-+#include <asm/maddr.h>
-+#define boot_pte_t pte_t /* or would you rather have a typedef */
-+#define __pte_val(x) ((x).pte_low)
-+#define pte_val(x) (__pte_val(x) & _PAGE_PRESENT ? \
-+                    machine_to_phys(__pte_val(x)) : \
-+                    __pte_val(x))
-+#define __pte(x) ({ unsigned long _x = (x); \
-+    (pte_t) {((_x) & _PAGE_PRESENT) ? phys_to_machine(_x) : (_x)}; })
-+#define __pmd_val(x) __pud_val((x).pud)
-+#define __pud_val(x) __pgd_val((x).pgd)
-+#define __pgd(x) ({ unsigned long _x = (x); \
-+    (pgd_t) {((_x) & _PAGE_PRESENT) ? phys_to_machine(_x) : (_x)}; })
-+#define __pgd_val(x) ((x).pgd)
-+static inline unsigned long pgd_val(pgd_t x)
-+{
-+	unsigned long ret = __pgd_val(x);
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+	if (ret) ret = machine_to_phys(ret) | _PAGE_PRESENT;
-+#else
-+	if (ret & _PAGE_PRESENT) ret = machine_to_phys(ret);
-+#endif
-+	return ret;
-+}
-+#define HPAGE_SHIFT	22
-+#endif
-+#define PTE_MASK	PHYSICAL_PAGE_MASK
 +
-+#ifdef CONFIG_HUGETLB_PAGE
-+#define HPAGE_SIZE	((1UL) << HPAGE_SHIFT)
-+#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
-+#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
-+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-+#endif
++#define VGA_MAP_MEM(x,s) (unsigned long)isa_bus_to_virt(x)
 +
-+#define __pgprot(x)	((pgprot_t) { (x) } )
++#define vga_readb(x) (*(x))
++#define vga_writeb(x,y) (*(y) = (x))
 +
-+#endif /* !__ASSEMBLY__ */
++#endif
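
Since VGA_MAP_MEM() above resolves through isa_bus_to_virt(), the two
accessors reduce to plain dereferences. A sketch:

/* Sketch: put one character into colour text-mode video memory. */
char *vram = (char *)VGA_MAP_MEM(0xb8000, 0x8000);
vga_writeb('X', vram);			/* expands to *vram = 'X' */
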
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/asm/xenoprof.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/asm/xenoprof.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,48 @@
++/******************************************************************************
++ * asm-i386/mach-xen/asm/xenoprof.h
++ *
++ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
++ *                    VA Linux Systems Japan K.K.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ *
++ */
++#ifndef __ASM_XENOPROF_H__
++#define __ASM_XENOPROF_H__
++#ifdef CONFIG_XEN
 +
-+/* to align the pointer to the (next) page boundary */
-+#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
++struct super_block;
++struct dentry;
++int xenoprof_create_files(struct super_block * sb, struct dentry * root);
++#define HAVE_XENOPROF_CREATE_FILES
++
++struct xenoprof_init;
++void xenoprof_arch_init_counter(struct xenoprof_init *init);
++void xenoprof_arch_counter(void);
++void xenoprof_arch_start(void);
++void xenoprof_arch_stop(void);
 +
++struct xenoprof_arch_shared_buffer {
++	/* nothing */
++};
++struct xenoprof_shared_buffer;
++void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer* sbuf);
++struct xenoprof_get_buffer;
++int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer* get_buffer, struct xenoprof_shared_buffer* sbuf);
++struct xenoprof_passive;
++int xenoprof_arch_set_passive(struct xenoprof_passive* pdomain, struct xenoprof_shared_buffer* sbuf);
++
++#endif /* CONFIG_XEN */
++#endif /* __ASM_XENOPROF_H__ */
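
The xenoprof header above only fixes the arch-level hooks; the common
driver that calls them is not part of this hunk. The presumed calling
order, as a sketch with all argument setup elided and assumed:

/* Sketch of the presumed hook sequence; not code from this patch. */
xenoprof_arch_map_shared_buffer(&get_buffer, &sbuf);
xenoprof_arch_init_counter(&init);
xenoprof_arch_start();
/* ... samples accumulate in the shared buffer ... */
xenoprof_arch_stop();
xenoprof_arch_unmap_shared_buffer(&sbuf);
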
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/irq_vectors.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/irq_vectors.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,131 @@
 +/*
-+ * This handles the memory map.. We could make this a config
-+ * option, but too many people screw it up, and too few need
-+ * it.
++ * This file should contain #defines for all of the interrupt vector
++ * numbers used by this architecture.
 + *
-+ * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
-+ * a virtual address space of one gigabyte, which limits the
-+ * amount of physical memory you can use to about 950MB. 
++ * In addition, there are some standard defines:
 + *
-+ * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
-+ * and CONFIG_HIGHMEM64G options in the kernel configuration.
-+ */
-+
-+#ifndef __ASSEMBLY__
-+
-+struct vm_area_struct;
++ *	FIRST_EXTERNAL_VECTOR:
++ *		The first free place for external interrupts
++ *
++ *	SYSCALL_VECTOR:
++ *		The vector under which a syscall makes the user-to-kernel
++ *		transition.
++ *
++ *	TIMER_IRQ:
++ *		The IRQ number the timer interrupt comes in at.
++ *
++ *	NR_IRQS:
++ *		The total number of interrupt vectors (including all the
++ *		architecture specific interrupts) needed.
++ *
++ */			
++#ifndef _ASM_IRQ_VECTORS_H
++#define _ASM_IRQ_VECTORS_H
 +
 +/*
-+ * This much address space is reserved for vmalloc() and iomap()
-+ * as well as fixmap mappings.
++ * IDT vectors usable for external interrupt sources start
++ * at 0x20:
 + */
-+extern unsigned int __VMALLOC_RESERVE;
-+
-+extern int sysctl_legacy_va_layout;
++#define FIRST_EXTERNAL_VECTOR	0x20
 +
-+extern int page_is_ram(unsigned long pagenr);
++#define SYSCALL_VECTOR		0x80
 +
-+#endif /* __ASSEMBLY__ */
++/*
++ * Vectors 0x20-0x2f are used for ISA interrupts.
++ */
 +
-+#ifdef __ASSEMBLY__
-+#define __PAGE_OFFSET		CONFIG_PAGE_OFFSET
-+#define __PHYSICAL_START	CONFIG_PHYSICAL_START
-+#else
-+#define __PAGE_OFFSET		((unsigned long)CONFIG_PAGE_OFFSET)
-+#define __PHYSICAL_START	((unsigned long)CONFIG_PHYSICAL_START)
-+#endif
-+#define __KERNEL_START		(__PAGE_OFFSET + __PHYSICAL_START)
++#if 0
++/*
++ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
++ *
++ *  some of the following vectors are 'rare'; they are merged
++ *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
++ *  TLB, reschedule and local APIC vectors are performance-critical.
++ *
++ *  Vectors 0xf0-0xfa are free (reserved for future Linux use).
++ */
++#define SPURIOUS_APIC_VECTOR	0xff
++#define ERROR_APIC_VECTOR	0xfe
++#define INVALIDATE_TLB_VECTOR	0xfd
++#define RESCHEDULE_VECTOR	0xfc
++#define CALL_FUNCTION_VECTOR	0xfb
 +
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+#undef LOAD_OFFSET
-+#define LOAD_OFFSET		0
++#define THERMAL_APIC_VECTOR	0xf0
++/*
++ * Local APIC timer IRQ vector is on a different priority level,
++ * to work around the 'lost local interrupt if more than 2 IRQ
++ * sources per level' errata.
++ */
++#define LOCAL_TIMER_VECTOR	0xef
 +#endif
 +
-+#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)
-+#define VMALLOC_RESERVE		((unsigned long)__VMALLOC_RESERVE)
-+#define MAXMEM			(__FIXADDR_TOP-__PAGE_OFFSET-__VMALLOC_RESERVE)
-+#define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
-+#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
-+#define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
-+#ifdef CONFIG_FLATMEM
-+#define pfn_valid(pfn)		((pfn) < max_mapnr)
-+#endif /* CONFIG_FLATMEM */
-+#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-+
-+#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-+
-+#define VM_DATA_DEFAULT_FLAGS \
-+	(VM_READ | VM_WRITE | \
-+	((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
-+		 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-+
-+#include <asm-generic/memory_model.h>
-+#include <asm-generic/page.h>
++#define SPURIOUS_APIC_VECTOR	0xff
++#define ERROR_APIC_VECTOR	0xfe
 +
-+#define __HAVE_ARCH_GATE_AREA 1
-+#endif /* __KERNEL__ */
++/*
++ * First APIC vector available to drivers: (vectors 0x30-0xee)
++ * we start at 0x31 to spread out vectors evenly between priority
++ * levels. (0x80 is the syscall vector)
++ */
++#define FIRST_DEVICE_VECTOR	0x31
++#define FIRST_SYSTEM_VECTOR	0xef
 +
-+#endif /* _I386_PAGE_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/param.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/param.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/param.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/param.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,23 @@
-+#ifndef _ASMi386_PARAM_H
-+#define _ASMi386_PARAM_H
++/*
++ * 16 8259A IRQ's, 208 potential APIC interrupt sources.
++ * Right now the APIC is mostly only used for SMP.
++ * 256 vectors is an architectural limit. (we can have
++ * more than 256 devices theoretically, but they will
++ * have to use shared interrupts)
++ * Since vectors 0x00-0x1f are used/reserved for the CPU,
++ * the usable vector space is 0x20-0xff (224 vectors)
++ */
 +
-+#ifdef __KERNEL__
-+# define HZ		CONFIG_HZ	/* Internal kernel timer frequency */
-+# define USER_HZ	100		/* .. some user interfaces are in "ticks" */
-+# define CLOCKS_PER_SEC		(USER_HZ)	/* like times() */
-+#endif
++#define RESCHEDULE_VECTOR	0
++#define CALL_FUNCTION_VECTOR	1
++#define NR_IPIS			2
 +
-+#ifndef HZ
-+#define HZ 100
-+#endif
++/*
++ * The maximum number of vectors supported by i386 processors
++ * is limited to 256. For processors other than i386, NR_VECTORS
++ * should be changed accordingly.
++ */
++#define NR_VECTORS 256
 +
-+#define EXEC_PAGESIZE	4096
++#define FPU_IRQ			13
 +
-+#ifndef NOGROUP
-+#define NOGROUP		(-1)
-+#endif
++#define	FIRST_VM86_IRQ		3
++#define LAST_VM86_IRQ		15
++#define invalid_vm86_irq(irq)	((irq) < 3 || (irq) > 15)
 +
-+#define MAXHOSTNAMELEN	64	/* max length of hostname */
-+#define COMMAND_LINE_SIZE 256
++/*
++ * The flat IRQ space is divided into two regions:
++ *  1. A one-to-one mapping of real physical IRQs. This space is only used
++ *     if we have physical device-access privilege. This region is at the 
++ *     start of the IRQ space so that existing device drivers do not need
++ *     to be modified to translate physical IRQ numbers into our IRQ space.
++ *  2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
++ *     are bound using the provided bind/unbind functions.
++ */
 +
++#define PIRQ_BASE		0
++#if !defined(MAX_IO_APICS)
++# define NR_PIRQS		(NR_VECTORS + 32 * NR_CPUS)
++#elif NR_CPUS < MAX_IO_APICS
++# define NR_PIRQS		(NR_VECTORS + 32 * NR_CPUS)
++#else
++# define NR_PIRQS		(NR_VECTORS + 32 * MAX_IO_APICS)
 +#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/pci.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/pci.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/pci.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/pci.h	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,148 @@
-+#ifndef __i386_PCI_H
-+#define __i386_PCI_H
 +
++#define DYNIRQ_BASE		(PIRQ_BASE + NR_PIRQS)
++#define NR_DYNIRQS		256
 +
-+#ifdef __KERNEL__
-+#include <linux/mm.h>		/* for struct page */
-+
-+/* Can be used to override the logic in pci_scan_bus for skipping
-+   already-configured bus numbers - to be used for buggy BIOSes
-+   or architectures with incomplete PCI setup by the loader */
++#define NR_IRQS			(NR_PIRQS + NR_DYNIRQS)
++#define NR_IRQ_VECTORS		NR_IRQS
 +
-+#ifdef CONFIG_PCI
-+extern unsigned int pcibios_assign_all_busses(void);
-+#else
-+#define pcibios_assign_all_busses()	0
-+#endif
++#define pirq_to_irq(_x)		((_x) + PIRQ_BASE)
++#define irq_to_pirq(_x)		((_x) - PIRQ_BASE)
 +
-+#include <asm/hypervisor.h>
-+#define pcibios_scan_all_fns(a, b)	(!is_initial_xendomain())
++#define dynirq_to_irq(_x)	((_x) + DYNIRQ_BASE)
++#define irq_to_dynirq(_x)	((_x) - DYNIRQ_BASE)
 +
-+extern unsigned long pci_mem_start;
-+#define PCIBIOS_MIN_IO		0x1000
-+#define PCIBIOS_MIN_MEM		(pci_mem_start)
++#endif /* _ASM_IRQ_VECTORS_H */
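
With PIRQ_BASE at 0 and DYNIRQ_BASE immediately after the physical region,
the mapping macros above are pure offset arithmetic. A sketch:

/* Sketch: physical IRQs keep their numbers; dynamic (event-channel)
 * IRQs live NR_PIRQS slots further up. */
int pirq = pirq_to_irq(14);	/* 14 + PIRQ_BASE == 14 */
int dirq = dynirq_to_irq(5);	/* 5 + DYNIRQ_BASE == NR_PIRQS + 5 */
int back = irq_to_dynirq(dirq);	/* == 5 again */
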
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/mach_traps.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/mach_traps.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,33 @@
++/*
++ *  include/asm-xen/asm-i386/mach-xen/mach_traps.h
++ *
++ *  Machine specific NMI handling for Xen
++ */
++#ifndef _MACH_TRAPS_H
++#define _MACH_TRAPS_H
 +
-+#define PCIBIOS_MIN_CARDBUS_IO	0x4000
++#include <linux/bitops.h>
++#include <xen/interface/nmi.h>
 +
-+void pcibios_config_init(void);
-+struct pci_bus * pcibios_scan_root(int bus);
++static inline void clear_mem_error(unsigned char reason) {}
++static inline void clear_io_check_error(unsigned char reason) {}
 +
-+void pcibios_set_master(struct pci_dev *dev);
-+void pcibios_penalize_isa_irq(int irq, int active);
-+struct irq_routing_table *pcibios_get_irq_routing_table(void);
-+int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
++static inline unsigned char get_nmi_reason(void)
++{
++	shared_info_t *s = HYPERVISOR_shared_info;
++	unsigned char reason = 0;
 +
-+/* Dynamic DMA mapping stuff.
-+ * i386 has everything mapped statically.
-+ */
++	/* construct a value which looks like it came from
++	 * port 0x61.
++	 */
++	if (test_bit(_XEN_NMIREASON_io_error, &s->arch.nmi_reason))
++		reason |= 0x40;
++	if (test_bit(_XEN_NMIREASON_parity_error, &s->arch.nmi_reason))
++		reason |= 0x80;
 +
-+#include <linux/types.h>
-+#include <linux/slab.h>
-+#include <asm/scatterlist.h>
-+#include <linux/string.h>
-+#include <asm/io.h>
++        return reason;
++}
 +
-+struct pci_dev;
++static inline void reassert_nmi(void) {}
 +
-+#ifdef CONFIG_SWIOTLB
++#endif /* !_MACH_TRAPS_H */
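
get_nmi_reason() above synthesizes the byte a native kernel would read from I/O port 0x61: bit 6 (0x40) for an I/O check error and bit 7 (0x80) for a memory parity error, taken from flags in the Xen shared info page. A standalone sketch of that bit construction, with the _XEN_NMIREASON_* bit positions assumed for illustration:

/* Standalone sketch of get_nmi_reason()'s bit construction; the bit
 * positions are placeholders, the 0x40/0x80 outputs mirror port 0x61. */
#include <stdio.h>

#define _XEN_NMIREASON_io_error     0	/* assumed bit position */
#define _XEN_NMIREASON_parity_error 1	/* assumed bit position */

static unsigned char fake_nmi_reason(unsigned long nmi_reason)
{
	unsigned char reason = 0;

	if (nmi_reason & (1UL << _XEN_NMIREASON_io_error))
		reason |= 0x40;	/* I/O check error, port 0x61 bit 6 */
	if (nmi_reason & (1UL << _XEN_NMIREASON_parity_error))
		reason |= 0x80;	/* memory parity error, port 0x61 bit 7 */
	return reason;
}

int main(void)
{
	printf("io_error only -> 0x%02x\n",
	       fake_nmi_reason(1UL << _XEN_NMIREASON_io_error));
	printf("both errors   -> 0x%02x\n", fake_nmi_reason(0x3UL));
	return 0;
}
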
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/mach-xen/setup_arch.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-i386/mach-xen/setup_arch.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,5 @@
++/* Hook to call BIOS initialisation function */
 +
++#define ARCH_SETUP machine_specific_arch_setup();
 +
-+/* On Xen we use SWIOTLB instead of blk-specific bounce buffers. */
-+#define PCI_DMA_BUS_IS_PHYS	(0)
++void __init machine_specific_arch_setup(void);
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/page.h
+--- a/include/asm-i386/page.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-i386/page.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -122,7 +122,7 @@
+ 
+ #define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)
+ #define VMALLOC_RESERVE		((unsigned long)__VMALLOC_RESERVE)
+-#define MAXMEM			(-__PAGE_OFFSET-__VMALLOC_RESERVE)
++#define MAXMEM			(__FIXADDR_TOP-__PAGE_OFFSET-__VMALLOC_RESERVE)
+ #define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
+ #define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
+ #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
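
The page.h hunk stops hard-wiring the lowmem ceiling to the top of the 32-bit address space: MAXMEM now follows __FIXADDR_TOP, so it shrinks automatically when Xen moves the fixmap down to make room for the hypervisor. A sketch of the arithmetic, with assumed 3G/1G-split constants:

/* Sketch of the MAXMEM change: the lowmem ceiling now follows
 * __FIXADDR_TOP. All constants below are illustrative assumptions. */
#include <stdio.h>

#define __PAGE_OFFSET	  0xC0000000UL	/* assumed kernel base */
#define __VMALLOC_RESERVE 0x08000000UL	/* assumed 128 MiB vmalloc area */

int main(void)
{
	unsigned long fixaddr_top = 0xFFFFE000UL;  /* assumed native top */
	unsigned long maxmem = fixaddr_top - __PAGE_OFFSET - __VMALLOC_RESERVE;

	printf("MAXMEM = %lu MiB\n", maxmem >> 20);  /* ~895 MiB here */

	/* If Xen lowers the fixmap, MAXMEM shrinks with it: */
	fixaddr_top = 0xF5800000UL;	/* hypothetical lowered top */
	maxmem = fixaddr_top - __PAGE_OFFSET - __VMALLOC_RESERVE;
	printf("MAXMEM = %lu MiB with a lowered fixmap\n", maxmem >> 20);
	return 0;
}
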
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/pgtable-2level-defs.h
+--- a/include/asm-i386/pgtable-2level-defs.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-i386/pgtable-2level-defs.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -1,5 +1,7 @@
+ #ifndef _I386_PGTABLE_2LEVEL_DEFS_H
+ #define _I386_PGTABLE_2LEVEL_DEFS_H
 +
-+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
-+	dma_addr_t ADDR_NAME;
-+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
-+	__u32 LEN_NAME;
-+#define pci_unmap_addr(PTR, ADDR_NAME)			\
-+	((PTR)->ADDR_NAME)
-+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)		\
-+	(((PTR)->ADDR_NAME) = (VAL))
-+#define pci_unmap_len(PTR, LEN_NAME)			\
-+	((PTR)->LEN_NAME)
-+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
-+	(((PTR)->LEN_NAME) = (VAL))
++#define HAVE_SHARED_KERNEL_PMD 0
+ 
+ /*
+  * traditional i386 two-level paging structure:
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-i386/pgtable-3level-defs.h
+--- a/include/asm-i386/pgtable-3level-defs.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-i386/pgtable-3level-defs.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -1,5 +1,7 @@
+ #ifndef _I386_PGTABLE_3LEVEL_DEFS_H
+ #define _I386_PGTABLE_3LEVEL_DEFS_H
 +
++#define HAVE_SHARED_KERNEL_PMD 1
+ 
+ /*
+  * PGDIR_SHIFT determines what a top-level page table entry can map
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/agp.h
+--- a/include/asm-ia64/agp.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-ia64/agp.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -19,13 +19,44 @@
+ #define flush_agp_cache()		mb()
+ 
+ /* Convert a physical address to an address suitable for the GART. */
++#ifndef CONFIG_XEN
+ #define phys_to_gart(x) (x)
+ #define gart_to_phys(x) (x)
++#else
++#define phys_to_gart(x) phys_to_machine_for_dma(x)
++#define gart_to_phys(x) machine_to_phys_for_dma(x)
++#endif
+ 
+ /* GATT allocation. Returns/accepts GATT kernel virtual address. */
++#ifndef CONFIG_XEN
+ #define alloc_gatt_pages(order)		\
+ 	((char *)__get_free_pages(GFP_KERNEL, (order)))
+ #define free_gatt_pages(table, order)	\
+ 	free_pages((unsigned long)(table), (order))
 +#else
++#include <asm/hypervisor.h>
++static inline char*
++alloc_gatt_pages(unsigned int order)
++{
++	unsigned long error;
++	unsigned long ret = __get_free_pages(GFP_KERNEL, (order));
++	if (ret == 0) {
++		goto out;
++	}
++	error = xen_create_contiguous_region(ret, order, 0);
++	if (error) {
++		free_pages(ret, order);
++		ret = 0;
++	}
++out:
++	return (char*)ret;
++}
++static inline void
++free_gatt_pages(void* table, unsigned int order)
++{
++	xen_destroy_contiguous_region((unsigned long)table, order);
++	free_pages((unsigned long)table, order);
++}
++#endif /* CONFIG_XEN */
+ 
+ #endif /* _ASM_IA64_AGP_H */
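
The agp.h change exists because guest-pseudophysical pages need not be machine-contiguous under Xen, while the GART expects a machine-contiguous GATT: the pages are allocated normally, then exchanged for a contiguous machine region, and the allocation is rolled back if the exchange fails. A sketch of that allocate/exchange/rollback flow, with the hypervisor call stubbed out:

/* Sketch of the agp.h allocate-then-make-contiguous pattern; the
 * xen_* call is a stub so the control flow can run standalone. */
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the hypervisor call (assumed to return 0 on success). */
static int xen_create_contiguous_region(unsigned long va, unsigned int order,
					unsigned int addr_bits)
{
	(void)va; (void)order; (void)addr_bits;
	return 0;	/* pretend the frame exchange succeeded */
}

static char *alloc_gatt_pages(unsigned int order)
{
	/* __get_free_pages() stand-in */
	char *p = aligned_alloc(4096, 4096UL << order);

	if (!p)
		return NULL;
	/* Swap the backing frames for machine-contiguous ones. */
	if (xen_create_contiguous_region((unsigned long)p, order, 0)) {
		free(p);	/* roll back on failure */
		return NULL;
	}
	return p;
}

int main(void)
{
	char *gatt = alloc_gatt_pages(2);	/* 4 pages */
	printf("GATT at %p\n", (void *)gatt);
	free(gatt);
	return 0;
}
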
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/gcc_intrin.h
+--- a/include/asm-ia64/gcc_intrin.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-ia64/gcc_intrin.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -26,7 +26,7 @@
+ 
+ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
+ 
+-#define ia64_setreg(regnum, val)						\
++#define __ia64_setreg(regnum, val)						\
+ ({										\
+ 	switch (regnum) {							\
+ 	    case _IA64_REG_PSR_L:						\
+@@ -55,7 +55,7 @@
+ 	}									\
+ })
+ 
+-#define ia64_getreg(regnum)							\
++#define __ia64_getreg(regnum)							\
+ ({										\
+ 	__u64 ia64_intri_res;							\
+ 										\
+@@ -92,7 +92,7 @@
+ 
+ #define ia64_hint_pause 0
+ 
+-#define ia64_hint(mode)						\
++#define __ia64_hint(mode)						\
+ ({								\
+ 	switch (mode) {						\
+ 	case ia64_hint_pause:					\
+@@ -374,7 +374,7 @@
+ 
+ #define ia64_invala() asm volatile ("invala" ::: "memory")
+ 
+-#define ia64_thash(addr)							\
++#define __ia64_thash(addr)							\
+ ({										\
+ 	__u64 ia64_intri_res;							\
+ 	asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
+@@ -394,18 +394,18 @@
+ 
+ #define ia64_nop(x)	asm volatile ("nop %0"::"i"(x));
+ 
+-#define ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
++#define __ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
+ 
+-#define ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
++#define __ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
+ 
+ 
+-#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1"				\
++#define __ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1"			\
+ 					     :: "r"(trnum), "r"(addr) : "memory")
+ 
+-#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1"				\
++#define __ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1"			\
+ 					     :: "r"(trnum), "r"(addr) : "memory")
+ 
+-#define ia64_tpa(addr)								\
++#define __ia64_tpa(addr)							\
+ ({										\
+ 	__u64 ia64_pa;								\
+ 	asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory");	\
+@@ -415,22 +415,22 @@
+ #define __ia64_set_dbr(index, val)						\
+ 	asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+ 
+-#define ia64_set_ibr(index, val)						\
++#define __ia64_set_ibr(index, val)						\
+ 	asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+ 
+-#define ia64_set_pkr(index, val)						\
++#define __ia64_set_pkr(index, val)						\
+ 	asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+ 
+-#define ia64_set_pmc(index, val)						\
++#define __ia64_set_pmc(index, val)						\
+ 	asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")
+ 
+-#define ia64_set_pmd(index, val)						\
++#define __ia64_set_pmd(index, val)						\
+ 	asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")
+ 
+-#define ia64_set_rr(index, val)							\
++#define __ia64_set_rr(index, val)							\
+ 	asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory");
+ 
+-#define ia64_get_cpuid(index)								\
++#define __ia64_get_cpuid(index)								\
+ ({											\
+ 	__u64 ia64_intri_res;								\
+ 	asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index));	\
+@@ -444,21 +444,21 @@
+ 	ia64_intri_res;								\
+ })
+ 
+-#define ia64_get_ibr(index)							\
++#define __ia64_get_ibr(index)							\
+ ({										\
+ 	__u64 ia64_intri_res;							\
+ 	asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
+ 	ia64_intri_res;								\
+ })
+ 
+-#define ia64_get_pkr(index)							\
++#define __ia64_get_pkr(index)							\
+ ({										\
+ 	__u64 ia64_intri_res;							\
+ 	asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
+ 	ia64_intri_res;								\
+ })
+ 
+-#define ia64_get_pmc(index)							\
++#define __ia64_get_pmc(index)							\
+ ({										\
+ 	__u64 ia64_intri_res;							\
+ 	asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
+@@ -466,48 +466,48 @@
+ })
+ 
+ 
+-#define ia64_get_pmd(index)							\
++#define __ia64_get_pmd(index)							\
+ ({										\
+ 	__u64 ia64_intri_res;							\
+ 	asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
+ 	ia64_intri_res;								\
+ })
+ 
+-#define ia64_get_rr(index)							\
++#define __ia64_get_rr(index)							\
+ ({										\
+ 	__u64 ia64_intri_res;							\
+ 	asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index));	\
+ 	ia64_intri_res;								\
+ })
+ 
+-#define ia64_fc(addr)	asm volatile ("fc %0" :: "r"(addr) : "memory")
++#define __ia64_fc(addr)	asm volatile ("fc %0" :: "r"(addr) : "memory")
+ 
+ 
+ #define ia64_sync_i()	asm volatile (";; sync.i" ::: "memory")
+ 
+-#define ia64_ssm(mask)	asm volatile ("ssm %0":: "i"((mask)) : "memory")
+-#define ia64_rsm(mask)	asm volatile ("rsm %0":: "i"((mask)) : "memory")
++#define __ia64_ssm(mask)	asm volatile ("ssm %0":: "i"((mask)) : "memory")
++#define __ia64_rsm(mask)	asm volatile ("rsm %0":: "i"((mask)) : "memory")
+ #define ia64_sum(mask)	asm volatile ("sum %0":: "i"((mask)) : "memory")
+ #define ia64_rum(mask)	asm volatile ("rum %0":: "i"((mask)) : "memory")
+ 
+-#define ia64_ptce(addr)	asm volatile ("ptc.e %0" :: "r"(addr))
++#define __ia64_ptce(addr)	asm volatile ("ptc.e %0" :: "r"(addr))
+ 
+-#define ia64_ptcga(addr, size)							\
++#define __ia64_ptcga(addr, size)							\
+ do {										\
+ 	asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory");	\
+ 	ia64_dv_serialize_data();						\
+ } while (0)
+ 
+-#define ia64_ptcl(addr, size)							\
++#define __ia64_ptcl(addr, size)							\
+ do {										\
+ 	asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory");	\
+ 	ia64_dv_serialize_data();						\
+ } while (0)
+ 
+-#define ia64_ptri(addr, size)						\
++#define __ia64_ptri(addr, size)						\
+ 	asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")
+ 
+-#define ia64_ptrd(addr, size)						\
++#define __ia64_ptrd(addr, size)						\
+ 	asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
+ 
+ /* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
+@@ -589,7 +589,7 @@
+         }								\
+ })
+ 
+-#define ia64_intrin_local_irq_restore(x)			\
++#define __ia64_intrin_local_irq_restore(x)			\
+ do {								\
+ 	asm volatile (";;   cmp.ne p6,p7=%0,r0;;"		\
+ 		      "(p6) ssm psr.i;"				\
+@@ -598,4 +598,6 @@
+ 		      :: "r"((x)) : "p6", "p7", "memory");	\
+ } while (0)
+ 
++#define __ia64_get_psr_i()	(__ia64_getreg(_IA64_REG_PSR) & 0x4000UL)
 +
-+/* The PCI address space does equal the physical memory
-+ * address space.  The networking and block device layers use
-+ * this boolean for bounce buffer decisions.
+ #endif /* _ASM_IA64_GCC_INTRIN_H */
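
The gcc_intrin.h hunk mechanically renames the raw register and TLB intrinsics into a __ia64_* namespace. The point is indirection: the unprefixed names can later be routed to paravirtualized implementations while the double-underscore forms still reach the hardware directly. A sketch of that dispatch pattern, with hypothetical names and a fake register:

/* Sketch of the raw-vs-paravirt indirection the __ia64_* renames
 * enable. Names and the "privileged register" are hypothetical. */
#include <stdio.h>

static unsigned long fake_psr = 0x4000;	/* pretend hardware state */

/* Raw operation, analogous to __ia64_getreg(). */
static unsigned long __my_getreg(void) { return fake_psr; }

/* Paravirtualized operation: would go through the hypervisor instead. */
static unsigned long xen_my_getreg(void) { return fake_psr | 0x1; }

static int running_on_xen = 1;	/* assumed detection flag */

/* The unprefixed name now dispatches; bare-metal code keeps __my_getreg. */
#define my_getreg() (running_on_xen ? xen_my_getreg() : __my_getreg())

int main(void)
{
	printf("getreg -> 0x%lx\n", my_getreg());
	return 0;
}
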
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/gnttab_dma.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-ia64/gnttab_dma.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,51 @@
++/*
++ * Copyright (c) 2007 Herbert Xu <herbert at gondor.apana.org.au>
++ * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
++ *                    VA Linux Systems Japan K.K.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 + */
-+#define PCI_DMA_BUS_IS_PHYS	(1)
-+
-+/* pci_unmap_{page,single} is a nop so... */
-+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
-+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
-+#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
-+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
-+#define pci_unmap_len(PTR, LEN_NAME)		(0)
-+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
-+
-+#endif
-+
-+/* This is always fine. */
-+#define pci_dac_dma_supported(pci_dev, mask)	(1)
 +
-+static inline dma64_addr_t
-+pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
-+{
-+	return ((dma64_addr_t) page_to_phys(page) +
-+		(dma64_addr_t) offset);
-+}
++#ifndef _ASM_IA64_GNTTAB_DMA_H
++#define _ASM_IA64_GNTTAB_DMA_H
 +
-+static inline struct page *
-+pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
++static inline int gnttab_dma_local_pfn(struct page *page)
 +{
-+	return pfn_to_page(dma_addr >> PAGE_SHIFT);
++	return 0;
 +}
 +
-+static inline unsigned long
-+pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
++/* caller must get dma address after calling this function */
++static inline void gnttab_dma_use_page(struct page *page)
 +{
-+	return (dma_addr & ~PAGE_MASK);
++	__gnttab_dma_map_page(page);
 +}
 +
-+static inline void
-+pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
++static inline dma_addr_t gnttab_dma_map_page(struct page *page)
 +{
++	gnttab_dma_use_page(page);
++	return page_to_bus(page);
 +}
 +
-+static inline void
-+pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
++static inline dma_addr_t gnttab_dma_map_virt(void *ptr)
 +{
-+	flush_write_buffers();
++	return gnttab_dma_map_page(virt_to_page(ptr)) + offset_in_page(ptr);
 +}
 +
-+#define HAVE_PCI_MMAP
-+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-+			       enum pci_mmap_state mmap_state, int write_combine);
-+
-+
-+static inline void pcibios_add_platform_entries(struct pci_dev *dev)
++static inline void gnttab_dma_unmap_page(dma_addr_t dma_address)
 +{
++	__gnttab_dma_unmap_page(virt_to_page(bus_to_virt(dma_address)));
 +}
 +
-+#ifdef CONFIG_PCI
-+static inline void pci_dma_burst_advice(struct pci_dev *pdev,
-+					enum pci_dma_burst_strategy *strat,
-+					unsigned long *strategy_parameter)
-+{
-+	*strat = PCI_DMA_BURST_INFINITY;
-+	*strategy_parameter = ~0UL;
-+}
++#endif /* _ASM_IA64_GNTTAB_DMA_H */
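
The gnttab_dma helpers above pair a grant-table reference (__gnttab_dma_map_page) with the bus-address lookup, and the unmap path converts the bus address back to its page before dropping the reference. A standalone sketch of that pairing, with the grant helpers and address conversions stubbed:

/* Sketch of the gnttab DMA map/unmap pairing; the __gnttab_* helpers
 * and the page<->bus conversions are stubs, not the kernel's. */
#include <stdio.h>

struct page { int dma_users; };

static void __gnttab_dma_map_page(struct page *p)   { p->dma_users++; }
static void __gnttab_dma_unmap_page(struct page *p) { p->dma_users--; }

static unsigned long page_to_bus(struct page *p) { return (unsigned long)p; }
static struct page *bus_to_page(unsigned long b) { return (struct page *)b; }

static unsigned long gnttab_dma_map_page(struct page *p)
{
	__gnttab_dma_map_page(p);	/* take the grant reference first */
	return page_to_bus(p);		/* then hand out the bus address */
}

static void gnttab_dma_unmap_page(unsigned long bus)
{
	__gnttab_dma_unmap_page(bus_to_page(bus));
}

int main(void)
{
	struct page pg = { 0 };
	unsigned long bus = gnttab_dma_map_page(&pg);

	printf("mapped, users=%d\n", pg.dma_users);
	gnttab_dma_unmap_page(bus);
	printf("unmapped, users=%d\n", pg.dma_users);
	return 0;
}
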
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/hw_irq.h
+--- a/include/asm-ia64/hw_irq.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-ia64/hw_irq.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -15,7 +15,11 @@
+ #include <asm/ptrace.h>
+ #include <asm/smp.h>
+ 
++#ifndef CONFIG_XEN
+ typedef u8 ia64_vector;
++#else
++typedef u16 ia64_vector;
 +#endif
-+
-+#endif /* __KERNEL__ */
-+
-+#ifdef CONFIG_XEN_PCIDEV_FRONTEND
-+#include <xen/pcifront.h>
-+#endif /* CONFIG_XEN_PCIDEV_FRONTEND */
-+
-+/* implement the pci_ DMA API in terms of the generic device dma_ one */
-+#include <asm-generic/pci-dma-compat.h>
-+
-+/* generic pci stuff */
-+#include <asm-generic/pci.h>
-+
-+#endif /* __i386_PCI_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/pgalloc.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/pgalloc.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/pgalloc.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/pgalloc.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,59 @@
-+#ifndef _I386_PGALLOC_H
-+#define _I386_PGALLOC_H
-+
-+#include <asm/fixmap.h>
-+#include <linux/threads.h>
-+#include <linux/mm.h>		/* for struct page */
-+#include <asm/io.h>		/* for phys_to_virt and page_to_pseudophys */
-+
-+#define pmd_populate_kernel(mm, pmd, pte) \
-+		set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
-+
-+#define pmd_populate(mm, pmd, pte) 					\
-+do {									\
-+	unsigned long pfn = page_to_pfn(pte);				\
-+	if (test_bit(PG_pinned, &virt_to_page((mm)->pgd)->flags)) {	\
-+		if (!PageHighMem(pte))					\
-+			BUG_ON(HYPERVISOR_update_va_mapping(		\
-+			  (unsigned long)__va(pfn << PAGE_SHIFT),	\
-+			  pfn_pte(pfn, PAGE_KERNEL_RO), 0));		\
-+		else if (!test_and_set_bit(PG_pinned, &pte->flags))	\
-+			kmap_flush_unused();				\
-+		set_pmd(pmd,						\
-+		        __pmd(_PAGE_TABLE + ((paddr_t)pfn << PAGE_SHIFT))); \
-+	} else							\
-+		*(pmd) = __pmd(_PAGE_TABLE + ((paddr_t)pfn << PAGE_SHIFT)); \
-+} while (0)
-+
-+/*
-+ * Allocate and free page tables.
+ 
+ /*
+  * 0 special
+@@ -99,6 +103,12 @@
+ 
+ static inline void ia64_resend_irq(unsigned int vector)
+ {
++#ifdef CONFIG_XEN
++	extern int resend_irq_on_evtchn(unsigned int i);
++	if (is_running_on_xen())
++		resend_irq_on_evtchn(vector);
++	else
++#endif /* CONFIG_XEN */
+ 	platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
+ }
+ 
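Two things happen in the hw_irq.h hunk: ia64_vector widens from u8 to u16 so vectors from the enlarged Xen IRQ space still fit, and ia64_resend_irq() retries a pending interrupt through an event channel instead of a self-IPI when running on Xen. A sketch of that runtime dispatch, with both back ends stubbed:

/* Sketch of the ia64_resend_irq() dispatch; both back ends are stubs. */
#include <stdio.h>

typedef unsigned short ia64_vector;	/* widened from u8 for Xen */

static int running_on_xen = 1;	/* assumed detection flag */

static void resend_irq_on_evtchn(unsigned int irq)
{
	printf("resend irq %u via event channel\n", irq);
}

static void platform_send_ipi(unsigned int irq)
{
	printf("resend irq %u via self-IPI\n", irq);
}

static void my_resend_irq(ia64_vector vector)
{
	if (running_on_xen)
		resend_irq_on_evtchn(vector);
	else
		platform_send_ipi(vector);
}

int main(void)
{
	my_resend_irq(300);	/* a vector that no longer fits in a u8 */
	return 0;
}
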
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/hypercall.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-ia64/hypercall.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,458 @@
++/******************************************************************************
++ * hypercall.h
++ * 
++ * Linux-specific hypervisor handling.
++ * 
++ * Copyright (c) 2002-2004, K A Fraser
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
 + */
-+extern pgd_t *pgd_alloc(struct mm_struct *);
-+extern void pgd_free(pgd_t *pgd);
-+
-+extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
-+extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
-+
-+static inline void pte_free_kernel(pte_t *pte)
-+{
-+	free_page((unsigned long)pte);
-+	make_lowmem_page_writable(pte, XENFEAT_writable_page_tables);
-+}
-+
-+extern void pte_free(struct page *pte);
 +
-+#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
++#ifndef __HYPERCALL_H__
++#define __HYPERCALL_H__
 +
-+#ifdef CONFIG_X86_PAE
-+/*
-+ * In the PAE case we free the pmds as part of the pgd.
-+ */
-+#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
-+#define pmd_free(x)			do { } while (0)
-+#define __pmd_free_tlb(tlb,x)		do { } while (0)
-+#define pud_populate(mm, pmd, pte)	BUG()
++#ifndef __HYPERVISOR_H__
++# error "please don't include this file directly"
 +#endif
 +
-+#define check_pgt_cache()	do { } while (0)
-+
-+#endif /* _I386_PGALLOC_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/pgtable-2level-defs.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/pgtable-2level-defs.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/pgtable-2level-defs.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/pgtable-2level-defs.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,20 @@
-+#ifndef _I386_PGTABLE_2LEVEL_DEFS_H
-+#define _I386_PGTABLE_2LEVEL_DEFS_H
-+
-+#define HAVE_SHARED_KERNEL_PMD 0
-+
-+/*
-+ * traditional i386 two-level paging structure:
-+ */
-+
-+#define PGDIR_SHIFT	22
-+#define PTRS_PER_PGD	1024
-+
-+/*
-+ * the i386 is two-level, so we don't really have any
-+ * PMD directory physically.
-+ */
-+
-+#define PTRS_PER_PTE	1024
-+
-+#endif /* _I386_PGTABLE_2LEVEL_DEFS_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/pgtable-2level.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/pgtable-2level.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/pgtable-2level.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/pgtable-2level.h	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,120 @@
-+#ifndef _I386_PGTABLE_2LEVEL_H
-+#define _I386_PGTABLE_2LEVEL_H
-+
-+#include <asm-generic/pgtable-nopmd.h>
-+
-+#define pte_ERROR(e) \
-+	printk("%s:%d: bad pte %08lx (pfn %05lx).\n", __FILE__, __LINE__, \
-+	       __pte_val(e), pte_pfn(e))
-+#define pgd_ERROR(e) \
-+	printk("%s:%d: bad pgd %08lx (pfn %05lx).\n", __FILE__, __LINE__, \
-+	       __pgd_val(e), pgd_val(e) >> PAGE_SHIFT)
++#include <asm/xen/xcom_hcall.h>
++struct xencomm_handle;
++extern unsigned long __hypercall(unsigned long a1, unsigned long a2,
++                                 unsigned long a3, unsigned long a4,
++                                 unsigned long a5, unsigned long cmd);
 +
 +/*
-+ * Certain architectures need to do special things when PTEs
-+ * within a page table are directly modified.  Thus, the following
-+ * hook is made available.
++ * Assembler stubs for hyper-calls.
 + */
-+#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
-+
-+#define set_pte_at(_mm,addr,ptep,pteval) do {				\
-+	if (((_mm) != current->mm && (_mm) != &init_mm) ||		\
-+	    HYPERVISOR_update_va_mapping((addr), (pteval), 0))		\
-+		set_pte((ptep), (pteval));				\
-+} while (0)
 +
-+#define set_pte_at_sync(_mm,addr,ptep,pteval) do {			\
-+	if (((_mm) != current->mm && (_mm) != &init_mm) ||		\
-+	    HYPERVISOR_update_va_mapping((addr), (pteval), UVMF_INVLPG)) { \
-+		set_pte((ptep), (pteval));				\
-+		xen_invlpg((addr));					\
-+	}								\
-+} while (0)
-+
-+#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
-+
-+#define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval))
-+
-+#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
-+#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
-+
-+#define pte_none(x) (!(x).pte_low)
++#define _hypercall0(type, name)					\
++({								\
++	long __res;						\
++	__res=__hypercall(0, 0, 0, 0, 0, __HYPERVISOR_##name);	\
++	(type)__res;						\
++})
 +
-+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-+{
-+	pte_t pte = *ptep;
-+	if (!pte_none(pte)) {
-+		if (mm != &init_mm)
-+			pte = __pte_ma(xchg(&ptep->pte_low, 0));
-+		else
-+			HYPERVISOR_update_va_mapping(addr, __pte(0), 0);
-+	}
-+	return pte;
-+}
++#define _hypercall1(type, name, a1)				\
++({								\
++	long __res;						\
++	__res = __hypercall((unsigned long)a1,			\
++	                     0, 0, 0, 0, __HYPERVISOR_##name);	\
++	(type)__res;						\
++})
 +
-+#define ptep_clear_flush(vma, addr, ptep)			\
++#define _hypercall2(type, name, a1, a2)				\
 +({								\
-+	pte_t *__ptep = (ptep);					\
-+	pte_t __res = *__ptep;					\
-+	if (!pte_none(__res) &&					\
-+	    ((vma)->vm_mm != current->mm ||			\
-+	     HYPERVISOR_update_va_mapping(addr, __pte(0),	\
-+			(unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
-+				UVMF_INVLPG|UVMF_MULTI))) {	\
-+		__ptep->pte_low = 0;				\
-+		flush_tlb_page(vma, addr);			\
-+	}							\
-+	__res;							\
++	long __res;						\
++	__res = __hypercall((unsigned long)a1,			\
++	                    (unsigned long)a2,			\
++	                    0, 0, 0, __HYPERVISOR_##name);	\
++	(type)__res;						\
 +})
 +
-+#define pte_same(a, b)		((a).pte_low == (b).pte_low)
++#define _hypercall3(type, name, a1, a2, a3)			\
++({								\
++	long __res;						\
++	__res = __hypercall((unsigned long)a1,			\
++	                    (unsigned long)a2,			\
++	                    (unsigned long)a3,			\
++	                    0, 0, __HYPERVISOR_##name);		\
++	(type)__res;						\
++})
 +
-+#define __pte_mfn(_pte) ((_pte).pte_low >> PAGE_SHIFT)
-+#define pte_mfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \
-+	__pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte)))
-+#define pte_pfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \
-+	mfn_to_local_pfn(__pte_mfn(_pte)) : __pte_mfn(_pte))
++#define _hypercall4(type, name, a1, a2, a3, a4)			\
++({								\
++	long __res;						\
++	__res = __hypercall((unsigned long)a1,			\
++	                    (unsigned long)a2,			\
++	                    (unsigned long)a3,			\
++	                    (unsigned long)a4,			\
++	                    0, __HYPERVISOR_##name);		\
++	(type)__res;						\
++})
 +
-+#define pte_page(_pte) pfn_to_page(pte_pfn(_pte))
++#define _hypercall5(type, name, a1, a2, a3, a4, a5)		\
++({								\
++	long __res;						\
++	__res = __hypercall((unsigned long)a1,			\
++	                    (unsigned long)a2,			\
++	                    (unsigned long)a3,			\
++	                    (unsigned long)a4,			\
++	                    (unsigned long)a5,			\
++	                    __HYPERVISOR_##name);		\
++	(type)__res;						\
++})
 +
-+#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-+#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 +
-+/*
-+ * All present user pages are user-executable:
-+ */
-+static inline int pte_exec(pte_t pte)
++static inline int
++xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg)
 +{
-+	return pte_user(pte);
++	return _hypercall2(int, sched_op, cmd, arg);
 +}
 +
-+/*
-+ * All present pages are kernel-executable:
-+ */
-+static inline int pte_exec_kernel(pte_t pte)
++static inline long
++HYPERVISOR_set_timer_op(u64 timeout)
 +{
-+	return 1;
++	unsigned long timeout_hi = (unsigned long)(timeout >> 32);
++	unsigned long timeout_lo = (unsigned long)timeout;
++	return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
 +}
 +
-+/*
-+ * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
-+ * into this range:
-+ */
-+#define PTE_FILE_MAX_BITS	29
-+
-+#define pte_to_pgoff(pte) \
-+	((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 ))
-+
-+#define pgoff_to_pte(off) \
-+	((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE })
-+
-+/* Encode and de-code a swap entry */
-+#define __swp_type(x)			(((x).val >> 1) & 0x1f)
-+#define __swp_offset(x)			((x).val >> 8)
-+#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
-+#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_low })
-+#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
-+
-+void vmalloc_sync_all(void);
-+
-+#endif /* _I386_PGTABLE_2LEVEL_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/pgtable-3level-defs.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/pgtable-3level-defs.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/pgtable-3level-defs.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/pgtable-3level-defs.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,24 @@
-+#ifndef _I386_PGTABLE_3LEVEL_DEFS_H
-+#define _I386_PGTABLE_3LEVEL_DEFS_H
-+
-+#define HAVE_SHARED_KERNEL_PMD 0
-+
-+/*
-+ * PGDIR_SHIFT determines what a top-level page table entry can map
-+ */
-+#define PGDIR_SHIFT	30
-+#define PTRS_PER_PGD	4
++static inline int
++xencomm_arch_hypercall_platform_op(struct xencomm_handle *op)
++{
++	return _hypercall1(int, platform_op, op);
++}
 +
-+/*
-+ * PMD_SHIFT determines the size of the area a middle-level
-+ * page table can map
-+ */
-+#define PMD_SHIFT	21
-+#define PTRS_PER_PMD	512
++static inline int
++xencomm_arch_hypercall_sysctl(struct xencomm_handle *op)
++{
++	return _hypercall1(int, sysctl, op);
++}
 +
-+/*
-+ * entries per page directory level
-+ */
-+#define PTRS_PER_PTE	512
++static inline int
++xencomm_arch_hypercall_domctl(struct xencomm_handle *op)
++{
++	return _hypercall1(int, domctl, op);
++}
 +
-+#endif /* _I386_PGTABLE_3LEVEL_DEFS_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/pgtable-3level.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/pgtable-3level.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/pgtable-3level.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/pgtable-3level.h	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,209 @@
-+#ifndef _I386_PGTABLE_3LEVEL_H
-+#define _I386_PGTABLE_3LEVEL_H
++static inline int
++xencomm_arch_hypercall_multicall(struct xencomm_handle *call_list,
++				 int nr_calls)
++{
++	return _hypercall2(int, multicall, call_list, nr_calls);
++}
 +
-+#include <asm-generic/pgtable-nopud.h>
++static inline int
++xencomm_arch_hypercall_memory_op(unsigned int cmd, struct xencomm_handle *arg)
++{
++	return _hypercall2(int, memory_op, cmd, arg);
++}
 +
-+/*
-+ * Intel Physical Address Extension (PAE) Mode - three-level page
-+ * tables on PPro+ CPUs.
-+ *
-+ * Copyright (C) 1999 Ingo Molnar <mingo at redhat.com>
-+ */
++static inline int
++xencomm_arch_hypercall_event_channel_op(int cmd, struct xencomm_handle *arg)
++{
++	return _hypercall2(int, event_channel_op, cmd, arg);
++}
 +
-+#define pte_ERROR(e) \
-+	printk("%s:%d: bad pte %p(%016Lx pfn %08lx).\n", __FILE__, __LINE__, \
-+	       &(e), __pte_val(e), pte_pfn(e))
-+#define pmd_ERROR(e) \
-+	printk("%s:%d: bad pmd %p(%016Lx pfn %08Lx).\n", __FILE__, __LINE__, \
-+	       &(e), __pmd_val(e), (pmd_val(e) & PTE_MASK) >> PAGE_SHIFT)
-+#define pgd_ERROR(e) \
-+	printk("%s:%d: bad pgd %p(%016Lx pfn %08Lx).\n", __FILE__, __LINE__, \
-+	       &(e), __pgd_val(e), (pgd_val(e) & PTE_MASK) >> PAGE_SHIFT)
++static inline int
++xencomm_arch_hypercall_xsm_op(struct xencomm_handle *arg)
++{
++	return _hypercall1(int, xsm_op, arg);
++}
 +
-+#define pud_none(pud)				0
-+#define pud_bad(pud)				0
-+#define pud_present(pud)			1
++static inline int
++xencomm_arch_hypercall_xen_version(int cmd, struct xencomm_handle *arg)
++{
++	return _hypercall2(int, xen_version, cmd, arg);
++}
 +
-+/*
-+ * Is the pte executable?
-+ */
-+static inline int pte_x(pte_t pte)
++static inline int
++xencomm_arch_hypercall_console_io(int cmd, int count,
++                                  struct xencomm_handle *str)
 +{
-+	return !(__pte_val(pte) & _PAGE_NX);
++	return _hypercall3(int, console_io, cmd, count, str);
 +}
 +
-+/*
-+ * All present user-pages with !NX bit are user-executable:
-+ */
-+static inline int pte_exec(pte_t pte)
++static inline int
++xencomm_arch_hypercall_physdev_op(int cmd, struct xencomm_handle *arg)
 +{
-+	return pte_user(pte) && pte_x(pte);
++	return _hypercall2(int, physdev_op, cmd, arg);
 +}
-+/*
-+ * All present pages with !NX bit are kernel-executable:
-+ */
-+static inline int pte_exec_kernel(pte_t pte)
++
++static inline int
++xencomm_arch_hypercall_grant_table_op(unsigned int cmd,
++                                      struct xencomm_handle *uop,
++                                      unsigned int count)
 +{
-+	return pte_x(pte);
++	return _hypercall3(int, grant_table_op, cmd, uop, count);
 +}
 +
-+/* Rules for using set_pte: the pte being assigned *must* be
-+ * either not present or in a state where the hardware will
-+ * not attempt to update the pte.  In places where this is
-+ * not possible, use pte_get_and_clear to obtain the old pte
-+ * value and then use set_pte to update it.  -ben
-+ */
-+#define __HAVE_ARCH_SET_PTE_ATOMIC
++int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
 +
-+static inline void set_pte(pte_t *ptep, pte_t pte)
++extern int xencomm_arch_hypercall_suspend(struct xencomm_handle *arg);
++
++static inline int
++xencomm_arch_hypercall_callback_op(int cmd, struct xencomm_handle *arg)
 +{
-+	ptep->pte_high = pte.pte_high;
-+	smp_wmb();
-+	ptep->pte_low = pte.pte_low;
++	return _hypercall2(int, callback_op, cmd, arg);
 +}
-+#define set_pte_atomic(pteptr,pteval) \
-+		set_64bit((unsigned long long *)(pteptr),__pte_val(pteval))
-+
-+#define set_pte_at(_mm,addr,ptep,pteval) do {				\
-+	if (((_mm) != current->mm && (_mm) != &init_mm) ||		\
-+	    HYPERVISOR_update_va_mapping((addr), (pteval), 0))		\
-+		set_pte((ptep), (pteval));				\
-+} while (0)
 +
-+#define set_pte_at_sync(_mm,addr,ptep,pteval) do {			\
-+	if (((_mm) != current->mm && (_mm) != &init_mm) ||		\
-+	    HYPERVISOR_update_va_mapping((addr), (pteval), UVMF_INVLPG)) { \
-+		set_pte((ptep), (pteval));				\
-+		xen_invlpg((addr));					\
-+	}								\
-+} while (0)
++static inline unsigned long
++xencomm_arch_hypercall_hvm_op(int cmd, void *arg)
++{
++	return _hypercall2(unsigned long, hvm_op, cmd, arg);
++}
 +
-+#define set_pmd(pmdptr,pmdval)				\
-+		xen_l2_entry_update((pmdptr), (pmdval))
-+#define set_pud(pudptr,pudval) \
-+		xen_l3_entry_update((pudptr), (pudval))
++static inline long
++xencomm_arch_hypercall_vcpu_op(int cmd, int cpu, void *arg)
++{
++	return _hypercall3(long, vcpu_op, cmd, cpu, arg);
++}
 +
-+/*
-+ * Pentium-II erratum A13: in PAE mode we explicitly have to flush
-+ * the TLB via cr3 if the top-level pgd is changed...
-+ * We do not let the generic code free and clear pgd entries due to
-+ * this erratum.
-+ */
-+static inline void pud_clear (pud_t * pud) { }
++static inline int
++HYPERVISOR_physdev_op(int cmd, void *arg)
++{
++	switch (cmd) {
++	case PHYSDEVOP_eoi:
++		return _hypercall1(int, ia64_fast_eoi,
++		                   ((struct physdev_eoi *)arg)->irq);
++	default:
++		return xencomm_hypercall_physdev_op(cmd, arg);
++	}
++}
 +
-+#define pud_page(pud) \
-+((struct page *) __va(pud_val(pud) & PAGE_MASK))
++static inline int
++xencomm_arch_hypercall_xenoprof_op(int op, struct xencomm_handle *arg)
++{
++	return _hypercall2(int, xenoprof_op, op, arg);
++}
 +
-+#define pud_page_kernel(pud) \
-+((unsigned long) __va(pud_val(pud) & PAGE_MASK))
++static inline long
++xencomm_arch_hypercall_opt_feature(struct xencomm_handle *arg)
++{
++	return _hypercall1(long, opt_feature, arg);
++}
 +
++extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs);
++static inline void exit_idle(void) {}
++#define do_IRQ(irq, regs) ({			\
++	irq_enter();				\
++	__do_IRQ((irq), (regs));		\
++	irq_exit();				\
++})
 +
-+/* Find an entry in the second-level page table.. */
-+#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
-+			pmd_index(address))
++#include <linux/err.h>
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
 +
-+static inline int pte_none(pte_t pte)
++static inline unsigned long
++__HYPERVISOR_ioremap(unsigned long ioaddr, unsigned long size)
 +{
-+	return !(pte.pte_low | pte.pte_high);
++	return _hypercall3(unsigned long, ia64_dom0vp_op,
++	                   IA64_DOM0VP_ioremap, ioaddr, size);
 +}
 +
-+/*
-+ * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
-+ * entry, so clear the bottom half first and enforce ordering with a compiler
-+ * barrier.
-+ */
-+static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++static inline unsigned long
++HYPERVISOR_ioremap(unsigned long ioaddr, unsigned long size)
 +{
-+	if ((mm != current->mm && mm != &init_mm)
-+	    || HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) {
-+		ptep->pte_low = 0;
-+		smp_wmb();
-+		ptep->pte_high = 0;
++	unsigned long ret = ioaddr;
++	if (is_running_on_xen()) {
++		ret = __HYPERVISOR_ioremap(ioaddr, size);
++		if (unlikely(ret == -ENOSYS))
++			panic("hypercall %s failed with %ld. "
++			      "Please check Xen and Linux config mismatch\n",
++			      __func__, -ret);
++		else if (unlikely(IS_ERR_VALUE(ret)))
++			ret = ioaddr;
 +	}
++	return ret;
 +}
 +
-+#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
++static inline unsigned long
++__HYPERVISOR_phystomach(unsigned long gpfn)
++{
++	return _hypercall2(unsigned long, ia64_dom0vp_op,
++	                   IA64_DOM0VP_phystomach, gpfn);
++}
 +
-+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++static inline unsigned long
++HYPERVISOR_phystomach(unsigned long gpfn)
 +{
-+	pte_t pte = *ptep;
-+	if (!pte_none(pte)) {
-+		if (mm != &init_mm) {
-+			uint64_t val = __pte_val(pte);
-+			if (__cmpxchg64(ptep, val, 0) != val) {
-+				/* xchg acts as a barrier before the setting of the high bits */
-+				pte.pte_low = xchg(&ptep->pte_low, 0);
-+				pte.pte_high = ptep->pte_high;
-+				ptep->pte_high = 0;
-+			}
-+		} else
-+			HYPERVISOR_update_va_mapping(addr, __pte(0), 0);
++	unsigned long ret = gpfn;
++	if (is_running_on_xen()) {
++		ret = __HYPERVISOR_phystomach(gpfn);
 +	}
-+	return pte;
++	return ret;
 +}
 +
-+#define ptep_clear_flush(vma, addr, ptep)			\
-+({								\
-+	pte_t *__ptep = (ptep);					\
-+	pte_t __res = *__ptep;					\
-+	if (!pte_none(__res) &&					\
-+	    ((vma)->vm_mm != current->mm ||			\
-+	     HYPERVISOR_update_va_mapping(addr,	__pte(0),	\
-+			(unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
-+				UVMF_INVLPG|UVMF_MULTI))) {	\
-+		__ptep->pte_low = 0;				\
-+		smp_wmb();					\
-+		__ptep->pte_high = 0;				\
-+		flush_tlb_page(vma, addr);			\
-+	}							\
-+	__res;							\
-+})
-+
-+static inline int pte_same(pte_t a, pte_t b)
++static inline unsigned long
++__HYPERVISOR_machtophys(unsigned long mfn)
 +{
-+	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
++	return _hypercall2(unsigned long, ia64_dom0vp_op,
++	                   IA64_DOM0VP_machtophys, mfn);
 +}
 +
-+#define pte_page(x)	pfn_to_page(pte_pfn(x))
++static inline unsigned long
++HYPERVISOR_machtophys(unsigned long mfn)
++{
++	unsigned long ret = mfn;
++	if (is_running_on_xen()) {
++		ret = __HYPERVISOR_machtophys(mfn);
++	}
++	return ret;
++}
 +
-+#define __pte_mfn(_pte) (((_pte).pte_low >> PAGE_SHIFT) | \
-+			 ((_pte).pte_high << (32-PAGE_SHIFT)))
-+#define pte_mfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \
-+	__pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte)))
-+#define pte_pfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \
-+	mfn_to_local_pfn(__pte_mfn(_pte)) : __pte_mfn(_pte))
++static inline unsigned long
++__HYPERVISOR_zap_physmap(unsigned long gpfn, unsigned int extent_order)
++{
++	return _hypercall3(unsigned long, ia64_dom0vp_op,
++	                   IA64_DOM0VP_zap_physmap, gpfn, extent_order);
++}
 +
-+extern unsigned long long __supported_pte_mask;
++static inline unsigned long
++HYPERVISOR_zap_physmap(unsigned long gpfn, unsigned int extent_order)
++{
++	unsigned long ret = 0;
++	if (is_running_on_xen()) {
++		ret = __HYPERVISOR_zap_physmap(gpfn, extent_order);
++	}
++	return ret;
++}
 +
-+static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
++static inline unsigned long
++__HYPERVISOR_add_physmap(unsigned long gpfn, unsigned long mfn,
++			 unsigned long flags, domid_t domid)
 +{
-+	return __pte((((unsigned long long)page_nr << PAGE_SHIFT) |
-+			pgprot_val(pgprot)) & __supported_pte_mask);
++	return _hypercall5(unsigned long, ia64_dom0vp_op,
++	                   IA64_DOM0VP_add_physmap, gpfn, mfn, flags, domid);
 +}
 +
-+static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
++static inline unsigned long
++HYPERVISOR_add_physmap(unsigned long gpfn, unsigned long mfn,
++		       unsigned long flags, domid_t domid)
 +{
-+	return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) |
-+			pgprot_val(pgprot)) & __supported_pte_mask);
++	unsigned long ret = 0;
++	BUG_ON(!is_running_on_xen()); /* XXX */
++	if (is_running_on_xen()) {
++		ret = __HYPERVISOR_add_physmap(gpfn, mfn, flags, domid);
++	}
++	return ret;
 +}
 +
-+/*
-+ * Bits 0, 6 and 7 are taken in the low part of the pte,
-+ * put the 32 bits of offset into the high part.
-+ */
-+#define pte_to_pgoff(pte) ((pte).pte_high)
-+#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
-+#define PTE_FILE_MAX_BITS       32
++static inline unsigned long
++__HYPERVISOR_add_physmap_with_gmfn(unsigned long gpfn, unsigned long gmfn,
++                                   unsigned long flags, domid_t domid)
++{
++	return _hypercall5(unsigned long, ia64_dom0vp_op,
++	                   IA64_DOM0VP_add_physmap_with_gmfn,
++	                   gpfn, gmfn, flags, domid);
++}
 +
-+/* Encode and de-code a swap entry */
-+#define __swp_type(x)			(((x).val) & 0x1f)
-+#define __swp_offset(x)			((x).val >> 5)
-+#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
-+#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
-+#define __swp_entry_to_pte(x)		((pte_t){ 0, (x).val })
++static inline unsigned long
++HYPERVISOR_add_physmap_with_gmfn(unsigned long gpfn, unsigned long gmfn,
++				 unsigned long flags, domid_t domid)
++{
++	unsigned long ret = 0;
++	BUG_ON(!is_running_on_xen()); /* XXX */
++	if (is_running_on_xen()) {
++		ret = __HYPERVISOR_add_physmap_with_gmfn(gpfn, gmfn,
++		                                         flags, domid);
++	}
++	return ret;
++}
 +
-+#define __pmd_free_tlb(tlb, x)		do { } while (0)
++#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
++static inline unsigned long
++HYPERVISOR_expose_p2m(unsigned long conv_start_gpfn,
++                      unsigned long assign_start_gpfn,
++                      unsigned long expose_size, unsigned long granule_pfn)
++{
++	return _hypercall5(unsigned long, ia64_dom0vp_op,
++	                   IA64_DOM0VP_expose_p2m, conv_start_gpfn,
++	                   assign_start_gpfn, expose_size, granule_pfn);
++}
 +
-+void vmalloc_sync_all(void);
++static inline int
++xencomm_arch_expose_foreign_p2m(unsigned long gpfn,
++				domid_t domid, struct xencomm_handle *arg,
++				unsigned long flags)
++{
++	return _hypercall5(int, ia64_dom0vp_op,
++			   IA64_DOM0VP_expose_foreign_p2m,
++			   gpfn, domid, arg, flags);
++}
 +
-+#endif /* _I386_PGTABLE_3LEVEL_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/pgtable.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/pgtable.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/pgtable.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/pgtable.h	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,531 @@
-+#ifndef _I386_PGTABLE_H
-+#define _I386_PGTABLE_H
++static inline int
++HYPERVISOR_unexpose_foreign_p2m(unsigned long gpfn, domid_t domid)
++{
++	return _hypercall3(int, ia64_dom0vp_op,
++			   IA64_DOM0VP_unexpose_foreign_p2m, gpfn, domid);
++}
++#endif
 +
-+#include <asm/hypervisor.h>
++static inline int
++xencomm_arch_hypercall_perfmon_op(unsigned long cmd,
++                                  struct xencomm_handle *arg,
++                                  unsigned long count)
++{
++	return _hypercall4(int, ia64_dom0vp_op,
++			   IA64_DOM0VP_perfmon, cmd, arg, count);
++}
 +
-+/*
-+ * The Linux memory management assumes a three-level page table setup. On
-+ * the i386, we use that, but "fold" the mid level into the top-level page
-+ * table, so that we physically have the same two-level page table as the
-+ * i386 mmu expects.
-+ *
-+ * This file contains the functions and defines necessary to modify and use
-+ * the i386 page table tree.
-+ */
-+#ifndef __ASSEMBLY__
-+#include <asm/processor.h>
-+#include <asm/fixmap.h>
-+#include <linux/threads.h>
++static inline int
++xencomm_arch_hypercall_fpswa_revision(struct xencomm_handle *arg)
++{
++	return _hypercall2(int, ia64_dom0vp_op,
++			   IA64_DOM0VP_fpswa_revision, arg);
++}
 +
-+#ifndef _I386_BITOPS_H
-+#include <asm/bitops.h>
-+#endif
++static inline int
++xencomm_arch_hypercall_ia64_debug_op(unsigned long cmd,
++				     unsigned long domain,
++				     struct xencomm_handle *arg)
++{
++	return _hypercall3(int, ia64_debug_op, cmd, domain, arg);
++}
 +
-+#include <linux/slab.h>
-+#include <linux/list.h>
-+#include <linux/spinlock.h>
++static inline int
++HYPERVISOR_add_io_space(unsigned long phys_base,
++			unsigned long sparse,
++			unsigned long space_number)
++{
++	return _hypercall4(int, ia64_dom0vp_op, IA64_DOM0VP_add_io_space,
++			   phys_base, sparse, space_number);
++}
 +
-+/* Is this pagetable pinned? */
-+#define PG_pinned	PG_arch_1
++static inline int
++xencomm_arch_hypercall_kexec_op(int cmd, struct xencomm_handle *arg)
++{
++	return _hypercall2(int, kexec_op, cmd, arg);
++}
 +
-+struct mm_struct;
-+struct vm_area_struct;
++/* for balloon driver */
++#define HYPERVISOR_update_va_mapping(va, new_val, flags) (0)
 +
-+/*
-+ * ZERO_PAGE is a global shared page that is always zero: used
-+ * for zero-mapped memory areas etc..
-+ */
-+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-+extern unsigned long empty_zero_page[1024];
-+extern pgd_t *swapper_pg_dir;
-+extern kmem_cache_t *pgd_cache;
-+extern kmem_cache_t *pmd_cache;
-+extern spinlock_t pgd_lock;
-+extern struct page *pgd_list;
++/* Use xencomm to do hypercalls.  */
++#define HYPERVISOR_sched_op xencomm_hypercall_sched_op
++#define HYPERVISOR_event_channel_op xencomm_hypercall_event_channel_op
++#define HYPERVISOR_callback_op xencomm_hypercall_callback_op
++#define HYPERVISOR_multicall xencomm_hypercall_multicall
++#define HYPERVISOR_xen_version xencomm_hypercall_xen_version
++#define HYPERVISOR_console_io xencomm_hypercall_console_io
++#define HYPERVISOR_hvm_op xencomm_hypercall_hvm_op
++#define HYPERVISOR_memory_op xencomm_hypercall_memory_op
++#define HYPERVISOR_xenoprof_op xencomm_hypercall_xenoprof_op
++#define HYPERVISOR_perfmon_op xencomm_hypercall_perfmon_op
++#define HYPERVISOR_fpswa_revision xencomm_hypercall_fpswa_revision
++#define HYPERVISOR_suspend xencomm_hypercall_suspend
++#define HYPERVISOR_vcpu_op xencomm_hypercall_vcpu_op
++#define HYPERVISOR_opt_feature xencomm_hypercall_opt_feature
++#define HYPERVISOR_kexec_op xencomm_hypercall_kexec_op
 +
-+void pmd_ctor(void *, kmem_cache_t *, unsigned long);
-+void pgd_ctor(void *, kmem_cache_t *, unsigned long);
-+void pgd_dtor(void *, kmem_cache_t *, unsigned long);
-+void pgtable_cache_init(void);
-+void paging_init(void);
++/* to compile gnttab_copy_grant_page() in drivers/xen/core/gnttab.c */
++#define HYPERVISOR_mmu_update(req, count, success_count, domid) ({BUG();0;})
 +
-+/*
-+ * The Linux x86 paging architecture is 'compile-time dual-mode', it
-+ * implements both the traditional 2-level x86 page tables and the
-+ * newer 3-level PAE-mode page tables.
++#endif /* __HYPERCALL_H__ */
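
Every _hypercallN() macro above funnels into the single five-argument __hypercall() trampoline, zero-padding the unused argument slots and passing the hypercall number in the last slot. A standalone sketch of the funneling; the trampoline here just logs instead of trapping into Xen, and the hypercall number is an assumption:

/* Sketch of the ia64 _hypercallN funneling; __hypercall() is a stub
 * that logs its arguments instead of trapping into the hypervisor. */
#include <stdio.h>

#define __HYPERVISOR_sched_op 29	/* assumed hypercall number */

static unsigned long __hypercall(unsigned long a1, unsigned long a2,
				 unsigned long a3, unsigned long a4,
				 unsigned long a5, unsigned long cmd)
{
	printf("hypercall %lu(%lu, %lu, %lu, %lu, %lu)\n",
	       cmd, a1, a2, a3, a4, a5);
	return 0;
}

/* Unused slots are zero-padded; the command rides in the last slot. */
#define _hypercall2(type, name, a1, a2)				\
({								\
	long __res = __hypercall((unsigned long)(a1),		\
				 (unsigned long)(a2),		\
				 0, 0, 0,			\
				 __HYPERVISOR_##name);		\
	(type)__res;						\
})

int main(void)
{
	int rc = _hypercall2(int, sched_op, 1, 0);
	return rc;
}
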
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/hypervisor.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-ia64/hypervisor.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,233 @@
++/******************************************************************************
++ * hypervisor.h
++ * 
++ * Linux-specific hypervisor handling.
++ * 
++ * Copyright (c) 2002-2004, K A Fraser
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
 + */
-+#ifdef CONFIG_X86_PAE
-+# include <asm/pgtable-3level-defs.h>
-+# define PMD_SIZE	(1UL << PMD_SHIFT)
-+# define PMD_MASK	(~(PMD_SIZE-1))
-+#else
-+# include <asm/pgtable-2level-defs.h>
-+#endif
-+
-+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
-+#define PGDIR_MASK	(~(PGDIR_SIZE-1))
 +
-+#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
-+#define FIRST_USER_ADDRESS	0
++#ifndef __HYPERVISOR_H__
++#define __HYPERVISOR_H__
 +
-+#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
-+#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
++#ifdef CONFIG_XEN
++/* running_on_xen is set before executing any C code by early_xen_setup */
++extern const int running_on_xen;
++#define is_running_on_xen()			(running_on_xen)
++#else /* CONFIG_XEN */
++# ifdef CONFIG_VMX_GUEST
++#  define is_running_on_xen()			(1)
++# else /* CONFIG_VMX_GUEST */
++#  define is_running_on_xen()			(0)
++#  define HYPERVISOR_ioremap(offset, size)	(offset)
++# endif /* CONFIG_VMX_GUEST */
++#endif /* CONFIG_XEN */
 +
-+#define TWOLEVEL_PGDIR_SHIFT	22
-+#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
-+#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
++#if defined(CONFIG_XEN) || defined(CONFIG_VMX_GUEST)
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/version.h>
++#include <linux/errno.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/platform.h>
++#include <xen/interface/event_channel.h>
++#include <xen/interface/physdev.h>
++#include <xen/interface/sched.h>
++#include <xen/hypercall.h>
++#include <asm/ptrace.h>
++#include <asm/page.h>
 +
-+/* Just any arbitrary offset to the start of the vmalloc VM area: the
-+ * current 8MB value just means that there will be a 8MB "hole" after the
-+ * physical memory until the kernel virtual memory starts.  That means that
-+ * any out-of-bounds memory accesses will hopefully be caught.
-+ * The vmalloc() routines leaves a hole of 4kB between each vmalloced
-+ * area for the same reason. ;)
-+ */
-+#define VMALLOC_OFFSET	(8*1024*1024)
-+#define VMALLOC_START	(((unsigned long) high_memory + vmalloc_earlyreserve + \
-+			2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
-+#ifdef CONFIG_HIGHMEM
-+# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
-+#else
-+# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
-+#endif
++extern shared_info_t *HYPERVISOR_shared_info;
++extern start_info_t *xen_start_info;
 +
-+/*
-+ * _PAGE_PSE set in the page directory entry just means that
-+ * the page directory entry points directly to a 4MB-aligned block of
-+ * memory. 
-+ */
-+#define _PAGE_BIT_PRESENT	0
-+#define _PAGE_BIT_RW		1
-+#define _PAGE_BIT_USER		2
-+#define _PAGE_BIT_PWT		3
-+#define _PAGE_BIT_PCD		4
-+#define _PAGE_BIT_ACCESSED	5
-+#define _PAGE_BIT_DIRTY		6
-+#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page, Pentium+, if present.. */
-+#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
-+#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
-+#define _PAGE_BIT_UNUSED2	10
-+#define _PAGE_BIT_UNUSED3	11
-+#define _PAGE_BIT_NX		63
++void force_evtchn_callback(void);
 +
-+#define _PAGE_PRESENT	0x001
-+#define _PAGE_RW	0x002
-+#define _PAGE_USER	0x004
-+#define _PAGE_PWT	0x008
-+#define _PAGE_PCD	0x010
-+#define _PAGE_ACCESSED	0x020
-+#define _PAGE_DIRTY	0x040
-+#define _PAGE_PSE	0x080	/* 4 MB (or 2MB) page, Pentium+, if present.. */
-+#define _PAGE_GLOBAL	0x100	/* Global TLB entry PPro+ */
-+#define _PAGE_UNUSED1	0x200	/* available for programmer */
-+#define _PAGE_UNUSED2	0x400
-+#define _PAGE_UNUSED3	0x800
++/* Turn jiffies into Xen system time. XXX Implement me. */
++#define jiffies_to_st(j)	0
 +
-+/* If _PAGE_PRESENT is clear, we use these: */
-+#define _PAGE_FILE	0x040	/* nonlinear file mapping, saved PTE; unset:swap */
-+#define _PAGE_PROTNONE	0x080	/* if the user mapped it with PROT_NONE;
-+				   pte_present gives true */
-+#ifdef CONFIG_X86_PAE
-+#define _PAGE_NX	(1ULL<<_PAGE_BIT_NX)
-+#else
-+#define _PAGE_NX	0
-+#endif
++static inline int
++HYPERVISOR_yield(
++	void)
++{
++	int rc = HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
 +
-+#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-+#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-+#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
++	return rc;
++}
 +
-+#define PAGE_NONE \
-+	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-+#define PAGE_SHARED \
-+	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
++static inline int
++HYPERVISOR_block(
++	void)
++{
++	int rc = HYPERVISOR_sched_op(SCHEDOP_block, NULL);
 +
-+#define PAGE_SHARED_EXEC \
-+	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-+#define PAGE_COPY_NOEXEC \
-+	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-+#define PAGE_COPY_EXEC \
-+	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-+#define PAGE_COPY \
-+	PAGE_COPY_NOEXEC
-+#define PAGE_READONLY \
-+	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-+#define PAGE_READONLY_EXEC \
-+	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++	return rc;
++}
 +
-+#define _PAGE_KERNEL \
-+	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
-+#define _PAGE_KERNEL_EXEC \
-+	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
++static inline int
++HYPERVISOR_shutdown(
++	unsigned int reason)
++{
++	struct sched_shutdown sched_shutdown = {
++		.reason = reason
++	};
 +
-+extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
-+#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
-+#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD)
-+#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
-+#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)
++	int rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
 +
-+#define PAGE_KERNEL		__pgprot(__PAGE_KERNEL)
-+#define PAGE_KERNEL_RO		__pgprot(__PAGE_KERNEL_RO)
-+#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)
-+#define PAGE_KERNEL_NOCACHE	__pgprot(__PAGE_KERNEL_NOCACHE)
-+#define PAGE_KERNEL_LARGE	__pgprot(__PAGE_KERNEL_LARGE)
-+#define PAGE_KERNEL_LARGE_EXEC	__pgprot(__PAGE_KERNEL_LARGE_EXEC)
++	return rc;
++}
 +
-+/*
-+ * The i386 can't do page protection for execute, and considers that
-+ * the same are read. Also, write permissions imply read permissions.
-+ * This is the closest we can get..
-+ */
-+#define __P000	PAGE_NONE
-+#define __P001	PAGE_READONLY
-+#define __P010	PAGE_COPY
-+#define __P011	PAGE_COPY
-+#define __P100	PAGE_READONLY_EXEC
-+#define __P101	PAGE_READONLY_EXEC
-+#define __P110	PAGE_COPY_EXEC
-+#define __P111	PAGE_COPY_EXEC
++static inline int
++HYPERVISOR_poll(
++	evtchn_port_t *ports, unsigned int nr_ports, u64 timeout)
++{
++	struct sched_poll sched_poll = {
++		.nr_ports = nr_ports,
++		.timeout = jiffies_to_st(timeout)
++	};
 +
-+#define __S000	PAGE_NONE
-+#define __S001	PAGE_READONLY
-+#define __S010	PAGE_SHARED
-+#define __S011	PAGE_SHARED
-+#define __S100	PAGE_READONLY_EXEC
-+#define __S101	PAGE_READONLY_EXEC
-+#define __S110	PAGE_SHARED_EXEC
-+#define __S111	PAGE_SHARED_EXEC
++	int rc;
 +
-+/*
-+ * Define this if things work differently on an i386 and an i486:
-+ * it will (on an i486) warn about kernel memory accesses that are
-+ * done without a 'access_ok(VERIFY_WRITE,..)'
-+ */
-+#undef TEST_ACCESS_OK
++	set_xen_guest_handle(sched_poll.ports, ports);
++	rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll);
 +
-+/* The boot page tables (all created as a single array) */
-+extern unsigned long pg0[];
++	return rc;
++}
 +
-+#define pte_present(x)	((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
++#ifndef CONFIG_VMX_GUEST
++/* for drivers/xen/privcmd/privcmd.c */
++#define machine_to_phys_mapping 0
++struct vm_area_struct;
++int direct_remap_pfn_range(struct vm_area_struct *vma,
++			   unsigned long address,
++			   unsigned long mfn,
++			   unsigned long size,
++			   pgprot_t prot,
++			   domid_t  domid);
++struct file;
++int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
++int privcmd_mmap(struct file * file, struct vm_area_struct * vma);
++#define HAVE_ARCH_PRIVCMD_MMAP
 +
-+/* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
-+#define pmd_none(x)	(!(unsigned long)__pmd_val(x))
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+/* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t.
-+   can temporarily clear it. */
-+#define pmd_present(x)	(__pmd_val(x))
-+#define pmd_bad(x)	((__pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
++/* for drivers/xen/balloon/balloon.c */
++#ifdef CONFIG_XEN_SCRUB_PAGES
++#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
 +#else
-+#define pmd_present(x)	(__pmd_val(x) & _PAGE_PRESENT)
-+#define pmd_bad(x)	((__pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
++#define scrub_pages(_p,_n) ((void)0)
 +#endif
++#define	pte_mfn(_x)	pte_pfn(_x)
++#define phys_to_machine_mapping_valid(_x)	(1)
 +
++void xen_contiguous_bitmap_init(unsigned long end_pfn);
++int __xen_create_contiguous_region(unsigned long vstart, unsigned int order, unsigned int address_bits);
++static inline int
++xen_create_contiguous_region(unsigned long vstart,
++                             unsigned int order, unsigned int address_bits)
++{
++	int ret = 0;
++	if (is_running_on_xen()) {
++		ret = __xen_create_contiguous_region(vstart, order,
++		                                     address_bits);
++	}
++	return ret;
++}
 +
-+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-+
-+/*
-+ * The following only work if pte_present() is true.
-+ * Undefined behaviour if not..
-+ */
-+static inline int pte_user(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
-+static inline int pte_read(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
-+static inline int pte_dirty(pte_t pte)		{ return (pte).pte_low & _PAGE_DIRTY; }
-+static inline int pte_young(pte_t pte)		{ return (pte).pte_low & _PAGE_ACCESSED; }
-+static inline int pte_write(pte_t pte)		{ return (pte).pte_low & _PAGE_RW; }
-+static inline int pte_huge(pte_t pte)		{ return (pte).pte_low & _PAGE_PSE; }
++void __xen_destroy_contiguous_region(unsigned long vstart, unsigned int order);
++static inline void
++xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
++{
++	if (is_running_on_xen())
++		__xen_destroy_contiguous_region(vstart, order);
++}
 +
-+/*
-+ * The following only works if pte_present() is not true.
-+ */
-+static inline int pte_file(pte_t pte)		{ return (pte).pte_low & _PAGE_FILE; }
++struct page;
 +
-+static inline pte_t pte_rdprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
-+static inline pte_t pte_exprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
-+static inline pte_t pte_mkclean(pte_t pte)	{ (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
-+static inline pte_t pte_mkold(pte_t pte)	{ (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
-+static inline pte_t pte_wrprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_RW; return pte; }
-+static inline pte_t pte_mkread(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return pte; }
-+static inline pte_t pte_mkexec(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return pte; }
-+static inline pte_t pte_mkdirty(pte_t pte)	{ (pte).pte_low |= _PAGE_DIRTY; return pte; }
-+static inline pte_t pte_mkyoung(pte_t pte)	{ (pte).pte_low |= _PAGE_ACCESSED; return pte; }
-+static inline pte_t pte_mkwrite(pte_t pte)	{ (pte).pte_low |= _PAGE_RW; return pte; }
-+static inline pte_t pte_mkhuge(pte_t pte)	{ (pte).pte_low |= _PAGE_PSE; return pte; }
++int xen_limit_pages_to_max_mfn(struct page *pages, unsigned int order,
++			       unsigned int address_bits);
 +
-+#ifdef CONFIG_X86_PAE
-+# include <asm/pgtable-3level.h>
-+#else
-+# include <asm/pgtable-2level.h>
-+#endif
++/* For drivers/xen/core/machine_reboot.c */
++#define HAVE_XEN_POST_SUSPEND
++void xen_post_suspend(int suspend_cancelled);
 +
-+#define ptep_test_and_clear_dirty(vma, addr, ptep)			\
-+({									\
-+	pte_t __pte = *(ptep);						\
-+	int __ret = pte_dirty(__pte);					\
-+	if (__ret) {							\
-+		__pte = pte_mkclean(__pte);				\
-+		if ((vma)->vm_mm != current->mm ||			\
-+		    HYPERVISOR_update_va_mapping(addr, __pte, 0))	\
-+			(ptep)->pte_low = __pte.pte_low;		\
-+	}								\
-+	__ret;								\
-+})
++/* For setup_arch() in arch/ia64/kernel/setup.c */
++void xen_ia64_enable_opt_feature(void);
++#endif /* !CONFIG_VMX_GUEST */
 +
-+#define ptep_test_and_clear_young(vma, addr, ptep)			\
-+({									\
-+	pte_t __pte = *(ptep);						\
-+	int __ret = pte_young(__pte);					\
-+	if (__ret)							\
-+		__pte = pte_mkold(__pte);				\
-+		if ((vma)->vm_mm != current->mm ||			\
-+		    HYPERVISOR_update_va_mapping(addr, __pte, 0))	\
-+			(ptep)->pte_low = __pte.pte_low;		\
-+	__ret;								\
-+})
++#define __pte_ma(_x)	((pte_t) {(_x)})        /* unmodified use */
++#define pfn_pte_ma(_x,_y)	__pte_ma(0)     /* unmodified use */
 +
-+#define ptep_get_and_clear_full(mm, addr, ptep, full)			\
-+	((full) ? ({							\
-+		pte_t __res = *(ptep);					\
-+		if (test_bit(PG_pinned, &virt_to_page((mm)->pgd)->flags)) \
-+			xen_l1_entry_update(ptep, __pte(0));		\
-+		else							\
-+			*(ptep) = __pte(0);				\
-+		__res;							\
-+	 }) :								\
-+	 ptep_get_and_clear(mm, addr, ptep))
++/* for netfront.c, netback.c */
++#define MULTI_UVMFLAGS_INDEX 0 /* XXX any value */
 +
-+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++static inline void
++MULTI_update_va_mapping(
++	multicall_entry_t *mcl, unsigned long va,
++	pte_t new_val, unsigned long flags)
 +{
-+	pte_t pte = *ptep;
-+	if (pte_write(pte))
-+		set_pte_at(mm, addr, ptep, pte_wrprotect(pte));
++	mcl->op = __HYPERVISOR_update_va_mapping;
++	mcl->result = 0;
 +}
 +
-+/*
-+ * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
-+ *
-+ *  dst - pointer to pgd range anwhere on a pgd page
-+ *  src - ""
-+ *  count - the number of pgds to copy.
-+ *
-+ * dst and src can be on the same page, but the range must not overlap,
-+ * and must not cross a page boundary.
-+ */
-+static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
++static inline void
++MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd,
++	void *uop, unsigned int count)
 +{
-+       memcpy(dst, src, count * sizeof(pgd_t));
++	mcl->op = __HYPERVISOR_grant_table_op;
++	mcl->args[0] = cmd;
++	mcl->args[1] = (unsigned long)uop;
++	mcl->args[2] = count;
 +}
 +
 +/*
-+ * Macro to mark a page protection value as "uncacheable".  On processors which do not support
-+ * it, this is a no-op.
-+ */
-+#define pgprot_noncached(prot)	((boot_cpu_data.x86 > 3)					  \
-+				 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
-+
-+/*
-+ * Conversion functions: convert a page and protection to a page entry,
-+ * and a page entry and page directory to the page they refer to.
++ * for blktap.c
++ * int create_lookup_pte_addr(struct mm_struct *mm, 
++ *                            unsigned long address,
++ *                            uint64_t *ptep);
 + */
++#define create_lookup_pte_addr(mm, address, ptep)			\
++	({								\
++		printk(KERN_EMERG					\
++		       "%s:%d "						\
++		       "create_lookup_pte_addr() isn't supported.\n",	\
++		       __func__, __LINE__);				\
++		BUG();							\
++		(-ENOSYS);						\
++	})
 +
-+#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
++/* for debug */
++asmlinkage int xprintk(const char *fmt, ...);
++#define xprintd(fmt, ...)	xprintk("%s:%d " fmt, __func__, __LINE__, \
++					##__VA_ARGS__)
 +
-+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-+{
-+	/*
-+	 * Since this might change the present bit (which controls whether
-+	 * a pte_t object has undergone p2m translation), we must use
-+	 * pte_val() on the input pte and __pte() for the return value.
-+	 */
-+	paddr_t pteval = pte_val(pte);
++#endif /* CONFIG_XEN || CONFIG_VMX_GUEST */
 +
-+	pteval &= _PAGE_CHG_MASK;
-+	pteval |= pgprot_val(newprot);
-+#ifdef CONFIG_X86_PAE
-+	pteval &= __supported_pte_mask;
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++#define is_initial_xendomain()						\
++	(is_running_on_xen() ? xen_start_info->flags & SIF_INITDOMAIN : 0)
++#else
++#define is_initial_xendomain() 0
 +#endif
-+	return __pte(pteval);
-+}
 +
-+#define pmd_large(pmd) \
-+((__pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
++#endif /* __HYPERVISOR_H__ */
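For illustration, a minimal sketch (not part of the patch) of how a frontend
driver might block on an event channel via the HYPERVISOR_poll() wrapper
above; the port and the one-second timeout are hypothetical. SCHEDOP_poll
returns 0 both when a port becomes pending and when the timeout expires.

    #include <linux/jiffies.h>
    #include <asm/hypervisor.h>

    static int wait_for_backend(evtchn_port_t port)
    {
            /* Block until the port is pending or ~1s elapses; the
             * timeout is in jiffies (converted via jiffies_to_st()). */
            return HYPERVISOR_poll(&port, 1, HZ);
    }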
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/intel_intrin.h
+--- a/include/asm-ia64/intel_intrin.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-ia64/intel_intrin.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -16,8 +16,8 @@
+ 		 	 * intrinsic
+ 		 	 */
+ 
+-#define ia64_getreg		__getReg
+-#define ia64_setreg		__setReg
++#define __ia64_getreg		__getReg
++#define __ia64_setreg		__setReg
+ 
+ #define ia64_hint		__hint
+ #define ia64_hint_pause		__hint_pause
+@@ -33,16 +33,16 @@
+ #define ia64_getf_exp		__getf_exp
+ #define ia64_shrp		_m64_shrp
+ 
+-#define ia64_tpa		__tpa
++#define __ia64_tpa		__tpa
+ #define ia64_invala		__invala
+ #define ia64_invala_gr		__invala_gr
+ #define ia64_invala_fr		__invala_fr
+ #define ia64_nop		__nop
+ #define ia64_sum		__sum
+-#define ia64_ssm		__ssm
++#define __ia64_ssm		__ssm
+ #define ia64_rum		__rum
+-#define ia64_rsm		__rsm
+-#define ia64_fc 		__fc
++#define __ia64_rsm		__rsm
++#define __ia64_fc 		__fc
+ 
+ #define ia64_ldfs		__ldfs
+ #define ia64_ldfd		__ldfd
+@@ -80,24 +80,24 @@
+ 
+ #define __ia64_set_dbr(index, val)	\
+ 		__setIndReg(_IA64_REG_INDR_DBR, index, val)
+-#define ia64_set_ibr(index, val)	\
++#define __ia64_set_ibr(index, val)	\
+ 		__setIndReg(_IA64_REG_INDR_IBR, index, val)
+-#define ia64_set_pkr(index, val)	\
++#define __ia64_set_pkr(index, val)	\
+ 		__setIndReg(_IA64_REG_INDR_PKR, index, val)
+-#define ia64_set_pmc(index, val)	\
++#define __ia64_set_pmc(index, val)	\
+ 		__setIndReg(_IA64_REG_INDR_PMC, index, val)
+-#define ia64_set_pmd(index, val)	\
++#define __ia64_set_pmd(index, val)	\
+ 		__setIndReg(_IA64_REG_INDR_PMD, index, val)
+-#define ia64_set_rr(index, val)	\
++#define __ia64_set_rr(index, val)	\
+ 		__setIndReg(_IA64_REG_INDR_RR, index, val)
+ 
+-#define ia64_get_cpuid(index) 	__getIndReg(_IA64_REG_INDR_CPUID, index)
++#define __ia64_get_cpuid(index) 	__getIndReg(_IA64_REG_INDR_CPUID, index)
+ #define __ia64_get_dbr(index) 	__getIndReg(_IA64_REG_INDR_DBR, index)
+-#define ia64_get_ibr(index) 	__getIndReg(_IA64_REG_INDR_IBR, index)
+-#define ia64_get_pkr(index) 	__getIndReg(_IA64_REG_INDR_PKR, index)
+-#define ia64_get_pmc(index) 	__getIndReg(_IA64_REG_INDR_PMC, index)
+-#define ia64_get_pmd(index)  	__getIndReg(_IA64_REG_INDR_PMD, index)
+-#define ia64_get_rr(index) 	__getIndReg(_IA64_REG_INDR_RR, index)
++#define __ia64_get_ibr(index) 	__getIndReg(_IA64_REG_INDR_IBR, index)
++#define __ia64_get_pkr(index) 	__getIndReg(_IA64_REG_INDR_PKR, index)
++#define __ia64_get_pmc(index) 	__getIndReg(_IA64_REG_INDR_PMC, index)
++#define __ia64_get_pmd(index)  	__getIndReg(_IA64_REG_INDR_PMD, index)
++#define __ia64_get_rr(index) 	__getIndReg(_IA64_REG_INDR_RR, index)
+ 
+ #define ia64_srlz_d		__dsrlz
+ #define ia64_srlz_i		__isrlz
+@@ -116,18 +116,18 @@
+ #define ia64_ld8_acq		__ld8_acq
+ 
+ #define ia64_sync_i		__synci
+-#define ia64_thash		__thash
+-#define ia64_ttag		__ttag
+-#define ia64_itcd		__itcd
+-#define ia64_itci		__itci
+-#define ia64_itrd		__itrd
+-#define ia64_itri		__itri
+-#define ia64_ptce		__ptce
+-#define ia64_ptcl		__ptcl
+-#define ia64_ptcg		__ptcg
+-#define ia64_ptcga		__ptcga
+-#define ia64_ptri		__ptri
+-#define ia64_ptrd		__ptrd
++#define __ia64_thash		__thash
++#define __ia64_ttag		__ttag
++#define __ia64_itcd		__itcd
++#define __ia64_itci		__itci
++#define __ia64_itrd		__itrd
++#define __ia64_itri		__itri
++#define __ia64_ptce		__ptce
++#define __ia64_ptcl		__ptcl
++#define __ia64_ptcg		__ptcg
++#define __ia64_ptcga		__ptcga
++#define __ia64_ptri		__ptri
++#define __ia64_ptrd		__ptrd
+ #define ia64_dep_mi		_m64_dep_mi
+ 
+ /* Values for lfhint in __lfetch and __lfetch_fault */
+@@ -142,15 +142,17 @@
+ #define ia64_lfetch_fault	__lfetch_fault
+ #define ia64_lfetch_fault_excl	__lfetch_fault_excl
+ 
+-#define ia64_intrin_local_irq_restore(x)		\
++#define __ia64_intrin_local_irq_restore(x)		\
+ do {							\
+ 	if ((x) != 0) {					\
+-		ia64_ssm(IA64_PSR_I);			\
++		__ia64_ssm(IA64_PSR_I);			\
+ 		ia64_srlz_d();				\
+ 	} else {					\
+-		ia64_rsm(IA64_PSR_I);			\
++		__ia64_rsm(IA64_PSR_I);			\
+ 	}						\
+ } while (0)
 +
-+/*
-+ * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
-+ *
-+ * this macro returns the index of the entry in the pgd page which would
-+ * control the given virtual address
-+ */
-+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-+#define pgd_index_k(addr) pgd_index(addr)
++#define __ia64_get_psr_i()	(__ia64_getreg(_IA64_REG_PSR) & 0x4000UL)
+ 
+ #define __builtin_trap()	__break(0);
+ 
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/intrinsics.h
+--- a/include/asm-ia64/intrinsics.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-ia64/intrinsics.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -17,6 +17,15 @@
+ #else
+ # include <asm/gcc_intrin.h>
+ #endif
 +
-+/*
-+ * pgd_offset() returns a (pgd_t *)
-+ * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
-+ */
-+#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
++#define __ia64_set_rr0_to_rr4(val0, val1, val2, val3, val4)	\
++do {								\
++	__ia64_set_rr(0x0000000000000000UL, (val0));		\
++	__ia64_set_rr(0x2000000000000000UL, (val1));		\
++	__ia64_set_rr(0x4000000000000000UL, (val2));		\
++	__ia64_set_rr(0x6000000000000000UL, (val3));		\
++	__ia64_set_rr(0x8000000000000000UL, (val4));		\
++} while (0)
+ 
+ /*
+  * Force an unresolved reference if someone tries to use
+@@ -177,4 +186,5 @@
+ #endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
+ 
+ #endif
++#include <asm/privop.h>
+ #endif /* _ASM_IA64_INTRINSICS_H */
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/io.h
+--- a/include/asm-ia64/io.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-ia64/io.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -96,9 +96,39 @@
+  * The following two macros are deprecated and scheduled for removal.
+  * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
+  */
++#ifndef CONFIG_XEN
+ #define bus_to_virt	phys_to_virt
+ #define virt_to_bus	virt_to_phys
+ #define page_to_bus	page_to_phys
++#else
++#define bus_to_virt(bus)	\
++	phys_to_virt(machine_to_phys_for_dma(bus))
++#define virt_to_bus(virt)	\
++	phys_to_machine_for_dma(virt_to_phys(virt))
++#define page_to_bus(page)	\
++	phys_to_machine_for_dma(page_to_pseudophys(page))
 +
-+/*
-+ * a shortcut which implies the use of the kernel's pgd, instead
-+ * of a process's
-+ */
-+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
++#define page_to_pseudophys(page) \
++	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
 +
 +/*
-+ * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
-+ *
-+ * this macro returns the index of the entry in the pmd page which would
-+ * control the given virtual address
++ * Drivers that use page_to_phys() for bus addresses are broken.
++ * This includes:
++ * drivers/ide/cris/ide-cris.c
++ * drivers/scsi/dec_esp.c
 + */
-+#define pmd_index(address) \
-+		(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-+
++#define page_to_phys(page)	(page_to_pseudophys(page))
++#define bvec_to_bus(bv)		(page_to_bus((bv)->bv_page) + \
++				(unsigned long) (bv)->bv_offset)
++#define bio_to_pseudophys(bio)	(page_to_pseudophys(bio_page((bio))) +	\
++				 (unsigned long) bio_offset((bio)))
++#define bvec_to_pseudophys(bv)  (page_to_pseudophys((bv)->bv_page) +	\
++				 (unsigned long) (bv)->bv_offset)
++#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)				\
++	(((bvec_to_bus((vec1)) + (vec1)->bv_len) == bvec_to_bus((vec2))) && \
++	 ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) ==		\
++	  bvec_to_pseudophys((vec2))))
++#endif /* CONFIG_XEN */
+ 
+ # endif /* KERNEL */
+ 
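As a hypothetical illustration of the CONFIG_XEN definitions above:
virt_to_bus() now yields a machine address and bus_to_virt() undoes it
through the m2p helpers from asm/maddr.h, so the round trip is an identity
only for frames owned by this domain.

    #include <linux/kernel.h>
    #include <asm/io.h>

    static void check_bus_round_trip(void *vaddr)
    {
            unsigned long bus = virt_to_bus(vaddr);

            /* m2p(p2m(x)) is the identity for our own frames. */
            BUG_ON(bus_to_virt(bus) != vaddr);
    }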
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/iosapic.h
+--- a/include/asm-ia64/iosapic.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-ia64/iosapic.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -53,6 +53,7 @@
+ 
+ #define NR_IOSAPICS			256
+ 
++#ifndef CONFIG_XEN
+ static inline unsigned int iosapic_read(char __iomem *iosapic, unsigned int reg)
+ {
+ 	writel(reg, iosapic + IOSAPIC_REG_SELECT);
+@@ -64,6 +65,7 @@
+ 	writel(reg, iosapic + IOSAPIC_REG_SELECT);
+ 	writel(val, iosapic + IOSAPIC_WINDOW);
+ }
++#endif
+ 
+ static inline void iosapic_eoi(char __iomem *iosapic, u32 vector)
+ {
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/irq.h
+--- a/include/asm-ia64/irq.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-ia64/irq.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -11,8 +11,41 @@
+  * 02/29/00     D.Mosberger	moved most things into hw_irq.h
+  */
+ 
++#ifndef CONFIG_XEN
+ #define NR_IRQS		256
+ #define NR_IRQ_VECTORS	NR_IRQS
++#else
 +/*
-+ * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
-+ *
-+ * this macro returns the index of the entry in the pte page which would
-+ * control the given virtual address
++ * The flat IRQ space is divided into two regions:
++ *  1. A one-to-one mapping of real physical IRQs. This space is only used
++ *     if we have physical device-access privilege. This region is at the 
++ *     start of the IRQ space so that existing device drivers do not need
++ *     to be modified to translate physical IRQ numbers into our IRQ space.
++ *  2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
++ *     are bound using the provided bind/unbind functions.
 + */
-+#define pte_index(address) \
-+		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-+#define pte_offset_kernel(dir, address) \
-+	((pte_t *) pmd_page_kernel(*(dir)) +  pte_index(address))
 +
-+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
++#define PIRQ_BASE		0
++#define NR_PIRQS		256
 +
-+#define pmd_page_kernel(pmd) \
-+		((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
++#define DYNIRQ_BASE		(PIRQ_BASE + NR_PIRQS)
++#define NR_DYNIRQS		(CONFIG_NR_CPUS * 8)
 +
-+/*
-+ * Helper function that returns the kernel pagetable entry controlling
-+ * the virtual address 'address'. NULL means no pagetable entry present.
-+ * NOTE: the return type is pte_t but if the pmd is PSE then we return it
-+ * as a pte too.
-+ */
-+extern pte_t *lookup_address(unsigned long address);
++#define NR_IRQS			(NR_PIRQS + NR_DYNIRQS)
++#define NR_IRQ_VECTORS		NR_IRQS
 +
-+/*
-+ * Make a given kernel text page executable/non-executable.
-+ * Returns the previous executability setting of that page (which
-+ * is used to restore the previous state). Used by the SMP bootup code.
-+ * NOTE: this is an __init function for security reasons.
-+ */
-+#ifdef CONFIG_X86_PAE
-+ extern int set_kernel_exec(unsigned long vaddr, int enable);
-+#else
-+ static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
-+#endif
++#define pirq_to_irq(_x)		((_x) + PIRQ_BASE)
++#define irq_to_pirq(_x)		((_x) - PIRQ_BASE)
 +
-+extern void noexec_setup(const char *str);
++#define dynirq_to_irq(_x)	((_x) + DYNIRQ_BASE)
++#define irq_to_dynirq(_x)	((_x) - DYNIRQ_BASE)
 +
-+#if defined(CONFIG_HIGHPTE)
-+#define pte_offset_map(dir, address) \
-+	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + \
-+	 pte_index(address))
-+#define pte_offset_map_nested(dir, address) \
-+	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + \
-+	 pte_index(address))
-+#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-+#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
-+#else
-+#define pte_offset_map(dir, address) \
-+	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-+#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
-+#define pte_unmap(pte) do { } while (0)
-+#define pte_unmap_nested(pte) do { } while (0)
++#define RESCHEDULE_VECTOR	0
++#define IPI_VECTOR		1
++#define CMCP_VECTOR		2
++#define CPEP_VECTOR		3
++#define NR_IPIS			4
++#endif /* CONFIG_XEN */
+ 
+ static __inline__ int
+ irq_canonicalize (int irq)
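A worked example of the flat layout described above, as compile-time checks
(a sketch, not part of the patch): physical IRQs occupy the bottom of the
space and dynamic IRQs follow immediately after.

    #include <linux/kernel.h>
    #include <asm/irq.h>

    static void irq_layout_example(void)
    {
            BUILD_BUG_ON(pirq_to_irq(0) != 0);          /* PIRQs start at 0 */
            BUILD_BUG_ON(dynirq_to_irq(0) != NR_PIRQS); /* DYNIRQs follow   */
            BUILD_BUG_ON(NR_IRQS != NR_PIRQS + NR_DYNIRQS);
    }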
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/kexec.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-ia64/kexec.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,58 @@
++#ifndef _ASM_IA64_KEXEC_H
++#define _ASM_IA64_KEXEC_H
++
++
++/* Maximum physical address we can use pages from */
++#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
++/* Maximum address we can reach in physical address mode */
++#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
++/* Maximum address we can use for the control code buffer */
++#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
++
++#define KEXEC_CONTROL_CODE_SIZE (8192 + 8192 + 4096)
++
++/* The native architecture */
++#define KEXEC_ARCH KEXEC_ARCH_IA_64
++
++#define MAX_NOTE_BYTES 1024
++
++#define kexec_flush_icache_page(page) do { \
++                unsigned long page_addr = (unsigned long)page_address(page); \
++                flush_icache_range(page_addr, page_addr + PAGE_SIZE); \
++        } while(0)
++
++extern struct kimage *ia64_kimage;
++DECLARE_PER_CPU(u64, ia64_mca_pal_base);
++const extern unsigned int relocate_new_kernel_size;
++extern void relocate_new_kernel(unsigned long, unsigned long,
++		struct ia64_boot_param *, unsigned long);
++static inline void
++crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs)
++{
++}
++extern struct resource efi_memmap_res;
++extern struct resource boot_param_res;
++extern void kdump_smp_send_stop(void);
++extern void kdump_smp_send_init(void);
++extern void kexec_disable_iosapic(void);
++extern void crash_save_this_cpu(void);
++struct rsvd_region;
++extern unsigned long kdump_find_rsvd_region(unsigned long size,
++		struct rsvd_region *rsvd_regions, int n);
++extern void kdump_cpu_freeze(struct unw_frame_info *info, void *arg);
++extern int kdump_status[];
++extern atomic_t kdump_cpu_freezed;
++extern atomic_t kdump_in_progress;
++
++/* Kexec needs to know about the actual physical address.
++ * But in Xen, on some architectures, a physical address is a
++ * pseudo-physical address. */
++#ifdef CONFIG_XEN
++#define KEXEC_ARCH_HAS_PAGE_MACROS
++#define kexec_page_to_pfn(page)  pfn_to_mfn_for_dma(page_to_pfn(page))
++#define kexec_pfn_to_page(pfn)   pfn_to_page(mfn_to_pfn_for_dma(pfn))
++#define kexec_virt_to_phys(addr) phys_to_machine_for_dma(__pa(addr))
++#define kexec_phys_to_virt(addr) phys_to_virt(machine_to_phys_for_dma(addr))
++#endif
++
++#endif /* _ASM_IA64_KEXEC_H */
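Illustrative only: with KEXEC_ARCH_HAS_PAGE_MACROS defined above, the
generic kexec code ends up describing the image in machine frames. A
hypothetical helper:

    #include <asm/kexec.h>

    /* The machine frame the hypervisor will actually relocate. */
    static unsigned long segment_machine_frame(struct page *page)
    {
            return kexec_page_to_pfn(page); /* pfn_to_mfn_for_dma() */
    }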
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/machvec.h
+--- a/include/asm-ia64/machvec.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-ia64/machvec.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -35,6 +35,7 @@
+ typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
+ 					u8 size);
+ typedef void ia64_mv_migrate_t(struct task_struct * task);
++typedef void ia64_mv_kernel_launch_event_t(void);
+ 
+ /* DMA-mapping interface: */
+ typedef void ia64_mv_dma_init (void);
+@@ -108,6 +109,8 @@
+ #  include <asm/machvec_hpzx1_swiotlb.h>
+ # elif defined (CONFIG_IA64_SGI_SN2)
+ #  include <asm/machvec_sn2.h>
++# elif defined (CONFIG_IA64_XEN)
++#  include <asm/machvec_xen.h>
+ # elif defined (CONFIG_IA64_GENERIC)
+ 
+ # ifdef MACHVEC_PLATFORM_HEADER
+@@ -205,6 +208,7 @@
+ 	ia64_mv_readq_relaxed_t *readq_relaxed;
+ 	ia64_mv_migrate_t *migrate;
+ 	ia64_mv_msi_init_t *msi_init;
++	ia64_mv_kernel_launch_event_t *kernel_launch_event;
+ } __attribute__((__aligned__(16))); /* align attrib? see above comment */
+ 
+ #define MACHVEC_INIT(name)			\
+@@ -302,6 +306,9 @@
+ #endif
+ #ifndef platform_tlb_migrate_finish
+ # define platform_tlb_migrate_finish	machvec_noop_mm
 +#endif
++#ifndef platform_kernel_launch_event
++# define platform_kernel_launch_event	machvec_noop
+ #endif
+ #ifndef platform_dma_init
+ # define platform_dma_init		swiotlb_init
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/machvec_sn2.h
+--- a/include/asm-ia64/machvec_sn2.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-ia64/machvec_sn2.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -67,6 +67,7 @@
+ extern ia64_mv_dma_mapping_error	sn_dma_mapping_error;
+ extern ia64_mv_dma_supported		sn_dma_supported;
+ extern ia64_mv_migrate_t		sn_migrate;
++extern ia64_mv_kernel_launch_event_t	sn_kernel_launch_event;
+ extern ia64_mv_msi_init_t		sn_msi_init;
+ 
+ 
+@@ -119,6 +120,7 @@
+ #define platform_dma_mapping_error		sn_dma_mapping_error
+ #define platform_dma_supported		sn_dma_supported
+ #define platform_migrate		sn_migrate
++#define platform_kernel_launch_event    sn_kernel_launch_event
+ #ifdef CONFIG_PCI_MSI
+ #define platform_msi_init		sn_msi_init
+ #else
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/machvec_xen.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-ia64/machvec_xen.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,43 @@
++#ifndef _ASM_IA64_MACHVEC_XEN_h
++#define _ASM_IA64_MACHVEC_XEN_h
 +
-+#define __HAVE_ARCH_PTEP_ESTABLISH
-+#define ptep_establish(vma, address, ptep, pteval)			\
-+	do {								\
-+		if ( likely((vma)->vm_mm == current->mm) ) {		\
-+			BUG_ON(HYPERVISOR_update_va_mapping(address,	\
-+				pteval,					\
-+				(unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
-+					UVMF_INVLPG|UVMF_MULTI));	\
-+		} else {						\
-+			xen_l1_entry_update(ptep, pteval);		\
-+			flush_tlb_page(vma, address);			\
-+		}							\
-+	} while (0)
-+
-+/*
-+ * The i386 doesn't have any external MMU info: the kernel page
-+ * tables contain all the necessary information.
-+ *
-+ * Also, we only update the dirty/accessed state if we set
-+ * the dirty bit by hand in the kernel, since the hardware
-+ * will do the accessed bit for us, and we don't want to
-+ * race with other CPU's that might be updating the dirty
-+ * bit at the same time.
-+ */
-+#define update_mmu_cache(vma,address,pte) do { } while (0)
-+#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-+#define ptep_set_access_flags(vma, address, ptep, entry, dirty)		\
-+	do {								\
-+		if (dirty)						\
-+			ptep_establish(vma, address, ptep, entry);	\
-+	} while (0)
++extern ia64_mv_setup_t			xen_setup;
++extern ia64_mv_cpu_init_t		xen_cpu_init;
++extern ia64_mv_irq_init_t		xen_irq_init;
++extern ia64_mv_send_ipi_t		xen_platform_send_ipi;
++extern ia64_mv_dma_alloc_coherent	xen_alloc_coherent;
++extern ia64_mv_dma_free_coherent	xen_free_coherent;
++extern ia64_mv_dma_map_single		xen_map_single;
++extern ia64_mv_dma_unmap_single		xen_unmap_single;
++extern ia64_mv_dma_map_sg		xen_map_sg;
++extern ia64_mv_dma_unmap_sg		xen_unmap_sg;
++extern ia64_mv_dma_supported		xen_dma_supported;
++extern ia64_mv_dma_mapping_error	xen_dma_mapping_error;
++
++/*
++ * This stuff has dual use!
++ *
++ * For a generic kernel, the macros are used to initialize the
++ * platform's machvec structure.  When compiling a non-generic kernel,
++ * the macros are used directly.
++ */
++#define platform_name				"xen"
++#define platform_setup				xen_setup
++#define platform_cpu_init			xen_cpu_init
++#define platform_irq_init			xen_irq_init
++#define platform_send_ipi			xen_platform_send_ipi
++#define platform_dma_init			machvec_noop
++#define platform_dma_alloc_coherent		xen_alloc_coherent
++#define platform_dma_free_coherent		xen_free_coherent
++#define platform_dma_map_single			xen_map_single
++#define platform_dma_unmap_single		xen_unmap_single
++#define platform_dma_map_sg			xen_map_sg
++#define platform_dma_unmap_sg			xen_unmap_sg
++#define platform_dma_sync_single_for_cpu	machvec_dma_sync_single
++#define platform_dma_sync_sg_for_cpu		machvec_dma_sync_sg
++#define platform_dma_sync_single_for_device	machvec_dma_sync_single
++#define platform_dma_sync_sg_for_device		machvec_dma_sync_sg
++#define platform_dma_supported			xen_dma_supported
++#define platform_dma_mapping_error		xen_dma_mapping_error
++
++#endif /* _ASM_IA64_MACHVEC_XEN_h */
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/maddr.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-ia64/maddr.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,116 @@
++#ifndef _ASM_IA64_MADDR_H
++#define _ASM_IA64_MADDR_H
 +
++#include <linux/kernel.h>
++#include <asm/hypervisor.h>
 +#include <xen/features.h>
-+void make_lowmem_page_readonly(void *va, unsigned int feature);
-+void make_lowmem_page_writable(void *va, unsigned int feature);
-+void make_page_readonly(void *va, unsigned int feature);
-+void make_page_writable(void *va, unsigned int feature);
-+void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
-+void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
-+
-+#define virt_to_ptep(__va)						\
-+({									\
-+	pgd_t *__pgd = pgd_offset_k((unsigned long)(__va));		\
-+	pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va));	\
-+	pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va));	\
-+	pte_offset_kernel(__pmd, (unsigned long)(__va));		\
-+})
++#include <xen/interface/xen.h>
 +
-+#define arbitrary_virt_to_machine(__va)					\
-+({									\
-+	maddr_t m = (maddr_t)pte_mfn(*virt_to_ptep(__va)) << PAGE_SHIFT;\
-+	m | ((unsigned long)(__va) & (PAGE_SIZE-1));			\
-+})
++#ifdef CONFIG_XEN
 +
-+#endif /* !__ASSEMBLY__ */
++#define INVALID_P2M_ENTRY       (~0UL)
 +
-+#ifdef CONFIG_FLATMEM
-+#define kern_addr_valid(addr)	(1)
-+#endif /* CONFIG_FLATMEM */
++#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
++extern int p2m_initialized;
++extern unsigned long p2m_min_low_pfn;
++extern unsigned long p2m_max_low_pfn;
++extern unsigned long p2m_convert_min_pfn;
++extern unsigned long p2m_convert_max_pfn;
++extern volatile const pte_t* p2m_pte;
++unsigned long p2m_phystomach(unsigned long gpfn);
++#else
++#define p2m_initialized		(0)
++#define p2m_phystomach(gpfn)	INVALID_MFN
++#endif
 +
-+int direct_remap_pfn_range(struct vm_area_struct *vma,
-+                           unsigned long address, 
-+                           unsigned long mfn,
-+                           unsigned long size, 
-+                           pgprot_t prot,
-+                           domid_t  domid);
-+int direct_kernel_remap_pfn_range(unsigned long address, 
-+				  unsigned long mfn,
-+				  unsigned long size, 
-+				  pgprot_t prot,
-+				  domid_t  domid);
-+int create_lookup_pte_addr(struct mm_struct *mm,
-+                           unsigned long address,
-+                           uint64_t *ptep);
-+int touch_pte_range(struct mm_struct *mm,
-+                    unsigned long address,
-+                    unsigned long size);
++/* XXX xen page size != page size */
++static inline unsigned long
++pfn_to_mfn_for_dma(unsigned long pfn)
++{
++	unsigned long mfn;
++	if (p2m_initialized)
++		return p2m_phystomach(pfn);
++	mfn = HYPERVISOR_phystomach(pfn);
++	BUG_ON(mfn == 0); /* XXX */
++	BUG_ON(mfn == INVALID_P2M_ENTRY); /* XXX */
++	BUG_ON(mfn == INVALID_MFN);
++	return mfn;
++}
 +
-+#define io_remap_pfn_range(vma,from,pfn,size,prot) \
-+direct_remap_pfn_range(vma,from,pfn,size,prot,DOMID_IO)
++static inline unsigned long
++phys_to_machine_for_dma(unsigned long phys)
++{
++	unsigned long machine =
++	              pfn_to_mfn_for_dma(phys >> PAGE_SHIFT) << PAGE_SHIFT;
++	machine |= (phys & ~PAGE_MASK);
++	return machine;
++}
 +
-+#define MK_IOSPACE_PFN(space, pfn)	(pfn)
-+#define GET_IOSPACE(pfn)		0
-+#define GET_PFN(pfn)			(pfn)
++static inline unsigned long
++mfn_to_pfn_for_dma(unsigned long mfn)
++{
++	unsigned long pfn;
++	pfn = HYPERVISOR_machtophys(mfn);
++	BUG_ON(pfn == 0);
++	/* BUG_ON(pfn == INVALID_M2P_ENTRY); */
++	return pfn;
++}
 +
-+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-+#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
-+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-+#define __HAVE_ARCH_PTE_SAME
-+#include <asm-generic/pgtable.h>
++static inline unsigned long
++machine_to_phys_for_dma(unsigned long machine)
++{
++	unsigned long phys =
++	              mfn_to_pfn_for_dma(machine >> PAGE_SHIFT) << PAGE_SHIFT;
++	phys |= (machine & ~PAGE_MASK);
++	return phys;
++}
 +
-+#endif /* _I386_PGTABLE_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/processor.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/processor.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/processor.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/processor.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,741 @@
++#ifdef CONFIG_SPARSEMEM
 +/*
-+ * include/asm-i386/processor.h
-+ *
-+ * Copyright (C) 1994 Linus Torvalds
++ * When CONFIG_SPARSEMEM=y, pfn_valid() is defined in
++ * linux/include/linux/mmzone.h. However, #include <linux/mmzone.h>
++ * causes header inclusion hell.
 + */
++static inline int pfn_valid(unsigned long pfn);
++#endif
 +
-+#ifndef __ASM_I386_PROCESSOR_H
-+#define __ASM_I386_PROCESSOR_H
++static inline unsigned long
++mfn_to_local_pfn(unsigned long mfn)
++{
++	unsigned long pfn = mfn_to_pfn_for_dma(mfn);
++	if (!pfn_valid(pfn))
++		return INVALID_P2M_ENTRY;
++	return pfn;
++}
 +
-+#include <asm/vm86.h>
-+#include <asm/math_emu.h>
-+#include <asm/segment.h>
-+#include <asm/page.h>
-+#include <asm/types.h>
-+#include <asm/sigcontext.h>
-+#include <asm/cpufeature.h>
-+#include <asm/msr.h>
-+#include <asm/system.h>
-+#include <linux/cache.h>
-+#include <linux/threads.h>
-+#include <asm/percpu.h>
-+#include <linux/cpumask.h>
-+#include <xen/interface/physdev.h>
++#else /* !CONFIG_XEN */
 +
-+/* flag for disabling the tsc */
-+extern int tsc_disable;
++#define pfn_to_mfn_for_dma(pfn) (pfn)
++#define mfn_to_pfn_for_dma(mfn) (mfn)
++#define phys_to_machine_for_dma(phys) (phys)
++#define machine_to_phys_for_dma(machine) (machine)
++#define mfn_to_local_pfn(mfn) (mfn)
 +
-+struct desc_struct {
-+	unsigned long a,b;
-+};
++#endif /* !CONFIG_XEN */
 +
-+#define desc_empty(desc) \
-+		(!((desc)->a | (desc)->b))
++#define mfn_to_pfn(mfn) (mfn)
++#define pfn_to_mfn(pfn) (pfn)
 +
-+#define desc_equal(desc1, desc2) \
-+		(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
-+/*
-+ * Default implementation of macro that returns current
-+ * instruction pointer ("program counter").
-+ */
-+#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
++#define mfn_to_virt(mfn) (__va((mfn) << PAGE_SHIFT))
++#define virt_to_mfn(virt) (__pa(virt) >> PAGE_SHIFT)
++#define virt_to_machine(virt) __pa(virt) /* for tpmfront.c */
 +
-+/*
-+ *  CPU type and hardware bug flags. Kept separately for each CPU.
-+ *  Members of this structure are referenced in head.S, so think twice
-+ *  before touching them. [mj]
-+ */
++#define set_phys_to_machine(pfn, mfn) do { } while (0)
 +
-+struct cpuinfo_x86 {
-+	__u8	x86;		/* CPU family */
-+	__u8	x86_vendor;	/* CPU vendor */
-+	__u8	x86_model;
-+	__u8	x86_mask;
-+	char	wp_works_ok;	/* It doesn't on 386's */
-+	char	hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
-+	char	hard_math;
-+	char	rfu;
-+       	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
-+	unsigned long	x86_capability[NCAPINTS];
-+	char	x86_vendor_id[16];
-+	char	x86_model_id[64];
-+	int 	x86_cache_size;  /* in KB - valid for CPUS which support this
-+				    call  */
-+	int 	x86_cache_alignment;	/* In bytes */
-+	char	fdiv_bug;
-+	char	f00f_bug;
-+	char	coma_bug;
-+	char	pad0;
-+	int	x86_power;
-+	unsigned long loops_per_jiffy;
-+#ifdef CONFIG_SMP
-+	cpumask_t llc_shared_map;	/* cpus sharing the last level cache */
-+#endif
-+	unsigned char x86_max_cores;	/* cpuid returned max cores value */
-+	unsigned char apicid;
-+#ifdef CONFIG_SMP
-+	unsigned char booted_cores;	/* number of cores as seen by OS */
-+	__u8 phys_proc_id; 		/* Physical processor id. */
-+	__u8 cpu_core_id;  		/* Core id */
++typedef unsigned long maddr_t;	/* to compile netback, netfront */
++#ifndef _ASM_IA64_SN_TYPES_H /* paddr_t is defined in asm-ia64/sn/types.h */
++typedef unsigned long paddr_t;
 +#endif
-+} __attribute__((__aligned__(SMP_CACHE_BYTES)));
-+
-+#define X86_VENDOR_INTEL 0
-+#define X86_VENDOR_CYRIX 1
-+#define X86_VENDOR_AMD 2
-+#define X86_VENDOR_UMC 3
-+#define X86_VENDOR_NEXGEN 4
-+#define X86_VENDOR_CENTAUR 5
-+#define X86_VENDOR_RISE 6
-+#define X86_VENDOR_TRANSMETA 7
-+#define X86_VENDOR_NSC 8
-+#define X86_VENDOR_NUM 9
-+#define X86_VENDOR_UNKNOWN 0xff
 +
-+/*
-+ * capabilities of CPUs
-+ */
-+
-+extern struct cpuinfo_x86 boot_cpu_data;
-+extern struct cpuinfo_x86 new_cpu_data;
-+#ifndef CONFIG_X86_NO_TSS
-+extern struct tss_struct doublefault_tss;
-+DECLARE_PER_CPU(struct tss_struct, init_tss);
++#ifdef CONFIG_XEN
++int range_straddles_page_boundary(paddr_t p, size_t size);
++#else
++#define range_straddles_page_boundary(addr, size)	(0)
 +#endif
 +
-+#ifdef CONFIG_SMP
-+extern struct cpuinfo_x86 cpu_data[];
-+#define current_cpu_data cpu_data[smp_processor_id()]
++#endif /* _ASM_IA64_MADDR_H */
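A minimal sketch of the arithmetic above (hypothetical caller): only the
frame number goes through the p2m translation; the offset within the page
is carried over unchanged.

    #include <linux/kernel.h>
    #include <asm/maddr.h>

    static unsigned long translate_for_dma(unsigned long phys)
    {
            unsigned long machine = phys_to_machine_for_dma(phys);

            /* The low PAGE_SHIFT bits survive the translation. */
            BUG_ON((machine & ~PAGE_MASK) != (phys & ~PAGE_MASK));
            return machine;
    }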
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/meminit.h
+--- a/include/asm-ia64/meminit.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-ia64/meminit.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -15,11 +15,17 @@
+  * 	- initrd (optional)
+  * 	- command line string
+  * 	- kernel code & data
++ * 	- crash dumping code reserved region
+  * 	- Kernel memory map built from EFI memory map
++ *	- xen start info
+  *
+  * More could be added if necessary
+  */
+-#define IA64_MAX_RSVD_REGIONS 6
++#ifndef CONFIG_XEN
++#define IA64_MAX_RSVD_REGIONS 7
 +#else
-+#define cpu_data (&boot_cpu_data)
-+#define current_cpu_data boot_cpu_data
++#define IA64_MAX_RSVD_REGIONS 8
++#endif
+ 
+ struct rsvd_region {
+ 	unsigned long start;	/* virtual address of beginning of element */
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/mmu_context.h
+--- a/include/asm-ia64/mmu_context.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-ia64/mmu_context.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -151,11 +151,7 @@
+ #  endif
+ #endif
+ 
+-	ia64_set_rr(0x0000000000000000UL, rr0);
+-	ia64_set_rr(0x2000000000000000UL, rr1);
+-	ia64_set_rr(0x4000000000000000UL, rr2);
+-	ia64_set_rr(0x6000000000000000UL, rr3);
+-	ia64_set_rr(0x8000000000000000UL, rr4);
++	ia64_set_rr0_to_rr4(rr0, rr1, rr2, rr3, rr4);
+ 	ia64_srlz_i();			/* srlz.i implies srlz.d */
+ }
+ 
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/page.h
+--- a/include/asm-ia64/page.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-ia64/page.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -119,6 +119,7 @@
+ #endif
+ 
+ #ifdef CONFIG_FLATMEM
++extern unsigned long max_mapnr;
+ # define pfn_valid(pfn)		(((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
+ #elif defined(CONFIG_DISCONTIGMEM)
+ extern unsigned long min_low_pfn;
+@@ -126,7 +127,9 @@
+ # define pfn_valid(pfn)		(((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
+ #endif
+ 
++#ifndef CONFIG_XEN
+ #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
 +#endif
+ #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+ #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
+ 
+@@ -227,5 +230,17 @@
+ 					 (((current->personality & READ_IMPLIES_EXEC) != 0)	\
+ 					  ? VM_EXEC : 0))
+ 
++#ifndef __ASSEMBLY__
 +
-+extern	int cpu_llc_id[NR_CPUS];
-+extern char ignore_fpu_irq;
++#include <linux/kernel.h>
++#include <asm/hypervisor.h>	/* to compile ioremap.c */
 +
-+extern void identify_cpu(struct cpuinfo_x86 *);
-+extern void print_cpu_info(struct cpuinfo_x86 *);
-+extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
-+extern unsigned short num_cache_leaves;
++#ifdef CONFIG_XEN
 +
-+#ifdef CONFIG_X86_HT
-+extern void detect_ht(struct cpuinfo_x86 *c);
++#include <asm/maddr.h>
++
++#endif /* CONFIG_XEN */
++#endif /* __ASSEMBLY__ */
++
+ # endif /* __KERNEL__ */
+ #endif /* _ASM_IA64_PAGE_H */
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/pgalloc.h
+--- a/include/asm-ia64/pgalloc.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-ia64/pgalloc.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -125,7 +125,11 @@
+ static inline void
+ pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
+ {
++#ifndef CONFIG_XEN
+ 	pmd_val(*pmd_entry) = page_to_phys(pte);
 +#else
-+static inline void detect_ht(struct cpuinfo_x86 *c) {}
++	pmd_val(*pmd_entry) = page_to_pseudophys(pte);
 +#endif
+ }
+ 
+ static inline void
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/privop.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-ia64/privop.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,63 @@
++#ifndef _ASM_IA64_PRIVOP_H
++#define _ASM_IA64_PRIVOP_H
 +
++#ifndef _ASM_IA64_INTRINSICS_H
++#error "don't include privop.h directly. instead include intrinsics.h"
++#endif
 +/*
-+ * EFLAGS bits
++ * Copyright (C) 2005 Hewlett-Packard Co
++ *	Dan Magenheimer <dan.magenheimer at hp.com>
++ *
 + */
-+#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
-+#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
-+#define X86_EFLAGS_AF	0x00000010 /* Auxillary carry Flag */
-+#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
-+#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
-+#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
-+#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
-+#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
-+#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
-+#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
-+#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
-+#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
-+#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
-+#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
-+#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
-+#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
-+#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
 +
-+/*
-+ * Generic CPUID function
-+ * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
-+ * resulting in stale register contents being returned.
-+ */
-+static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
-+{
-+	__asm__(XEN_CPUID
-+		: "=a" (*eax),
-+		  "=b" (*ebx),
-+		  "=c" (*ecx),
-+		  "=d" (*edx)
-+		: "0" (op), "c"(0));
-+}
++#ifdef CONFIG_XEN
++#include <asm/xen/privop.h>
++#endif
 +
-+/* Some CPUID calls want 'count' to be placed in ecx */
-+static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
-+	       	int *edx)
-+{
-+	__asm__(XEN_CPUID
-+		: "=a" (*eax),
-+		  "=b" (*ebx),
-+		  "=c" (*ecx),
-+		  "=d" (*edx)
-+		: "0" (op), "c" (count));
-+}
++#ifndef __ASSEMBLY
 +
-+/*
-+ * CPUID functions returning a single datum
-+ */
-+static inline unsigned int cpuid_eax(unsigned int op)
-+{
-+	unsigned int eax;
++#ifndef IA64_PARAVIRTUALIZED
 +
-+	__asm__(XEN_CPUID
-+		: "=a" (eax)
-+		: "0" (op)
-+		: "bx", "cx", "dx");
-+	return eax;
-+}
-+static inline unsigned int cpuid_ebx(unsigned int op)
-+{
-+	unsigned int eax, ebx;
++#define ia64_getreg			__ia64_getreg
++#define ia64_setreg			__ia64_setreg
++#define ia64_hint			__ia64_hint
++#define ia64_thash			__ia64_thash
++#define ia64_itci			__ia64_itci
++#define ia64_itcd			__ia64_itcd
++#define ia64_itri			__ia64_itri
++#define ia64_itrd			__ia64_itrd
++#define ia64_tpa			__ia64_tpa
++#define ia64_set_ibr			__ia64_set_ibr
++#define ia64_set_pkr			__ia64_set_pkr
++#define ia64_set_pmc			__ia64_set_pmc
++#define ia64_set_pmd			__ia64_set_pmd
++#define ia64_set_rr			__ia64_set_rr
++#define ia64_get_cpuid			__ia64_get_cpuid
++#define ia64_get_ibr			__ia64_get_ibr
++#define ia64_get_pkr			__ia64_get_pkr
++#define ia64_get_pmc			__ia64_get_pmc
++#define ia64_get_pmd			__ia64_get_pmd
++#define ia64_get_rr			__ia64_get_rr
++#define ia64_fc				__ia64_fc
++#define ia64_ssm			__ia64_ssm
++#define ia64_rsm			__ia64_rsm
++#define ia64_ptce			__ia64_ptce
++#define ia64_ptcga			__ia64_ptcga
++#define ia64_ptcl			__ia64_ptcl
++#define ia64_ptri			__ia64_ptri
++#define ia64_ptrd			__ia64_ptrd
++#define ia64_get_psr_i			__ia64_get_psr_i
++#define ia64_intrin_local_irq_restore	__ia64_intrin_local_irq_restore
++#define ia64_leave_kernel		__ia64_leave_kernel
++#define ia64_leave_syscall		__ia64_leave_syscall
++#define ia64_trace_syscall		__ia64_trace_syscall
++#define ia64_ret_from_clone		__ia64_ret_from_clone
++#define ia64_switch_to			__ia64_switch_to
++#define ia64_pal_call_static		__ia64_pal_call_static
++#define ia64_set_rr0_to_rr4		__ia64_set_rr0_to_rr4
 +
-+	__asm__(XEN_CPUID
-+		: "=a" (eax), "=b" (ebx)
-+		: "0" (op)
-+		: "cx", "dx" );
-+	return ebx;
-+}
-+static inline unsigned int cpuid_ecx(unsigned int op)
-+{
-+	unsigned int eax, ecx;
++#endif /* !IA64_PARAVIRTUALIZED */
 +
-+	__asm__(XEN_CPUID
-+		: "=a" (eax), "=c" (ecx)
-+		: "0" (op)
-+		: "bx", "dx" );
-+	return ecx;
-+}
-+static inline unsigned int cpuid_edx(unsigned int op)
++#endif /* !__ASSEMBLY */
++
++#endif /* _ASM_IA64_PRIVOP_H */
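The net effect of this header, sketched with a hypothetical caller: code
keeps using the generic intrinsic names, and the build decides whether they
resolve to the raw __ia64_* operations (native) or to the paravirtualized
versions from asm/xen/privop.h (when IA64_PARAVIRTUALIZED is defined).

    #include <asm/intrinsics.h>     /* pulls in asm/privop.h at the end */

    static int interrupts_enabled_example(void)
    {
            /* Native build: a PSR read; Xen build: a cheap hyperprivop. */
            return ia64_get_psr_i() != 0;
    }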
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/sal.h
+--- a/include/asm-ia64/sal.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-ia64/sal.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -42,6 +42,9 @@
+ #include <asm/pal.h>
+ #include <asm/system.h>
+ #include <asm/fpu.h>
++#ifdef CONFIG_XEN
++#include <asm/xen/xencomm.h>
++#endif
+ 
+ extern spinlock_t sal_lock;
+ 
+@@ -687,10 +690,43 @@
+ /* Get the processor and platform information logged by SAL with respect to the machine
+  * state at the time of the MCAs, INITs, CMCs, or CPEs.
+  */
++#ifdef CONFIG_XEN
++static inline u64 ia64_sal_get_state_info_size (u64 sal_info_type);
++typedef struct ia64_mca_xencomm_t {
++	void *record;
++	struct xencomm_handle *handle;
++	struct list_head list;
++} ia64_mca_xencomm_t;
++extern struct list_head ia64_mca_xencomm_list;
++extern spinlock_t ia64_mca_xencomm_lock;
++#endif
++
+ static inline u64
+ ia64_sal_get_state_info (u64 sal_info_type, u64 *sal_info)
+ {
+ 	struct ia64_sal_retval isrv;
++#ifdef CONFIG_XEN
++	if (is_running_on_xen()) {
++		ia64_mca_xencomm_t *entry;
++		struct xencomm_handle *desc = NULL;
++		unsigned long flags;
++
++		spin_lock_irqsave(&ia64_mca_xencomm_lock, flags);
++		list_for_each_entry(entry, &ia64_mca_xencomm_list, list) {
++			if (entry->record == sal_info) {
++				desc = entry->handle;
++				break;
++			}
++		}
++		spin_unlock_irqrestore(&ia64_mca_xencomm_lock, flags);
++
++		if (desc == NULL)
++			return 0;
++
++		SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO, sal_info_type, 0,
++		                   desc, 0, 0, 0, 0);
++	} else
++#endif
+ 	SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO, sal_info_type, 0,
+ 	              sal_info, 0, 0, 0, 0);
+ 	if (isrv.status)
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/sn/sn_sal.h
+--- a/include/asm-ia64/sn/sn_sal.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-ia64/sn/sn_sal.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -86,6 +86,8 @@
+ #define  SN_SAL_SET_OS_FEATURE_SET		   0x02000066
+ #define  SN_SAL_INJECT_ERROR			   0x02000067
+ #define  SN_SAL_SET_CPU_NUMBER			   0x02000068
++
++#define  SN_SAL_KERNEL_LAUNCH_EVENT		   0x02000069
+ 
+ /*
+  * Service-specific constants
+@@ -1154,4 +1156,11 @@
+ 	SAL_CALL_NOLOCK(rv, SN_SAL_SET_CPU_NUMBER, cpu, 0, 0, 0, 0, 0, 0);
+ 	return rv.status;
+ }
++static inline int
++ia64_sn_kernel_launch_event(void)
 +{
-+	unsigned int eax, edx;
++ 	struct ia64_sal_retval rv;
++	SAL_CALL_NOLOCK(rv, SN_SAL_KERNEL_LAUNCH_EVENT, 0, 0, 0, 0, 0, 0, 0);
++	return rv.status;
++}
+ #endif /* _ASM_IA64_SN_SN_SAL_H */
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/sn/types.h
+--- a/include/asm-ia64/sn/types.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-ia64/sn/types.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -20,7 +20,9 @@
+ typedef unsigned char	slabid_t;	/* slab (asic) within slot */
+ typedef u64 nic_t;
+ typedef unsigned long iopaddr_t;
++#ifndef _ASM_IA64_MADDR_H /* paddr_t is defined in asm-ia64/maddr.h */
+ typedef unsigned long paddr_t;
++#endif
+ typedef short cnodeid_t;
+ 
+ #endif /* _ASM_IA64_SN_TYPES_H */
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/swiotlb.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-ia64/swiotlb.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,41 @@
++#ifndef _ASM_SWIOTLB_H
++#define _ASM_SWIOTLB_H 1
++
++/* SWIOTLB interface */
++
++extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, size_t size,
++				      int dir);
++extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
++				  size_t size, int dir);
++extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
++					 dma_addr_t dev_addr,
++					 size_t size, int dir);
++extern void swiotlb_sync_single_for_device(struct device *hwdev,
++					    dma_addr_t dev_addr,
++					    size_t size, int dir);
++extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
++				     struct scatterlist *sg, int nelems,
++				     int dir);
++extern void swiotlb_sync_sg_for_device(struct device *hwdev,
++					struct scatterlist *sg, int nelems,
++					int dir);
++extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
++		      int nents, int direction);
++extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
++			 int nents, int direction);
++extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
++extern dma_addr_t swiotlb_map_page(struct device *hwdev, struct page *page,
++                                   unsigned long offset, size_t size,
++                                   enum dma_data_direction direction);
++extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
++                               size_t size, enum dma_data_direction direction);
++extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
++extern void swiotlb_init(void);
 +
-+	__asm__(XEN_CPUID
-+		: "=a" (eax), "=d" (edx)
-+		: "0" (op)
-+		: "bx", "cx");
-+	return edx;
-+}
++#ifdef CONFIG_SWIOTLB
++extern int swiotlb;
++#else
++#define swiotlb 0
++#endif
 +
-+#define load_cr3(pgdir) write_cr3(__pa(pgdir))
++#endif
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/synch_bitops.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-ia64/synch_bitops.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,61 @@
++#ifndef __XEN_SYNCH_BITOPS_H__
++#define __XEN_SYNCH_BITOPS_H__
 +
 +/*
-+ * Intel CPU features in CR4
++ * Copyright 1992, Linus Torvalds.
++ * Heavily modified to provide guaranteed strong synchronisation
++ * when communicating with Xen or other guest OSes running on other CPUs.
 + */
-+#define X86_CR4_VME		0x0001	/* enable vm86 extensions */
-+#define X86_CR4_PVI		0x0002	/* virtual interrupts flag enable */
-+#define X86_CR4_TSD		0x0004	/* disable time stamp at ipl 3 */
-+#define X86_CR4_DE		0x0008	/* enable debugging extensions */
-+#define X86_CR4_PSE		0x0010	/* enable page size extensions */
-+#define X86_CR4_PAE		0x0020	/* enable physical address extensions */
-+#define X86_CR4_MCE		0x0040	/* Machine check enable */
-+#define X86_CR4_PGE		0x0080	/* enable global pages */
-+#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
-+#define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
-+#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */
 +
-+/*
-+ * Save the cr4 feature set we're using (ie
-+ * Pentium 4MB enable and PPro Global page
-+ * enable), so that any CPU's that boot up
-+ * after us can get the correct flags.
-+ */
-+extern unsigned long mmu_cr4_features;
++#define ADDR (*(volatile long *) addr)
 +
-+static inline void set_in_cr4 (unsigned long mask)
++static __inline__ void synch_set_bit(int nr, volatile void * addr)
 +{
-+	unsigned cr4;
-+	mmu_cr4_features |= mask;
-+	cr4 = read_cr4();
-+	cr4 |= mask;
-+	write_cr4(cr4);
++	set_bit(nr, addr);
 +}
 +
-+static inline void clear_in_cr4 (unsigned long mask)
++static __inline__ void synch_clear_bit(int nr, volatile void * addr)
 +{
-+	unsigned cr4;
-+	mmu_cr4_features &= ~mask;
-+	cr4 = read_cr4();
-+	cr4 &= ~mask;
-+	write_cr4(cr4);
++	clear_bit(nr, addr);
 +}
 +
-+/*
-+ *      NSC/Cyrix CPU configuration register indexes
-+ */
-+
-+#define CX86_PCR0 0x20
-+#define CX86_GCR  0xb8
-+#define CX86_CCR0 0xc0
-+#define CX86_CCR1 0xc1
-+#define CX86_CCR2 0xc2
-+#define CX86_CCR3 0xc3
-+#define CX86_CCR4 0xe8
-+#define CX86_CCR5 0xe9
-+#define CX86_CCR6 0xea
-+#define CX86_CCR7 0xeb
-+#define CX86_PCR1 0xf0
-+#define CX86_DIR0 0xfe
-+#define CX86_DIR1 0xff
-+#define CX86_ARR_BASE 0xc4
-+#define CX86_RCR_BASE 0xdc
-+
-+/*
-+ *      NSC/Cyrix CPU indexed register access macros
-+ */
-+
-+#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
++static __inline__ void synch_change_bit(int nr, volatile void * addr)
++{
++	change_bit(nr, addr);
++}
 +
-+#define setCx86(reg, data) do { \
-+	outb((reg), 0x22); \
-+	outb((data), 0x23); \
-+} while (0)
++static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
++{
++    return test_and_set_bit(nr, addr);
++}
 +
-+/* Stop speculative execution */
-+static inline void sync_core(void)
++static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
 +{
-+	int tmp;
-+	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
++    return test_and_clear_bit(nr, addr);
 +}
 +
-+static inline void __monitor(const void *eax, unsigned long ecx,
-+		unsigned long edx)
++static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
 +{
-+	/* "monitor %eax,%ecx,%edx;" */
-+	asm volatile(
-+		".byte 0x0f,0x01,0xc8;"
-+		: :"a" (eax), "c" (ecx), "d"(edx));
++    return test_and_change_bit(nr, addr);
 +}
 +
-+static inline void __mwait(unsigned long eax, unsigned long ecx)
++static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
 +{
-+	/* "mwait %eax,%ecx;" */
-+	asm volatile(
-+		".byte 0x0f,0x01,0xc9;"
-+		: :"a" (eax), "c" (ecx));
++    return test_bit(nr, addr);
 +}
 +
-+/* from system description table in BIOS.  Mostly for MCA use, but
-+others may find it useful. */
-+extern unsigned int machine_id;
-+extern unsigned int machine_submodel_id;
-+extern unsigned int BIOS_revision;
-+extern unsigned int mca_pentium_flag;
++static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
++{
++    return test_bit(nr, addr);
++}
 +
-+/* Boot loader type from the setup header */
-+extern int bootloader_type;
++#define synch_cmpxchg	ia64_cmpxchg4_acq
 +
-+/*
-+ * User space process size: 3GB (default).
-+ */
-+#define TASK_SIZE	(PAGE_OFFSET)
++#define synch_test_bit(nr,addr) \
++(__builtin_constant_p(nr) ? \
++ synch_const_test_bit((nr),(addr)) : \
++ synch_var_test_bit((nr),(addr)))
 +
-+/* This decides where the kernel will search for a free chunk of vm
-+ * space during mmap's.
-+ */
-+#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
++#define synch_cmpxchg_subword synch_cmpxchg
 +
-+#define HAVE_ARCH_PICK_MMAP_LAYOUT
++#endif /* __XEN_SYNCH_BITOPS_H__ */
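On ia64 the ordinary bitops are already fully atomic across CPUs, which is
why the synch_* wrappers above simply forward to them; they exist so that
code shared with other architectures can request the stronger semantics
explicitly. A hypothetical use against a page shared with another domain:

    #include <asm/synch_bitops.h>

    static int claim_shared_slot(volatile void *shared_flags, int nr)
    {
            /* Nonzero: another CPU or domain got there first. */
            return synch_test_and_set_bit(nr, shared_flags);
    }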
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/system.h
+--- a/include/asm-ia64/system.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-ia64/system.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -123,7 +123,7 @@
+ #define __local_irq_save(x)			\
+ do {						\
+ 	ia64_stop();				\
+-	(x) = ia64_getreg(_IA64_REG_PSR);	\
++	(x) = ia64_get_psr_i();			\
+ 	ia64_stop();				\
+ 	ia64_rsm(IA64_PSR_I);			\
+ } while (0)
+@@ -171,7 +171,7 @@
+ #endif /* !CONFIG_IA64_DEBUG_IRQ */
+ 
+ #define local_irq_enable()	({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
+-#define local_save_flags(flags)	({ ia64_stop(); (flags) = ia64_getreg(_IA64_REG_PSR); })
++#define local_save_flags(flags)	({ ia64_stop(); (flags) = ia64_get_psr_i(); })
+ 
+ #define irqs_disabled()				\
+ ({						\
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/xen/privop.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-ia64/xen/privop.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,550 @@
++#ifndef _ASM_IA64_XEN_PRIVOP_H
++#define _ASM_IA64_XEN_PRIVOP_H
 +
 +/*
-+ * Size of io_bitmap.
++ * Copyright (C) 2005 Hewlett-Packard Co
++ *	Dan Magenheimer <dan.magenheimer at hp.com>
++ *
++ * Paravirtualizations of privileged operations for Xen/ia64
++ *
 + */
-+#define IO_BITMAP_BITS  65536
-+#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
-+#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
-+#ifndef CONFIG_X86_NO_TSS
-+#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
-+#endif
-+#define INVALID_IO_BITMAP_OFFSET 0x8000
-+#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
-+
-+struct i387_fsave_struct {
-+	long	cwd;
-+	long	swd;
-+	long	twd;
-+	long	fip;
-+	long	fcs;
-+	long	foo;
-+	long	fos;
-+	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
-+	long	status;		/* software status information */
-+};
 +
-+struct i387_fxsave_struct {
-+	unsigned short	cwd;
-+	unsigned short	swd;
-+	unsigned short	twd;
-+	unsigned short	fop;
-+	long	fip;
-+	long	fcs;
-+	long	foo;
-+	long	fos;
-+	long	mxcsr;
-+	long	mxcsr_mask;
-+	long	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
-+	long	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
-+	long	padding[56];
-+} __attribute__ ((aligned (16)));
++#ifndef __ASSEMBLY__
++#include <linux/types.h>		/* arch-ia64.h requires uint64_t */
++#include <linux/stringify.h>
++#endif
++#include <xen/interface/arch-ia64.h>
 +
-+struct i387_soft_struct {
-+	long	cwd;
-+	long	swd;
-+	long	twd;
-+	long	fip;
-+	long	fcs;
-+	long	foo;
-+	long	fos;
-+	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
-+	unsigned char	ftop, changed, lookahead, no_update, rm, alimit;
-+	struct info	*info;
-+	unsigned long	entry_eip;
-+};
++#define IA64_PARAVIRTUALIZED
 +
-+union i387_union {
-+	struct i387_fsave_struct	fsave;
-+	struct i387_fxsave_struct	fxsave;
-+	struct i387_soft_struct soft;
-+};
++/* At 1 MB, before per-cpu space but still addressable using addl instead
++   of movl. */
++#define XSI_BASE				0xfffffffffff00000
 +
-+typedef struct {
-+	unsigned long seg;
-+} mm_segment_t;
++/* Address of mapped regs.  */
++#define XMAPPEDREGS_BASE		(XSI_BASE + XSI_SIZE)
 +
-+struct thread_struct;
++#ifdef __ASSEMBLY__
++#define	XEN_HYPER_RFI			break HYPERPRIVOP_RFI
++#define	XEN_HYPER_RSM_PSR_DT		break HYPERPRIVOP_RSM_DT
++#define	XEN_HYPER_SSM_PSR_DT		break HYPERPRIVOP_SSM_DT
++#define	XEN_HYPER_COVER			break HYPERPRIVOP_COVER
++#define	XEN_HYPER_ITC_D			break HYPERPRIVOP_ITC_D
++#define	XEN_HYPER_ITC_I			break HYPERPRIVOP_ITC_I
++#define	XEN_HYPER_SSM_I			break HYPERPRIVOP_SSM_I
++#define	XEN_HYPER_GET_IVR		break HYPERPRIVOP_GET_IVR
++#define	XEN_HYPER_GET_TPR		break HYPERPRIVOP_GET_TPR
++#define	XEN_HYPER_SET_TPR		break HYPERPRIVOP_SET_TPR
++#define	XEN_HYPER_EOI			break HYPERPRIVOP_EOI
++#define	XEN_HYPER_SET_ITM		break HYPERPRIVOP_SET_ITM
++#define	XEN_HYPER_THASH			break HYPERPRIVOP_THASH
++#define	XEN_HYPER_PTC_GA		break HYPERPRIVOP_PTC_GA
++#define	XEN_HYPER_ITR_D			break HYPERPRIVOP_ITR_D
++#define	XEN_HYPER_GET_RR		break HYPERPRIVOP_GET_RR
++#define	XEN_HYPER_SET_RR		break HYPERPRIVOP_SET_RR
++#define	XEN_HYPER_SET_KR		break HYPERPRIVOP_SET_KR
++#define	XEN_HYPER_FC			break HYPERPRIVOP_FC
++#define	XEN_HYPER_GET_CPUID		break HYPERPRIVOP_GET_CPUID
++#define	XEN_HYPER_GET_PMD		break HYPERPRIVOP_GET_PMD
++#define	XEN_HYPER_GET_EFLAG		break HYPERPRIVOP_GET_EFLAG
++#define	XEN_HYPER_SET_EFLAG		break HYPERPRIVOP_SET_EFLAG
++#define	XEN_HYPER_GET_PSR		break HYPERPRIVOP_GET_PSR
 +
-+#ifndef CONFIG_X86_NO_TSS
-+struct tss_struct {
-+	unsigned short	back_link,__blh;
-+	unsigned long	esp0;
-+	unsigned short	ss0,__ss0h;
-+	unsigned long	esp1;
-+	unsigned short	ss1,__ss1h;	/* ss1 is used to cache MSR_IA32_SYSENTER_CS */
-+	unsigned long	esp2;
-+	unsigned short	ss2,__ss2h;
-+	unsigned long	__cr3;
-+	unsigned long	eip;
-+	unsigned long	eflags;
-+	unsigned long	eax,ecx,edx,ebx;
-+	unsigned long	esp;
-+	unsigned long	ebp;
-+	unsigned long	esi;
-+	unsigned long	edi;
-+	unsigned short	es, __esh;
-+	unsigned short	cs, __csh;
-+	unsigned short	ss, __ssh;
-+	unsigned short	ds, __dsh;
-+	unsigned short	fs, __fsh;
-+	unsigned short	gs, __gsh;
-+	unsigned short	ldt, __ldth;
-+	unsigned short	trace, io_bitmap_base;
-+	/*
-+	 * The extra 1 is there because the CPU will access an
-+	 * additional byte beyond the end of the IO permission
-+	 * bitmap. The extra byte must be all 1 bits, and must
-+	 * be within the limit.
-+	 */
-+	unsigned long	io_bitmap[IO_BITMAP_LONGS + 1];
-+	/*
-+	 * Cache the current maximum and the last task that used the bitmap:
-+	 */
-+	unsigned long io_bitmap_max;
-+	struct thread_struct *io_bitmap_owner;
-+	/*
-+	 * pads the TSS to be cacheline-aligned (size is 0x100)
-+	 */
-+	unsigned long __cacheline_filler[35];
-+	/*
-+	 * .. and then another 0x100 bytes for emergency kernel stack
-+	 */
-+	unsigned long stack[64];
-+} __attribute__((packed));
++#define XSI_IFS			(XSI_BASE + XSI_IFS_OFS)
++#define XSI_PRECOVER_IFS	(XSI_BASE + XSI_PRECOVER_IFS_OFS)
++#define XSI_IFA			(XSI_BASE + XSI_IFA_OFS)
++#define XSI_ISR			(XSI_BASE + XSI_ISR_OFS)
++#define XSI_IIM			(XSI_BASE + XSI_IIM_OFS)
++#define XSI_ITIR		(XSI_BASE + XSI_ITIR_OFS)
++#define XSI_PSR_I_ADDR		(XSI_BASE + XSI_PSR_I_ADDR_OFS)
++#define XSI_PSR_IC		(XSI_BASE + XSI_PSR_IC_OFS)
++#define XSI_IPSR		(XSI_BASE + XSI_IPSR_OFS)
++#define XSI_IIP			(XSI_BASE + XSI_IIP_OFS)
++#define XSI_B1NAT		(XSI_BASE + XSI_B1NATS_OFS)
++#define XSI_BANK1_R16		(XSI_BASE + XSI_BANK1_R16_OFS)
++#define XSI_BANKNUM		(XSI_BASE + XSI_BANKNUM_OFS)
++#define XSI_IHA			(XSI_BASE + XSI_IHA_OFS)
 +#endif
 +
-+#define ARCH_MIN_TASKALIGN	16
-+
-+struct thread_struct {
-+/* cached TLS descriptors. */
-+	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
-+	unsigned long	esp0;
-+	unsigned long	sysenter_cs;
-+	unsigned long	eip;
-+	unsigned long	esp;
-+	unsigned long	fs;
-+	unsigned long	gs;
-+/* Hardware debugging registers */
-+	unsigned long	debugreg[8];  /* %%db0-7 debug registers */
-+/* fault info */
-+	unsigned long	cr2, trap_no, error_code;
-+/* floating point info */
-+	union i387_union	i387;
-+/* virtual 86 mode info */
-+	struct vm86_struct __user * vm86_info;
-+	unsigned long		screen_bitmap;
-+	unsigned long		v86flags, v86mask, saved_esp0;
-+	unsigned int		saved_fs, saved_gs;
-+/* IO permissions */
-+	unsigned long	*io_bitmap_ptr;
-+ 	unsigned long	iopl;
-+/* max allowed port in the bitmap, in bytes: */
-+	unsigned long	io_bitmap_max;
-+};
++#ifndef __ASSEMBLY__
++#define	XEN_HYPER_SSM_I		asm("break %0" : : "i" (HYPERPRIVOP_SSM_I): "memory")
 +
-+#define INIT_THREAD  {							\
-+	.vm86_info = NULL,						\
-+	.sysenter_cs = __KERNEL_CS,					\
-+	.io_bitmap_ptr = NULL,						\
-+}
++/************************************************/
++/* Instructions paravirtualized for correctness */
++/************************************************/
 +
-+#ifndef CONFIG_X86_NO_TSS
-+/*
-+ * Note that the .io_bitmap member must be extra-big. This is because
-+ * the CPU will access an additional byte beyond the end of the IO
-+ * permission bitmap. The extra byte must be all 1 bits, and must
-+ * be within the limit.
-+ */
-+#define INIT_TSS  {							\
-+	.esp0		= sizeof(init_stack) + (long)&init_stack,	\
-+	.ss0		= __KERNEL_DS,					\
-+	.ss1		= __KERNEL_CS,					\
-+	.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,			\
-+	.io_bitmap	= { [ 0 ... IO_BITMAP_LONGS] = ~0 },		\
++/* "fc" and "thash" are privilege-sensitive instructions, meaning they
++ *  may have different semantics depending on whether they are executed
++ *  at PL0 vs PL!=0.  When paravirtualized, these instructions mustn't
++ *  be allowed to execute directly, lest incorrect semantics result. */
++#ifdef ASM_SUPPORTED
++static inline void
++xen_fc(unsigned long addr)
++{
++	register __u64 __addr asm ("r8") = addr;
++	asm volatile ("break %0":: "i"(HYPERPRIVOP_FC), "r"(__addr): "memory");
 +}
 +
-+static inline void __load_esp0(struct tss_struct *tss, struct thread_struct *thread)
++static inline unsigned long
++xen_thash(unsigned long addr)
 +{
-+	tss->esp0 = thread->esp0;
-+	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
-+	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
-+		tss->ss1 = thread->sysenter_cs;
-+		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
-+	}
++	register __u64 ia64_intri_res asm ("r8");
++	register __u64 __addr asm ("r8") = addr;
++	asm volatile ("break %1":
++		      "=r"(ia64_intri_res):
++		      "i"(HYPERPRIVOP_THASH), "0"(__addr));
++	return ia64_intri_res;
 +}
-+#define load_esp0(tss, thread) \
-+	__load_esp0(tss, thread)
 +#else
-+#define load_esp0(tss, thread) \
-+	HYPERVISOR_stack_switch(__KERNEL_DS, (thread)->esp0)
++extern void xen_fc(unsigned long addr);
++extern unsigned long xen_thash(unsigned long addr);
 +#endif
 +
-+#define start_thread(regs, new_eip, new_esp) do {		\
-+	__asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0));	\
-+	set_fs(USER_DS);					\
-+	regs->xds = __USER_DS;					\
-+	regs->xes = __USER_DS;					\
-+	regs->xss = __USER_DS;					\
-+	regs->xcs = __USER_CS;					\
-+	regs->eip = new_eip;					\
-+	regs->esp = new_esp;					\
++#define ia64_fc(addr)							\
++do {									\
++	if (is_running_on_xen())					\
++		xen_fc((unsigned long)(addr));				\
++	else								\
++		__ia64_fc(addr);					\
 +} while (0)
 +
-+/*
-+ * These special macros can be used to get or set a debugging register
-+ */
-+#define get_debugreg(var, register)				\
-+		(var) = HYPERVISOR_get_debugreg((register))
-+#define set_debugreg(value, register)			\
-+		HYPERVISOR_set_debugreg((register), (value))
-+
-+/*
-+ * Set IOPL bits in EFLAGS from given mask
-+ */
-+static inline void set_iopl_mask(unsigned mask)
-+{
-+	struct physdev_set_iopl set_iopl;
-+
-+	/* Force the change at ring 0. */
-+	set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
-+	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
-+}
-+
-+/* Forward declaration, a strange C thing */
-+struct task_struct;
-+struct mm_struct;
-+
-+/* Free all resources held by a thread. */
-+extern void release_thread(struct task_struct *);
-+
-+/* Prepare to copy thread state - unlazy all lazy status */
-+extern void prepare_to_copy(struct task_struct *tsk);
-+
-+/*
-+ * create a kernel thread without removing it from tasklists
-+ */
-+extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-+
-+extern unsigned long thread_saved_pc(struct task_struct *tsk);
-+void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack);
-+
-+unsigned long get_wchan(struct task_struct *p);
-+
-+#define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
-+#define KSTK_TOP(info)                                                 \
-+({                                                                     \
-+       unsigned long *__ptr = (unsigned long *)(info);                 \
-+       (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \
-+})
-+
-+/*
-+ * The below -8 is to reserve 8 bytes on top of the ring0 stack.
-+ * This is necessary to guarantee that the entire "struct pt_regs"
-+ * is accessable even if the CPU haven't stored the SS/ESP registers
-+ * on the stack (interrupt gate does not save these registers
-+ * when switching to the same priv ring).
-+ * Therefore beware: accessing the xss/esp fields of the
-+ * "struct pt_regs" is possible, but they may contain the
-+ * completely wrong values.
-+ */
-+#define task_pt_regs(task)                                             \
-+({                                                                     \
-+       struct pt_regs *__regs__;                                       \
-+       __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
-+       __regs__ - 1;                                                   \
++#define ia64_thash(addr)						\
++({									\
++	unsigned long ia64_intri_res;					\
++	if (is_running_on_xen())					\
++		ia64_intri_res =					\
++			xen_thash((unsigned long)(addr));		\
++	else								\
++		ia64_intri_res = __ia64_thash(addr);			\
++	ia64_intri_res;							\
 +})
 +
-+#define KSTK_EIP(task) (task_pt_regs(task)->eip)
-+#define KSTK_ESP(task) (task_pt_regs(task)->esp)
-+
-+
-+struct microcode_header {
-+	unsigned int hdrver;
-+	unsigned int rev;
-+	unsigned int date;
-+	unsigned int sig;
-+	unsigned int cksum;
-+	unsigned int ldrver;
-+	unsigned int pf;
-+	unsigned int datasize;
-+	unsigned int totalsize;
-+	unsigned int reserved[3];
-+};
-+
-+struct microcode {
-+	struct microcode_header hdr;
-+	unsigned int bits[0];
-+};
-+
-+typedef struct microcode microcode_t;
-+typedef struct microcode_header microcode_header_t;
-+
-+/* microcode format is extended from prescott processors */
-+struct extended_signature {
-+	unsigned int sig;
-+	unsigned int pf;
-+	unsigned int cksum;
-+};
-+
-+struct extended_sigtable {
-+	unsigned int count;
-+	unsigned int cksum;
-+	unsigned int reserved[3];
-+	struct extended_signature sigs[0];
-+};
++/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
++ * is not currently used (though it may be in a long-format VHPT system!)
++ * and the semantics of cover only change if psr.ic is off which is very
++ * rare (and currently non-existent outside of assembly code */
 +
-+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-+static inline void rep_nop(void)
++/* There are also privilege-sensitive registers.  These registers are
++ * readable at any privilege level but only writable at PL0. */
++#ifdef ASM_SUPPORTED
++static inline unsigned long
++xen_get_cpuid(int index)
 +{
-+	__asm__ __volatile__("rep;nop": : :"memory");
++	register __u64 ia64_intri_res asm ("r8");
++	register __u64 __index asm ("r8") = index;
++	asm volatile ("break %1":
++		      "=r"(ia64_intri_res):
++		      "i"(HYPERPRIVOP_GET_CPUID), "0"(__index));
++	return ia64_intri_res;
 +}
 +
-+#define cpu_relax()	rep_nop()
++static inline unsigned long
++xen_get_pmd(int index)
++{
++	register __u64 ia64_intri_res asm ("r8");
++	register __u64 __index asm ("r8") = index;
++	asm volatile ("break %1":
++		      "=r"(ia64_intri_res):
++		      "i"(HYPERPRIVOP_GET_PMD), "0O"(__index));
++	return ia64_intri_res;
++}
++#else
++extern unsigned long xen_get_cpuid(int index);
++extern unsigned long xen_get_pmd(int index);
++#endif
 +
-+/* generic versions from gas */
-+#define GENERIC_NOP1	".byte 0x90\n"
-+#define GENERIC_NOP2    	".byte 0x89,0xf6\n"
-+#define GENERIC_NOP3        ".byte 0x8d,0x76,0x00\n"
-+#define GENERIC_NOP4        ".byte 0x8d,0x74,0x26,0x00\n"
-+#define GENERIC_NOP5        GENERIC_NOP1 GENERIC_NOP4
-+#define GENERIC_NOP6	".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
-+#define GENERIC_NOP7	".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
-+#define GENERIC_NOP8	GENERIC_NOP1 GENERIC_NOP7
++#define ia64_get_cpuid(i)						\
++({									\
++	unsigned long ia64_intri_res;					\
++	if (is_running_on_xen())					\
++		ia64_intri_res = xen_get_cpuid(i);			\
++	else								\
++		ia64_intri_res = __ia64_get_cpuid(i);			\
++	ia64_intri_res;							\
++})
 +
-+/* Opteron nops */
-+#define K8_NOP1 GENERIC_NOP1
-+#define K8_NOP2	".byte 0x66,0x90\n" 
-+#define K8_NOP3	".byte 0x66,0x66,0x90\n" 
-+#define K8_NOP4	".byte 0x66,0x66,0x66,0x90\n" 
-+#define K8_NOP5	K8_NOP3 K8_NOP2 
-+#define K8_NOP6	K8_NOP3 K8_NOP3
-+#define K8_NOP7	K8_NOP4 K8_NOP3
-+#define K8_NOP8	K8_NOP4 K8_NOP4
++#define ia64_get_pmd(i)						\
++({									\
++	unsigned long ia64_intri_res;					\
++	if (is_running_on_xen())					\
++		ia64_intri_res = xen_get_pmd(i);			\
++	else								\
++		ia64_intri_res = __ia64_get_pmd(i);			\
++	ia64_intri_res;							\
++})
 +
-+/* K7 nops */
-+/* uses eax dependencies (arbitary choice) */
-+#define K7_NOP1  GENERIC_NOP1
-+#define K7_NOP2	".byte 0x8b,0xc0\n" 
-+#define K7_NOP3	".byte 0x8d,0x04,0x20\n"
-+#define K7_NOP4	".byte 0x8d,0x44,0x20,0x00\n"
-+#define K7_NOP5	K7_NOP4 ASM_NOP1
-+#define K7_NOP6	".byte 0x8d,0x80,0,0,0,0\n"
-+#define K7_NOP7        ".byte 0x8D,0x04,0x05,0,0,0,0\n"
-+#define K7_NOP8        K7_NOP7 ASM_NOP1
++#ifdef ASM_SUPPORTED
++static inline unsigned long
++xen_get_eflag(void)
++{
++	register __u64 ia64_intri_res asm ("r8");
++	asm volatile ("break %1":
++		      "=r"(ia64_intri_res): "i"(HYPERPRIVOP_GET_EFLAG));
++	return ia64_intri_res;
++}
 +
-+#ifdef CONFIG_MK8
-+#define ASM_NOP1 K8_NOP1
-+#define ASM_NOP2 K8_NOP2
-+#define ASM_NOP3 K8_NOP3
-+#define ASM_NOP4 K8_NOP4
-+#define ASM_NOP5 K8_NOP5
-+#define ASM_NOP6 K8_NOP6
-+#define ASM_NOP7 K8_NOP7
-+#define ASM_NOP8 K8_NOP8
-+#elif defined(CONFIG_MK7)
-+#define ASM_NOP1 K7_NOP1
-+#define ASM_NOP2 K7_NOP2
-+#define ASM_NOP3 K7_NOP3
-+#define ASM_NOP4 K7_NOP4
-+#define ASM_NOP5 K7_NOP5
-+#define ASM_NOP6 K7_NOP6
-+#define ASM_NOP7 K7_NOP7
-+#define ASM_NOP8 K7_NOP8
++static inline void
++xen_set_eflag(unsigned long val)
++{
++	register __u64 __val asm ("r8") = val;
++	asm volatile ("break %0":: "i"(HYPERPRIVOP_SET_EFLAG), "r"(__val): "memory");
++}
 +#else
-+#define ASM_NOP1 GENERIC_NOP1
-+#define ASM_NOP2 GENERIC_NOP2
-+#define ASM_NOP3 GENERIC_NOP3
-+#define ASM_NOP4 GENERIC_NOP4
-+#define ASM_NOP5 GENERIC_NOP5
-+#define ASM_NOP6 GENERIC_NOP6
-+#define ASM_NOP7 GENERIC_NOP7
-+#define ASM_NOP8 GENERIC_NOP8
++extern unsigned long xen_get_eflag(void);	/* see xen_ia64_getreg */
++extern void xen_set_eflag(unsigned long);	/* see xen_ia64_setreg */
 +#endif
 +
-+#define ASM_NOP_MAX 8
++/************************************************/
++/* Instructions paravirtualized for performance */
++/************************************************/
 +
-+/* Prefetch instructions for Pentium III and AMD Athlon */
-+/* It's not worth to care about 3dnow! prefetches for the K6
-+   because they are microcoded there and very slow.
-+   However we don't do prefetches for pre XP Athlons currently
-+   That should be fixed. */
-+#define ARCH_HAS_PREFETCH
-+static inline void prefetch(const void *x)
-+{
-+	alternative_input(ASM_NOP4,
-+			  "prefetchnta (%1)",
-+			  X86_FEATURE_XMM,
-+			  "r" (x));
++/* Xen uses memory-mapped virtual privileged registers for access to many
++ * performance-sensitive privileged registers.  Some, like the processor
++ * status register (psr), are broken up into multiple memory locations.
++ * Others, like "pend", are abstractions based on privileged registers.
++ * "Pend" is guaranteed to be set if reading cr.ivr would return a
++ * (non-spurious) interrupt. */
++#define XEN_MAPPEDREGS ((struct mapped_regs *)XMAPPEDREGS_BASE)
++#define XSI_PSR_I			\
++	(*XEN_MAPPEDREGS->interrupt_mask_addr)
++#define xen_get_virtual_psr_i()		\
++	(!XSI_PSR_I)
++#define xen_set_virtual_psr_i(_val)	\
++	({ XSI_PSR_I = (uint8_t)(_val) ? 0 : 1; })
++#define xen_set_virtual_psr_ic(_val)	\
++	({ XEN_MAPPEDREGS->interrupt_collection_enabled = _val ? 1 : 0; })
++#define xen_get_virtual_pend()		\
++	(*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1))
++
++/* Hyperprivops are "break" instructions with a well-defined API.
++ * In particular, the virtual psr.ic bit must be off; in this way
++ * it is guaranteed to never conflict with a linux break instruction.
++ * Normally this is done in a Xen stub, but this one is frequent enough
++ * that we inline it. */
++#define xen_hyper_ssm_i()						\
++({									\
++	XEN_HYPER_SSM_I;						\
++})
++
++/* Turning off interrupts can be paravirtualized simply by writing
++ * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool). */
++#define xen_rsm_i()							\
++{									\
++	xen_set_virtual_psr_i(0);					\
++	barrier();							\
 +}
 +
-+#define ARCH_HAS_PREFETCH
-+#define ARCH_HAS_PREFETCHW
-+#define ARCH_HAS_SPINLOCK_PREFETCH
++/* Turning on interrupts is a bit more complicated: write to the
++ * memory-mapped virtual psr.i bit first (to avoid a race condition),
++ * then, if any interrupts were pending, execute a hyperprivop
++ * to ensure the pending interrupt gets delivered; else we're done. */
++#define xen_ssm_i()							\
++({									\
++	int old = xen_get_virtual_psr_i();				\
++	xen_set_virtual_psr_i(1);					\
++	barrier();							\
++	if (!old && xen_get_virtual_pend())				\
++		xen_hyper_ssm_i();					\
++})
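[The ordering in xen_ssm_i() is the whole point: the virtual psr.i bit is enabled before the pend byte is checked, so an event arriving in the window is either observed via xen_get_virtual_pend() or delivered normally once the bit is visible. An annotated straight-line expansion, illustrative only:

	/* Hedged expansion of xen_ssm_i(); not a new definition. */
	static inline void xen_ssm_i_sketch(void)
	{
		int old = xen_get_virtual_psr_i();  /* were interrupts on?      */
		xen_set_virtual_psr_i(1);           /* enable first ...         */
		barrier();                          /* ... keep compiler order  */
		if (!old && xen_get_virtual_pend()) /* event arrived while off? */
			xen_hyper_ssm_i();          /* have Xen deliver it now  */
	}
]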
 +
-+/* 3dnow! prefetch to get an exclusive cache line. Useful for 
-+   spinlocks to avoid one state transition in the cache coherency protocol. */
-+static inline void prefetchw(const void *x)
-+{
-+	alternative_input(ASM_NOP4,
-+			  "prefetchw (%1)",
-+			  X86_FEATURE_3DNOW,
-+			  "r" (x));
++#define xen_ia64_intrin_local_irq_restore(x)				\
++{									\
++	if (is_running_on_xen()) {					\
++		if ((x) & IA64_PSR_I) { xen_ssm_i(); }			\
++		else { xen_rsm_i(); }					\
++	} else {							\
++		__ia64_intrin_local_irq_restore((x));			\
++	}								\
++}
-+#define spin_lock_prefetch(x)	prefetchw(x)
 +
-+extern void select_idle_routine(const struct cpuinfo_x86 *c);
++#define	xen_get_psr_i()							\
++(									\
++	(is_running_on_xen()) ?						\
++		(xen_get_virtual_psr_i() ? IA64_PSR_I : 0)		\
++		: __ia64_get_psr_i()					\
++)
 +
-+#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
++#define xen_ia64_ssm(mask)						\
++{									\
++	if ((mask)==IA64_PSR_I) {					\
++		if (is_running_on_xen()) { xen_ssm_i(); }		\
++		else { __ia64_ssm(mask); }				\
++	}								\
++	else { __ia64_ssm(mask); }					\
++}
 +
-+extern unsigned long boot_option_idle_override;
-+extern void enable_sep_cpu(void);
-+extern int sysenter_setup(void);
++#define xen_ia64_rsm(mask)						\
++{									\
++	if ((mask)==IA64_PSR_I) {					\
++		if (is_running_on_xen()) { xen_rsm_i(); }		\
++		else { __ia64_rsm(mask); }				\
++	}								\
++	else { __ia64_rsm(mask); }					\
++}
 +
-+#endif /* __ASM_I386_PROCESSOR_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/ptrace.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/ptrace.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/ptrace.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/ptrace.h	2007-07-30 16:35:12.000000000 +0200
-@@ -0,0 +1,90 @@
-+#ifndef _I386_PTRACE_H
-+#define _I386_PTRACE_H
 +
-+#define EBX 0
-+#define ECX 1
-+#define EDX 2
-+#define ESI 3
-+#define EDI 4
-+#define EBP 5
-+#define EAX 6
-+#define DS 7
-+#define ES 8
-+#define FS 9
-+#define GS 10
-+#define ORIG_EAX 11
-+#define EIP 12
-+#define CS  13
-+#define EFL 14
-+#define UESP 15
-+#define SS   16
-+#define FRAME_SIZE 17
++/* Although all privileged operations can be left to trap and will
++ * be properly handled by Xen, some are frequent enough that we use
++ * hyperprivops for performance. */
 +
-+/* this struct defines the way the registers are stored on the 
-+   stack during a system call. */
++#ifndef ASM_SUPPORTED
++extern unsigned long xen_get_psr(void);
++extern unsigned long xen_get_ivr(void);
++extern unsigned long xen_get_tpr(void);
++extern void xen_set_itm(unsigned long);
++extern void xen_set_tpr(unsigned long);
++extern void xen_eoi(unsigned long);
++extern void xen_set_rr(unsigned long index, unsigned long val);
++extern unsigned long xen_get_rr(unsigned long index);
++extern void xen_set_kr(unsigned long index, unsigned long val);
++extern void xen_ptcga(unsigned long addr, unsigned long size);
++#else
++static inline unsigned long
++xen_get_psr(void)
++{
++	register __u64 ia64_intri_res asm ("r8");
++	asm volatile ("break %1":
++		      "=r"(ia64_intri_res): "i"(HYPERPRIVOP_GET_PSR));
++	return ia64_intri_res;
++}
 +
-+struct pt_regs {
-+	long ebx;
-+	long ecx;
-+	long edx;
-+	long esi;
-+	long edi;
-+	long ebp;
-+	long eax;
-+	int  xds;
-+	int  xes;
-+	long orig_eax;
-+	long eip;
-+	int  xcs;
-+	long eflags;
-+	long esp;
-+	int  xss;
-+};
++static inline unsigned long
++xen_get_ivr(void)
++{
++	register __u64 ia64_intri_res asm ("r8");
++	asm volatile ("break %1":
++		      "=r"(ia64_intri_res): "i"(HYPERPRIVOP_GET_IVR));
++	return ia64_intri_res;
++}
 +
-+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
-+#define PTRACE_GETREGS            12
-+#define PTRACE_SETREGS            13
-+#define PTRACE_GETFPREGS          14
-+#define PTRACE_SETFPREGS          15
-+#define PTRACE_GETFPXREGS         18
-+#define PTRACE_SETFPXREGS         19
++static inline unsigned long
++xen_get_tpr(void)
++{
++	register __u64 ia64_intri_res asm ("r8");
++	asm volatile ("break %1":
++		      "=r"(ia64_intri_res): "i"(HYPERPRIVOP_GET_TPR));
++	return ia64_intri_res;
++}
 +
-+#define PTRACE_OLDSETOPTIONS         21
++static inline void
++xen_set_tpr(unsigned long val)
++{
++	register __u64 __val asm ("r8") = val;
++	asm volatile ("break %0"::
++		      "i"(HYPERPRIVOP_SET_TPR), "r"(__val): "memory");
++}
 +
-+#define PTRACE_GET_THREAD_AREA    25
-+#define PTRACE_SET_THREAD_AREA    26
++static inline void
++xen_eoi(unsigned long val)
++{
++	register __u64 __val asm ("r8") = val;
++	asm volatile ("break %0"::
++		      "i"(HYPERPRIVOP_EOI), "r"(__val): "memory");
++}
 +
-+#define PTRACE_SYSEMU		  31
-+#define PTRACE_SYSEMU_SINGLESTEP  32
++static inline void
++xen_set_itm(unsigned long val)
++{
++	register __u64 __val asm ("r8") = val;
++	asm volatile ("break %0":: "i"(HYPERPRIVOP_SET_ITM), "r"(__val): "memory");
++}
 +
-+#ifdef __KERNEL__
++static inline void
++xen_ptcga(unsigned long addr, unsigned long size)
++{
++	register __u64 __addr asm ("r8") = addr;
++	register __u64 __size asm ("r9") = size;
++	asm volatile ("break %0"::
++		      "i"(HYPERPRIVOP_PTC_GA), "r"(__addr), "r"(__size): "memory");
++}
 +
-+#include <asm/vm86.h>
++static inline unsigned long
++xen_get_rr(unsigned long index)
++{
++	register __u64 ia64_intri_res asm ("r8");
++	register __u64 __index asm ("r8") = index;
++	asm volatile ("break %1":
++		      "=r"(ia64_intri_res):
++		      "i"(HYPERPRIVOP_GET_RR), "0"(__index));
++	return ia64_intri_res;
++}
 +
-+struct task_struct;
-+extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
++static inline void
++xen_set_rr(unsigned long index, unsigned long val)
++{
++	register __u64 __index asm ("r8") = index;
++	register __u64 __val asm ("r9") = val;
++	asm volatile ("break %0"::
++		      "i"(HYPERPRIVOP_SET_RR), "r"(__index), "r"(__val): "memory");
++}
 +
-+/*
-+ * user_mode_vm(regs) determines whether a register set came from user mode.
-+ * This is true if V8086 mode was enabled OR if the register set was from
-+ * protected mode with RPL-3 CS value.  This tricky test checks that with
-+ * one comparison.  Many places in the kernel can bypass this full check
-+ * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
-+ */
-+static inline int user_mode(struct pt_regs *regs)
++static inline void
++xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
++		   unsigned long val2, unsigned long val3, unsigned long val4)
 +{
-+	return (regs->xcs & 2) != 0;
++	register __u64 __val0 asm ("r8") = val0;
++	register __u64 __val1 asm ("r9") = val1;
++	register __u64 __val2 asm ("r10") = val2;
++	register __u64 __val3 asm ("r11") = val3;
++	register __u64 __val4 asm ("r14") = val4;
++	asm volatile ("break %0" ::
++		      "i"(HYPERPRIVOP_SET_RR0_TO_RR4),
++		      "r"(__val0), "r"(__val1),
++		      "r"(__val2), "r"(__val3), "r"(__val4): "memory");
 +}
-+static inline int user_mode_vm(struct pt_regs *regs)
++
++static inline void
++xen_set_kr(unsigned long index, unsigned long val)
 +{
-+	return ((regs->xcs & 2) | (regs->eflags & VM_MASK)) != 0;
++	register __u64 __index asm ("r8") = index;
++	register __u64 __val asm ("r9") = val;
++	asm volatile ("break %0"::
++		      "i"(HYPERPRIVOP_SET_KR), "r"(__index), "r"(__val): "memory");
 +}
-+#define instruction_pointer(regs) ((regs)->eip)
-+#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
-+extern unsigned long profile_pc(struct pt_regs *regs);
-+#else
-+#define profile_pc(regs) instruction_pointer(regs)
 +#endif
-+#endif /* __KERNEL__ */
 +
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/scatterlist.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/scatterlist.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/scatterlist.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/scatterlist.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,22 @@
-+#ifndef _I386_SCATTERLIST_H
-+#define _I386_SCATTERLIST_H
++/* Note: It may look wrong to test for is_running_on_xen() in each case.
++ * However, regnum is always a constant so, as written, the compiler
++ * eliminates the switch statement, whereas is_running_on_xen() must be
++ * tested dynamically. */
++#define xen_ia64_getreg(regnum)						\
++({									\
++	__u64 ia64_intri_res;						\
++									\
++	switch(regnum) {						\
++	case _IA64_REG_PSR:						\
++		ia64_intri_res = (is_running_on_xen()) ?		\
++			xen_get_psr() :					\
++			__ia64_getreg(regnum);				\
++		break;							\
++	case _IA64_REG_CR_IVR:						\
++		ia64_intri_res = (is_running_on_xen()) ?		\
++			xen_get_ivr() :					\
++			__ia64_getreg(regnum);				\
++		break;							\
++	case _IA64_REG_CR_TPR:						\
++		ia64_intri_res = (is_running_on_xen()) ?		\
++			xen_get_tpr() :					\
++			__ia64_getreg(regnum);				\
++		break;							\
++	case _IA64_REG_AR_EFLAG:					\
++		ia64_intri_res = (is_running_on_xen()) ?		\
++			xen_get_eflag() :				\
++			__ia64_getreg(regnum);				\
++		break;							\
++	default:							\
++		ia64_intri_res = __ia64_getreg(regnum);			\
++		break;							\
++	}								\
++	ia64_intri_res;							\
++})
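[Since regnum is a compile-time constant at every call site, the switch above folds away and only the is_running_on_xen() test for the selected register survives. A sketch of what one call collapses to; illustrative, not generated code:

	/* What ia64_getreg(_IA64_REG_CR_TPR) reduces to after folding. */
	static inline __u64 read_tpr_sketch(void)
	{
		return is_running_on_xen() ? xen_get_tpr()
					   : __ia64_getreg(_IA64_REG_CR_TPR);
	}
]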
 +
-+struct scatterlist {
-+    struct page		*page;
-+    unsigned int	offset;
-+    unsigned int	length;
-+    dma_addr_t		dma_address;
-+    unsigned int	dma_length;
-+};
++#define xen_ia64_setreg(regnum,val)					\
++({									\
++	switch(regnum) {						\
++	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7:			\
++		(is_running_on_xen()) ?					\
++			xen_set_kr((regnum-_IA64_REG_AR_KR0), val) :	\
++			__ia64_setreg(regnum,val);			\
++		break;							\
++	case _IA64_REG_CR_ITM:						\
++		(is_running_on_xen()) ?					\
++			xen_set_itm(val) :				\
++			__ia64_setreg(regnum,val);			\
++		break;							\
++	case _IA64_REG_CR_TPR:						\
++		(is_running_on_xen()) ?					\
++			xen_set_tpr(val) :				\
++			__ia64_setreg(regnum,val);			\
++		break;							\
++	case _IA64_REG_CR_EOI:						\
++		(is_running_on_xen()) ?					\
++			xen_eoi(val) :					\
++			__ia64_setreg(regnum,val);			\
++		break;							\
++	case _IA64_REG_AR_EFLAG:					\
++		(is_running_on_xen()) ?					\
++			xen_set_eflag(val) :				\
++			__ia64_setreg(regnum,val);			\
++		break;							\
++	default:							\
++		__ia64_setreg(regnum,val);				\
++		break;							\
++	}								\
++})
 +
-+/* These macros should be used after a pci_map_sg call has been done
-+ * to get bus addresses of each of the SG entries and their lengths.
-+ * You should only work with the number of sg entries pci_map_sg
-+ * returns.
-+ */
-+#define sg_dma_address(sg)	((sg)->dma_address)
-+#define sg_dma_len(sg)		((sg)->dma_length)
++#define ia64_ptcga(addr, size)						\
++do {									\
++	if (is_running_on_xen())					\
++		xen_ptcga((addr), (size));				\
++	else								\
++		__ia64_ptcga((addr), (size));				\
++} while (0)
 +
-+#define ISA_DMA_THRESHOLD (0x00ffffff)
++#define ia64_set_rr(index, val)						\
++do {									\
++	if (is_running_on_xen())					\
++		xen_set_rr((index), (val));				\
++	else								\
++		__ia64_set_rr((index), (val));				\
++} while (0)
 +
-+#endif /* !(_I386_SCATTERLIST_H) */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/segment.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/segment.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/segment.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/segment.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,117 @@
-+#ifndef _ASM_SEGMENT_H
-+#define _ASM_SEGMENT_H
++#define ia64_get_rr(index)						\
++({									\
++	__u64 ia64_intri_res;						\
++	if (is_running_on_xen())					\
++		ia64_intri_res = xen_get_rr((index));			\
++	else								\
++		ia64_intri_res = __ia64_get_rr((index));		\
++	ia64_intri_res;							\
++})
 +
-+/*
-+ * The layout of the per-CPU GDT under Linux:
-+ *
-+ *   0 - null
-+ *   1 - reserved
-+ *   2 - reserved
-+ *   3 - reserved
-+ *
-+ *   4 - unused			<==== new cacheline
-+ *   5 - unused
-+ *
-+ *  ------- start of TLS (Thread-Local Storage) segments:
-+ *
-+ *   6 - TLS segment #1			[ glibc's TLS segment ]
-+ *   7 - TLS segment #2			[ Wine's %fs Win32 segment ]
-+ *   8 - TLS segment #3
-+ *   9 - reserved
-+ *  10 - reserved
-+ *  11 - reserved
-+ *
-+ *  ------- start of kernel segments:
-+ *
-+ *  12 - kernel code segment		<==== new cacheline
-+ *  13 - kernel data segment
-+ *  14 - default user CS
-+ *  15 - default user DS
-+ *  16 - TSS
-+ *  17 - LDT
-+ *  18 - PNPBIOS support (16->32 gate)
-+ *  19 - PNPBIOS support
-+ *  20 - PNPBIOS support
-+ *  21 - PNPBIOS support
-+ *  22 - PNPBIOS support
-+ *  23 - APM BIOS support
-+ *  24 - APM BIOS support
-+ *  25 - APM BIOS support 
-+ *
-+ *  26 - ESPFIX small SS
-+ *  27 - unused
-+ *  28 - unused
-+ *  29 - unused
-+ *  30 - unused
-+ *  31 - TSS for double fault handler
-+ */
-+#define GDT_ENTRY_TLS_ENTRIES	3
-+#define GDT_ENTRY_TLS_MIN	6
-+#define GDT_ENTRY_TLS_MAX 	(GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
++#define ia64_set_rr0_to_rr4(val0, val1, val2, val3, val4)		\
++do {									\
++	if (is_running_on_xen())					\
++		xen_set_rr0_to_rr4((val0), (val1), (val2),		\
++				   (val3), (val4));			\
++	else								\
++		__ia64_set_rr0_to_rr4((val0), (val1), (val2),		\
++				      (val3), (val4));			\
++} while (0)
 +
-+#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
++#define ia64_getreg			xen_ia64_getreg
++#define ia64_setreg			xen_ia64_setreg
++#define ia64_ssm			xen_ia64_ssm
++#define ia64_rsm			xen_ia64_rsm
++#define ia64_intrin_local_irq_restore	xen_ia64_intrin_local_irq_restore
++#define	ia64_get_psr_i			xen_get_psr_i
 +
-+#define GDT_ENTRY_DEFAULT_USER_CS	14
-+#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3)
++/* The remainder of these are not performance-sensitive, so it's
++ * OK not to paravirtualize them and just take a privop trap and emulate. */
++#define ia64_hint			__ia64_hint
++#define ia64_set_pmd			__ia64_set_pmd
++#define ia64_itci			__ia64_itci
++#define ia64_itcd			__ia64_itcd
++#define ia64_itri			__ia64_itri
++#define ia64_itrd			__ia64_itrd
++#define ia64_tpa			__ia64_tpa
++#define ia64_set_ibr			__ia64_set_ibr
++#define ia64_set_pkr			__ia64_set_pkr
++#define ia64_set_pmc			__ia64_set_pmc
++#define ia64_get_ibr			__ia64_get_ibr
++#define ia64_get_pkr			__ia64_get_pkr
++#define ia64_get_pmc			__ia64_get_pmc
++#define ia64_ptce			__ia64_ptce
++#define ia64_ptcl			__ia64_ptcl
++#define ia64_ptri			__ia64_ptri
++#define ia64_ptrd			__ia64_ptrd
++
++#endif /* !__ASSEMBLY__ */
++
++/* These routines use privilege-sensitive or performance-sensitive
++ * privileged instructions, so the code must be replaced with
++ * paravirtualized versions. */
++#define	ia64_leave_kernel		xen_leave_kernel
++#define	ia64_leave_syscall		xen_leave_syscall
++#define	ia64_trace_syscall		xen_trace_syscall
++#define	ia64_ret_from_clone		xen_ret_from_clone
++#define	ia64_switch_to			xen_switch_to
++#define	ia64_pal_call_static		xen_pal_call_static
++
++#endif /* _ASM_IA64_XEN_PRIVOP_H */
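[The #define block at the end is what makes the paravirtualization transparent: generic code keeps calling ia64_getreg(), ia64_ssm() and friends, while the build renames the native intrinsics to their __ia64_* forms so this header can interpose. Assuming that renaming (it happens outside this hunk), a call site is rewritten roughly as follows; next_tick is a hypothetical variable:

	/* Sketch of the rewriting chain for one register. */
	static inline void set_timer_sketch(unsigned long next_tick)
	{
		/* ia64_setreg(_IA64_REG_CR_ITM, next_tick) expands to: */
		if (is_running_on_xen())
			xen_set_itm(next_tick);
		else
			__ia64_setreg(_IA64_REG_CR_ITM, next_tick);
	}
]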
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/xen/xcom_hcall.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-ia64/xen/xcom_hcall.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,67 @@
++/*
++ * Copyright (C) 2006 Tristan Gingold <tristan.gingold at bull.net>, Bull SAS
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ * 
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
++ */
 +
-+#define GDT_ENTRY_DEFAULT_USER_DS	15
-+#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3)
++#ifndef _LINUX_XENCOMM_HCALL_H_
++#define _LINUX_XENCOMM_HCALL_H_
 +
-+#define GDT_ENTRY_KERNEL_BASE	12
++/* These functions create an inline or mini descriptor for the parameters
++   and call the corresponding xencomm_arch_hypercall_X.
++   Architectures should define HYPERVISOR_xxx as xencomm_hypercall_xxx unless
++   they want to use their own wrapper.  */
++extern int xencomm_hypercall_console_io(int cmd, int count, char *str);
 +
-+#define GDT_ENTRY_KERNEL_CS		(GDT_ENTRY_KERNEL_BASE + 0)
-+#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
-+#define GET_KERNEL_CS() (__KERNEL_CS | (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1) )
++extern int xencomm_hypercall_event_channel_op(int cmd, void *op);
 +
-+#define GDT_ENTRY_KERNEL_DS		(GDT_ENTRY_KERNEL_BASE + 1)
-+#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
-+#define GET_KERNEL_DS() (__KERNEL_DS | (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1) )
++extern int xencomm_hypercall_xen_version(int cmd, void *arg);
 +
-+#define GDT_ENTRY_TSS			(GDT_ENTRY_KERNEL_BASE + 4)
-+#define GDT_ENTRY_LDT			(GDT_ENTRY_KERNEL_BASE + 5)
++extern int xencomm_hypercall_physdev_op(int cmd, void *op);
 +
-+#define GDT_ENTRY_PNPBIOS_BASE		(GDT_ENTRY_KERNEL_BASE + 6)
-+#define GDT_ENTRY_APMBIOS_BASE		(GDT_ENTRY_KERNEL_BASE + 11)
++extern int xencomm_hypercall_grant_table_op(unsigned int cmd, void *op,
++                                            unsigned int count);
 +
-+#define GDT_ENTRY_ESPFIX_SS		(GDT_ENTRY_KERNEL_BASE + 14)
-+#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
++extern int xencomm_hypercall_sched_op(int cmd, void *arg);
 +
-+#define GDT_ENTRY_DOUBLEFAULT_TSS	31
++extern int xencomm_hypercall_multicall(void *call_list, int nr_calls);
 +
-+/*
-+ * The GDT has 32 entries
-+ */
-+#define GDT_ENTRIES 32
++extern int xencomm_hypercall_callback_op(int cmd, void *arg);
 +
-+#define GDT_SIZE (GDT_ENTRIES * 8)
++extern int xencomm_hypercall_memory_op(unsigned int cmd, void *arg);
 +
-+/* Simple and small GDT entries for booting only */
++extern unsigned long xencomm_hypercall_hvm_op(int cmd, void *arg);
 +
-+#define GDT_ENTRY_BOOT_CS		2
-+#define __BOOT_CS	(GDT_ENTRY_BOOT_CS * 8)
++extern int xencomm_hypercall_suspend(unsigned long srec);
 +
-+#define GDT_ENTRY_BOOT_DS		(GDT_ENTRY_BOOT_CS + 1)
-+#define __BOOT_DS	(GDT_ENTRY_BOOT_DS * 8)
++extern int xencomm_hypercall_xenoprof_op(int op, void *arg);
 +
-+/* The PnP BIOS entries in the GDT */
-+#define GDT_ENTRY_PNPBIOS_CS32		(GDT_ENTRY_PNPBIOS_BASE + 0)
-+#define GDT_ENTRY_PNPBIOS_CS16		(GDT_ENTRY_PNPBIOS_BASE + 1)
-+#define GDT_ENTRY_PNPBIOS_DS		(GDT_ENTRY_PNPBIOS_BASE + 2)
-+#define GDT_ENTRY_PNPBIOS_TS1		(GDT_ENTRY_PNPBIOS_BASE + 3)
-+#define GDT_ENTRY_PNPBIOS_TS2		(GDT_ENTRY_PNPBIOS_BASE + 4)
++extern int xencomm_hypercall_perfmon_op(unsigned long cmd, void* arg,
++                                        unsigned long count);
 +
-+/* The PnP BIOS selectors */
-+#define PNP_CS32   (GDT_ENTRY_PNPBIOS_CS32 * 8)	/* segment for calling fn */
-+#define PNP_CS16   (GDT_ENTRY_PNPBIOS_CS16 * 8)	/* code segment for BIOS */
-+#define PNP_DS     (GDT_ENTRY_PNPBIOS_DS * 8)	/* data segment for BIOS */
-+#define PNP_TS1    (GDT_ENTRY_PNPBIOS_TS1 * 8)	/* transfer data segment */
-+#define PNP_TS2    (GDT_ENTRY_PNPBIOS_TS2 * 8)	/* another data segment */
++extern long xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg);
 +
-+/*
-+ * The interrupt descriptor table has room for 256 idt's,
-+ * the global descriptor table is dependent on the number
-+ * of tasks we can have..
-+ */
-+#define IDT_ENTRIES 256
++extern long xencomm_hypercall_opt_feature(void *arg);
 +
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/setup.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/setup.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/setup.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/setup.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,81 @@
-+/*
-+ *	Just a place holder. We don't want to have to test x86 before
-+ *	we include stuff
-+ */
++/* For privcmd.  Locally declare the argument type to avoid an include
++   storm.  Type coherency is checked within privcmd.c.  */
++struct privcmd_hypercall;
++extern int privcmd_hypercall(struct privcmd_hypercall *hypercall);
 +
-+#ifndef _i386_SETUP_H
-+#define _i386_SETUP_H
++extern int xen_foreign_p2m_expose(struct privcmd_hypercall *hypercall);
 +
-+#ifdef __KERNEL__
-+#include <linux/pfn.h>
++extern int xencomm_hypercall_kexec_op(int cmd, void *arg);
 +
++#endif /* _LINUX_XENCOMM_HCALL_H_ */
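[As the comment at the top of this header says, an architecture opts in by aliasing its HYPERVISOR_* entry points to these wrappers, which build inline or mini descriptors for guest-virtual arguments before trapping. A hedged sketch of that aliasing; the real definitions live in the per-arch hypercall headers, not here:

	/* Illustrative wiring; not part of this patch. */
	#define HYPERVISOR_xen_version(cmd, arg) \
		xencomm_hypercall_xen_version(cmd, arg)
	#define HYPERVISOR_sched_op(cmd, arg) \
		xencomm_hypercall_sched_op(cmd, arg)
]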
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/xen/xencomm.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-ia64/xen/xencomm.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,33 @@
 +/*
-+ * Reserved space for vmalloc and iomap - defined in asm/page.h
++ * Copyright (C) 2006 Hollis Blanchard <hollisb at us.ibm.com>, IBM Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ * 
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 + */
-+#define MAXMEM_PFN	PFN_DOWN(MAXMEM)
-+#define MAX_NONPAE_PFN	(1 << 20)
-+#endif
 +
-+#define PARAM_SIZE 4096
-+#define COMMAND_LINE_SIZE 256
++#ifndef _ASM_IA64_XENCOMM_H_
++#define _ASM_IA64_XENCOMM_H_
 +
-+#define OLD_CL_MAGIC_ADDR	0x90020
-+#define OLD_CL_MAGIC		0xA33F
-+#define OLD_CL_BASE_ADDR	0x90000
-+#define OLD_CL_OFFSET		0x90022
-+#define NEW_CL_POINTER		0x228	/* Relative to real mode data */
++#define is_kernel_addr(x)					\
++	((PAGE_OFFSET <= (x) &&					\
++	  (x) < (PAGE_OFFSET + (1UL << IA64_MAX_PHYS_BITS))) ||	\
++	 (KERNEL_START <= (x) &&				\
++	  (x) < KERNEL_START + KERNEL_TR_PAGE_SIZE))
 +
-+#ifndef __ASSEMBLY__
-+/*
-+ * This is set up by the setup-routine at boot-time
-+ */
-+extern unsigned char boot_params[PARAM_SIZE];
++/* Must be called before any hypercall.  */
++extern void xencomm_initialize (void);
 +
-+#define PARAM	(boot_params)
-+#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
-+#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
-+#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))
-+#define E820_MAP_NR (*(char*) (PARAM+E820NR))
-+#define E820_MAP    ((struct e820entry *) (PARAM+E820MAP))
-+#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
-+#define IST_INFO   (*(struct ist_info *) (PARAM+0x60))
-+#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
-+#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
-+#define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4)))
-+#define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8)))
-+#define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc)))
-+#define EFI_MEMMAP ((void *) *((unsigned long *)(PARAM+0x1d0)))
-+#define EFI_MEMMAP_SIZE (*((unsigned long *) (PARAM+0x1d4)))
-+#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
-+#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
-+#define VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
-+#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
-+#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
-+#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
-+#define KERNEL_START (*(unsigned long *) (PARAM+0x214))
-+#define INITRD_START (__pa(xen_start_info->mod_start))
-+#define INITRD_SIZE (xen_start_info->mod_len)
-+#define EDID_INFO   (*(struct edid_info *) (PARAM+0x440))
-+#define EDD_NR     (*(unsigned char *) (PARAM+EDDNR))
-+#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
-+#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
-+#define EDD_BUF     ((struct edd_info *) (PARAM+EDDBUF))
++#include <xen/xencomm.h>
 +
-+/*
-+ * Do NOT EVER look at the BIOS memory size location.
-+ * It does not work on many machines.
++#endif /* _ASM_IA64_XENCOMM_H_ */
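[is_kernel_addr() above is the predicate xencomm uses to decide whether a buffer sits in one of the identity-mapped kernel regions (the PAGE_OFFSET window or the mapped kernel text), in which case its machine address can be derived in place for an inline descriptor; anything else needs a constructed page-list descriptor. A hedged sketch of the decision; the helper name below is hypothetical:

	/* Illustrative dispatch on the predicate; "buf" is hypothetical. */
	static inline int needs_descriptor_sketch(void *buf)
	{
		/* Identity-mapped kernel addresses translate in place
		 * (inline descriptor); everything else must be described
		 * page by page before the hypercall. */
		return !is_kernel_addr((unsigned long)buf);
	}
]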
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-ia64/xenoprof.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-ia64/xenoprof.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,48 @@
++/******************************************************************************
++ * asm-ia64/xenoprof.h
++ *
++ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
++ *                    VA Linux Systems Japan K.K.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ *
 + */
-+#define LOWMEMSIZE()	(0x9f000)
-+
-+struct e820entry;
++#ifndef __ASM_XENOPROF_H__
++#define __ASM_XENOPROF_H__
++#ifdef CONFIG_XEN
 +
-+char * __init machine_specific_memory_setup(void);
++#undef HAVE_XENOPROF_CREATE_FILES
 +
-+int __init copy_e820_map(struct e820entry * biosmap, int nr_map);
-+int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map);
-+void __init add_memory_region(unsigned long long start,
-+			      unsigned long long size, int type);
++struct xenoprof_init;
++void xenoprof_arch_init_counter(struct xenoprof_init *init);
++void xenoprof_arch_counter(void);
++void xenoprof_arch_start(void);
++void xenoprof_arch_stop(void);
 +
-+#endif /* __ASSEMBLY__ */
++struct xenoprof_arch_shared_buffer {
++	struct resource*	res;
++};
 +
-+#endif /* _i386_SETUP_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/smp.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/smp.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/smp.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/smp.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,103 @@
-+#ifndef __ASM_SMP_H
-+#define __ASM_SMP_H
++struct xenoprof_shared_buffer;
++void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer *sbuf);
++struct xenoprof_get_buffer;
++int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer *get_buffer,
++                                    struct xenoprof_shared_buffer *sbuf);
++struct xenoprof_passive;
++int xenoprof_arch_set_passive(struct xenoprof_passive *pdomain,
++                              struct xenoprof_shared_buffer *sbuf);
 +
++#endif /* CONFIG_XEN */
++#endif /* __ASM_XENOPROF_H__ */
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-powerpc/mpic.h
+--- a/include/asm-powerpc/mpic.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-powerpc/mpic.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -305,6 +305,8 @@
+ #define MPIC_SPV_EOI			0x00000020
+ /* No passthrough disable */
+ #define MPIC_NO_PTHROU_DIS		0x00000040
++/* Skip reset of IPI vectors during init */
++#define MPIC_SKIP_IPI_INIT		0x00000080
+ 
+ /* MPIC HW modification ID */
+ #define MPIC_REGSET_MASK		0xf0000000
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-powerpc/udbg.h
+--- a/include/asm-powerpc/udbg.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-powerpc/udbg.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -42,6 +42,7 @@
+ extern void __init udbg_init_pmac_realmode(void);
+ extern void __init udbg_init_maple_realmode(void);
+ extern void __init udbg_init_iseries(void);
++extern void __init udbg_init_xen(void);
+ extern void __init udbg_init_rtas_panel(void);
+ extern void __init udbg_init_rtas_console(void);
+ 
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-powerpc/xen/asm/gnttab_dma.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-powerpc/xen/asm/gnttab_dma.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,29 @@
 +/*
-+ * We need the APIC definitions automatically as part of 'smp.h'
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ *
++ * Copyright 2007 IBM Corp.
++ *
++ * Authors: Hollis Blanchard <hollisb at us.ibm.com>
 + */
-+#ifndef __ASSEMBLY__
-+#include <linux/kernel.h>
-+#include <linux/threads.h>
-+#include <linux/cpumask.h>
-+#endif
 +
-+#ifdef CONFIG_X86_LOCAL_APIC
-+#ifndef __ASSEMBLY__
-+#include <asm/fixmap.h>
-+#include <asm/bitops.h>
-+#include <asm/mpspec.h>
-+#ifdef CONFIG_X86_IO_APIC
-+#include <asm/io_apic.h>
-+#endif
-+#include <asm/apic.h>
-+#endif
-+#endif
++#ifndef _ASM_PPC_GNTTAB_DMA_H
++#define _ASM_PPC_GNTTAB_DMA_H
 +
-+#define BAD_APICID 0xFFu
-+#ifdef CONFIG_SMP
-+#ifndef __ASSEMBLY__
++static inline int gnttab_dma_local_pfn(struct page *page)
++{
++	return 0;
++}
 +
-+/*
-+ * Private routines/data
++#endif /* _ASM_PPC_GNTTAB_DMA_H */
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-powerpc/xen/asm/hypercall.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-powerpc/xen/asm/hypercall.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,90 @@
++/******************************************************************************
++ * hypercall.h
++ * 
++ * Linux-specific hypervisor handling.
++ * 
++ * Copyright (c) 2002-2004, K A Fraser
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ *
++ * Copyright 2007 IBM Corp.
++ *
++ * Authors: Hollis Blanchard <hollisb at us.ibm.com>
++ *          Jimi Xenidis <jimix at watson.ibm.com>
 + */
-+ 
-+extern void smp_alloc_memory(void);
-+extern int pic_mode;
-+extern int smp_num_siblings;
-+extern cpumask_t cpu_sibling_map[];
-+extern cpumask_t cpu_core_map[];
 +
-+extern void (*mtrr_hook) (void);
-+extern void zap_low_mappings (void);
-+extern void lock_ipi_call_lock(void);
-+extern void unlock_ipi_call_lock(void);
++#ifndef __HYPERCALL_H__
++#define __HYPERCALL_H__
 +
-+#define MAX_APICID 256
-+extern u8 x86_cpu_to_apicid[];
++#include <asm/hvcall.h>
++#include <asm/page.h>
++#include <xen/xencomm.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/sched.h>
 +
-+#define cpu_physical_id(cpu)	x86_cpu_to_apicid[cpu]
++#define XEN_MARK(a)((a) | (~0UL << 16))
 +
-+#ifdef CONFIG_HOTPLUG_CPU
-+extern void cpu_exit_clear(void);
-+extern void cpu_uninit(void);
-+#endif
++extern int HYPERVISOR_console_io(int cmd, int count, char *str);
++extern int HYPERVISOR_event_channel_op(int cmd, void *op);
++extern int HYPERVISOR_xen_version(int cmd, void *arg);
++extern int HYPERVISOR_physdev_op(int cmd, void *op);
++extern int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop,
++		unsigned int count);
++extern int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
++extern int HYPERVISOR_memory_op(unsigned int cmd, void *arg);
++extern int HYPERVISOR_multicall(void *call_list, int nr_calls);
++
++extern int HYPERVISOR_sched_op(int cmd, void *arg);
++extern int HYPERVISOR_poll(
++	evtchn_port_t *ports, unsigned int nr_ports, u64 timeout);
 +
-+/*
-+ * This function is needed by all SMP systems. It must _always_ be valid
-+ * from the initial startup. We map APIC_BASE very early in page_setup(),
-+ * so this is correct in the x86 case.
-+ */
-+#define raw_smp_processor_id() (current_thread_info()->cpu)
++static inline int HYPERVISOR_shutdown(unsigned int reason)
++{
++	struct sched_shutdown sched_shutdown = {
++		.reason = reason
++	};
 +
-+extern cpumask_t cpu_possible_map;
-+#define cpu_callin_map cpu_possible_map
++	return HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
++}
 +
-+/* We don't mark CPUs online until __cpu_up(), so we need another measure */
-+static inline int num_booting_cpus(void)
++static inline int HYPERVISOR_set_timer_op(unsigned long arg)
 +{
-+	return cpus_weight(cpu_possible_map);
++	return plpar_hcall_norets(XEN_MARK(__HYPERVISOR_set_timer_op), arg);
 +}
 +
-+#ifdef CONFIG_X86_LOCAL_APIC
-+
-+#ifdef APIC_DEFINITION
-+extern int hard_smp_processor_id(void);
-+#else
-+#include <mach_apicdef.h>
-+static inline int hard_smp_processor_id(void)
-+{
-+	/* we don't want to mark this access volatile - bad code generation */
-+	return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
++extern int HYPERVISOR_suspend(unsigned long srec);
++extern int HYPERVISOR_kexec_op(unsigned long op, void *args);
++static inline unsigned long HYPERVISOR_hvm_op(int op, void *arg) {
++	return -ENOSYS;
 +}
-+#endif
 +
-+static __inline int logical_smp_processor_id(void)
++static inline int
++HYPERVISOR_mmu_update(
++	mmu_update_t *req, int count, int *success_count, domid_t domid)
 +{
-+	/* we don't want to mark this access volatile - bad code generation */
-+	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
++	return -ENOSYS;
 +}
 +
-+#endif
++struct privcmd_hypercall;
++extern int privcmd_hypercall(struct privcmd_hypercall *hypercall);
 +
-+extern int __cpu_disable(void);
-+extern void __cpu_die(unsigned int cpu);
-+extern void prefill_possible_map(void);
-+#endif /* !__ASSEMBLY__ */
++#endif	/*  __HYPERCALL_H__ */
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-powerpc/xen/asm/hypervisor.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-powerpc/xen/asm/hypervisor.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,276 @@
++/******************************************************************************
++ * hypervisor.h
++ * 
++ * Linux-specific hypervisor handling.
++ * 
++ * Copyright (c) 2002-2004, K A Fraser
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
 +
-+#else /* CONFIG_SMP */
++#ifndef __HYPERVISOR_H__
++#define __HYPERVISOR_H__
 +
-+#define cpu_physical_id(cpu)		boot_cpu_physical_apicid
++#include <linux/config.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/version.h>
++#include <xen/interface/xen.h>
++#include <asm/ptrace.h>
++#include <asm/page.h>
++#include <asm/irq.h>
 +
-+#define NO_PROC_ID		0xFF		/* No processor magic marker */
++extern shared_info_t *HYPERVISOR_shared_info;
 +
++/* arch/xen/i386/kernel/setup.c */
++extern start_info_t *xen_start_info;
++
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++#define is_initial_xendomain() (xen_start_info && \
++				(xen_start_info->flags & SIF_INITDOMAIN))
++#else
++#define is_initial_xendomain() 0
 +#endif
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/spinlock.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/spinlock.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/spinlock.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/spinlock.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,202 @@
-+#ifndef __ASM_SPINLOCK_H
-+#define __ASM_SPINLOCK_H
 +
-+#include <asm/atomic.h>
-+#include <asm/rwlock.h>
-+#include <asm/page.h>
-+#include <linux/compiler.h>
++/* arch/xen/kernel/evtchn.c */
++/* Force a proper event-channel callback from Xen. */
++void force_evtchn_callback(void);
 +
-+/*
-+ * Your basic SMP spinlocks, allowing only a single CPU anywhere
-+ *
-+ * Simple spin lock operations.  There are two variants, one clears IRQ's
-+ * on the local processor, one does not.
-+ *
-+ * We make no fairness assumptions. They have a cost.
-+ *
-+ * (the type definitions are in asm/spinlock_types.h)
-+ */
++/* arch/xen/kernel/process.c */
++void xen_cpu_idle (void);
 +
-+#define __raw_spin_is_locked(x) \
-+		(*(volatile signed char *)(&(x)->slock) <= 0)
++/* arch/xen/i386/kernel/hypervisor.c */
++void do_hypervisor_callback(struct pt_regs *regs);
 +
-+#define __raw_spin_lock_string \
-+	"\n1:\t" \
-+	LOCK_PREFIX " ; decb %0\n\t" \
-+	"jns 3f\n" \
-+	"2:\t" \
-+	"rep;nop\n\t" \
-+	"cmpb $0,%0\n\t" \
-+	"jle 2b\n\t" \
-+	"jmp 1b\n" \
-+	"3:\n\t"
++/* arch/xen/i386/kernel/head.S */
++void lgdt_finish(void);
 +
++/* arch/xen/i386/mm/hypervisor.c */
 +/*
-+ * NOTE: there's an irqs-on section here, which normally would have to be
-+ * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use
-+ * __raw_spin_lock_string_flags().
++ * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should already
++ * be MACHINE addresses.
 + */
-+#define __raw_spin_lock_string_flags \
-+	"\n1:\t" \
-+	LOCK_PREFIX " ; decb %0\n\t" \
-+	"jns 5f\n" \
-+	"2:\t" \
-+	"testl $0x200, %1\n\t" \
-+	"jz 4f\n\t" \
-+	"#sti\n" \
-+	"3:\t" \
-+	"rep;nop\n\t" \
-+	"cmpb $0, %0\n\t" \
-+	"jle 3b\n\t" \
-+	"#cli\n\t" \
-+	"jmp 1b\n" \
-+	"4:\t" \
-+	"rep;nop\n\t" \
-+	"cmpb $0, %0\n\t" \
-+	"jg 1b\n\t" \
-+	"jmp 4b\n" \
-+	"5:\n\t"
 +
-+static inline void __raw_spin_lock(raw_spinlock_t *lock)
-+{
-+	asm(__raw_spin_lock_string : "+m" (lock->slock) : : "memory");
-+}
++void xen_pt_switch(unsigned long ptr);
++void xen_new_user_pt(unsigned long ptr); /* x86_64 only */
++void xen_load_gs(unsigned int selector); /* x86_64 only */
++void xen_tlb_flush(void);
++void xen_invlpg(unsigned long ptr);
 +
-+/*
-+ * It is easier for the lock validator if interrupts are not re-enabled
-+ * in the middle of a lock-acquire. This is a performance feature anyway
-+ * so we turn it off:
-+ */
-+#ifndef CONFIG_PROVE_LOCKING
-+static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
-+{
-+	asm(__raw_spin_lock_string_flags : "+m" (lock->slock) : "r" (flags) : "memory");
-+}
++#ifndef CONFIG_XEN_SHADOW_MODE
++void xen_l1_entry_update(pte_t *ptr, pte_t val);
++void xen_l2_entry_update(pmd_t *ptr, pmd_t val);
++void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */
++void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */
++void xen_pgd_pin(unsigned long ptr);
++void xen_pgd_unpin(unsigned long ptr);
++void xen_pud_pin(unsigned long ptr); /* x86_64 only */
++void xen_pud_unpin(unsigned long ptr); /* x86_64 only */
++void xen_pmd_pin(unsigned long ptr); /* x86_64 only */
++void xen_pmd_unpin(unsigned long ptr); /* x86_64 only */
++void xen_pte_pin(unsigned long ptr);
++void xen_pte_unpin(unsigned long ptr);
++#else
++#define xen_l1_entry_update(_p, _v) set_pte((_p), (_v))
++#define xen_l2_entry_update(_p, _v) set_pgd((_p), (_v))
++#define xen_pgd_pin(_p)   ((void)0)
++#define xen_pgd_unpin(_p) ((void)0)
++#define xen_pte_pin(_p)   ((void)0)
++#define xen_pte_unpin(_p) ((void)0)
 +#endif
 +
-+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
-+{
-+	char oldval;
-+	__asm__ __volatile__(
-+		"xchgb %b0,%1"
-+		:"=q" (oldval), "+m" (lock->slock)
-+		:"0" (0) : "memory");
-+	return oldval > 0;
-+}
++void xen_set_ldt(unsigned long ptr, unsigned long bytes);
++void xen_machphys_update(unsigned long mfn, unsigned long pfn);
 +
-+/*
-+ * __raw_spin_unlock based on writing $1 to the low byte.
-+ * This method works. Despite all the confusion.
-+ * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there)
-+ * (PPro errata 66, 92)
-+ */
++#ifdef CONFIG_SMP
++#include <linux/cpumask.h>
++void xen_tlb_flush_all(void);
++void xen_invlpg_all(unsigned long ptr);
++void xen_tlb_flush_mask(cpumask_t *mask);
++void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr);
++#endif
 +
-+#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
++/* Returns zero on success, else a negative errno. */
++static inline int xen_create_contiguous_region(
++    unsigned long vstart, unsigned int order, unsigned int address_bits)
++{
++	return 0;
++}
++static inline void xen_destroy_contiguous_region(
++    unsigned long vstart, unsigned int order)
++{
++	return;
++}
 +
-+#define __raw_spin_unlock_string \
-+	"movb $1,%0" \
-+		:"+m" (lock->slock) : : "memory"
++#include <asm/hypercall.h>
 +
++/* BEGIN: all of these need a new home */
++struct vm_area_struct;
++int direct_remap_pfn_range(struct vm_area_struct *vma,  unsigned long address,
++			   unsigned long mfn, unsigned long size,
++			   pgprot_t prot, domid_t  domid);
++#define	pfn_to_mfn(x)	(x)
++#define	mfn_to_pfn(x)	(x)
++#define phys_to_machine(phys) ((maddr_t)(phys))
++#define phys_to_machine_mapping_valid(pfn) (1)
 +
-+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
-+{
-+	__asm__ __volatile__(
-+		__raw_spin_unlock_string
-+	);
-+}
++/* VIRT <-> MACHINE conversion */
++#define virt_to_machine(v)	(phys_to_machine(__pa(v)))
++#define machine_to_virt(m)	(__va(m))
++#define virt_to_mfn(v)		(pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
++#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
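++
++/*
++ * Illustrative expansions (these follow directly from the identity
++ * pfn_to_mfn()/mfn_to_pfn() defined above, so on powerpc the
++ * conversions reduce to plain physical ones):
++ *
++ *	virt_to_mfn(v) == __pa(v) >> PAGE_SHIFT
++ *	mfn_to_virt(m) == __va((m) << PAGE_SHIFT)
++ */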
 +
-+#else
 +
-+#define __raw_spin_unlock_string \
-+	"xchgb %b0, %1" \
-+		:"=q" (oldval), "+m" (lock->slock) \
-+		:"0" (oldval) : "memory"
++#define PIRQ_BASE		0
++#define NR_PIRQS		256
 +
-+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
-+{
-+	char oldval = 1;
++#define DYNIRQ_BASE		(PIRQ_BASE + NR_PIRQS)
++#define NR_DYNIRQS		256
 +
-+	__asm__ __volatile__(
-+		__raw_spin_unlock_string
-+	);
-+}
++#define NR_IPIS 4		/* PPC_MSG_DEBUGGER_BREAK + 1 */
 +
++#if NR_IRQS < (NR_PIRQS + NR_DYNIRQS)
++#error too many Xen IRQs
 +#endif
 +
-+#define __raw_spin_unlock_wait(lock) \
-+	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
++#define NR_IRQ_VECTORS		NR_IRQS
 +
-+/*
-+ * Read-write spinlocks, allowing multiple readers
-+ * but only one writer.
-+ *
-+ * NOTE! it is quite common to have readers in interrupts
-+ * but no interrupt writers. For those circumstances we
-+ * can "mix" irq-safe locks - any writer needs to get a
-+ * irq-safe write-lock, but readers can get non-irqsafe
-+ * read-locks.
-+ *
-+ * On x86, we implement read-write locks as a 32-bit counter
-+ * with the high bit (sign) being the "contended" bit.
-+ *
-+ * The inline assembly is non-obvious. Think about it.
-+ *
-+ * Changed to use the same technique as rw semaphores.  See
-+ * semaphore.h for details.  -ben
-+ *
-+ * the helpers are in arch/i386/kernel/semaphore.c
-+ */
++#define pirq_to_irq(_x)		((_x) + PIRQ_BASE)
++#define irq_to_pirq(_x)		((_x) - PIRQ_BASE)
 +
-+/**
-+ * read_can_lock - would read_trylock() succeed?
-+ * @lock: the rwlock in question.
-+ */
-+#define __raw_read_can_lock(x)		((int)(x)->lock > 0)
++#define dynirq_to_irq(_x)	((_x) + DYNIRQ_BASE)
++#define irq_to_dynirq(_x)	((_x) - DYNIRQ_BASE)
 +
-+/**
-+ * write_can_lock - would write_trylock() succeed?
-+ * @lock: the rwlock in question.
-+ */
-+#define __raw_write_can_lock(x)		((x)->lock == RW_LOCK_BIAS)
 +
-+static inline void __raw_read_lock(raw_rwlock_t *rw)
-+{
-+	__build_read_lock(rw, "__read_lock_failed");
-+}
++/* END:  all of these need a new home */
 +
-+static inline void __raw_write_lock(raw_rwlock_t *rw)
-+{
-+	__build_write_lock(rw, "__write_lock_failed");
-+}
++#if defined(CONFIG_X86_64)
++#define MULTI_UVMFLAGS_INDEX 2
++#define MULTI_UVMDOMID_INDEX 3
++#else
++#define MULTI_UVMFLAGS_INDEX 3
++#define MULTI_UVMDOMID_INDEX 4
++#endif
 +
-+static inline int __raw_read_trylock(raw_rwlock_t *lock)
-+{
-+	atomic_t *count = (atomic_t *)lock;
-+	atomic_dec(count);
-+	if (atomic_read(count) >= 0)
-+		return 1;
-+	atomic_inc(count);
-+	return 0;
-+}
++extern int is_running_on_xen(void);
 +
-+static inline int __raw_write_trylock(raw_rwlock_t *lock)
++static inline void
++MULTI_update_va_mapping(
++    multicall_entry_t *mcl, unsigned long va,
++    pte_t new_val, unsigned long flags)
 +{
-+	atomic_t *count = (atomic_t *)lock;
-+	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
-+		return 1;
-+	atomic_add(RW_LOCK_BIAS, count);
-+	return 0;
++    mcl->op = __HYPERVISOR_update_va_mapping;
++    mcl->args[0] = va;
++#if defined(CONFIG_X86_64)
++    mcl->args[1] = new_val.pte;
++    mcl->args[2] = flags;
++#elif defined(CONFIG_X86_PAE)
++    mcl->args[1] = new_val.pte_low;
++    mcl->args[2] = new_val.pte_high;
++    mcl->args[3] = flags;
++#elif defined(CONFIG_PPC64)
++    mcl->args[1] = pte_val(new_val);
++    mcl->args[2] = 0;
++    mcl->args[3] = flags;
++#else
++    mcl->args[1] = new_val.pte_low;
++    mcl->args[2] = 0;
++    mcl->args[3] = flags;
++#endif
 +}
 +
-+static inline void __raw_read_unlock(raw_rwlock_t *rw)
++static inline void
++MULTI_update_va_mapping_otherdomain(
++    multicall_entry_t *mcl, unsigned long va,
++    pte_t new_val, unsigned long flags, domid_t domid)
 +{
-+	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
++    mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
++    mcl->args[0] = va;
++#if defined(CONFIG_X86_64)
++    mcl->args[1] = new_val.pte;
++    mcl->args[2] = flags;
++    mcl->args[3] = domid;
++#elif defined(CONFIG_X86_PAE)
++    mcl->args[1] = new_val.pte_low;
++    mcl->args[2] = new_val.pte_high;
++    mcl->args[3] = flags;
++    mcl->args[4] = domid;
++#elif defined(CONFIG_PPC64)
++    mcl->args[1] = pte_val(new_val);
++    mcl->args[2] = 0;
++    mcl->args[3] = flags;
++    mcl->args[4] = domid;
++#else
++    mcl->args[1] = new_val.pte_low;
++    mcl->args[2] = 0;
++    mcl->args[3] = flags;
++    mcl->args[4] = domid;
++#endif
 +}
 +
-+static inline void __raw_write_unlock(raw_rwlock_t *rw)
++#define INVALID_P2M_ENTRY (~0UL)
++#define FOREIGN_FRAME(m) (INVALID_P2M_ENTRY)
++static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 +{
-+	asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
-+				 : "+m" (rw->lock) : : "memory");
++	if (pfn != mfn && mfn != INVALID_P2M_ENTRY)
++		printk(KERN_EMERG "%s: pfn: 0x%lx mfn: 0x%lx\n",
++		       __func__, pfn, mfn);
++	
++	return;
 +}
++#define pfn_pte_ma(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 +
-+#endif /* __ASM_SPINLOCK_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/swiotlb.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/swiotlb.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/swiotlb.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/swiotlb.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,43 @@
-+#ifndef _ASM_SWIOTLB_H
-+#define _ASM_SWIOTLB_H 1
++typedef unsigned long maddr_t;
++typedef unsigned long paddr_t;
 +
-+/* SWIOTLB interface */
++#ifdef CONFIG_XEN_SCRUB_PAGES
 +
-+extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, size_t size,
-+				      int dir);
-+extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-+				  size_t size, int dir);
-+extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
-+					 dma_addr_t dev_addr,
-+					 size_t size, int dir);
-+extern void swiotlb_sync_single_for_device(struct device *hwdev,
-+					    dma_addr_t dev_addr,
-+					    size_t size, int dir);
-+extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
-+				     struct scatterlist *sg, int nelems,
-+				     int dir);
-+extern void swiotlb_sync_sg_for_device(struct device *hwdev,
-+					struct scatterlist *sg, int nelems,
-+					int dir);
-+extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
-+		      int nents, int direction);
-+extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
-+			 int nents, int direction);
-+extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
-+#ifdef CONFIG_HIGHMEM
-+extern dma_addr_t swiotlb_map_page(struct device *hwdev, struct page *page,
-+                                   unsigned long offset, size_t size,
-+                                   enum dma_data_direction direction);
-+extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
-+                               size_t size, enum dma_data_direction direction);
-+#endif
-+extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
-+extern void swiotlb_init(void);
++static inline void scrub_pages(void *p, unsigned n)
++{
++	unsigned i;
 +
-+#ifdef CONFIG_SWIOTLB
-+extern int swiotlb;
++	for (i = 0; i < n; i++) {
++		clear_page(p);
++		p += PAGE_SIZE;
++	}
++}
 +#else
-+#define swiotlb 0
-+#endif
-+
++#define scrub_pages(_p,_n) ((void)0)
 +#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/synch_bitops.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/synch_bitops.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/synch_bitops.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/synch_bitops.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,145 @@
-+#ifndef __XEN_SYNCH_BITOPS_H__
-+#define __XEN_SYNCH_BITOPS_H__
 +
 +/*
-+ * Copyright 1992, Linus Torvalds.
-+ * Heavily modified to provide guaranteed strong synchronisation
-+ * when communicating with Xen or other guest OSes running on other CPUs.
++ * for blktap.c
++ * int create_lookup_pte_addr(struct mm_struct *mm, 
++ *                            unsigned long address,
++ *                            uint64_t *ptep);
 + */
++#define create_lookup_pte_addr(mm, address, ptep)			\
++	({								\
++		printk(KERN_EMERG					\
++		       "%s:%d "						\
++		       "create_lookup_pte_addr() isn't supported.\n",	\
++		       __func__, __LINE__);				\
++		BUG();							\
++		(-ENOSYS);						\
++	})
 +
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
++#endif /* __HYPERVISOR_H__ */
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-powerpc/xen/asm/maddr.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-powerpc/xen/asm/maddr.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,7 @@
++#ifndef _POWERPC_MADDR_H
++#define _POWERPC_MADDR_H
 +
-+#define ADDR (*(volatile long *) addr)
++#include <xen/features.h>
++#include <xen/interface/xen.h>
 +
-+static __inline__ void synch_set_bit(int nr, volatile void * addr)
-+{
-+    __asm__ __volatile__ ( 
-+        "lock btsl %1,%0"
-+        : "+m" (ADDR) : "Ir" (nr) : "memory" );
-+}
++#endif
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-powerpc/xen/asm/synch_bitops.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-powerpc/xen/asm/synch_bitops.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,100 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ *
++ * Copyright 2006 IBM Corp.
++ *
++ * Authors: Jimi Xenidis <jimix at watson.ibm.com>
++ */
 +
-+static __inline__ void synch_clear_bit(int nr, volatile void * addr)
-+{
-+    __asm__ __volatile__ (
-+        "lock btrl %1,%0"
-+        : "+m" (ADDR) : "Ir" (nr) : "memory" );
-+}
++#ifndef  __SYNCH_BITOPS_H__
++#define __SYNCH_BITOPS_H__
 +
-+static __inline__ void synch_change_bit(int nr, volatile void * addr)
-+{
-+    __asm__ __volatile__ (
-+        "lock btcl %1,%0"
-+        : "+m" (ADDR) : "Ir" (nr) : "memory" );
-+}
++#include <linux/config.h>
++#include <xen/interface/xen.h>
 +
-+static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
-+{
-+    int oldbit;
-+    __asm__ __volatile__ (
-+        "lock btsl %2,%1\n\tsbbl %0,%0"
-+        : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
-+    return oldbit;
-+}
++#ifdef CONFIG_SMP
++#include <asm/bitops.h>
 +
-+static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
-+{
-+    int oldbit;
-+    __asm__ __volatile__ (
-+        "lock btrl %2,%1\n\tsbbl %0,%0"
-+        : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
-+    return oldbit;
-+}
++#define synch_change_bit(a,b) change_bit(a,b)
++#define synch_clear_bit(a,b) clear_bit(a,b)
++#define synch_const_test_bit(a,b) const_test_bit(a,b) 
++#define synch_set_bit(a,b) set_bit(a,b)
++#define synch_test_and_set_bit(a,b) test_and_set_bit(a,b)
++#define synch_test_and_change_bit(a,b) test_and_change_bit(a,b)
++#define synch_test_and_clear_bit(a,b) test_and_clear_bit(a,b)
++#define synch_test_bit(a,b) test_bit(a,b)
 +
-+static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
++static __inline__ unsigned long
++__synch_cmpxchg_u16(volatile unsigned short *p, unsigned long old, unsigned long new)
 +{
-+    int oldbit;
-+
-+    __asm__ __volatile__ (
-+        "lock btcl %2,%1\n\tsbbl %0,%0"
-+        : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
-+    return oldbit;
++	int idx;
++	volatile unsigned int *xp = (unsigned int *)((ulong)p & ~(0x3UL));
++	union {
++		unsigned int word;
++		struct {
++			unsigned short s[2];
++		}s;
++	} xold, xnew;
++
++	/* we could start the reservation here and copy the u32
++	 * assembler, but I don't think it will gain us a whole
++	 * lot. */
++	xold.word = *xp;
++	xnew.word = xold.word;
++	idx = ((ulong)p >> 1) & 0x1;
++	xold.s.s[idx] = old;
++	xnew.s.s[idx] = new;
++
++	return __cmpxchg_u32(xp, xold.word, xnew.word);
 +}
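++/*
++ * Worked example (illustrative): for a u16 at byte offset 2 within its
++ * aligned word, ((ulong)p >> 1) & 0x1 == 1, so s[1] is the halfword
++ * that is compared and replaced, while s[0] keeps the value just
++ * observed. A concurrent store to the neighbouring halfword therefore
++ * also makes the word-sized __cmpxchg_u32() miss, which is the safe
++ * outcome.
++ */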
 +
-+struct __synch_xchg_dummy { unsigned long a[100]; };
-+#define __synch_xg(x) ((struct __synch_xchg_dummy *)(x))
-+
-+#define synch_cmpxchg(ptr, old, new) \
-+((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
-+                                     (unsigned long)(old), \
-+                                     (unsigned long)(new), \
-+                                     sizeof(*(ptr))))
-+
-+static inline unsigned long __synch_cmpxchg(volatile void *ptr,
-+					    unsigned long old,
-+					    unsigned long new, int size)
++/*
++ * This function doesn't exist, so you'll get a linker error
++ * if something tries to do an invalid cmpxchg().
++ */
++extern void __synch_cmpxchg_called_with_bad_pointer(void);
++static __inline__ unsigned long
++__synch_cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
++	       unsigned int size)
 +{
-+	unsigned long prev;
 +	switch (size) {
-+	case 1:
-+		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
-+				     : "=a"(prev)
-+				     : "q"(new), "m"(*__synch_xg(ptr)),
-+				       "0"(old)
-+				     : "memory");
-+		return prev;
 +	case 2:
-+		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
-+				     : "=a"(prev)
-+				     : "r"(new), "m"(*__synch_xg(ptr)),
-+				       "0"(old)
-+				     : "memory");
-+		return prev;
-+#ifdef CONFIG_X86_64
++		return __synch_cmpxchg_u16(ptr, old, new);
 +	case 4:
-+		__asm__ __volatile__("lock; cmpxchgl %k1,%2"
-+				     : "=a"(prev)
-+				     : "r"(new), "m"(*__synch_xg(ptr)),
-+				       "0"(old)
-+				     : "memory");
-+		return prev;
++		return __cmpxchg_u32(ptr, old, new);
++#ifdef CONFIG_PPC64
 +	case 8:
-+		__asm__ __volatile__("lock; cmpxchgq %1,%2"
-+				     : "=a"(prev)
-+				     : "r"(new), "m"(*__synch_xg(ptr)),
-+				       "0"(old)
-+				     : "memory");
-+		return prev;
-+#else
-+	case 4:
-+		__asm__ __volatile__("lock; cmpxchgl %1,%2"
-+				     : "=a"(prev)
-+				     : "r"(new), "m"(*__synch_xg(ptr)),
-+				       "0"(old)
-+				     : "memory");
-+		return prev;
++		return __cmpxchg_u64(ptr, old, new);
 +#endif
 +	}
++	__synch_cmpxchg_called_with_bad_pointer();
 +	return old;
 +}
 +
-+static __always_inline int synch_const_test_bit(int nr,
-+						const volatile void * addr)
-+{
-+    return ((1UL << (nr & 31)) & 
-+            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-+}
++#define synch_cmpxchg(ptr,o,n)						 \
++  ({									 \
++     __typeof__(*(ptr)) _o_ = (o);					 \
++     __typeof__(*(ptr)) _n_ = (n);					 \
++     (__typeof__(*(ptr))) __synch_cmpxchg((ptr), (unsigned long)_o_,		 \
++				    (unsigned long)_n_, sizeof(*(ptr))); \
++  })
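++
++/*
++ * Usage sketch (illustrative only; `w' and `flag' are placeholder
++ * names): the usual pattern is a retry loop against a shared word,
++ *
++ *	unsigned long old, new;
++ *	do {
++ *		old = w;
++ *		new = old | flag;
++ *	} while (synch_cmpxchg(&w, old, new) != old);
++ */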
 +
-+static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
-+{
-+    int oldbit;
-+    __asm__ __volatile__ (
-+        "btl %2,%1\n\tsbbl %0,%0"
-+        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
-+    return oldbit;
-+}
++#define synch_cmpxchg_subword(ptr,o,n) __synch_cmpxchg_u16((ptr), (o), (n))
 +
-+#define synch_test_bit(nr,addr) \
-+(__builtin_constant_p(nr) ? \
-+ synch_const_test_bit((nr),(addr)) : \
-+ synch_var_test_bit((nr),(addr)))
++#else
++#error "this only works for CONFIG_SMP"
++#endif
 +
-+#define synch_cmpxchg_subword synch_cmpxchg
++#endif /* __SYNCH_BITOPS_H__ */
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/acpi.h
+--- a/include/asm-x86_64/acpi.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-x86_64/acpi.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -28,6 +28,9 @@
+ 
+ #ifdef __KERNEL__
+ 
++#ifdef CONFIG_XEN
++#include <xen/interface/platform.h>
++#endif
+ #include <acpi/pdc_intel.h>
+ 
+ #define COMPILER_DEPENDENT_INT64   long long
+@@ -129,6 +132,27 @@
+ }
+ extern int acpi_irq_balance_set(char *str);
+ 
++#ifdef CONFIG_XEN
++static inline int acpi_notify_hypervisor_state(u8 sleep_state,
++					       u32 pm1a_cnt_val,
++					       u32 pm1b_cnt_val)
++{
++	struct xen_platform_op op = {
++		.cmd = XENPF_enter_acpi_sleep,
++		.interface_version = XENPF_INTERFACE_VERSION,
++		.u = {
++			.enter_acpi_sleep = {
++				.pm1a_cnt_val = pm1a_cnt_val,
++				.pm1b_cnt_val = pm1b_cnt_val,
++				.sleep_state = sleep_state,
++			},
++		},
++	};
 +
-+#endif /* __XEN_SYNCH_BITOPS_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/system.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/system.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/system.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/system.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,488 @@
-+#ifndef __ASM_SYSTEM_H
-+#define __ASM_SYSTEM_H
++	return HYPERVISOR_platform_op(&op);
++}
++#endif /* CONFIG_XEN */
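++/*
++ * Note (sketch of the intended call path, not spelled out in this
++ * header): the Xen-patched ACPI sleep code is expected to call this in
++ * place of writing the PM1A/PM1B control registers itself, so that the
++ * hypervisor performs the actual S-state entry via
++ * XENPF_enter_acpi_sleep.
++ */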
 +
-+#include <linux/kernel.h>
-+#include <asm/segment.h>
-+#include <asm/cpufeature.h>
-+#include <linux/bitops.h> /* for LOCK_PREFIX */
-+#include <asm/synch_bitops.h>
-+#include <asm/hypervisor.h>
+ #else	/* !CONFIG_ACPI */
+ 
+ #define acpi_lapic 0
+@@ -152,7 +176,6 @@
+ 
+ /* early initialization routine */
+ extern void acpi_reserve_bootmem(void);
+-
+ #endif /*CONFIG_ACPI_SLEEP*/
+ 
+ #define boot_cpu_physical_apicid boot_cpu_id
+@@ -162,7 +185,9 @@
+ 
+ extern u8 x86_acpiid_to_apicid[];
+ 
++#ifndef CONFIG_XEN
+ #define ARCH_HAS_POWER_INIT 1
++#endif
+ 
+ extern int acpi_skip_timer_override;
+ 
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/agp.h
+--- a/include/asm-x86_64/agp.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-x86_64/agp.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -10,8 +10,10 @@
+  * with different cachability attributes for the same page.
+  */
+ 
+-int map_page_into_agp(struct page *page);
+-int unmap_page_from_agp(struct page *page);
++/* Caller's responsibility to call global_flush_tlb() for
++ * performance reasons */
++#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)
++#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL)
+ #define flush_agp_mappings() global_flush_tlb()
+ 
+ /* Could use CLFLUSH here if the cpu supports it. But then it would
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/apic.h
+--- a/include/asm-x86_64/apic.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-x86_64/apic.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -98,11 +98,13 @@
+ extern int disable_timer_pin_1;
+ 
+ 
++#ifndef CONFIG_XEN
+ void smp_send_timer_broadcast_ipi(void);
+ void switch_APIC_timer_to_ipi(void *cpumask);
+ void switch_ipi_to_APIC_timer(void *cpumask);
+ 
+ #define ARCH_APICTIMER_STOPS_ON_C3	1
++#endif
+ 
+ #endif /* CONFIG_X86_LOCAL_APIC */
+ 
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/io_apic.h
+--- a/include/asm-x86_64/io_apic.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-x86_64/io_apic.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -12,7 +12,7 @@
+ 
+ #ifdef CONFIG_X86_IO_APIC
+ 
+-#ifdef CONFIG_PCI_MSI
++#if defined(CONFIG_PCI_MSI) && !defined(CONFIG_XEN)
+ static inline int use_pci_vector(void)	{return 1;}
+ static inline void disable_edge_ioapic_vector(unsigned int vector) { }
+ static inline void mask_and_ack_level_ioapic_vector(unsigned int vector) { }
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/kexec.h
+--- a/include/asm-x86_64/kexec.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-x86_64/kexec.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -1,5 +1,26 @@
+ #ifndef _X86_64_KEXEC_H
+ #define _X86_64_KEXEC_H
 +
-+#ifdef __KERNEL__
++#define PA_CONTROL_PAGE  0
++#define VA_CONTROL_PAGE  1
++#define PA_PGD           2
++#define VA_PGD           3
++#define PA_PUD_0         4
++#define VA_PUD_0         5
++#define PA_PMD_0         6
++#define VA_PMD_0         7
++#define PA_PTE_0         8
++#define VA_PTE_0         9
++#define PA_PUD_1         10
++#define VA_PUD_1         11
++#define PA_PMD_1         12
++#define VA_PMD_1         13
++#define PA_PTE_1         14
++#define VA_PTE_1         15
++#define PA_TABLE_PAGE    16
++#define PAGES_NR         17
 +
-+struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
-+extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
++#ifndef __ASSEMBLY__
+ 
+ #include <linux/string.h>
+ 
+@@ -64,4 +85,25 @@
+ 		newregs->rip = (unsigned long)current_text_addr();
+ 	}
+ }
 +
-+/*
-+ * Saving eflags is important. It switches not only IOPL between tasks,
-+ * it also protects other tasks from NT leaking through sysenter etc.
++NORET_TYPE void
++relocate_kernel(unsigned long indirection_page,
++		unsigned long page_list,
++		unsigned long start_address) ATTRIB_NORET;
++
++/* Under Xen we need to work with machine addresses. These macros give the
++ * machine address of a certain page to the generic kexec code instead of
++ * the pseudo-physical address which would be given by the default macros.
 + */
-+#define switch_to(prev,next,last) do {					\
-+	unsigned long esi,edi;						\
-+	asm volatile("pushfl\n\t"		/* Save flags */	\
-+		     "pushl %%ebp\n\t"					\
-+		     "movl %%esp,%0\n\t"	/* save ESP */		\
-+		     "movl %5,%%esp\n\t"	/* restore ESP */	\
-+		     "movl $1f,%1\n\t"		/* save EIP */		\
-+		     "pushl %6\n\t"		/* restore EIP */	\
-+		     "jmp __switch_to\n"				\
-+		     "1:\t"						\
-+		     "popl %%ebp\n\t"					\
-+		     "popfl"						\
-+		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
-+		      "=a" (last),"=S" (esi),"=D" (edi)			\
-+		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
-+		      "2" (prev), "d" (next));				\
-+} while (0)
 +
-+#define _set_base(addr,base) do { unsigned long __pr; \
-+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
-+	"rorl $16,%%edx\n\t" \
-+	"movb %%dl,%2\n\t" \
-+	"movb %%dh,%3" \
-+	:"=&d" (__pr) \
-+	:"m" (*((addr)+2)), \
-+	 "m" (*((addr)+4)), \
-+	 "m" (*((addr)+7)), \
-+         "0" (base) \
-+        ); } while(0)
++#ifdef CONFIG_XEN
++#define KEXEC_ARCH_HAS_PAGE_MACROS
++#define kexec_page_to_pfn(page)  pfn_to_mfn(page_to_pfn(page))
++#define kexec_pfn_to_page(pfn)   pfn_to_page(mfn_to_pfn(pfn))
++#define kexec_virt_to_phys(addr) virt_to_machine(addr)
++#define kexec_phys_to_virt(addr) phys_to_virt(machine_to_phys(addr))
++#endif
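++/*
++ * Illustrative example (P and M are hypothetical frame numbers): for a
++ * pseudo-physical pfn P backed by machine frame M,
++ *	kexec_page_to_pfn(pfn_to_page(P)) == M
++ *	kexec_phys_to_virt(M << PAGE_SHIFT) == __va(P << PAGE_SHIFT)
++ * so the generic kexec code only ever sees machine frame numbers.
++ */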
 +
-+#define _set_limit(addr,limit) do { unsigned long __lr; \
-+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
-+	"rorl $16,%%edx\n\t" \
-+	"movb %2,%%dh\n\t" \
-+	"andb $0xf0,%%dh\n\t" \
-+	"orb %%dh,%%dl\n\t" \
-+	"movb %%dl,%2" \
-+	:"=&d" (__lr) \
-+	:"m" (*(addr)), \
-+	 "m" (*((addr)+6)), \
-+	 "0" (limit) \
-+        ); } while(0)
++#endif /* __ASSEMBLY__ */
 +
-+#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
-+#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )
+ #endif /* _X86_64_KEXEC_H */
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/agp.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/agp.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,40 @@
++#ifndef AGP_H
++#define AGP_H 1
++
++#include <asm/cacheflush.h>
++#include <asm/system.h>
 +
 +/*
-+ * Load a segment. Fall back on loading the zero
-+ * segment if something goes wrong..
++ * Functions to keep the agpgart mappings coherent.
++ * The GART gives the CPU a physical alias of memory. The alias is
++ * mapped uncacheable. Make sure there are no conflicting mappings
++ * with different cachability attributes for the same page.
 + */
-+#define loadsegment(seg,value)			\
-+	asm volatile("\n"			\
-+		"1:\t"				\
-+		"mov %0,%%" #seg "\n"		\
-+		"2:\n"				\
-+		".section .fixup,\"ax\"\n"	\
-+		"3:\t"				\
-+		"pushl $0\n\t"			\
-+		"popl %%" #seg "\n\t"		\
-+		"jmp 2b\n"			\
-+		".previous\n"			\
-+		".section __ex_table,\"a\"\n\t"	\
-+		".align 4\n\t"			\
-+		".long 1b,3b\n"			\
-+		".previous"			\
-+		: :"rm" (value))
++
++#define map_page_into_agp(page) ( \
++	xen_create_contiguous_region((unsigned long)page_address(page), 0, 32) \
++	?: change_page_attr(page, 1, PAGE_KERNEL_NOCACHE))
++#define unmap_page_from_agp(page) ( \
++	xen_destroy_contiguous_region((unsigned long)page_address(page), 0), \
++	/* only a fallback: xen_destroy_contiguous_region uses PAGE_KERNEL */ \
++	change_page_attr(page, 1, PAGE_KERNEL))
++#define flush_agp_mappings() global_flush_tlb()
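++/* Note on the forms above (GNU C): `x ?: y' evaluates to x unless x is
++ * zero, so map_page_into_agp() propagates an error from
++ * xen_create_contiguous_region() and otherwise returns the
++ * change_page_attr() result; unmap_page_from_agp() uses the comma
++ * operator and yields only the final change_page_attr() value. */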
++
++/* Could use CLFLUSH here if the cpu supports it. But then it would
++   need to be called for each cacheline of the whole page so it may not be
++   worth it. Would need a page for it. */
++#define flush_agp_cache() wbinvd()
++
++/* Convert a physical address to an address suitable for the GART. */
++#define phys_to_gart(x) phys_to_machine(x)
++#define gart_to_phys(x) machine_to_phys(x)
++
++/* GATT allocation. Returns/accepts GATT kernel virtual address. */
++#define alloc_gatt_pages(order)	({                                          \
++	char *_t; dma_addr_t _d;                                            \
++	_t = dma_alloc_coherent(NULL,PAGE_SIZE<<(order),&_d,GFP_KERNEL);    \
++	_t; })
++#define free_gatt_pages(table, order)	\
++	dma_free_coherent(NULL,PAGE_SIZE<<(order),(table),virt_to_bus(table))
++
++#endif
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/arch_hooks.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/arch_hooks.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,27 @@
++#ifndef _ASM_ARCH_HOOKS_H
++#define _ASM_ARCH_HOOKS_H
++
++#include <linux/interrupt.h>
 +
 +/*
-+ * Save a segment register away
++ *	linux/include/asm/arch_hooks.h
++ *
++ *	define the architecture-specific hooks
 + */
-+#define savesegment(seg, value) \
-+	asm volatile("mov %%" #seg ",%0":"=rm" (value))
 +
-+#define read_cr0() ({ \
-+	unsigned int __dummy; \
-+	__asm__ __volatile__( \
-+		"movl %%cr0,%0\n\t" \
-+		:"=r" (__dummy)); \
-+	__dummy; \
-+})
-+#define write_cr0(x) \
-+	__asm__ __volatile__("movl %0,%%cr0": :"r" (x))
++/* these aren't arch hooks; they are generic routines
++ * that can be used by the hooks */
++extern void init_ISA_irqs(void);
++extern void apic_intr_init(void);
++extern void smp_intr_init(void);
++extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
 +
-+#define read_cr2() (current_vcpu_info()->arch.cr2)
-+#define write_cr2(x) \
-+	__asm__ __volatile__("movl %0,%%cr2": :"r" (x))
++/* these are the defined hooks */
++extern void intr_init_hook(void);
++extern void pre_intr_init_hook(void);
++extern void pre_setup_arch_hook(void);
++extern void trap_init_hook(void);
++extern void time_init_hook(void);
++extern void mca_nmi_hook(void);
 +
-+#define read_cr3() ({ \
-+	unsigned int __dummy; \
-+	__asm__ ( \
-+		"movl %%cr3,%0\n\t" \
-+		:"=r" (__dummy)); \
-+	__dummy = xen_cr3_to_pfn(__dummy); \
-+	mfn_to_pfn(__dummy) << PAGE_SHIFT; \
-+})
-+#define write_cr3(x) ({						\
-+	unsigned int __dummy = pfn_to_mfn((x) >> PAGE_SHIFT);	\
-+	__dummy = xen_pfn_to_cr3(__dummy);			\
-+	__asm__ __volatile__("movl %0,%%cr3": :"r" (__dummy));	\
-+})
-+#define read_cr4() ({ \
-+	unsigned int __dummy; \
-+	__asm__( \
-+		"movl %%cr4,%0\n\t" \
-+		:"=r" (__dummy)); \
-+	__dummy; \
-+})
-+#define read_cr4_safe() ({			      \
-+	unsigned int __dummy;			      \
-+	/* This could fault if %cr4 does not exist */ \
-+	__asm__("1: movl %%cr4, %0		\n"   \
-+		"2:				\n"   \
-+		".section __ex_table,\"a\"	\n"   \
-+		".long 1b,2b			\n"   \
-+		".previous			\n"   \
-+		: "=r" (__dummy): "0" (0));	      \
-+	__dummy;				      \
-+})
++#endif
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/bootsetup.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/bootsetup.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,42 @@
 +
-+#define write_cr4(x) \
-+	__asm__ __volatile__("movl %0,%%cr4": :"r" (x))
++#ifndef _X86_64_BOOTSETUP_H
++#define _X86_64_BOOTSETUP_H 1
++
++#define BOOT_PARAM_SIZE		4096
++extern char x86_boot_params[BOOT_PARAM_SIZE];
 +
 +/*
-+ * Clear and set 'TS' bit respectively
++ * This is set up by the setup-routine at boot-time
 + */
-+#define clts() (HYPERVISOR_fpu_taskswitch(0))
-+#define stts() (HYPERVISOR_fpu_taskswitch(1))
++#define PARAM	((unsigned char *)x86_boot_params)
++#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
++#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
++#define ALT_MEM_K (*(unsigned int *) (PARAM+0x1e0))
++#define E820_MAP_NR (*(char*) (PARAM+E820NR))
++#define E820_MAP    ((struct e820entry *) (PARAM+E820MAP))
++#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
++#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
++#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
++#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
++#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
++#define SAVED_VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
++#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
++#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
++#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
++#define KERNEL_START (*(unsigned int *) (PARAM+0x214))
 +
-+#endif	/* __KERNEL__ */
++#define INITRD_START (__pa(xen_start_info->mod_start))
++#define INITRD_SIZE (xen_start_info->mod_len)
++#define EDID_INFO   (*(struct edid_info *) (PARAM+0x440))
 +
-+#define wbinvd() \
-+	__asm__ __volatile__ ("wbinvd": : :"memory")
++#define EDD_NR     (*(unsigned char *) (PARAM+EDDNR))
++#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
++#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
++#define EDD_BUF     ((struct edd_info *) (PARAM+EDDBUF))
++#define COMMAND_LINE saved_command_line
 +
-+static inline unsigned long get_limit(unsigned long segment)
-+{
-+	unsigned long __limit;
-+	__asm__("lsll %1,%0"
-+		:"=r" (__limit):"r" (segment));
-+	return __limit+1;
-+}
++#define RAMDISK_IMAGE_START_MASK  	0x07FF
++#define RAMDISK_PROMPT_FLAG		0x8000
++#define RAMDISK_LOAD_FLAG		0x4000	
 +
-+#define nop() __asm__ __volatile__ ("nop")
++#endif
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/desc.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/desc.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,265 @@
++/* Written 2000 by Andi Kleen */ 
++#ifndef __ARCH_DESC_H
++#define __ARCH_DESC_H
 +
-+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
++#include <linux/threads.h>
++#include <asm/ldt.h>
 +
-+#define tas(ptr) (xchg((ptr),1))
++#ifndef __ASSEMBLY__
 +
-+struct __xchg_dummy { unsigned long a[100]; };
-+#define __xg(x) ((struct __xchg_dummy *)(x))
++#include <linux/string.h>
++#include <linux/smp.h>
 +
++#include <asm/segment.h>
++#include <asm/mmu.h>
 +
-+#ifdef CONFIG_X86_CMPXCHG64
++// 8 byte segment descriptor
++struct desc_struct { 
++	u16 limit0;
++	u16 base0;
++	unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
++	unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
++} __attribute__((packed)); 
 +
-+/*
-+ * The semantics of XCHGCMP8B are a bit strange, this is why
-+ * there is a loop and the loading of %%eax and %%edx has to
-+ * be inside. This inlines well in most cases, the cached
-+ * cost is around ~38 cycles. (in the future we might want
-+ * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
-+ * might have an implicit FPU-save as a cost, so it's not
-+ * clear which path to go.)
-+ *
-+ * cmpxchg8b must be used with the lock prefix here to allow
-+ * the instruction to be executed atomically, see page 3-102
-+ * of the instruction set reference 24319102.pdf. We need
-+ * the reader side to see the coherent 64bit value.
-+ */
-+static inline void __set_64bit (unsigned long long * ptr,
-+		unsigned int low, unsigned int high)
-+{
-+	__asm__ __volatile__ (
-+		"\n1:\t"
-+		"movl (%0), %%eax\n\t"
-+		"movl 4(%0), %%edx\n\t"
-+		"lock cmpxchg8b (%0)\n\t"
-+		"jnz 1b"
-+		: /* no outputs */
-+		:	"D"(ptr),
-+			"b"(low),
-+			"c"(high)
-+		:	"ax","dx","memory");
-+}
++struct n_desc_struct { 
++	unsigned int a,b;
++}; 	
 +
-+static inline void __set_64bit_constant (unsigned long long *ptr,
-+						 unsigned long long value)
-+{
-+	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
-+}
-+#define ll_low(x)	*(((unsigned int*)&(x))+0)
-+#define ll_high(x)	*(((unsigned int*)&(x))+1)
++enum { 
++	GATE_INTERRUPT = 0xE, 
++	GATE_TRAP = 0xF, 	
++	GATE_CALL = 0xC,
++}; 	
 +
-+static inline void __set_64bit_var (unsigned long long *ptr,
-+			 unsigned long long value)
-+{
-+	__set_64bit(ptr,ll_low(value), ll_high(value));
-+}
++// 16byte gate
++struct gate_struct {          
++	u16 offset_low;
++	u16 segment; 
++	unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
++	u16 offset_middle;
++	u32 offset_high;
++	u32 zero1; 
++} __attribute__((packed));
 +
-+#define set_64bit(ptr,value) \
-+(__builtin_constant_p(value) ? \
-+ __set_64bit_constant(ptr, value) : \
-+ __set_64bit_var(ptr, value) )
++#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF) 
++#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
++#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
 +
-+#define _set_64bit(ptr,value) \
-+(__builtin_constant_p(value) ? \
-+ __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
-+ __set_64bit(ptr, ll_low(value), ll_high(value)) )
++enum { 
++	DESC_TSS = 0x9,
++	DESC_LDT = 0x2,
++}; 
 +
-+#endif
++// LDT or TSS descriptor in the GDT. 16 bytes.
++struct ldttss_desc { 
++	u16 limit0;
++	u16 base0;
++	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
++	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
++	u32 base3;
++	u32 zero1; 
++} __attribute__((packed)); 
 +
-+/*
-+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
-+ * Note 2: xchg has side effect, so that attribute volatile is necessary,
-+ *	  but generally the primitive is invalid, *ptr is output argument. --ANK
-+ */
-+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-+{
-+	switch (size) {
-+		case 1:
-+			__asm__ __volatile__("xchgb %b0,%1"
-+				:"=q" (x)
-+				:"m" (*__xg(ptr)), "0" (x)
-+				:"memory");
-+			break;
-+		case 2:
-+			__asm__ __volatile__("xchgw %w0,%1"
-+				:"=r" (x)
-+				:"m" (*__xg(ptr)), "0" (x)
-+				:"memory");
-+			break;
-+		case 4:
-+			__asm__ __volatile__("xchgl %0,%1"
-+				:"=r" (x)
-+				:"m" (*__xg(ptr)), "0" (x)
-+				:"memory");
-+			break;
-+	}
-+	return x;
-+}
++struct desc_ptr {
++	unsigned short size;
++	unsigned long address;
++} __attribute__((packed)) ;
 +
-+/*
-+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
-+ * store NEW in MEM.  Return the initial value in MEM.  Success is
-+ * indicated by comparing RETURN with OLD.
-+ */
++extern struct desc_ptr idt_descr, cpu_gdt_descr[NR_CPUS];
 +
-+#ifdef CONFIG_X86_CMPXCHG
-+#define __HAVE_ARCH_CMPXCHG 1
-+#define cmpxchg(ptr,o,n)\
-+	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
-+					(unsigned long)(n),sizeof(*(ptr))))
-+#endif
++extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
 +
-+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-+				      unsigned long new, int size)
++#define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
++#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
++
++static inline void clear_LDT(void)
 +{
-+	unsigned long prev;
-+	switch (size) {
-+	case 1:
-+		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
-+				     : "=a"(prev)
-+				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-+				     : "memory");
-+		return prev;
-+	case 2:
-+		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
-+				     : "=a"(prev)
-+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-+				     : "memory");
-+		return prev;
-+	case 4:
-+		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
-+				     : "=a"(prev)
-+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-+				     : "memory");
-+		return prev;
-+	}
-+	return old;
++	int cpu = get_cpu();
++
++	/*
++	 * NB. We load the default_ldt for lcall7/27 handling on demand, as
++	 * it slows down context switching. Noone uses it anyway.
++	 * it slows down context switching. No one uses it anyway.
++	cpu = cpu;              /* XXX avoid compiler warning */
++	xen_set_ldt(NULL, 0);
++	put_cpu();
 +}
 +
-+#ifndef CONFIG_X86_CMPXCHG
 +/*
-+ * Building a kernel capable running on 80386. It may be necessary to
-+ * simulate the cmpxchg on the 80386 CPU. For that purpose we define
-+ * a function for each of the sizes we support.
++ * This is the ldt that every process will get unless we need
++ * something other than this.
 + */
++extern struct desc_struct default_ldt[];
++#ifndef CONFIG_X86_NO_IDT
++extern struct gate_struct idt_table[]; 
++#endif
++extern struct desc_ptr cpu_gdt_descr[];
 +
-+extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
-+extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
-+extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
++/* the cpu gdt accessor */
++#define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address)
 +
-+static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
-+				      unsigned long new, int size)
++static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)  
 +{
-+	switch (size) {
-+	case 1:
-+		return cmpxchg_386_u8(ptr, old, new);
-+	case 2:
-+		return cmpxchg_386_u16(ptr, old, new);
-+	case 4:
-+		return cmpxchg_386_u32(ptr, old, new);
-+	}
-+	return old;
-+}
++	struct gate_struct s; 	
++	s.offset_low = PTR_LOW(func); 
++	s.segment = __KERNEL_CS;
++	s.ist = ist; 
++	s.p = 1;
++	s.dpl = dpl; 
++	s.zero0 = 0;
++	s.zero1 = 0; 
++	s.type = type; 
++	s.offset_middle = PTR_MIDDLE(func); 
++	s.offset_high = PTR_HIGH(func); 
++	/* does not need to be atomic because it is only done once at setup time */ 
++	memcpy(adr, &s, 16); 
++} 
 +
-+#define cmpxchg(ptr,o,n)						\
-+({									\
-+	__typeof__(*(ptr)) __ret;					\
-+	if (likely(boot_cpu_data.x86 > 3))				\
-+		__ret = __cmpxchg((ptr), (unsigned long)(o),		\
-+					(unsigned long)(n), sizeof(*(ptr))); \
-+	else								\
-+		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
-+					(unsigned long)(n), sizeof(*(ptr))); \
-+	__ret;								\
-+})
-+#endif
++#ifndef CONFIG_X86_NO_IDT
++static inline void set_intr_gate(int nr, void *func) 
++{ 
++	BUG_ON((unsigned)nr > 0xFF);
++	_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0); 
++} 
 +
-+#ifdef CONFIG_X86_CMPXCHG64
++static inline void set_intr_gate_ist(int nr, void *func, unsigned ist) 
++{ 
++	BUG_ON((unsigned)nr > 0xFF);
++	_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist); 
++} 
 +
-+static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
-+				      unsigned long long new)
++static inline void set_system_gate(int nr, void *func) 
++{ 
++	BUG_ON((unsigned)nr > 0xFF);
++	_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0); 
++} 
++
++static inline void set_system_gate_ist(int nr, void *func, unsigned ist)
 +{
-+	unsigned long long prev;
-+	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
-+			     : "=A"(prev)
-+			     : "b"((unsigned long)new),
-+			       "c"((unsigned long)(new >> 32)),
-+			       "m"(*__xg(ptr)),
-+			       "0"(old)
-+			     : "memory");
-+	return prev;
++	_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist);
 +}
-+
-+#define cmpxchg64(ptr,o,n)\
-+	((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
-+					(unsigned long long)(n)))
-+
 +#endif
-+    
-+/*
-+ * Force strict CPU ordering.
-+ * And yes, this is required on UP too when we're talking
-+ * to devices.
-+ *
-+ * For now, "wmb()" doesn't actually do anything, as all
-+ * Intel CPU's follow what Intel calls a *Processor Order*,
-+ * in which all writes are seen in the program order even
-+ * outside the CPU.
-+ *
-+ * I expect future Intel CPU's to have a weaker ordering,
-+ * but I'd also expect them to finally get their act together
-+ * and add some real memory barriers if so.
-+ *
-+ * Some non intel clones support out of order store. wmb() ceases to be a
-+ * nop for these.
-+ */
-+ 
 +
-+/* 
-+ * Actually only lfence would be needed for mb() because all stores done 
-+ * by the kernel should be already ordered. But keep a full barrier for now. 
-+ */
++static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type, 
++					 unsigned size) 
++{ 
++	struct ldttss_desc d;
++	memset(&d,0,sizeof(d)); 
++	d.limit0 = size & 0xFFFF;
++	d.base0 = PTR_LOW(tss); 
++	d.base1 = PTR_MIDDLE(tss) & 0xFF; 
++	d.type = type;
++	d.p = 1; 
++	d.limit1 = (size >> 16) & 0xF;
++	d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF; 
++	d.base3 = PTR_HIGH(tss); 
++	memcpy(ptr, &d, 16); 
++}
 +
-+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
++#ifndef CONFIG_X86_NO_TSS
++static inline void set_tss_desc(unsigned cpu, void *addr)
++{ 
++	/*
++	 * sizeof(unsigned long) coming from an extra "long" at the end
++	 * of the iobitmap. See tss_struct definition in processor.h
++	 *
++	 * -1? seg base+limit should be pointing to the address of the
++	 * last valid byte
++	 */
++	set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_TSS], 
++		(unsigned long)addr, DESC_TSS,
++		IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
++} 
++#endif
 +
-+/**
-+ * read_barrier_depends - Flush all pending reads that subsequents reads
-+ * depend on.
-+ *
-+ * No data-dependent reads from memory-like regions are ever reordered
-+ * over this barrier.  All reads preceding this primitive are guaranteed
-+ * to access memory (but not necessarily other CPUs' caches) before any
-+ * reads following this primitive that depend on the data return by
-+ * any of the preceding reads.  This primitive is much lighter weight than
-+ * rmb() on most CPUs, and is never heavier weight than is
-+ * rmb().
-+ *
-+ * These ordering constraints are respected by both the local CPU
-+ * and the compiler.
-+ *
-+ * Ordering is not guaranteed by anything other than these primitives,
-+ * not even by data dependencies.  See the documentation for
-+ * memory_barrier() for examples and URLs to more information.
-+ *
-+ * For example, the following code would force ordering (the initial
-+ * value of "a" is zero, "b" is one, and "p" is "&a"):
-+ *
-+ * <programlisting>
-+ *	CPU 0				CPU 1
-+ *
-+ *	b = 2;
-+ *	memory_barrier();
-+ *	p = &b;				q = p;
-+ *					read_barrier_depends();
-+ *					d = *q;
-+ * </programlisting>
-+ *
-+ * because the read of "*q" depends on the read of "p" and these
-+ * two reads are separated by a read_barrier_depends().  However,
-+ * the following code, with the same initial values for "a" and "b":
-+ *
-+ * <programlisting>
-+ *	CPU 0				CPU 1
-+ *
-+ *	a = 2;
-+ *	memory_barrier();
-+ *	b = 3;				y = b;
-+ *					read_barrier_depends();
-+ *					x = a;
-+ * </programlisting>
-+ *
-+ * does not enforce ordering, since there is no data dependency between
-+ * the read of "a" and the read of "b".  Therefore, on some CPUs, such
-+ * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
-+ * in cases like this where there are no data dependencies.
-+ **/
++static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
++{ 
++	set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_LDT], (unsigned long)addr,
++			      DESC_LDT, size * 8 - 1);
++}
 +
-+#define read_barrier_depends()	do { } while(0)
++static inline void set_seg_base(unsigned cpu, int entry, void *base)
++{ 
++	struct desc_struct *d = &cpu_gdt(cpu)[entry];
++	u32 addr = (u32)(u64)base;
++	BUG_ON((u64)base >> 32); 
++	d->base0 = addr & 0xffff;
++	d->base1 = (addr >> 16) & 0xff;
++	d->base2 = (addr >> 24) & 0xff;
++} 
 +
-+#ifdef CONFIG_X86_OOSTORE
-+/* Actually there are no OOO store capable CPUs for now that do SSE, 
-+   but make it already an possibility. */
-+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
-+#else
-+#define wmb()	__asm__ __volatile__ ("": : :"memory")
++#define LDT_entry_a(info) \
++	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
++/* Don't allow setting of the lm bit. It is useless anyway because
++   64bit system calls require __USER_CS. */ 
++#define LDT_entry_b(info) \
++	(((info)->base_addr & 0xff000000) | \
++	(((info)->base_addr & 0x00ff0000) >> 16) | \
++	((info)->limit & 0xf0000) | \
++	(((info)->read_exec_only ^ 1) << 9) | \
++	((info)->contents << 10) | \
++	(((info)->seg_not_present ^ 1) << 15) | \
++	((info)->seg_32bit << 22) | \
++	((info)->limit_in_pages << 23) | \
++	((info)->useable << 20) | \
++	/* ((info)->lm << 21) | */ \
++	0x7000)
++
++#define LDT_empty(info) (\
++	(info)->base_addr	== 0	&& \
++	(info)->limit		== 0	&& \
++	(info)->contents	== 0	&& \
++	(info)->read_exec_only	== 1	&& \
++	(info)->seg_32bit	== 0	&& \
++	(info)->limit_in_pages	== 0	&& \
++	(info)->seg_not_present	== 1	&& \
++	(info)->useable		== 0	&& \
++	(info)->lm		== 0)
++
++#if TLS_SIZE != 24
++# error update this code.
 +#endif
 +
-+#ifdef CONFIG_SMP
-+#define smp_mb()	mb()
-+#define smp_rmb()	rmb()
-+#define smp_wmb()	wmb()
-+#define smp_read_barrier_depends()	read_barrier_depends()
-+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-+#else
-+#define smp_mb()	barrier()
-+#define smp_rmb()	barrier()
-+#define smp_wmb()	barrier()
-+#define smp_read_barrier_depends()	do { } while(0)
-+#define set_mb(var, value) do { var = value; barrier(); } while (0)
++static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
++{
++#if 0
++	u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN);
++	gdt[0] = t->tls_array[0];
++	gdt[1] = t->tls_array[1];
++	gdt[2] = t->tls_array[2];
 +#endif
++#define C(i) \
++	if (HYPERVISOR_update_descriptor(virt_to_machine(&cpu_gdt(cpu)[GDT_ENTRY_TLS_MIN + i]), \
++						 t->tls_array[i])) \
++		BUG();
 +
-+#include <linux/irqflags.h>
++	C(0); C(1); C(2);
++#undef C
++} 
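++
++/*
++ * Rationale sketch (an assumption, not stated in this file): the #if 0
++ * block above is the native direct GDT store; a Xen guest's GDT frames
++ * are mapped read-only by the hypervisor, so each TLS descriptor is
++ * updated through a HYPERVISOR_update_descriptor() hypercall instead.
++ */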
 +
 +/*
-+ * disable hlt during certain critical i/o operations
++ * load one particular LDT into the current CPU
 + */
-+#define HAVE_DISABLE_HLT
-+void disable_hlt(void);
-+void enable_hlt(void);
++static inline void load_LDT_nolock (mm_context_t *pc, int cpu)
++{
++	void *segments = pc->ldt;
++	int count = pc->size;
 +
-+extern int es7000_plat;
-+void cpu_idle_wait(void);
++	if (likely(!count))
++		segments = NULL;
 +
-+/*
-+ * On SMP systems, when the scheduler does migration-cost autodetection,
-+ * it needs a way to flush as much of the CPU's caches as possible:
-+ */
-+static inline void sched_cacheflush(void)
++	xen_set_ldt(segments, count);
++}
++
++static inline void load_LDT(mm_context_t *pc)
 +{
-+	wbinvd();
++	int cpu = get_cpu();
++	load_LDT_nolock(pc, cpu);
++	put_cpu();
 +}
 +
-+extern unsigned long arch_align_stack(unsigned long sp);
-+extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
++extern struct desc_ptr idt_descr;
 +
-+void default_idle(void);
++#endif /* !__ASSEMBLY__ */
 +
 +#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/tlbflush.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/tlbflush.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/tlbflush.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/tlbflush.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,101 @@
-+#ifndef _I386_TLBFLUSH_H
-+#define _I386_TLBFLUSH_H
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/dma-mapping.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/dma-mapping.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,207 @@
++#ifndef _X8664_DMA_MAPPING_H
++#define _X8664_DMA_MAPPING_H 1
 +
-+#include <linux/mm.h>
-+#include <asm/processor.h>
++/*
++ * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
++ * documentation.
++ */
 +
-+#define __flush_tlb() xen_tlb_flush()
-+#define __flush_tlb_global() xen_tlb_flush()
-+#define __flush_tlb_all() xen_tlb_flush()
 +
-+extern unsigned long pgkern_mask;
++#include <asm/scatterlist.h>
++#include <asm/io.h>
++#include <asm/swiotlb.h>
 +
-+#define cpu_has_invlpg	(boot_cpu_data.x86 > 3)
++struct dma_mapping_ops {
++	int             (*mapping_error)(dma_addr_t dma_addr);
++	void*           (*alloc_coherent)(struct device *dev, size_t size,
++                                dma_addr_t *dma_handle, gfp_t gfp);
++	void            (*free_coherent)(struct device *dev, size_t size,
++                                void *vaddr, dma_addr_t dma_handle);
++	dma_addr_t      (*map_single)(struct device *hwdev, void *ptr,
++                                size_t size, int direction);
++	/* like map_single, but doesn't check the device mask */
++	dma_addr_t      (*map_simple)(struct device *hwdev, char *ptr,
++                                size_t size, int direction);
++	void            (*unmap_single)(struct device *dev, dma_addr_t addr,
++		                size_t size, int direction);
++	void            (*sync_single_for_cpu)(struct device *hwdev,
++		                dma_addr_t dma_handle, size_t size,
++				int direction);
++	void            (*sync_single_for_device)(struct device *hwdev,
++                                dma_addr_t dma_handle, size_t size,
++				int direction);
++	void            (*sync_single_range_for_cpu)(struct device *hwdev,
++                                dma_addr_t dma_handle, unsigned long offset,
++		                size_t size, int direction);
++	void            (*sync_single_range_for_device)(struct device *hwdev,
++				dma_addr_t dma_handle, unsigned long offset,
++		                size_t size, int direction);
++	void            (*sync_sg_for_cpu)(struct device *hwdev,
++                                struct scatterlist *sg, int nelems,
++				int direction);
++	void            (*sync_sg_for_device)(struct device *hwdev,
++				struct scatterlist *sg, int nelems,
++				int direction);
++	int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
++		                int nents, int direction);
++	void            (*unmap_sg)(struct device *hwdev,
++				struct scatterlist *sg, int nents,
++				int direction);
++	int             (*dma_supported)(struct device *hwdev, u64 mask);
++	int		is_phys;
++};
 +
-+#define __flush_tlb_single(addr) xen_invlpg(addr)
++extern dma_addr_t bad_dma_address;
++extern struct dma_mapping_ops* dma_ops;
++extern int iommu_merge;
 +
-+#define __flush_tlb_one(addr) __flush_tlb_single(addr)
++static inline int valid_dma_direction(int dma_direction)
++{
++	return ((dma_direction == DMA_BIDIRECTIONAL) ||
++		(dma_direction == DMA_TO_DEVICE) ||
++		(dma_direction == DMA_FROM_DEVICE));
++}
 +
-+/*
-+ * TLB flushing:
-+ *
-+ *  - flush_tlb() flushes the current mm struct TLBs
-+ *  - flush_tlb_all() flushes all processes TLBs
-+ *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
-+ *  - flush_tlb_page(vma, vmaddr) flushes one page
-+ *  - flush_tlb_range(vma, start, end) flushes a range of pages
-+ *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
-+ *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
-+ *
-+ * ..but the i386 has somewhat limited tlb flushing capabilities,
-+ * and page-granular flushes are available only on i486 and up.
-+ */
++#if 0
++static inline int dma_mapping_error(dma_addr_t dma_addr)
++{
++	if (dma_ops->mapping_error)
++		return dma_ops->mapping_error(dma_addr);
 +
-+#ifndef CONFIG_SMP
++	return (dma_addr == bad_dma_address);
++}
 +
-+#define flush_tlb() __flush_tlb()
-+#define flush_tlb_all() __flush_tlb_all()
-+#define local_flush_tlb() __flush_tlb()
++extern void *dma_alloc_coherent(struct device *dev, size_t size,
++				dma_addr_t *dma_handle, gfp_t gfp);
++extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
++			      dma_addr_t dma_handle);
 +
-+static inline void flush_tlb_mm(struct mm_struct *mm)
++static inline dma_addr_t
++dma_map_single(struct device *hwdev, void *ptr, size_t size,
++	       int direction)
 +{
-+	if (mm == current->active_mm)
-+		__flush_tlb();
++	BUG_ON(!valid_dma_direction(direction));
++	return dma_ops->map_single(hwdev, ptr, size, direction);
 +}
 +
-+static inline void flush_tlb_page(struct vm_area_struct *vma,
-+	unsigned long addr)
++static inline void
++dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
++		 int direction)
 +{
-+	if (vma->vm_mm == current->active_mm)
-+		__flush_tlb_one(addr);
++	BUG_ON(!valid_dma_direction(direction));
++	dma_ops->unmap_single(dev, addr, size, direction);
 +}
 +
-+static inline void flush_tlb_range(struct vm_area_struct *vma,
-+	unsigned long start, unsigned long end)
++#define dma_map_page(dev,page,offset,size,dir) \
++	dma_map_single((dev), page_address(page)+(offset), (size), (dir))
++
++#define dma_unmap_page dma_unmap_single
++
++static inline void
++dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
++			size_t size, int direction)
 +{
-+	if (vma->vm_mm == current->active_mm)
-+		__flush_tlb();
++	BUG_ON(!valid_dma_direction(direction));
++	if (dma_ops->sync_single_for_cpu)
++		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
++					     direction);
++	flush_write_buffers();
 +}
 +
-+#else
++static inline void
++dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
++			   size_t size, int direction)
++{
++	BUG_ON(!valid_dma_direction(direction));
++	if (dma_ops->sync_single_for_device)
++		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
++						direction);
++	flush_write_buffers();
++}
 +
-+#include <asm/smp.h>
++static inline void
++dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
++			      unsigned long offset, size_t size, int direction)
++{
++	BUG_ON(!valid_dma_direction(direction));
++	if (dma_ops->sync_single_range_for_cpu) {
++		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction);
++	}
 +
-+#define local_flush_tlb() \
-+	__flush_tlb()
++	flush_write_buffers();
++}
 +
-+extern void flush_tlb_all(void);
-+extern void flush_tlb_current_task(void);
-+extern void flush_tlb_mm(struct mm_struct *);
-+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
++static inline void
++dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
++				 unsigned long offset, size_t size, int direction)
++{
++	BUG_ON(!valid_dma_direction(direction));
++	if (dma_ops->sync_single_range_for_device)
++		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
++						      offset, size, direction);
 +
-+#define flush_tlb()	flush_tlb_current_task()
++	flush_write_buffers();
++}
 +
-+static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
++static inline void
++dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
++		    int nelems, int direction)
 +{
-+	flush_tlb_mm(vma->vm_mm);
++	BUG_ON(!valid_dma_direction(direction));
++	if (dma_ops->sync_sg_for_cpu)
++		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
++	flush_write_buffers();
 +}
 +
-+#define TLBSTATE_OK	1
-+#define TLBSTATE_LAZY	2
++static inline void
++dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
++		       int nelems, int direction)
++{
++	BUG_ON(!valid_dma_direction(direction));
++	if (dma_ops->sync_sg_for_device) {
++		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
++	}
 +
-+struct tlb_state
++	flush_write_buffers();
++}
++
++static inline int
++dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
 +{
-+	struct mm_struct *active_mm;
-+	int state;
-+	char __cacheline_padding[L1_CACHE_BYTES-8];
-+};
-+DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
++	BUG_ON(!valid_dma_direction(direction));
++	return dma_ops->map_sg(hwdev, sg, nents, direction);
++}
 +
++static inline void
++dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
++	     int direction)
++{
++	BUG_ON(!valid_dma_direction(direction));
++	dma_ops->unmap_sg(hwdev, sg, nents, direction);
++}
 +
-+#endif
++extern int dma_supported(struct device *hwdev, u64 mask);
 +
-+#define flush_tlb_kernel_range(start, end) flush_tlb_all()
++/* same for gart, swiotlb, and nommu */
++static inline int dma_get_cache_alignment(void)
++{
++	return boot_cpu_data.x86_clflush_size;
++}
 +
-+static inline void flush_tlb_pgtables(struct mm_struct *mm,
-+				      unsigned long start, unsigned long end)
++#define dma_is_consistent(h) 1
++
++extern int dma_set_mask(struct device *dev, u64 mask);
++
++static inline void
++dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
 +{
-+	/* i386 does not keep any page table caches in TLB */
++	flush_write_buffers();
 +}
 +
-+#endif /* _I386_TLBFLUSH_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/vga.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/vga.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/vga.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/vga.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,20 @@
++extern struct device fallback_dev;
++extern int panic_on_overflow;
++#endif
++
++#endif /* _X8664_DMA_MAPPING_H */
++
++#include <asm-i386/mach-xen/asm/dma-mapping.h>
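Illustrative aside (not patch content): the dma_mapping_ops table above is the hook that lets this kernel route the generic DMA API through the swiotlb/Xen implementation, and the inline wrappers are kept under #if 0 because the include above pulls in the i386 mach-xen versions instead. A driver would exercise the indirection roughly as follows; dev, buf and len are hypothetical:

/* Sketch only: stream one buffer to a device through dma_ops->map_single(). */
static int xmit_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(handle))	/* 2.6.18-era signature: no dev argument */
		return -EIO;

	/* ... hand 'handle' to the device and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}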
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/e820.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/e820.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,66 @@
 +/*
-+ *	Access to VGA videoram
++ * structures and definitions for the int 15, ax=e820 memory map
++ * scheme.
 + *
-+ *	(c) 1998 Martin Mares <mj at ucw.cz>
++ * In a nutshell, setup.S populates a scratch table in the
++ * empty_zero_block that contains a list of usable address/size
++ * duples.  In setup.c, this information is transferred into the e820map,
++ * and in init.c/numa.c, that new information is used to mark pages
++ * reserved or not.
 + */
++#ifndef __E820_HEADER
++#define __E820_HEADER
 +
-+#ifndef _LINUX_ASM_VGA_H_
-+#define _LINUX_ASM_VGA_H_
++#include <linux/mmzone.h>
 +
-+/*
-+ *	On the PC, we can just recalculate addresses and then
-+ *	access the videoram directly without any black magic.
-+ */
++#define E820MAP	0x2d0		/* our map */
++#define E820MAX	128		/* number of entries in E820MAP */
++#define E820NR	0x1e8		/* # entries in E820MAP */
 +
-+#define VGA_MAP_MEM(x,s) (unsigned long)isa_bus_to_virt(x)
++#define E820_RAM	1
++#define E820_RESERVED	2
++#define E820_ACPI	3 /* usable as RAM once ACPI tables have been read */
++#define E820_NVS	4
 +
-+#define vga_readb(x) (*(x))
-+#define vga_writeb(x,y) (*(y) = (x))
++#define HIGH_MEMORY	(1024*1024)
 +
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/xenoprof.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/xenoprof.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/asm/xenoprof.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/asm/xenoprof.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,48 @@
-+/******************************************************************************
-+ * asm-i386/mach-xen/asm/xenoprof.h
-+ *
-+ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
-+ *                    VA Linux Systems Japan K.K.
++#define LOWMEMSIZE()	(0x9f000)
++
++#ifndef __ASSEMBLY__
++struct e820entry {
++	u64 addr;	/* start of memory segment */
++	u64 size;	/* size of memory segment */
++	u32 type;	/* type of memory segment */
++} __attribute__((packed));
++
++struct e820map {
++	int nr_map;
++	struct e820entry map[E820MAX];
++};
++
++extern unsigned long find_e820_area(unsigned long start, unsigned long end, 
++				    unsigned size);
++extern void add_memory_region(unsigned long start, unsigned long size, 
++			      int type);
++extern void setup_memory_region(void);
++extern void contig_e820_setup(void); 
++extern unsigned long e820_end_of_ram(void);
++extern void e820_reserve_resources(struct e820entry *e820, int nr_map);
++extern void e820_print_map(char *who);
++extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type);
++extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type);
++
++extern void e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end);
++extern void e820_setup_gap(struct e820entry *e820, int nr_map);
++extern unsigned long e820_hole_size(unsigned long start_pfn,
++				    unsigned long end_pfn);
++
++extern void __init parse_memopt(char *p, char **end);
++extern void __init parse_memmapopt(char *p, char **end);
++
++extern struct e820map e820;
++
++extern unsigned ebda_addr, ebda_size;
++#endif /* !__ASSEMBLY__ */
++
++#endif /* __E820_HEADER */
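Illustrative sketch (not patch content): consumers such as e820_end_of_ram() and the bootmem setup walk the map declared above entry by entry, along these lines:

/* Sketch: total usable RAM according to the e820 map. */
static u64 e820_ram_bytes(void)
{
	u64 ram = 0;
	int i;

	for (i = 0; i < e820.nr_map; i++)
		if (e820.map[i].type == E820_RAM)
			ram += e820.map[i].size;
	return ram;
}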
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/fixmap.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/fixmap.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,112 @@
++/*
++ * fixmap.h: compile-time virtual memory allocation
 + *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
 + *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
++ * Copyright (C) 1998 Ingo Molnar
++ */
++
++#ifndef _ASM_FIXMAP_H
++#define _ASM_FIXMAP_H
++
++#include <linux/kernel.h>
++#include <asm/apicdef.h>
++#include <asm/page.h>
++#include <asm/vsyscall.h>
++#include <asm/vsyscall32.h>
++#include <asm/acpi.h>
++
++/*
++ * Here we define all the compile-time 'special' virtual
++ * addresses. The point is to have a constant address at
++ * compile time, but to set the physical address only
++ * in the boot process.
 + *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ * these 'compile-time allocated' memory buffers are
++ * fixed-size 4k pages. (or larger if used with an increment
++ * higher than 1) use set_fixmap(idx, phys) to associate
++ * physical memory with fixmap indices.
 + *
++ * TLB entries of such buffers will not be flushed across
++ * task switches.
 + */
-+#ifndef __ASM_XENOPROF_H__
-+#define __ASM_XENOPROF_H__
-+#ifdef CONFIG_XEN
 +
-+struct super_block;
-+struct dentry;
-+int xenoprof_create_files(struct super_block * sb, struct dentry * root);
-+#define HAVE_XENOPROF_CREATE_FILES
++enum fixed_addresses {
++	VSYSCALL_LAST_PAGE,
++	VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
++	VSYSCALL_HPET,
++	FIX_HPET_BASE,
++#ifdef CONFIG_X86_LOCAL_APIC
++	FIX_APIC_BASE,	/* local (CPU) APIC -- required for SMP or not */
++#endif
++#ifdef CONFIG_X86_IO_APIC
++	FIX_IO_APIC_BASE_0,
++	FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
++#endif
++#ifdef CONFIG_ACPI
++	FIX_ACPI_BEGIN,
++	FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
++#endif
++	FIX_SHARED_INFO,
++#define NR_FIX_ISAMAPS	256
++	FIX_ISAMAP_END,
++	FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
++	__end_of_permanent_fixed_addresses,
++	/* temporary boot-time mappings, used before ioremap() is functional */
++#define NR_FIX_BTMAPS	16
++	FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
++	FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
++	__end_of_fixed_addresses
++};
++
++extern void __set_fixmap (enum fixed_addresses idx,
++					unsigned long phys, pgprot_t flags);
++
++#define set_fixmap(idx, phys) \
++		__set_fixmap(idx, phys, PAGE_KERNEL)
++/*
++ * Some hardware wants to get fixmapped without caching.
++ */
++#define set_fixmap_nocache(idx, phys) \
++		__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
++
++#define clear_fixmap(idx) \
++                __set_fixmap(idx, 0, __pgprot(0))
++
++#define FIXADDR_TOP	(VSYSCALL_END-PAGE_SIZE)
++#define FIXADDR_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
++#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)
++
++/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
++#define FIXADDR_USER_START	((unsigned long)VSYSCALL32_VSYSCALL)
++#define FIXADDR_USER_END	(FIXADDR_USER_START + PAGE_SIZE)
++
++#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
++
++extern void __this_fixmap_does_not_exist(void);
++
++/*
++ * 'index to address' translation. If anyone tries to use the idx
++ * directly without translation, we catch the bug with a NULL-dereference
++ * kernel oops. Illegal ranges of incoming indices are caught too.
++ */
++static __always_inline unsigned long fix_to_virt(const unsigned int idx)
++{
++	/*
++	 * this branch gets completely eliminated after inlining,
++	 * except when someone tries to use fixaddr indices in an
++	 * illegal way (such as mixing up address types or using
++	 * out-of-range indices).
++	 *
++	 * If it doesn't get removed, the linker will complain
++	 * loudly with a reasonably clear error message.
++	 */
++	if (idx >= __end_of_fixed_addresses)
++		__this_fixmap_does_not_exist();
 +
-+struct xenoprof_init;
-+void xenoprof_arch_init_counter(struct xenoprof_init *init);
-+void xenoprof_arch_counter(void);
-+void xenoprof_arch_start(void);
-+void xenoprof_arch_stop(void);
++	return __fix_to_virt(idx);
++}
 +
-+struct xenoprof_arch_shared_buffer {
-+	/* nothing */
-+};
-+struct xenoprof_shared_buffer;
-+void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer* sbuf);
-+struct xenoprof_get_buffer;
-+int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer* get_buffer, struct xenoprof_shared_buffer* sbuf);
-+struct xenoprof_passive;
-+int xenoprof_arch_set_passive(struct xenoprof_passive* pdomain, struct xenoprof_shared_buffer* sbuf);
++#endif
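Illustrative sketch (not patch content): a fixmap slot is filled once at boot and is thereafter reachable through a compile-time constant virtual address, e.g. for the FIX_SHARED_INFO slot added above; shared_info_ma is a placeholder machine address:

	set_fixmap(FIX_SHARED_INFO, shared_info_ma);
	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);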
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/gnttab_dma.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/gnttab_dma.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1 @@
++#include <asm-i386/mach-xen/asm/gnttab_dma.h>
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/hw_irq.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/hw_irq.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,136 @@
++#ifndef _ASM_HW_IRQ_H
++#define _ASM_HW_IRQ_H
 +
-+#endif /* CONFIG_XEN */
-+#endif /* __ASM_XENOPROF_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/irq_vectors.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/irq_vectors.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/irq_vectors.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/irq_vectors.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,125 @@
 +/*
-+ * This file should contain #defines for all of the interrupt vector
-+ * numbers used by this architecture.
-+ *
-+ * In addition, there are some standard defines:
-+ *
-+ *	FIRST_EXTERNAL_VECTOR:
-+ *		The first free place for external interrupts
++ *	linux/include/asm/hw_irq.h
 + *
-+ *	SYSCALL_VECTOR:
-+ *		The IRQ vector a syscall makes the user to kernel transition
-+ *		under.
++ *	(C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
 + *
-+ *	TIMER_IRQ:
-+ *		The IRQ number the timer interrupt comes in at.
++ *	moved some of the old arch/i386/kernel/irq.h to here. VY
 + *
-+ *	NR_IRQS:
-+ *		The total number of interrupt vectors (including all the
-+ *		architecture specific interrupts) needed.
++ *	IRQ/IPI changes taken from work by Thomas Radke
++ *	<tomsoft at informatik.tu-chemnitz.de>
 + *
-+ */			
-+#ifndef _ASM_IRQ_VECTORS_H
-+#define _ASM_IRQ_VECTORS_H
++ *	hacked by Andi Kleen for x86-64.
++ */
++
++#ifndef __ASSEMBLY__
++#include <asm/atomic.h>
++#include <asm/irq.h>
++#include <linux/profile.h>
++#include <linux/smp.h>
++
++struct hw_interrupt_type;
++#endif
 +
++#define NMI_VECTOR		0x02
 +/*
 + * IDT vectors usable for external interrupt sources start
 + * at 0x20:
 + */
 +#define FIRST_EXTERNAL_VECTOR	0x20
 +
-+#define SYSCALL_VECTOR		0x80
++#define IA32_SYSCALL_VECTOR	0x80
++
 +
 +/*
 + * Vectors 0x20-0x2f are used for ISA interrupts.
 + */
 +
-+#if 0
 +/*
 + * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
 + *
 + *  some of the following vectors are 'rare', they are merged
 + *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
 + *  TLB, reschedule and local APIC vectors are performance-critical.
-+ *
-+ *  Vectors 0xf0-0xfa are free (reserved for future Linux use).
 + */
++#ifndef CONFIG_XEN
 +#define SPURIOUS_APIC_VECTOR	0xff
 +#define ERROR_APIC_VECTOR	0xfe
-+#define INVALIDATE_TLB_VECTOR	0xfd
-+#define RESCHEDULE_VECTOR	0xfc
-+#define CALL_FUNCTION_VECTOR	0xfb
++#define RESCHEDULE_VECTOR	0xfd
++#define CALL_FUNCTION_VECTOR	0xfc
++/* fb free - please don't re-add KDB here because it's useless
++   (hint - think what an NMI bit does to a vector) */
++#define THERMAL_APIC_VECTOR	0xfa
++#define THRESHOLD_APIC_VECTOR   0xf9
++/* f8 free */
++#define INVALIDATE_TLB_VECTOR_END	0xf7
++#define INVALIDATE_TLB_VECTOR_START	0xf0	/* f0-f7 used for TLB flush */
++
++#define NUM_INVALIDATE_TLB_VECTORS	8
++#endif
 +
-+#define THERMAL_APIC_VECTOR	0xf0
 +/*
 + * Local APIC timer IRQ vector is on a different priority level,
 + * to work around the 'lost local interrupt if more than 2 IRQ
 + * sources per level' errata.
 + */
 +#define LOCAL_TIMER_VECTOR	0xef
-+#endif
-+
-+#define SPURIOUS_APIC_VECTOR	0xff
-+#define ERROR_APIC_VECTOR	0xfe
 +
 +/*
 + * First APIC vector available to drivers: (vectors 0x30-0xee)
@@ -86316,524 +125603,71 @@
 + * levels. (0x80 is the syscall vector)
 + */
 +#define FIRST_DEVICE_VECTOR	0x31
-+#define FIRST_SYSTEM_VECTOR	0xef
-+
-+/*
-+ * 16 8259A IRQ's, 208 potential APIC interrupt sources.
-+ * Right now the APIC is mostly only used for SMP.
-+ * 256 vectors is an architectural limit. (we can have
-+ * more than 256 devices theoretically, but they will
-+ * have to use shared interrupts)
-+ * Since vectors 0x00-0x1f are used/reserved for the CPU,
-+ * the usable vector space is 0x20-0xff (224 vectors)
-+ */
-+
-+#define RESCHEDULE_VECTOR	0
-+#define CALL_FUNCTION_VECTOR	1
-+#define NR_IPIS			2
-+
-+/*
-+ * The maximum number of vectors supported by i386 processors
-+ * is limited to 256. For processors other than i386, NR_VECTORS
-+ * should be changed accordingly.
-+ */
-+#define NR_VECTORS 256
-+
-+#define FPU_IRQ			13
-+
-+#define	FIRST_VM86_IRQ		3
-+#define LAST_VM86_IRQ		15
-+#define invalid_vm86_irq(irq)	((irq) < 3 || (irq) > 15)
-+
-+/*
-+ * The flat IRQ space is divided into two regions:
-+ *  1. A one-to-one mapping of real physical IRQs. This space is only used
-+ *     if we have physical device-access privilege. This region is at the 
-+ *     start of the IRQ space so that existing device drivers do not need
-+ *     to be modified to translate physical IRQ numbers into our IRQ space.
-+ *  3. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
-+ *     are bound using the provided bind/unbind functions.
-+ */
-+
-+#define PIRQ_BASE		0
-+#define NR_PIRQS		256
-+
-+#define DYNIRQ_BASE		(PIRQ_BASE + NR_PIRQS)
-+#define NR_DYNIRQS		256
-+
-+#define NR_IRQS			(NR_PIRQS + NR_DYNIRQS)
-+#define NR_IRQ_VECTORS		NR_IRQS
++#define FIRST_SYSTEM_VECTOR	0xef   /* duplicated in irq.h */
 +
-+#define pirq_to_irq(_x)		((_x) + PIRQ_BASE)
-+#define irq_to_pirq(_x)		((_x) - PIRQ_BASE)
 +
-+#define dynirq_to_irq(_x)	((_x) + DYNIRQ_BASE)
-+#define irq_to_dynirq(_x)	((_x) - DYNIRQ_BASE)
++#ifndef __ASSEMBLY__
++extern u8 irq_vector[NR_IRQ_VECTORS];
++#define IO_APIC_VECTOR(irq)	(irq_vector[irq])
++#define AUTO_ASSIGN		-1
 +
-+#endif /* _ASM_IRQ_VECTORS_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/mach_traps.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/mach_traps.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/mach_traps.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/mach_traps.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,33 @@
 +/*
-+ *  include/asm-xen/asm-i386/mach-xen/mach_traps.h
++ * Various low-level irq details needed by irq.c, process.c,
++ * time.c, io_apic.c and smp.c
 + *
-+ *  Machine specific NMI handling for Xen
++ * Interrupt entry/exit code at both C and assembly level
 + */
-+#ifndef _MACH_TRAPS_H
-+#define _MACH_TRAPS_H
-+
-+#include <linux/bitops.h>
-+#include <xen/interface/nmi.h>
-+
-+static inline void clear_mem_error(unsigned char reason) {}
-+static inline void clear_io_check_error(unsigned char reason) {}
-+
-+static inline unsigned char get_nmi_reason(void)
-+{
-+	shared_info_t *s = HYPERVISOR_shared_info;
-+	unsigned char reason = 0;
-+
-+	/* construct a value which looks like it came from
-+	 * port 0x61.
-+	 */
-+	if (test_bit(_XEN_NMIREASON_io_error, &s->arch.nmi_reason))
-+		reason |= 0x40;
-+	if (test_bit(_XEN_NMIREASON_parity_error, &s->arch.nmi_reason))
-+		reason |= 0x80;
-+
-+        return reason;
-+}
-+
-+static inline void reassert_nmi(void) {}
-+
-+#endif /* !_MACH_TRAPS_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/mach-xen/setup_arch.h tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/setup_arch.h
---- pristine-linux-2.6.18.2/include/asm-i386/mach-xen/setup_arch.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/mach-xen/setup_arch.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,5 @@
-+/* Hook to call BIOS initialisation function */
-+
-+#define ARCH_SETUP machine_specific_arch_setup();
-+
-+void __init machine_specific_arch_setup(void);
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/page.h tmp-linux-2.6-xen.patch/include/asm-i386/page.h
---- pristine-linux-2.6.18.2/include/asm-i386/page.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/page.h	2007-10-14 01:51:15.000000000 +0200
-@@ -122,7 +122,7 @@ extern int page_is_ram(unsigned long pag
- 
- #define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)
- #define VMALLOC_RESERVE		((unsigned long)__VMALLOC_RESERVE)
--#define MAXMEM			(-__PAGE_OFFSET-__VMALLOC_RESERVE)
-+#define MAXMEM			(__FIXADDR_TOP-__PAGE_OFFSET-__VMALLOC_RESERVE)
- #define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
- #define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
- #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/pgtable-2level-defs.h tmp-linux-2.6-xen.patch/include/asm-i386/pgtable-2level-defs.h
---- pristine-linux-2.6.18.2/include/asm-i386/pgtable-2level-defs.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/pgtable-2level-defs.h	2007-10-14 01:51:15.000000000 +0200
-@@ -1,6 +1,8 @@
- #ifndef _I386_PGTABLE_2LEVEL_DEFS_H
- #define _I386_PGTABLE_2LEVEL_DEFS_H
- 
-+#define HAVE_SHARED_KERNEL_PMD 0
-+
- /*
-  * traditional i386 two-level paging structure:
-  */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-i386/pgtable-3level-defs.h tmp-linux-2.6-xen.patch/include/asm-i386/pgtable-3level-defs.h
---- pristine-linux-2.6.18.2/include/asm-i386/pgtable-3level-defs.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-i386/pgtable-3level-defs.h	2007-10-14 01:51:15.000000000 +0200
-@@ -1,6 +1,8 @@
- #ifndef _I386_PGTABLE_3LEVEL_DEFS_H
- #define _I386_PGTABLE_3LEVEL_DEFS_H
- 
-+#define HAVE_SHARED_KERNEL_PMD 1
-+
- /*
-  * PGDIR_SHIFT determines what a top-level page table entry can map
-  */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/agp.h tmp-linux-2.6-xen.patch/include/asm-ia64/agp.h
---- pristine-linux-2.6.18.2/include/asm-ia64/agp.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/agp.h	2007-07-30 16:35:13.000000000 +0200
-@@ -19,13 +19,44 @@
- #define flush_agp_cache()		mb()
- 
- /* Convert a physical address to an address suitable for the GART. */
-+#ifndef CONFIG_XEN
- #define phys_to_gart(x) (x)
- #define gart_to_phys(x) (x)
-+#else
-+#define phys_to_gart(x) phys_to_machine_for_dma(x)
-+#define gart_to_phys(x) machine_to_phys_for_dma(x)
-+#endif
- 
- /* GATT allocation. Returns/accepts GATT kernel virtual address. */
-+#ifndef CONFIG_XEN
- #define alloc_gatt_pages(order)		\
- 	((char *)__get_free_pages(GFP_KERNEL, (order)))
- #define free_gatt_pages(table, order)	\
- 	free_pages((unsigned long)(table), (order))
-+#else
-+#include <asm/hypervisor.h>
-+static inline char*
-+alloc_gatt_pages(unsigned int order)
-+{
-+	unsigned long error;
-+	unsigned long ret = __get_free_pages(GFP_KERNEL, (order));
-+	if (ret == 0) {
-+		goto out;
-+	}
-+	error = xen_create_contiguous_region(ret, order, 0);
-+	if (error) {
-+		free_pages(ret, order);
-+		ret = 0;
-+	}
-+out:
-+	return (char*)ret;
-+}
-+static inline void
-+free_gatt_pages(void* table, unsigned int order)
-+{
-+	xen_destroy_contiguous_region((unsigned long)table, order);
-+	free_pages((unsigned long)table, order);
-+}
-+#endif /* CONFIG_XEN */
- 
- #endif /* _ASM_IA64_AGP_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/dma-mapping.h tmp-linux-2.6-xen.patch/include/asm-ia64/dma-mapping.h
---- pristine-linux-2.6.18.2/include/asm-ia64/dma-mapping.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/dma-mapping.h	2007-07-30 16:35:13.000000000 +0200
-@@ -6,7 +6,14 @@
-  *	David Mosberger-Tang <davidm at hpl.hp.com>
-  */
- #include <asm/machvec.h>
-+#ifdef CONFIG_XEN
-+/* Needed for arch/i386/kernel/swiotlb.c and arch/i386/kernel/pci-dma-xen.c */
-+#include <asm/hypervisor.h>
-+/* Needed for arch/i386/kernel/swiotlb.c */
-+#include <asm/swiotlb.h>
-+#endif
- 
-+#ifndef CONFIG_XEN
- #define dma_alloc_coherent	platform_dma_alloc_coherent
- #define dma_alloc_noncoherent	platform_dma_alloc_coherent	/* coherent mem. is cheap */
- #define dma_free_coherent	platform_dma_free_coherent
-@@ -20,6 +27,46 @@
- #define dma_sync_single_for_device platform_dma_sync_single_for_device
- #define dma_sync_sg_for_device	platform_dma_sync_sg_for_device
- #define dma_mapping_error	platform_dma_mapping_error
-+#else
-+int dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-+               enum dma_data_direction direction);
-+void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-+                  enum dma_data_direction direction);
-+int dma_supported(struct device *dev, u64 mask);
-+void *dma_alloc_coherent(struct device *dev, size_t size,
-+                         dma_addr_t *dma_handle, gfp_t gfp);
-+void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-+                       dma_addr_t dma_handle);
-+dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
-+                          enum dma_data_direction direction);
-+void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-+                      enum dma_data_direction direction);
-+void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-+                             size_t size, enum dma_data_direction direction);
-+void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-+                                size_t size,
-+                                enum dma_data_direction direction);
-+int dma_mapping_error(dma_addr_t dma_addr);
 +
-+#define flush_write_buffers()	do { } while (0)
-+static inline void
-+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-+                    enum dma_data_direction direction)
-+{
-+	if (swiotlb)
-+		swiotlb_sync_sg_for_cpu(dev,sg,nelems,direction);
-+	flush_write_buffers();
-+}
++extern void disable_8259A_irq(unsigned int irq);
++extern void enable_8259A_irq(unsigned int irq);
++extern int i8259A_irq_pending(unsigned int irq);
++extern void make_8259A_irq(unsigned int irq);
++extern void init_8259A(int aeoi);
++extern void FASTCALL(send_IPI_self(int vector));
++extern void init_VISWS_APIC_irqs(void);
++extern void setup_IO_APIC(void);
++extern void disable_IO_APIC(void);
++#define print_IO_APIC()
++extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
++extern void send_IPI(int dest, int vector);
++extern void setup_ioapic_dest(void);
 +
-+static inline void
-+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-+                       enum dma_data_direction direction)
-+{
-+	if (swiotlb)
-+		swiotlb_sync_sg_for_device(dev,sg,nelems,direction);
-+	flush_write_buffers();
-+}
-+#endif
- 
- #define dma_map_page(dev, pg, off, size, dir)				\
- 	dma_map_single(dev, page_address(pg) + (off), (size), (dir))
-@@ -36,7 +83,9 @@
- #define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
- 	dma_sync_single_for_device(dev, dma_handle, size, dir)
- 
-+#ifndef CONFIG_XEN
- #define dma_supported		platform_dma_supported
-+#endif
- 
- static inline int
- dma_set_mask (struct device *dev, u64 mask)
-@@ -61,4 +110,27 @@ dma_cache_sync (void *vaddr, size_t size
- 
- #define dma_is_consistent(dma_handle)	(1)	/* all we do is coherent memory... */
- 
-+#ifdef CONFIG_XEN
-+/* arch/i386/kernel/swiotlb.o requires */
-+void contiguous_bitmap_init(unsigned long end_pfn);
++extern unsigned long io_apic_irqs;
 +
-+static inline int
-+address_needs_mapping(struct device *hwdev, dma_addr_t addr)
-+{
-+	dma_addr_t mask = DMA_64BIT_MASK;
-+	/* If the device has a mask, use it, otherwise default to 64 bits */
-+	if (hwdev && hwdev->dma_mask)
-+		mask = *hwdev->dma_mask;
-+	return (addr & ~mask) != 0;
-+}
++extern atomic_t irq_err_count;
++extern atomic_t irq_mis_count;
 +
-+static inline int
-+range_straddles_page_boundary(void *p, size_t size)
-+{
-+	extern unsigned long *contiguous_bitmap;
-+	return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
-+	        !test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
-+}
-+#endif
++#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
 +
- #endif /* _ASM_IA64_DMA_MAPPING_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/fixmap.h tmp-linux-2.6-xen.patch/include/asm-ia64/fixmap.h
---- pristine-linux-2.6.18.2/include/asm-ia64/fixmap.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/fixmap.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,2 @@
-+#define clear_fixmap(x)	do {} while (0)
-+#define	set_fixmap(x,y)	do {} while (0)
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/gcc_intrin.h tmp-linux-2.6-xen.patch/include/asm-ia64/gcc_intrin.h
---- pristine-linux-2.6.18.2/include/asm-ia64/gcc_intrin.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/gcc_intrin.h	2007-07-30 16:35:13.000000000 +0200
-@@ -26,7 +26,7 @@ extern void ia64_bad_param_for_getreg (v
- 
- register unsigned long ia64_r13 asm ("r13") __attribute_used__;
- 
--#define ia64_setreg(regnum, val)						\
-+#define __ia64_setreg(regnum, val)						\
- ({										\
- 	switch (regnum) {							\
- 	    case _IA64_REG_PSR_L:						\
-@@ -55,7 +55,7 @@ register unsigned long ia64_r13 asm ("r1
- 	}									\
- })
- 
--#define ia64_getreg(regnum)							\
-+#define __ia64_getreg(regnum)							\
- ({										\
- 	__u64 ia64_intri_res;							\
- 										\
-@@ -92,7 +92,7 @@ register unsigned long ia64_r13 asm ("r1
- 
- #define ia64_hint_pause 0
- 
--#define ia64_hint(mode)						\
-+#define __ia64_hint(mode)						\
- ({								\
- 	switch (mode) {						\
- 	case ia64_hint_pause:					\
-@@ -374,7 +374,7 @@ register unsigned long ia64_r13 asm ("r1
- 
- #define ia64_invala() asm volatile ("invala" ::: "memory")
- 
--#define ia64_thash(addr)							\
-+#define __ia64_thash(addr)							\
- ({										\
- 	__u64 ia64_intri_res;							\
- 	asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
-@@ -394,18 +394,18 @@ register unsigned long ia64_r13 asm ("r1
- 
- #define ia64_nop(x)	asm volatile ("nop %0"::"i"(x));
- 
--#define ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
-+#define __ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
- 
--#define ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
-+#define __ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
- 
- 
--#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1"				\
-+#define __ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1"			\
- 					     :: "r"(trnum), "r"(addr) : "memory")
- 
--#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1"				\
-+#define __ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1"			\
- 					     :: "r"(trnum), "r"(addr) : "memory")
- 
--#define ia64_tpa(addr)								\
-+#define __ia64_tpa(addr)							\
- ({										\
- 	__u64 ia64_pa;								\
- 	asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory");	\
-@@ -415,22 +415,22 @@ register unsigned long ia64_r13 asm ("r1
- #define __ia64_set_dbr(index, val)						\
- 	asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")
- 
--#define ia64_set_ibr(index, val)						\
-+#define __ia64_set_ibr(index, val)						\
- 	asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")
- 
--#define ia64_set_pkr(index, val)						\
-+#define __ia64_set_pkr(index, val)						\
- 	asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")
- 
--#define ia64_set_pmc(index, val)						\
-+#define __ia64_set_pmc(index, val)						\
- 	asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")
- 
--#define ia64_set_pmd(index, val)						\
-+#define __ia64_set_pmd(index, val)						\
- 	asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")
- 
--#define ia64_set_rr(index, val)							\
-+#define __ia64_set_rr(index, val)							\
- 	asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory");
- 
--#define ia64_get_cpuid(index)								\
-+#define __ia64_get_cpuid(index)								\
- ({											\
- 	__u64 ia64_intri_res;								\
- 	asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index));	\
-@@ -444,21 +444,21 @@ register unsigned long ia64_r13 asm ("r1
- 	ia64_intri_res;								\
- })
- 
--#define ia64_get_ibr(index)							\
-+#define __ia64_get_ibr(index)							\
- ({										\
- 	__u64 ia64_intri_res;							\
- 	asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
- 	ia64_intri_res;								\
- })
- 
--#define ia64_get_pkr(index)							\
-+#define __ia64_get_pkr(index)							\
- ({										\
- 	__u64 ia64_intri_res;							\
- 	asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
- 	ia64_intri_res;								\
- })
- 
--#define ia64_get_pmc(index)							\
-+#define __ia64_get_pmc(index)							\
- ({										\
- 	__u64 ia64_intri_res;							\
- 	asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
-@@ -466,48 +466,48 @@ register unsigned long ia64_r13 asm ("r1
- })
- 
- 
--#define ia64_get_pmd(index)							\
-+#define __ia64_get_pmd(index)							\
- ({										\
- 	__u64 ia64_intri_res;							\
- 	asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
- 	ia64_intri_res;								\
- })
- 
--#define ia64_get_rr(index)							\
-+#define __ia64_get_rr(index)							\
- ({										\
- 	__u64 ia64_intri_res;							\
- 	asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index));	\
- 	ia64_intri_res;								\
- })
- 
--#define ia64_fc(addr)	asm volatile ("fc %0" :: "r"(addr) : "memory")
-+#define __ia64_fc(addr)	asm volatile ("fc %0" :: "r"(addr) : "memory")
- 
- 
- #define ia64_sync_i()	asm volatile (";; sync.i" ::: "memory")
- 
--#define ia64_ssm(mask)	asm volatile ("ssm %0":: "i"((mask)) : "memory")
--#define ia64_rsm(mask)	asm volatile ("rsm %0":: "i"((mask)) : "memory")
-+#define __ia64_ssm(mask)	asm volatile ("ssm %0":: "i"((mask)) : "memory")
-+#define __ia64_rsm(mask)	asm volatile ("rsm %0":: "i"((mask)) : "memory")
- #define ia64_sum(mask)	asm volatile ("sum %0":: "i"((mask)) : "memory")
- #define ia64_rum(mask)	asm volatile ("rum %0":: "i"((mask)) : "memory")
- 
--#define ia64_ptce(addr)	asm volatile ("ptc.e %0" :: "r"(addr))
-+#define __ia64_ptce(addr)	asm volatile ("ptc.e %0" :: "r"(addr))
- 
--#define ia64_ptcga(addr, size)							\
-+#define __ia64_ptcga(addr, size)							\
- do {										\
- 	asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory");	\
- 	ia64_dv_serialize_data();						\
- } while (0)
- 
--#define ia64_ptcl(addr, size)							\
-+#define __ia64_ptcl(addr, size)							\
- do {										\
- 	asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory");	\
- 	ia64_dv_serialize_data();						\
- } while (0)
- 
--#define ia64_ptri(addr, size)						\
-+#define __ia64_ptri(addr, size)						\
- 	asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")
- 
--#define ia64_ptrd(addr, size)						\
-+#define __ia64_ptrd(addr, size)						\
- 	asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
- 
- /* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
-@@ -589,7 +589,7 @@ do {										\
-         }								\
- })
- 
--#define ia64_intrin_local_irq_restore(x)			\
-+#define __ia64_intrin_local_irq_restore(x)			\
- do {								\
- 	asm volatile (";;   cmp.ne p6,p7=%0,r0;;"		\
- 		      "(p6) ssm psr.i;"				\
-@@ -598,4 +598,6 @@ do {								\
- 		      :: "r"((x)) : "p6", "p7", "memory");	\
- } while (0)
- 
-+#define __ia64_get_psr_i()	(__ia64_getreg(_IA64_REG_PSR) & 0x4000UL)
++#define __STR(x) #x
++#define STR(x) __STR(x)
++
++#include <asm/ptrace.h>
++
++#define IRQ_NAME2(nr) nr##_interrupt(void)
++#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
++
++/*
++ *	SMP has a few special interrupts for IPI messages
++ */
++
++#define BUILD_IRQ(nr) \
++asmlinkage void IRQ_NAME(nr); \
++__asm__( \
++"\n.p2align\n" \
++"IRQ" #nr "_interrupt:\n\t" \
++	"push $~(" #nr ") ; " \
++	"jmp common_interrupt");
++
++#define platform_legacy_irq(irq)	((irq) < 16)
 +
- #endif /* _ASM_IA64_GCC_INTRIN_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/hw_irq.h tmp-linux-2.6-xen.patch/include/asm-ia64/hw_irq.h
---- pristine-linux-2.6.18.2/include/asm-ia64/hw_irq.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/hw_irq.h	2007-07-30 16:35:13.000000000 +0200
-@@ -15,7 +15,11 @@
- #include <asm/ptrace.h>
- #include <asm/smp.h>
- 
-+#ifndef CONFIG_XEN
- typedef u8 ia64_vector;
-+#else
-+typedef u16 ia64_vector;
 +#endif
- 
- /*
-  * 0 special
-@@ -99,6 +103,12 @@ extern void register_percpu_irq (ia64_ve
- 
- static inline void ia64_resend_irq(unsigned int vector)
- {
-+#ifdef CONFIG_XEN
-+	extern int resend_irq_on_evtchn(unsigned int i);
-+	if (is_running_on_xen())
-+		resend_irq_on_evtchn(vector);
-+	else
-+#endif /* CONFIG_XEN */
- 	platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
- }
- 
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/hypercall.h tmp-linux-2.6-xen.patch/include/asm-ia64/hypercall.h
---- pristine-linux-2.6.18.2/include/asm-ia64/hypercall.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/hypercall.h	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,425 @@
++
++#endif /* _ASM_HW_IRQ_H */
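Illustrative sketch (not patch content): BUILD_IRQ() only emits the low-level entry stub; on a native kernel the stub still has to be installed in the IDT, in the style of i8259.c. The vector number and init helper below are hypothetical:

BUILD_IRQ(0x31)		/* file scope: emits the IRQ0x31_interrupt stub */

static void __init wire_vector_0x31(void)
{
	set_intr_gate(0x31, IRQ0x31_interrupt);
}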
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/hypercall.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/hypercall.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,415 @@
 +/******************************************************************************
 + * hypercall.h
 + * 
@@ -86841,6 +125675,10 @@
 + * 
 + * Copyright (c) 2002-2004, K A Fraser
 + * 
++ * 64-bit updates:
++ *   Benjamin Liu <benjamin.liu at intel.com>
++ *   Jun Nakajima <jun.nakajima at intel.com>
++ * 
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License version 2
 + * as published by the Free Software Foundation; or, when distributed
@@ -86869,9055 +125707,9471 @@
 +#ifndef __HYPERCALL_H__
 +#define __HYPERCALL_H__
 +
++#include <linux/string.h> /* memcpy() */
++#include <linux/stringify.h>
++
 +#ifndef __HYPERVISOR_H__
 +# error "please don't include this file directly"
 +#endif
 +
-+#include <asm/xen/xcom_hcall.h>
-+struct xencomm_handle;
-+extern unsigned long __hypercall(unsigned long a1, unsigned long a2,
-+                                 unsigned long a3, unsigned long a4,
-+                                 unsigned long a5, unsigned long cmd);
-+
-+/*
-+ * Assembler stubs for hyper-calls.
-+ */
++#ifdef CONFIG_XEN
++#define HYPERCALL_STR(name)					\
++	"call hypercall_page + ("__stringify(__HYPERVISOR_##name)" * 32)"
++#else
++#define HYPERCALL_STR(name)					\
++	"mov $("__stringify(__HYPERVISOR_##name)" * 32),%%eax; "\
++	"add hypercall_stubs(%%rip),%%rax; "			\
++	"call *%%rax"
++#endif
 +
-+#define _hypercall0(type, name)					\
-+({								\
-+	long __res;						\
-+	__res=__hypercall(0, 0, 0, 0, 0, __HYPERVISOR_##name);	\
-+	(type)__res;						\
++#define _hypercall0(type, name)			\
++({						\
++	type __res;				\
++	asm volatile (				\
++		HYPERCALL_STR(name)		\
++		: "=a" (__res)			\
++		:				\
++		: "memory" );			\
++	__res;					\
 +})
 +
 +#define _hypercall1(type, name, a1)				\
 +({								\
-+	long __res;						\
-+	__res = __hypercall((unsigned long)a1,			\
-+	                     0, 0, 0, 0, __HYPERVISOR_##name);	\
-+	(type)__res;						\
++	type __res;						\
++	long __ign1;						\
++	asm volatile (						\
++		HYPERCALL_STR(name)				\
++		: "=a" (__res), "=D" (__ign1)			\
++		: "1" ((long)(a1))				\
++		: "memory" );					\
++	__res;							\
 +})
 +
 +#define _hypercall2(type, name, a1, a2)				\
 +({								\
-+	long __res;						\
-+	__res = __hypercall((unsigned long)a1,			\
-+	                    (unsigned long)a2,			\
-+	                    0, 0, 0, __HYPERVISOR_##name);	\
-+	(type)__res;						\
++	type __res;						\
++	long __ign1, __ign2;					\
++	asm volatile (						\
++		HYPERCALL_STR(name)				\
++		: "=a" (__res), "=D" (__ign1), "=S" (__ign2)	\
++		: "1" ((long)(a1)), "2" ((long)(a2))		\
++		: "memory" );					\
++	__res;							\
 +})
 +
 +#define _hypercall3(type, name, a1, a2, a3)			\
 +({								\
-+	long __res;						\
-+	__res = __hypercall((unsigned long)a1,			\
-+	                    (unsigned long)a2,			\
-+	                    (unsigned long)a3,			\
-+	                    0, 0, __HYPERVISOR_##name);		\
-+	(type)__res;						\
++	type __res;						\
++	long __ign1, __ign2, __ign3;				\
++	asm volatile (						\
++		HYPERCALL_STR(name)				\
++		: "=a" (__res), "=D" (__ign1), "=S" (__ign2), 	\
++		"=d" (__ign3)					\
++		: "1" ((long)(a1)), "2" ((long)(a2)),		\
++		"3" ((long)(a3))				\
++		: "memory" );					\
++	__res;							\
 +})
 +
 +#define _hypercall4(type, name, a1, a2, a3, a4)			\
 +({								\
-+	long __res;						\
-+	__res = __hypercall((unsigned long)a1,			\
-+	                    (unsigned long)a2,			\
-+	                    (unsigned long)a3,			\
-+	                    (unsigned long)a4,			\
-+	                    0, __HYPERVISOR_##name);		\
-+	(type)__res;						\
++	type __res;						\
++	long __ign1, __ign2, __ign3;				\
++	register long __arg4 asm("r10") = (long)(a4);		\
++	asm volatile (						\
++		HYPERCALL_STR(name)				\
++		: "=a" (__res), "=D" (__ign1), "=S" (__ign2),	\
++		  "=d" (__ign3), "+r" (__arg4)			\
++		: "1" ((long)(a1)), "2" ((long)(a2)),		\
++		  "3" ((long)(a3))				\
++		: "memory" );					\
++	__res;							\
 +})
 +
 +#define _hypercall5(type, name, a1, a2, a3, a4, a5)		\
 +({								\
-+	long __res;						\
-+	__res = __hypercall((unsigned long)a1,			\
-+	                    (unsigned long)a2,			\
-+	                    (unsigned long)a3,			\
-+	                    (unsigned long)a4,			\
-+	                    (unsigned long)a5,			\
-+	                    __HYPERVISOR_##name);		\
-+	(type)__res;						\
++	type __res;						\
++	long __ign1, __ign2, __ign3;				\
++	register long __arg4 asm("r10") = (long)(a4);		\
++	register long __arg5 asm("r8") = (long)(a5);		\
++	asm volatile (						\
++		HYPERCALL_STR(name)				\
++		: "=a" (__res), "=D" (__ign1), "=S" (__ign2),	\
++		  "=d" (__ign3), "+r" (__arg4), "+r" (__arg5)	\
++		: "1" ((long)(a1)), "2" ((long)(a2)),		\
++		  "3" ((long)(a3))				\
++		: "memory" );					\
++	__res;							\
 +})
 +
-+
-+static inline int
-+xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg)
-+{
-+	return _hypercall2(int, sched_op, cmd, arg);
-+}
-+
-+static inline long
-+HYPERVISOR_set_timer_op(u64 timeout)
-+{
-+	unsigned long timeout_hi = (unsigned long)(timeout >> 32);
-+	unsigned long timeout_lo = (unsigned long)timeout;
-+	return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
-+}
-+
-+static inline int
-+xencomm_arch_hypercall_platform_op(struct xencomm_handle *op)
-+{
-+	return _hypercall1(int, platform_op, op);
-+}
-+
-+static inline int
-+xencomm_arch_hypercall_sysctl(struct xencomm_handle *op)
-+{
-+	return _hypercall1(int, sysctl, op);
-+}
-+
-+static inline int
-+xencomm_arch_hypercall_domctl(struct xencomm_handle *op)
-+{
-+	return _hypercall1(int, domctl, op);
-+}
-+
-+static inline int
-+xencomm_arch_hypercall_multicall(struct xencomm_handle *call_list,
-+				 int nr_calls)
-+{
-+	return _hypercall2(int, multicall, call_list, nr_calls);
-+}
-+
-+static inline int
-+xencomm_arch_hypercall_memory_op(unsigned int cmd, struct xencomm_handle *arg)
-+{
-+	return _hypercall2(int, memory_op, cmd, arg);
-+}
-+
-+static inline int
-+xencomm_arch_hypercall_event_channel_op(int cmd, struct xencomm_handle *arg)
-+{
-+	return _hypercall2(int, event_channel_op, cmd, arg);
-+}
-+
-+static inline int
-+xencomm_arch_hypercall_acm_op(struct xencomm_handle *arg)
-+{
-+	return _hypercall1(int, acm_op, arg);
-+}
-+
-+static inline int
-+xencomm_arch_hypercall_xen_version(int cmd, struct xencomm_handle *arg)
-+{
-+	return _hypercall2(int, xen_version, cmd, arg);
-+}
-+
-+static inline int
-+xencomm_arch_hypercall_console_io(int cmd, int count,
-+                                  struct xencomm_handle *str)
-+{
-+	return _hypercall3(int, console_io, cmd, count, str);
-+}
-+
-+static inline int
-+xencomm_arch_hypercall_physdev_op(int cmd, struct xencomm_handle *arg)
++static inline int __must_check
++HYPERVISOR_set_trap_table(
++	const trap_info_t *table)
 +{
-+	return _hypercall2(int, physdev_op, cmd, arg);
++	return _hypercall1(int, set_trap_table, table);
 +}
 +
-+static inline int
-+xencomm_arch_hypercall_grant_table_op(unsigned int cmd,
-+                                      struct xencomm_handle *uop,
-+                                      unsigned int count)
++static inline int __must_check
++HYPERVISOR_mmu_update(
++	mmu_update_t *req, unsigned int count, unsigned int *success_count,
++	domid_t domid)
 +{
-+	return _hypercall3(int, grant_table_op, cmd, uop, count);
++	return _hypercall4(int, mmu_update, req, count, success_count, domid);
 +}
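/*
 * Illustrative use (not from the patch): a paravirtualized PTE write goes
 * through this wrapper, e.g. (ptep and pte are hypothetical):
 *
 *	mmu_update_t u;
 *	u.ptr = virt_to_machine(ptep);	// machine address of the PTE
 *	u.val = pte_val(pte);
 *	if (HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0)
 *		BUG();
 */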
 +
-+int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
-+
-+extern int xencomm_arch_hypercall_suspend(struct xencomm_handle *arg);
-+
-+static inline int
-+xencomm_arch_hypercall_callback_op(int cmd, struct xencomm_handle *arg)
++static inline int __must_check
++HYPERVISOR_mmuext_op(
++	struct mmuext_op *op, unsigned int count, unsigned int *success_count,
++	domid_t domid)
 +{
-+	return _hypercall2(int, callback_op, cmd, arg);
++	return _hypercall4(int, mmuext_op, op, count, success_count, domid);
 +}
 +
-+static inline unsigned long
-+xencomm_arch_hypercall_hvm_op(int cmd, void *arg)
++static inline int __must_check
++HYPERVISOR_set_gdt(
++	unsigned long *frame_list, unsigned int entries)
 +{
-+	return _hypercall2(unsigned long, hvm_op, cmd, arg);
++	return _hypercall2(int, set_gdt, frame_list, entries);
 +}
 +
-+static inline long
-+xencomm_arch_hypercall_vcpu_op(int cmd, int cpu, void *arg)
++static inline int __must_check
++HYPERVISOR_stack_switch(
++	unsigned long ss, unsigned long esp)
 +{
-+	return _hypercall3(long, vcpu_op, cmd, cpu, arg);
++	return _hypercall2(int, stack_switch, ss, esp);
 +}
 +
-+static inline int
-+HYPERVISOR_physdev_op(int cmd, void *arg)
++static inline int __must_check
++HYPERVISOR_set_callbacks(
++	unsigned long event_address, unsigned long failsafe_address, 
++	unsigned long syscall_address)
 +{
-+	switch (cmd) {
-+	case PHYSDEVOP_eoi:
-+		return _hypercall1(int, ia64_fast_eoi,
-+		                   ((struct physdev_eoi *)arg)->irq);
-+	default:
-+		return xencomm_hypercall_physdev_op(cmd, arg);
-+	}
++	return _hypercall3(int, set_callbacks,
++			   event_address, failsafe_address, syscall_address);
 +}
 +
 +static inline int
-+xencomm_arch_hypercall_xenoprof_op(int op, struct xencomm_handle *arg)
-+{
-+	return _hypercall2(int, xenoprof_op, op, arg);
-+}
-+
-+extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs);
-+static inline void exit_idle(void) {}
-+#define do_IRQ(irq, regs) ({			\
-+	irq_enter();				\
-+	__do_IRQ((irq), (regs));		\
-+	irq_exit();				\
-+})
-+
-+#include <linux/err.h>
-+#ifdef CONFIG_XEN
-+#include <asm/xen/privop.h>
-+#endif /* CONFIG_XEN */
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
-+
-+static inline unsigned long
-+__HYPERVISOR_ioremap(unsigned long ioaddr, unsigned long size)
-+{
-+	return _hypercall3(unsigned long, ia64_dom0vp_op,
-+	                   IA64_DOM0VP_ioremap, ioaddr, size);
-+}
-+
-+static inline unsigned long
-+HYPERVISOR_ioremap(unsigned long ioaddr, unsigned long size)
-+{
-+	unsigned long ret = ioaddr;
-+	if (is_running_on_xen()) {
-+		ret = __HYPERVISOR_ioremap(ioaddr, size);
-+		if (unlikely(ret == -ENOSYS))
-+			panic("hypercall %s failed with %ld. "
-+			      "Please check Xen and Linux config mismatch\n",
-+			      __func__, -ret);
-+		else if (unlikely(IS_ERR_VALUE(ret)))
-+			ret = ioaddr;
-+	}
-+	return ret;
-+}
-+
-+static inline unsigned long
-+__HYPERVISOR_phystomach(unsigned long gpfn)
-+{
-+	return _hypercall2(unsigned long, ia64_dom0vp_op,
-+	                   IA64_DOM0VP_phystomach, gpfn);
-+}
-+
-+static inline unsigned long
-+HYPERVISOR_phystomach(unsigned long gpfn)
-+{
-+	unsigned long ret = gpfn;
-+	if (is_running_on_xen()) {
-+		ret = __HYPERVISOR_phystomach(gpfn);
-+	}
-+	return ret;
-+}
-+
-+static inline unsigned long
-+__HYPERVISOR_machtophys(unsigned long mfn)
-+{
-+	return _hypercall2(unsigned long, ia64_dom0vp_op,
-+	                   IA64_DOM0VP_machtophys, mfn);
-+}
-+
-+static inline unsigned long
-+HYPERVISOR_machtophys(unsigned long mfn)
++HYPERVISOR_fpu_taskswitch(
++	int set)
 +{
-+	unsigned long ret = mfn;
-+	if (is_running_on_xen()) {
-+		ret = __HYPERVISOR_machtophys(mfn);
-+	}
-+	return ret;
++	return _hypercall1(int, fpu_taskswitch, set);
 +}
 +
-+static inline unsigned long
-+__HYPERVISOR_zap_physmap(unsigned long gpfn, unsigned int extent_order)
++static inline int __must_check
++HYPERVISOR_sched_op_compat(
++	int cmd, unsigned long arg)
 +{
-+	return _hypercall3(unsigned long, ia64_dom0vp_op,
-+	                   IA64_DOM0VP_zap_physmap, gpfn, extent_order);
++	return _hypercall2(int, sched_op_compat, cmd, arg);
 +}
 +
-+static inline unsigned long
-+HYPERVISOR_zap_physmap(unsigned long gpfn, unsigned int extent_order)
++static inline int __must_check
++HYPERVISOR_sched_op(
++	int cmd, void *arg)
 +{
-+	unsigned long ret = 0;
-+	if (is_running_on_xen()) {
-+		ret = __HYPERVISOR_zap_physmap(gpfn, extent_order);
-+	}
-+	return ret;
++	return _hypercall2(int, sched_op, cmd, arg);
 +}
 +
-+static inline unsigned long
-+__HYPERVISOR_add_physmap(unsigned long gpfn, unsigned long mfn,
-+			 unsigned long flags, domid_t domid)
++static inline long __must_check
++HYPERVISOR_set_timer_op(
++	u64 timeout)
 +{
-+	return _hypercall5(unsigned long, ia64_dom0vp_op,
-+	                   IA64_DOM0VP_add_physmap, gpfn, mfn, flags, domid);
++	return _hypercall1(long, set_timer_op, timeout);
 +}
 +
-+static inline unsigned long
-+HYPERVISOR_add_physmap(unsigned long gpfn, unsigned long mfn,
-+		       unsigned long flags, domid_t domid)
++static inline int __must_check
++HYPERVISOR_platform_op(
++	struct xen_platform_op *platform_op)
 +{
-+	unsigned long ret = 0;
-+	BUG_ON(!is_running_on_xen());//XXX
-+	if (is_running_on_xen()) {
-+		ret = __HYPERVISOR_add_physmap(gpfn, mfn, flags, domid);
-+	}
-+	return ret;
++	platform_op->interface_version = XENPF_INTERFACE_VERSION;
++	return _hypercall1(int, platform_op, platform_op);
 +}
 +
-+static inline unsigned long
-+__HYPERVISOR_add_physmap_with_gmfn(unsigned long gpfn, unsigned long gmfn,
-+                                   unsigned long flags, domid_t domid)
++static inline int __must_check
++HYPERVISOR_set_debugreg(
++	unsigned int reg, unsigned long value)
 +{
-+	return _hypercall5(unsigned long, ia64_dom0vp_op,
-+	                   IA64_DOM0VP_add_physmap_with_gmfn,
-+	                   gpfn, gmfn, flags, domid);
++	return _hypercall2(int, set_debugreg, reg, value);
 +}
 +
-+static inline unsigned long
-+HYPERVISOR_add_physmap_with_gmfn(unsigned long gpfn, unsigned long gmfn,
-+				 unsigned long flags, domid_t domid)
++static inline unsigned long __must_check
++HYPERVISOR_get_debugreg(
++	unsigned int reg)
 +{
-+	unsigned long ret = 0;
-+	BUG_ON(!is_running_on_xen());//XXX
-+	if (is_running_on_xen()) {
-+		ret = __HYPERVISOR_add_physmap_with_gmfn(gpfn, gmfn,
-+		                                         flags, domid);
-+	}
-+	return ret;
++	return _hypercall1(unsigned long, get_debugreg, reg);
 +}
 +
-+#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
-+static inline unsigned long
-+HYPERVISOR_expose_p2m(unsigned long conv_start_gpfn,
-+                      unsigned long assign_start_gpfn,
-+                      unsigned long expose_size, unsigned long granule_pfn)
++static inline int __must_check
++HYPERVISOR_update_descriptor(
++	unsigned long ma, unsigned long word)
 +{
-+	return _hypercall5(unsigned long, ia64_dom0vp_op,
-+	                   IA64_DOM0VP_expose_p2m, conv_start_gpfn,
-+	                   assign_start_gpfn, expose_size, granule_pfn);
++	return _hypercall2(int, update_descriptor, ma, word);
 +}
-+#endif
 +
-+static inline int
-+xencomm_arch_hypercall_perfmon_op(unsigned long cmd,
-+                                  struct xencomm_handle *arg,
-+                                  unsigned long count)
++static inline int __must_check
++HYPERVISOR_memory_op(
++	unsigned int cmd, void *arg)
 +{
-+	return _hypercall4(int, ia64_dom0vp_op,
-+			   IA64_DOM0VP_perfmon, cmd, arg, count);
++	return _hypercall2(int, memory_op, cmd, arg);
 +}
 +
-+static inline int
-+HYPERVISOR_add_io_space(unsigned long phys_base,
-+			unsigned long sparse,
-+			unsigned long space_number)
++static inline int __must_check
++HYPERVISOR_multicall(
++	multicall_entry_t *call_list, unsigned int nr_calls)
 +{
-+	return _hypercall4(int, ia64_dom0vp_op, IA64_DOM0VP_add_io_space,
-+			   phys_base, sparse, space_number);
++	return _hypercall2(int, multicall, call_list, nr_calls);
 +}
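/*
 * Illustrative use (not from the patch): batching lets several hypercalls
 * be issued with a single trap into Xen, e.g.:
 *
 *	multicall_entry_t mc[2];
 *	mc[0].op = __HYPERVISOR_fpu_taskswitch;
 *	mc[0].args[0] = 1;
 *	mc[1].op = __HYPERVISOR_fpu_taskswitch;
 *	mc[1].args[0] = 0;
 *	if (HYPERVISOR_multicall(mc, 2))
 *		BUG();
 */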
 +
-+// for balloon driver
-+#define HYPERVISOR_update_va_mapping(va, new_val, flags) (0)
-+
-+/* Use xencomm to do hypercalls.  */
-+#ifdef MODULE
-+#define HYPERVISOR_sched_op xencomm_mini_hypercall_sched_op
-+#define HYPERVISOR_event_channel_op xencomm_mini_hypercall_event_channel_op
-+#define HYPERVISOR_callback_op xencomm_mini_hypercall_callback_op
-+#define HYPERVISOR_multicall xencomm_mini_hypercall_multicall
-+#define HYPERVISOR_xen_version xencomm_mini_hypercall_xen_version
-+#define HYPERVISOR_console_io xencomm_mini_hypercall_console_io
-+#define HYPERVISOR_hvm_op xencomm_mini_hypercall_hvm_op
-+#define HYPERVISOR_memory_op xencomm_mini_hypercall_memory_op
-+#define HYPERVISOR_xenoprof_op xencomm_mini_hypercall_xenoprof_op
-+#define HYPERVISOR_perfmon_op xencomm_mini_hypercall_perfmon_op
-+#else
-+#define HYPERVISOR_sched_op xencomm_hypercall_sched_op
-+#define HYPERVISOR_event_channel_op xencomm_hypercall_event_channel_op
-+#define HYPERVISOR_callback_op xencomm_hypercall_callback_op
-+#define HYPERVISOR_multicall xencomm_hypercall_multicall
-+#define HYPERVISOR_xen_version xencomm_hypercall_xen_version
-+#define HYPERVISOR_console_io xencomm_hypercall_console_io
-+#define HYPERVISOR_hvm_op xencomm_hypercall_hvm_op
-+#define HYPERVISOR_memory_op xencomm_hypercall_memory_op
-+#define HYPERVISOR_xenoprof_op xencomm_hypercall_xenoprof_op
-+#define HYPERVISOR_perfmon_op xencomm_hypercall_perfmon_op
-+#endif
-+
-+#define HYPERVISOR_suspend xencomm_hypercall_suspend
-+#define HYPERVISOR_vcpu_op xencomm_hypercall_vcpu_op
-+
-+#endif /* __HYPERCALL_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/hypervisor.h tmp-linux-2.6-xen.patch/include/asm-ia64/hypervisor.h
---- pristine-linux-2.6.18.2/include/asm-ia64/hypervisor.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/hypervisor.h	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,226 @@
-+/******************************************************************************
-+ * hypervisor.h
-+ * 
-+ * Linux-specific hypervisor handling.
-+ * 
-+ * Copyright (c) 2002-2004, K A Fraser
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __HYPERVISOR_H__
-+#define __HYPERVISOR_H__
-+
-+#ifdef CONFIG_XEN
-+extern int running_on_xen;
-+#define is_running_on_xen()			(running_on_xen)
-+#else /* CONFIG_XEN */
-+# ifdef CONFIG_VMX_GUEST
-+#  define is_running_on_xen()			(1)
-+# else /* CONFIG_VMX_GUEST */
-+#  define is_running_on_xen()			(0)
-+#  define HYPERVISOR_ioremap(offset, size)	(offset)
-+# endif /* CONFIG_VMX_GUEST */
-+#endif /* CONFIG_XEN */
-+
-+#if defined(CONFIG_XEN) || defined(CONFIG_VMX_GUEST)
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <linux/version.h>
-+#include <linux/errno.h>
-+#include <xen/interface/xen.h>
-+#include <xen/interface/platform.h>
-+#include <xen/interface/event_channel.h>
-+#include <xen/interface/physdev.h>
-+#include <xen/interface/sched.h>
-+#include <xen/hypercall.h>
-+#include <asm/ptrace.h>
-+#include <asm/page.h>
-+
-+extern shared_info_t *HYPERVISOR_shared_info;
-+extern start_info_t *xen_start_info;
-+
-+void force_evtchn_callback(void);
-+
-+/* Turn jiffies into Xen system time. XXX Implement me. */
-+#define jiffies_to_st(j)	0
++static inline int __must_check
++HYPERVISOR_update_va_mapping(
++	unsigned long va, pte_t new_val, unsigned long flags)
++{
++	return _hypercall3(int, update_va_mapping, va, new_val.pte, flags);
++}
 +
-+static inline int
-+HYPERVISOR_yield(
-+	void)
++static inline int __must_check
++HYPERVISOR_event_channel_op(
++	int cmd, void *arg)
 +{
-+	int rc = HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
++	int rc = _hypercall2(int, event_channel_op, cmd, arg);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++	if (unlikely(rc == -ENOSYS)) {
++		struct evtchn_op op;
++		op.cmd = cmd;
++		memcpy(&op.u, arg, sizeof(op.u));
++		rc = _hypercall1(int, event_channel_op_compat, &op);
++		memcpy(arg, &op.u, sizeof(op.u));
++	}
++#endif
 +
 +	return rc;
 +}
 +
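HYPERVISOR_event_channel_op first tries the post-3.0.2 two-argument form and,
when built with CONFIG_XEN_COMPAT <= 0x030002 and running on an older
hypervisor (-ENOSYS), falls back to the legacy event_channel_op_compat call,
bouncing the argument through a struct evtchn_op so output fields still reach
the caller. The same fallback shape recurs in HYPERVISOR_physdev_op below. A
sketch of a typical caller (helper name invented; interface names from
xen/interface/event_channel.h):

    /* Hypothetical caller: bind the timer VIRQ on vcpu 0. */
    static int bind_timer_virq_example(void)
    {
            struct evtchn_bind_virq bind = {
                    .virq = VIRQ_TIMER,
                    .vcpu = 0,
            };
            int rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind);

            /* On success .port has been written back -- even via the
             * compat path, thanks to the memcpy() out of op.u. */
            return rc ? rc : bind.port;
    }
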
-+static inline int
-+HYPERVISOR_block(
-+	void)
++static inline int __must_check
++HYPERVISOR_acm_op(
++	int cmd, void *arg)
 +{
-+	int rc = HYPERVISOR_sched_op(SCHEDOP_block, NULL);
++	return _hypercall2(int, acm_op, cmd, arg);
++}
 +
-+	return rc;
++static inline int __must_check
++HYPERVISOR_xen_version(
++	int cmd, void *arg)
++{
++	return _hypercall2(int, xen_version, cmd, arg);
 +}
 +
-+static inline int
-+HYPERVISOR_shutdown(
-+	unsigned int reason)
++static inline int __must_check
++HYPERVISOR_console_io(
++	int cmd, unsigned int count, char *str)
 +{
-+	struct sched_shutdown sched_shutdown = {
-+		.reason = reason
-+	};
++	return _hypercall3(int, console_io, cmd, count, str);
++}
 +
-+	int rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
++static inline int __must_check
++HYPERVISOR_physdev_op(
++	int cmd, void *arg)
++{
++	int rc = _hypercall2(int, physdev_op, cmd, arg);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++	if (unlikely(rc == -ENOSYS)) {
++		struct physdev_op op;
++		op.cmd = cmd;
++		memcpy(&op.u, arg, sizeof(op.u));
++		rc = _hypercall1(int, physdev_op_compat, &op);
++		memcpy(arg, &op.u, sizeof(op.u));
++	}
++#endif
 +
 +	return rc;
 +}
 +
-+static inline int
-+HYPERVISOR_poll(
-+	evtchn_port_t *ports, unsigned int nr_ports, u64 timeout)
++static inline int __must_check
++HYPERVISOR_grant_table_op(
++	unsigned int cmd, void *uop, unsigned int count)
 +{
-+	struct sched_poll sched_poll = {
-+		.nr_ports = nr_ports,
-+		.timeout = jiffies_to_st(timeout)
-+	};
++	return _hypercall3(int, grant_table_op, cmd, uop, count);
++}
 +
-+	int rc;
++static inline int __must_check
++HYPERVISOR_update_va_mapping_otherdomain(
++	unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
++{
++	return _hypercall4(int, update_va_mapping_otherdomain, va,
++			   new_val.pte, flags, domid);
++}
 +
-+	set_xen_guest_handle(sched_poll.ports, ports);
-+	rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll);
++static inline int __must_check
++HYPERVISOR_vm_assist(
++	unsigned int cmd, unsigned int type)
++{
++	return _hypercall2(int, vm_assist, cmd, type);
++}
 +
-+	return rc;
++static inline int __must_check
++HYPERVISOR_vcpu_op(
++	int cmd, unsigned int vcpuid, void *extra_args)
++{
++	return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
 +}
 +
-+#ifndef CONFIG_VMX_GUEST
-+// for drivers/xen/privcmd/privcmd.c
-+#define machine_to_phys_mapping 0
-+struct vm_area_struct;
-+int direct_remap_pfn_range(struct vm_area_struct *vma,
-+			   unsigned long address,
-+			   unsigned long mfn,
-+			   unsigned long size,
-+			   pgprot_t prot,
-+			   domid_t  domid);
-+struct file;
-+int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
-+int privcmd_mmap(struct file * file, struct vm_area_struct * vma);
-+#define HAVE_ARCH_PRIVCMD_MMAP
++static inline int __must_check
++HYPERVISOR_set_segment_base(
++	int reg, unsigned long value)
++{
++	return _hypercall2(int, set_segment_base, reg, value);
++}
 +
-+// for drivers/xen/balloon/balloon.c
-+#ifdef CONFIG_XEN_SCRUB_PAGES
-+#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
-+#else
-+#define scrub_pages(_p,_n) ((void)0)
-+#endif
-+#define	pte_mfn(_x)	pte_pfn(_x)
-+#define phys_to_machine_mapping_valid(_x)	(1)
++static inline int __must_check
++HYPERVISOR_suspend(
++	unsigned long srec)
++{
++	struct sched_shutdown sched_shutdown = {
++		.reason = SHUTDOWN_suspend
++	};
 +
-+#endif /* !CONFIG_VMX_GUEST */
++	int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown,
++			     &sched_shutdown, srec);
 +
-+#define __pte_ma(_x)	((pte_t) {(_x)})        /* unmodified use */
-+#define pfn_pte_ma(_x,_y)	__pte_ma(0)     /* unmodified use */
++#if CONFIG_XEN_COMPAT <= 0x030002
++	if (rc == -ENOSYS)
++		rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown,
++				 SHUTDOWN_suspend, srec);
++#endif
 +
-+#ifndef CONFIG_VMX_GUEST
-+int __xen_create_contiguous_region(unsigned long vstart, unsigned int order, unsigned int address_bits);
++	return rc;
++}
++
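HYPERVISOR_suspend carries the suspend record srec as an extra third sched_op
argument alongside SCHEDOP_shutdown; the pre-3.0.3 fallback passes
SHUTDOWN_suspend directly to sched_op_compat. A caller would look roughly
like this (a sketch only; virt_to_mfn and xen_start_info are assumed from the
surrounding mach-xen headers, and srec being the start-info frame is the
conventional usage, not spelled out in this hunk):

    /* Hypothetical: kick off a save/restore-style suspend. */
    static int do_suspend_example(void)
    {
            return HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
    }
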
++#if CONFIG_XEN_COMPAT <= 0x030002
 +static inline int
-+xen_create_contiguous_region(unsigned long vstart,
-+                             unsigned int order, unsigned int address_bits)
++HYPERVISOR_nmi_op(
++	unsigned long op, void *arg)
 +{
-+	int ret = 0;
-+	if (is_running_on_xen()) {
-+		ret = __xen_create_contiguous_region(vstart, order,
-+		                                     address_bits);
-+	}
-+	return ret;
++	return _hypercall2(int, nmi_op, op, arg);
 +}
++#endif
 +
-+void __xen_destroy_contiguous_region(unsigned long vstart, unsigned int order);
-+static inline void
-+xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
++#ifndef CONFIG_XEN
++static inline unsigned long __must_check
++HYPERVISOR_hvm_op(
++    int op, void *arg)
 +{
-+	if (is_running_on_xen())
-+		__xen_destroy_contiguous_region(vstart, order);
++    return _hypercall2(unsigned long, hvm_op, op, arg);
 +}
++#endif
 +
-+/* For drivers/xen/core/machine_reboot.c */
-+#define HAVE_XEN_POST_SUSPEND
-+void xen_post_suspend(int suspend_cancelled);
-+#endif /* !CONFIG_VMX_GUEST */
-+
-+// for netfront.c, netback.c
-+#define MULTI_UVMFLAGS_INDEX 0 //XXX any value
++static inline int __must_check
++HYPERVISOR_callback_op(
++	int cmd, const void *arg)
++{
++	return _hypercall2(int, callback_op, cmd, arg);
++}
 +
-+static inline void
-+MULTI_update_va_mapping(
-+	multicall_entry_t *mcl, unsigned long va,
-+	pte_t new_val, unsigned long flags)
++static inline int __must_check
++HYPERVISOR_xenoprof_op(
++	int op, void *arg)
 +{
-+	mcl->op = __HYPERVISOR_update_va_mapping;
-+	mcl->result = 0;
++	return _hypercall2(int, xenoprof_op, op, arg);
 +}
 +
-+static inline void
-+MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd,
-+	void *uop, unsigned int count)
++static inline int __must_check
++HYPERVISOR_kexec_op(
++	unsigned long op, void *args)
 +{
-+	mcl->op = __HYPERVISOR_grant_table_op;
-+	mcl->args[0] = cmd;
-+	mcl->args[1] = (unsigned long)uop;
-+	mcl->args[2] = count;
++	return _hypercall2(int, kexec_op, op, args);
 +}
 +
++#endif /* __HYPERCALL_H__ */
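All of the wrappers above reduce to the _hypercallN() primitives and differ
only in argument marshalling and, where noted, compat fallback. As a quick
illustration, the version call needs no argument block at all (assuming
XENVER_version from xen/interface/version.h, which encodes major<<16 | minor
in the return value):

    /* Hypothetical probe: log the hypervisor version at boot. */
    static void report_xen_version_example(void)
    {
            int ver = HYPERVISOR_xen_version(XENVER_version, NULL);

            if (ver >= 0)
                    printk(KERN_INFO "Xen version %d.%d\n",
                           ver >> 16, ver & 0xffff);
    }
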
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/hypervisor.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/hypervisor.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,2 @@
++
++#include <asm-i386/mach-xen/asm/hypervisor.h>
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/io.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/io.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,329 @@
++#ifndef _ASM_IO_H
++#define _ASM_IO_H
++
++#include <asm/fixmap.h>
++
 +/*
-+ * for blktap.c
-+ * int create_lookup_pte_addr(struct mm_struct *mm, 
-+ *                            unsigned long address,
-+ *                            uint64_t *ptep);
++ * This file contains the definitions for the x86 IO instructions
++ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
++ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
++ * versions of the single-IO instructions (inb_p/inw_p/..).
++ *
++ * This file is not meant to be obfuscating: it's just complicated
++ * to (a) handle it all in a way that makes gcc able to optimize it
++ * as well as possible and (b) avoid writing the same thing
++ * over and over again with slight variations and possibly making a
++ * mistake somewhere.
 + */
-+#define create_lookup_pte_addr(mm, address, ptep)			\
-+	({								\
-+		printk(KERN_EMERG					\
-+		       "%s:%d "						\
-+		       "create_lookup_pte_addr() isn't supported.\n",	\
-+		       __func__, __LINE__);				\
-+		BUG();							\
-+		(-ENOSYS);						\
-+	})
 +
-+// for debug
-+asmlinkage int xprintk(const char *fmt, ...);
-+#define xprintd(fmt, ...)	xprintk("%s:%d " fmt, __func__, __LINE__, \
-+					##__VA_ARGS__)
++/*
++ * Thanks to James van Artsdalen for a better timing-fix than
++ * the two short jumps: using outb's to a nonexistent port seems
++ * to guarantee better timings even on fast machines.
++ *
++ * On the other hand, I'd like to be sure of a non-existent port:
++ * I feel a bit unsafe about using 0x80 (should be safe, though)
++ *
++ *		Linus
++ */
 +
-+#endif /* CONFIG_XEN || CONFIG_VMX_GUEST */
++ /*
++  *  Bit simplified and optimized by Jan Hubicka
++  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
++  *
++  *  isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
++  *  isa_read[wl] and isa_write[wl] fixed
++  *  - Arnaldo Carvalho de Melo <acme at conectiva.com.br>
++  */
 +
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+#define is_initial_xendomain()						\
-+	(is_running_on_xen() ? xen_start_info->flags & SIF_INITDOMAIN : 0)
++#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
++
++#ifdef REALLY_SLOW_IO
++#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
 +#else
-+#define is_initial_xendomain() 0
++#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
 +#endif
 +
-+#endif /* __HYPERVISOR_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/intel_intrin.h tmp-linux-2.6-xen.patch/include/asm-ia64/intel_intrin.h
---- pristine-linux-2.6.18.2/include/asm-ia64/intel_intrin.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/intel_intrin.h	2007-07-30 16:35:13.000000000 +0200
-@@ -16,8 +16,8 @@
- 		 	 * intrinsic
- 		 	 */
- 
--#define ia64_getreg		__getReg
--#define ia64_setreg		__setReg
-+#define __ia64_getreg		__getReg
-+#define __ia64_setreg		__setReg
- 
- #define ia64_hint		__hint
- #define ia64_hint_pause		__hint_pause
-@@ -33,16 +33,16 @@
- #define ia64_getf_exp		__getf_exp
- #define ia64_shrp		_m64_shrp
- 
--#define ia64_tpa		__tpa
-+#define __ia64_tpa		__tpa
- #define ia64_invala		__invala
- #define ia64_invala_gr		__invala_gr
- #define ia64_invala_fr		__invala_fr
- #define ia64_nop		__nop
- #define ia64_sum		__sum
--#define ia64_ssm		__ssm
-+#define __ia64_ssm		__ssm
- #define ia64_rum		__rum
--#define ia64_rsm		__rsm
--#define ia64_fc 		__fc
-+#define __ia64_rsm		__rsm
-+#define __ia64_fc 		__fc
- 
- #define ia64_ldfs		__ldfs
- #define ia64_ldfd		__ldfd
-@@ -80,24 +80,24 @@
- 
- #define __ia64_set_dbr(index, val)	\
- 		__setIndReg(_IA64_REG_INDR_DBR, index, val)
--#define ia64_set_ibr(index, val)	\
-+#define __ia64_set_ibr(index, val)	\
- 		__setIndReg(_IA64_REG_INDR_IBR, index, val)
--#define ia64_set_pkr(index, val)	\
-+#define __ia64_set_pkr(index, val)	\
- 		__setIndReg(_IA64_REG_INDR_PKR, index, val)
--#define ia64_set_pmc(index, val)	\
-+#define __ia64_set_pmc(index, val)	\
- 		__setIndReg(_IA64_REG_INDR_PMC, index, val)
--#define ia64_set_pmd(index, val)	\
-+#define __ia64_set_pmd(index, val)	\
- 		__setIndReg(_IA64_REG_INDR_PMD, index, val)
--#define ia64_set_rr(index, val)	\
-+#define __ia64_set_rr(index, val)	\
- 		__setIndReg(_IA64_REG_INDR_RR, index, val)
- 
--#define ia64_get_cpuid(index) 	__getIndReg(_IA64_REG_INDR_CPUID, index)
-+#define __ia64_get_cpuid(index) 	__getIndReg(_IA64_REG_INDR_CPUID, index)
- #define __ia64_get_dbr(index) 	__getIndReg(_IA64_REG_INDR_DBR, index)
--#define ia64_get_ibr(index) 	__getIndReg(_IA64_REG_INDR_IBR, index)
--#define ia64_get_pkr(index) 	__getIndReg(_IA64_REG_INDR_PKR, index)
--#define ia64_get_pmc(index) 	__getIndReg(_IA64_REG_INDR_PMC, index)
--#define ia64_get_pmd(index)  	__getIndReg(_IA64_REG_INDR_PMD, index)
--#define ia64_get_rr(index) 	__getIndReg(_IA64_REG_INDR_RR, index)
-+#define __ia64_get_ibr(index) 	__getIndReg(_IA64_REG_INDR_IBR, index)
-+#define __ia64_get_pkr(index) 	__getIndReg(_IA64_REG_INDR_PKR, index)
-+#define __ia64_get_pmc(index) 	__getIndReg(_IA64_REG_INDR_PMC, index)
-+#define __ia64_get_pmd(index)  	__getIndReg(_IA64_REG_INDR_PMD, index)
-+#define __ia64_get_rr(index) 	__getIndReg(_IA64_REG_INDR_RR, index)
- 
- #define ia64_srlz_d		__dsrlz
- #define ia64_srlz_i		__isrlz
-@@ -116,18 +116,18 @@
- #define ia64_ld8_acq		__ld8_acq
- 
- #define ia64_sync_i		__synci
--#define ia64_thash		__thash
--#define ia64_ttag		__ttag
--#define ia64_itcd		__itcd
--#define ia64_itci		__itci
--#define ia64_itrd		__itrd
--#define ia64_itri		__itri
--#define ia64_ptce		__ptce
--#define ia64_ptcl		__ptcl
--#define ia64_ptcg		__ptcg
--#define ia64_ptcga		__ptcga
--#define ia64_ptri		__ptri
--#define ia64_ptrd		__ptrd
-+#define __ia64_thash		__thash
-+#define __ia64_ttag		__ttag
-+#define __ia64_itcd		__itcd
-+#define __ia64_itci		__itci
-+#define __ia64_itrd		__itrd
-+#define __ia64_itri		__itri
-+#define __ia64_ptce		__ptce
-+#define __ia64_ptcl		__ptcl
-+#define __ia64_ptcg		__ptcg
-+#define __ia64_ptcga		__ptcga
-+#define __ia64_ptri		__ptri
-+#define __ia64_ptrd		__ptrd
- #define ia64_dep_mi		_m64_dep_mi
- 
- /* Values for lfhint in __lfetch and __lfetch_fault */
-@@ -142,16 +142,18 @@
- #define ia64_lfetch_fault	__lfetch_fault
- #define ia64_lfetch_fault_excl	__lfetch_fault_excl
- 
--#define ia64_intrin_local_irq_restore(x)		\
-+#define __ia64_intrin_local_irq_restore(x)		\
- do {							\
- 	if ((x) != 0) {					\
--		ia64_ssm(IA64_PSR_I);			\
-+		__ia64_ssm(IA64_PSR_I);			\
- 		ia64_srlz_d();				\
- 	} else {					\
--		ia64_rsm(IA64_PSR_I);			\
-+		__ia64_rsm(IA64_PSR_I);			\
- 	}						\
- } while (0)
- 
-+#define __ia64_get_psr_i()	(__ia64_getreg(_IA64_REG_PSR) & 0x4000UL)
++/*
++ * Talk about misusing macros..
++ */
++#define __OUT1(s,x) \
++static inline void out##s(unsigned x value, unsigned short port) {
 +
- #define __builtin_trap()	__break(0);
- 
- #endif /* _ASM_IA64_INTEL_INTRIN_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/io.h tmp-linux-2.6-xen.patch/include/asm-ia64/io.h
---- pristine-linux-2.6.18.2/include/asm-ia64/io.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/io.h	2007-07-30 16:35:13.000000000 +0200
-@@ -66,9 +66,11 @@ extern unsigned int num_io_spaces;
- #define PIO_RESERVED		__IA64_UNCACHED_OFFSET
- #define HAVE_ARCH_PIO_SIZE
- 
-+#include <asm/hypervisor.h>
- #include <asm/intrinsics.h>
- #include <asm/machvec.h>
- #include <asm/page.h>
-+#include <asm/privop.h>
- #include <asm/system.h>
- #include <asm-generic/iomap.h>
- 
-@@ -96,9 +98,44 @@ extern int valid_mmap_phys_addr_range (u
-  * The following two macros are deprecated and scheduled for removal.
-  * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
-  */
-+#ifndef CONFIG_XEN
- #define bus_to_virt	phys_to_virt
- #define virt_to_bus	virt_to_phys
- #define page_to_bus	page_to_phys
-+#else
-+#define bus_to_virt(bus)	\
-+	phys_to_virt(machine_to_phys_for_dma(bus))
-+#define virt_to_bus(virt)	\
-+	phys_to_machine_for_dma(virt_to_phys(virt))
-+#define page_to_bus(page)	\
-+	phys_to_machine_for_dma(page_to_pseudophys(page))
++#define __OUT2(s,s1,s2) \
++__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
 +
-+#define page_to_pseudophys(page) \
-+	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
++#define __OUT(s,s1,x) \
++__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
++__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
++
++#define __IN1(s) \
++static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
++
++#define __IN2(s,s1,s2) \
++__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
++
++#define __IN(s,s1,i...) \
++__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
++__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
++
++#define __INS(s) \
++static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
++{ __asm__ __volatile__ ("rep ; ins" #s \
++: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
++
++#define __OUTS(s) \
++static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
++{ __asm__ __volatile__ ("rep ; outs" #s \
++: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
++
++#define RETURN_TYPE unsigned char
++__IN(b,"")
++#undef RETURN_TYPE
++#define RETURN_TYPE unsigned short
++__IN(w,"")
++#undef RETURN_TYPE
++#define RETURN_TYPE unsigned int
++__IN(l,"")
++#undef RETURN_TYPE
++
++__OUT(b,"b",char)
++__OUT(w,"w",short)
++__OUT(l,,int)
++
++__INS(b)
++__INS(w)
++__INS(l)
++
++__OUTS(b)
++__OUTS(w)
++__OUTS(l)
++
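The __IN/__OUT/__INS/__OUTS macro stack above stamps out the whole
inb()/outb() port-I/O family, including the _p variants, which append
__FULL_SLOW_DOWN_IO (a dummy outb to port 0x80). Hand-expanding __IN(b,"")
gives, modulo string pasting, the following; this is shown for readability
only and is not additional code in the patch:

    static inline unsigned char inb(unsigned short port)
    {
            unsigned char _v;

            __asm__ __volatile__ ("inb %w1,%0" : "=a" (_v) : "Nd" (port));
            return _v;
    }
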
++#define IO_SPACE_LIMIT 0xffff
++
++#if defined(__KERNEL__) && __x86_64__
++
++#include <linux/vmalloc.h>
 +
++#ifndef __i386__
 +/*
-+ * Drivers that use page_to_phys() for bus addresses are broken.
-+ * This includes:
-+ * drivers/ide/cris/ide-cris.c
-+ * drivers/scsi/dec_esp.c
++ * Change virtual addresses to physical addresses and vv.
++ * These are pretty trivial
 + */
-+#define page_to_phys(page)	(page_to_pseudophys(page))
-+#define bvec_to_bus(bv)		(page_to_bus((bv)->bv_page) + \
-+				(unsigned long) (bv)->bv_offset)
-+#define bio_to_pseudophys(bio)	(page_to_pseudophys(bio_page((bio))) +	\
-+				 (unsigned long) bio_offset((bio)))
-+#define bvec_to_pseudophys(bv)  (page_to_pseudophys((bv)->bv_page) +	\
-+				 (unsigned long) (bv)->bv_offset)
-+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)				\
-+	(((bvec_to_bus((vec1)) + (vec1)->bv_len) == bvec_to_bus((vec2))) && \
-+	 ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) ==		\
-+	  bvec_to_pseudophys((vec2))))
++static inline unsigned long virt_to_phys(volatile void * address)
++{
++	return __pa(address);
++}
 +
-+/* We will be supplying our own /dev/mem implementation */
-+#define ARCH_HAS_DEV_MEM
-+#define ARCH_HAS_DEV_MEM_MMAP_MEM
-+int xen_mmap_mem(struct file * file, struct vm_area_struct * vma);
-+#endif /* CONFIG_XEN */
- 
- # endif /* KERNEL */
- 
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/iosapic.h tmp-linux-2.6-xen.patch/include/asm-ia64/iosapic.h
---- pristine-linux-2.6.18.2/include/asm-ia64/iosapic.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/iosapic.h	2007-07-30 16:35:13.000000000 +0200
-@@ -53,6 +53,7 @@
- 
- #define NR_IOSAPICS			256
- 
-+#ifndef CONFIG_XEN
- static inline unsigned int iosapic_read(char __iomem *iosapic, unsigned int reg)
- {
- 	writel(reg, iosapic + IOSAPIC_REG_SELECT);
-@@ -64,6 +65,7 @@ static inline void iosapic_write(char __
- 	writel(reg, iosapic + IOSAPIC_REG_SELECT);
- 	writel(val, iosapic + IOSAPIC_WINDOW);
- }
-+#endif
- 
- static inline void iosapic_eoi(char __iomem *iosapic, u32 vector)
- {
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/irq.h tmp-linux-2.6-xen.patch/include/asm-ia64/irq.h
---- pristine-linux-2.6.18.2/include/asm-ia64/irq.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/irq.h	2007-07-30 16:35:13.000000000 +0200
-@@ -11,8 +11,41 @@
-  * 02/29/00     D.Mosberger	moved most things into hw_irq.h
-  */
- 
-+#ifndef CONFIG_XEN
- #define NR_IRQS		256
- #define NR_IRQ_VECTORS	NR_IRQS
-+#else
-+/*
-+ * The flat IRQ space is divided into two regions:
-+ *  1. A one-to-one mapping of real physical IRQs. This space is only used
-+ *     if we have physical device-access privilege. This region is at the 
-+ *     start of the IRQ space so that existing device drivers do not need
-+ *     to be modified to translate physical IRQ numbers into our IRQ space.
-+ *  3. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
-+ *     are bound using the provided bind/unbind functions.
++static inline void * phys_to_virt(unsigned long address)
++{
++	return __va(address);
++}
++
++#define virt_to_bus(_x) phys_to_machine(__pa(_x))
++#define bus_to_virt(_x) __va(machine_to_phys(_x))
++#endif
++
++/*
++ * Change "struct page" to physical address.
 + */
++#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
++#define page_to_phys(page)	 (phys_to_machine(page_to_pseudophys(page)))
++#define page_to_bus(page)	 (phys_to_machine(page_to_pseudophys(page)))
 +
-+#define PIRQ_BASE		0
-+#define NR_PIRQS		256
++#define bio_to_pseudophys(bio)	 (page_to_pseudophys(bio_page((bio))) + \
++				  (unsigned long) bio_offset((bio)))
++#define bvec_to_pseudophys(bv)	 (page_to_pseudophys((bv)->bv_page) + \
++				  (unsigned long) (bv)->bv_offset)
 +
-+#define DYNIRQ_BASE		(PIRQ_BASE + NR_PIRQS)
-+#define NR_DYNIRQS		256
++#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
++	(((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
++	 ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
++	  bvec_to_pseudophys((vec2))))
 +
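Under Xen, pages adjacent in pseudo-physical space generally sit at unrelated
machine frames, so two bio_vecs may only be merged into one DMA segment when
they abut in both address spaces; that is what the double comparison in
BIOVEC_PHYS_MERGEABLE enforces. Restated as an explicit helper (equivalent by
hand-expansion, assuming bvec_to_phys from linux/bio.h; the function name is
invented):

    static inline int biovec_mergeable_example(struct bio_vec *v1,
                                               struct bio_vec *v2)
    {
            int machine_adjacent =
                    bvec_to_phys(v1) + v1->bv_len == bvec_to_phys(v2);
            int pseudo_adjacent =
                    bvec_to_pseudophys(v1) + v1->bv_len ==
                    bvec_to_pseudophys(v2);

            return machine_adjacent && pseudo_adjacent;
    }
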
-+#define NR_IRQS			(NR_PIRQS + NR_DYNIRQS)
-+#define NR_IRQ_VECTORS		NR_IRQS
++#include <asm-generic/iomap.h>
 +
-+#define pirq_to_irq(_x)		((_x) + PIRQ_BASE)
-+#define irq_to_pirq(_x)		((_x) - PIRQ_BASE)
++extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags);
 +
-+#define dynirq_to_irq(_x)	((_x) + DYNIRQ_BASE)
-+#define irq_to_dynirq(_x)	((_x) - DYNIRQ_BASE)
++static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
++{
++	return __ioremap(offset, size, 0);
++}
 +
-+#define RESCHEDULE_VECTOR	0
-+#define IPI_VECTOR		1
-+#define CMCP_VECTOR		2
-+#define CPEP_VECTOR		3
-+#define NR_IPIS			4
-+#endif /* CONFIG_XEN */
- 
- static __inline__ int
- irq_canonicalize (int irq)
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/machvec_dig.h tmp-linux-2.6-xen.patch/include/asm-ia64/machvec_dig.h
---- pristine-linux-2.6.18.2/include/asm-ia64/machvec_dig.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/machvec_dig.h	2007-07-30 16:35:13.000000000 +0200
-@@ -13,4 +13,19 @@ extern ia64_mv_setup_t dig_setup;
- #define platform_name		"dig"
- #define platform_setup		dig_setup
- 
-+#ifdef CONFIG_XEN
-+# define platform_dma_map_sg		dma_map_sg
-+# define platform_dma_unmap_sg		dma_unmap_sg
-+# define platform_dma_mapping_error	dma_mapping_error
-+# define platform_dma_supported		dma_supported
-+# define platform_dma_alloc_coherent	dma_alloc_coherent
-+# define platform_dma_free_coherent	dma_free_coherent
-+# define platform_dma_map_single	dma_map_single
-+# define platform_dma_unmap_single	dma_unmap_single
-+# define platform_dma_sync_single_for_cpu \
-+					dma_sync_single_for_cpu
-+# define platform_dma_sync_single_for_device \
-+					dma_sync_single_for_device
-+#endif
-+
- #endif /* _ASM_IA64_MACHVEC_DIG_h */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/maddr.h tmp-linux-2.6-xen.patch/include/asm-ia64/maddr.h
---- pristine-linux-2.6.18.2/include/asm-ia64/maddr.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/maddr.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,98 @@
-+#ifndef _ASM_IA64_MADDR_H
-+#define _ASM_IA64_MADDR_H
++extern void *bt_ioremap(unsigned long addr, unsigned long size);
++extern void bt_iounmap(void *addr, unsigned long size);
++#define early_ioremap bt_ioremap
++#define early_iounmap bt_iounmap
 +
-+#include <linux/kernel.h>
-+#include <asm/hypervisor.h>
-+#include <xen/features.h>
-+#include <xen/interface/xen.h>
++/*
++ * This one maps high address device memory and turns off caching for that area.
++ * It's useful if some control registers are in such an area and write combining
++ * or read caching is not desirable:
++ */
++extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
++extern void iounmap(volatile void __iomem *addr);
 +
-+#ifdef CONFIG_XEN
++/*
++ * ISA I/O bus memory addresses are 1:1 with the physical address.
++ */
 +
-+#define INVALID_P2M_ENTRY       (~0UL)
++#define isa_virt_to_bus(_x) ({ BUG(); virt_to_bus(_x); })
++#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
++#define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))
 +
-+#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
-+extern int p2m_initialized;
-+extern unsigned long p2m_min_low_pfn;
-+extern unsigned long p2m_max_low_pfn;
-+extern unsigned long p2m_convert_min_pfn;
-+extern unsigned long p2m_convert_max_pfn;
-+extern volatile const pte_t* p2m_pte;
-+unsigned long p2m_phystomach(unsigned long gpfn);
-+#else
-+#define p2m_initialized		(0)
-+#define p2m_phystomach(gpfn)	INVALID_MFN
-+#endif
++/*
++ * However PCI ones are not necessarily 1:1 and therefore these interfaces
++ * are forbidden in portable PCI drivers.
++ *
++ * Allow them on x86 for legacy drivers, though.
++ */
++#define virt_to_bus(_x) phys_to_machine(__pa(_x))
++#define bus_to_virt(_x) __va(machine_to_phys(_x))
 +
-+/* XXX xen page size != page size */
-+static inline unsigned long
-+pfn_to_mfn_for_dma(unsigned long pfn)
++/*
++ * readX/writeX() are used to access memory mapped devices. On some
++ * architectures the memory mapped IO stuff needs to be accessed
++ * differently. On the x86 architecture, we just read/write the
++ * memory location directly.
++ */
++
++static inline __u8 __readb(const volatile void __iomem *addr)
 +{
-+	unsigned long mfn;
-+	if (p2m_initialized)
-+		return p2m_phystomach(pfn);
-+	mfn = HYPERVISOR_phystomach(pfn);
-+	BUG_ON(mfn == 0); // XXX
-+	BUG_ON(mfn == INVALID_P2M_ENTRY); // XXX
-+	BUG_ON(mfn == INVALID_MFN);
-+	return mfn;
++	return *(__force volatile __u8 *)addr;
 +}
-+
-+static inline unsigned long
-+phys_to_machine_for_dma(unsigned long phys)
++static inline __u16 __readw(const volatile void __iomem *addr)
 +{
-+	unsigned long machine =
-+	              pfn_to_mfn_for_dma(phys >> PAGE_SHIFT) << PAGE_SHIFT;
-+	machine |= (phys & ~PAGE_MASK);
-+	return machine;
++	return *(__force volatile __u16 *)addr;
 +}
-+
-+static inline unsigned long
-+mfn_to_pfn_for_dma(unsigned long mfn)
++static __always_inline __u32 __readl(const volatile void __iomem *addr)
 +{
-+	unsigned long pfn;
-+	pfn = HYPERVISOR_machtophys(mfn);
-+	BUG_ON(pfn == 0);
-+	//BUG_ON(pfn == INVALID_M2P_ENTRY);
-+	return pfn;
++	return *(__force volatile __u32 *)addr;
 +}
-+
-+static inline unsigned long
-+machine_to_phys_for_dma(unsigned long machine)
++static inline __u64 __readq(const volatile void __iomem *addr)
 +{
-+	unsigned long phys =
-+	              mfn_to_pfn_for_dma(machine >> PAGE_SHIFT) << PAGE_SHIFT;
-+	phys |= (machine & ~PAGE_MASK);
-+	return phys;
++	return *(__force volatile __u64 *)addr;
 +}
++#define readb(x) __readb(x)
++#define readw(x) __readw(x)
++#define readl(x) __readl(x)
++#define readq(x) __readq(x)
++#define readb_relaxed(a) readb(a)
++#define readw_relaxed(a) readw(a)
++#define readl_relaxed(a) readl(a)
++#define readq_relaxed(a) readq(a)
++#define __raw_readb readb
++#define __raw_readw readw
++#define __raw_readl readl
++#define __raw_readq readq
 +
-+static inline unsigned long
-+mfn_to_local_pfn(unsigned long mfn)
++#define mmiowb()
++
++static inline void __writel(__u32 b, volatile void __iomem *addr)
 +{
-+	unsigned long pfn = mfn_to_pfn_for_dma(mfn);
-+	if (!pfn_valid(pfn))
-+		return INVALID_P2M_ENTRY;
-+	return pfn;
++	*(__force volatile __u32 *)addr = b;
++}
++static inline void __writeq(__u64 b, volatile void __iomem *addr)
++{
++	*(__force volatile __u64 *)addr = b;
++}
++static inline void __writeb(__u8 b, volatile void __iomem *addr)
++{
++	*(__force volatile __u8 *)addr = b;
++}
++static inline void __writew(__u16 b, volatile void __iomem *addr)
++{
++	*(__force volatile __u16 *)addr = b;
 +}
++#define writeq(val,addr) __writeq((val),(addr))
++#define writel(val,addr) __writel((val),(addr))
++#define writew(val,addr) __writew((val),(addr))
++#define writeb(val,addr) __writeb((val),(addr))
++#define __raw_writeb writeb
++#define __raw_writew writew
++#define __raw_writel writel
++#define __raw_writeq writeq
 +
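The readX()/writeX() accessors compile to plain volatile dereferences; no
extra fencing is inserted and mmiowb() is empty, matching native x86-64
ordering assumptions. Typical driver-style usage would be as follows (device
base and register offset invented for illustration):

    /* Hypothetical MMIO poke: read a status word, set an enable bit. */
    static void mmio_example(unsigned long phys_base)
    {
            void __iomem *regs = ioremap_nocache(phys_base, 0x100);

            if (regs) {
                    u32 status = readl(regs + 0x04);

                    writel(status | 0x1, regs + 0x04);
                    iounmap(regs);
            }
    }
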
-+#else /* !CONFIG_XEN */
++void __memcpy_fromio(void*,unsigned long,unsigned);
++void __memcpy_toio(unsigned long,const void*,unsigned);
 +
-+#define pfn_to_mfn_for_dma(pfn) (pfn)
-+#define mfn_to_pfn_for_dma(mfn) (mfn)
-+#define phys_to_machine_for_dma(phys) (phys)
-+#define machine_to_phys_for_dma(machine) (machine)
-+#define mfn_to_local_pfn(mfn) (mfn)
++static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len)
++{
++	__memcpy_fromio(to,(unsigned long)from,len);
++}
++static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len)
++{
++	__memcpy_toio((unsigned long)to,from,len);
++}
 +
-+#endif /* !CONFIG_XEN */
++void memset_io(volatile void __iomem *a, int b, size_t c);
 +
-+#define mfn_to_pfn(mfn) (mfn)
-+#define pfn_to_mfn(pfn) (pfn)
++/*
++ * ISA space is 'always mapped' on a typical x86 system, no need to
++ * explicitly ioremap() it. The fact that the ISA IO space is mapped
++ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
++ * are physical addresses. The following constant pointer can be
++ * used as the IO-area pointer (it can be iounmapped as well, so the
++ * analogy with PCI is quite large):
++ */
++#define __ISA_IO_base ((char __iomem *)(fix_to_virt(FIX_ISAMAP_BEGIN)))
 +
-+#define mfn_to_virt(mfn) (__va((mfn) << PAGE_SHIFT))
-+#define virt_to_mfn(virt) (__pa(virt) >> PAGE_SHIFT)
-+#define virt_to_machine(virt) __pa(virt) // for tpmfront.c
++/*
++ * Again, x86-64 does not require memory-I/O-specific functions.
++ */
 +
-+#define set_phys_to_machine(pfn, mfn) do { } while (0)
++#define eth_io_copy_and_sum(a,b,c,d)		eth_copy_and_sum((a),(void *)(b),(c),(d))
 +
-+typedef unsigned long maddr_t;	// to compile netback, netfront
++/**
++ *	check_signature		-	find BIOS signatures
++ *	@io_addr: mmio address to check 
++ *	@signature:  signature block
++ *	@length: length of signature
++ *
++ *	Perform a signature comparison with the mmio address io_addr. This
++ *	address should have been obtained by ioremap.
++ *	Returns 1 on a match.
++ */
++ 
++static inline int check_signature(void __iomem *io_addr,
++	const unsigned char *signature, int length)
++{
++	int retval = 0;
++	do {
++		if (readb(io_addr) != *signature)
++			goto out;
++		io_addr++;
++		signature++;
++		length--;
++	} while (length);
++	retval = 1;
++out:
++	return retval;
++}
 +
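check_signature() walks the candidate region byte by byte with readb() and
returns 1 only on a full match. A sketch of the classic use, probing for an
option-ROM signature (the address and signature bytes are invented, not taken
from the patch):

    static int __init probe_rom_example(void)
    {
            static const unsigned char sig[] = { 0x55, 0xaa };
            void __iomem *p = ioremap(0xc8000, sizeof(sig));
            int found = 0;

            if (p) {
                    found = check_signature(p, sig, sizeof(sig));
                    iounmap(p);
            }
            return found ? 0 : -ENODEV;
    }
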
-+#endif /* _ASM_IA64_MADDR_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/meminit.h tmp-linux-2.6-xen.patch/include/asm-ia64/meminit.h
---- pristine-linux-2.6.18.2/include/asm-ia64/meminit.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/meminit.h	2007-07-30 16:35:13.000000000 +0200
-@@ -16,10 +16,15 @@
-  * 	- command line string
-  * 	- kernel code & data
-  * 	- Kernel memory map built from EFI memory map
-+ *	- xen start info
-  *
-  * More could be added if necessary
-  */
-+#ifndef CONFIG_XEN
- #define IA64_MAX_RSVD_REGIONS 6
-+#else
-+#define IA64_MAX_RSVD_REGIONS 7
-+#endif
- 
- struct rsvd_region {
- 	unsigned long start;	/* virtual address of beginning of element */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/page.h tmp-linux-2.6-xen.patch/include/asm-ia64/page.h
---- pristine-linux-2.6.18.2/include/asm-ia64/page.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/page.h	2007-07-30 16:35:13.000000000 +0200
-@@ -119,6 +119,7 @@ extern struct page *vmem_map;
- #endif
- 
- #ifdef CONFIG_FLATMEM
-+extern unsigned long max_mapnr;
- # define pfn_valid(pfn)		(((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
- #elif defined(CONFIG_DISCONTIGMEM)
- extern unsigned long min_low_pfn;
-@@ -126,7 +127,9 @@ extern unsigned long max_low_pfn;
- # define pfn_valid(pfn)		(((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
- #endif
- 
-+#ifndef CONFIG_XEN
- #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
-+#endif
- #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
- #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
- 
-@@ -227,5 +230,25 @@ get_order (unsigned long size)
- 					 (((current->personality & READ_IMPLIES_EXEC) != 0)	\
- 					  ? VM_EXEC : 0))
- 
-+#ifndef __ASSEMBLY__
-+#ifdef CONFIG_XEN
++/* Nothing to do */
 +
-+#include <linux/kernel.h>
-+#include <asm/hypervisor.h>
-+#include <xen/features.h>	// to compile netback, netfront
-+#include <asm/maddr.h>
++#define dma_cache_inv(_start,_size)		do { } while (0)
++#define dma_cache_wback(_start,_size)		do { } while (0)
++#define dma_cache_wback_inv(_start,_size)	do { } while (0)
 +
-+#define arch_free_page(_page, _order)		\
-+({						\
-+	int foreign = PageForeign(_page);	\
-+	if (foreign)                            \
-+		PageForeignDestructor(_page);   \
-+	foreign;                                \
-+})
-+#define HAVE_ARCH_FREE_PAGE
++#define flush_write_buffers() 
 +
-+#endif /* CONFIG_XEN */
-+#endif /* __ASSEMBLY__ */
++extern int iommu_bio_merge;
++#define BIO_VMERGE_BOUNDARY iommu_bio_merge
 +
- # endif /* __KERNEL__ */
- #endif /* _ASM_IA64_PAGE_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/pal.h tmp-linux-2.6-xen.patch/include/asm-ia64/pal.h
---- pristine-linux-2.6.18.2/include/asm-ia64/pal.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/pal.h	2007-07-30 16:35:13.000000000 +0200
-@@ -82,6 +82,7 @@
- #ifndef __ASSEMBLY__
- 
- #include <linux/types.h>
-+#include <asm/processor.h>
- #include <asm/fpu.h>
- 
- /*
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/pgalloc.h tmp-linux-2.6-xen.patch/include/asm-ia64/pgalloc.h
---- pristine-linux-2.6.18.2/include/asm-ia64/pgalloc.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/pgalloc.h	2007-07-30 16:35:13.000000000 +0200
-@@ -125,7 +125,11 @@ static inline void pmd_free(pmd_t * pmd)
- static inline void
- pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
- {
-+#ifndef CONFIG_XEN
- 	pmd_val(*pmd_entry) = page_to_phys(pte);
-+#else
-+	pmd_val(*pmd_entry) = page_to_pseudophys(pte);
-+#endif
- }
- 
- static inline void
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/privop.h tmp-linux-2.6-xen.patch/include/asm-ia64/privop.h
---- pristine-linux-2.6.18.2/include/asm-ia64/privop.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/privop.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,60 @@
-+#ifndef _ASM_IA64_PRIVOP_H
-+#define _ASM_IA64_PRIVOP_H
++/*
++ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
++ * access
++ */
++#define xlate_dev_mem_ptr(p)	__va(p)
 +
 +/*
-+ * Copyright (C) 2005 Hewlett-Packard Co
-+ *	Dan Magenheimer <dan.magenheimer at hp.com>
-+ *
++ * Convert a virtual cached pointer to an uncached pointer
 + */
++#define xlate_dev_kmem_ptr(p)	p
 +
-+#ifdef CONFIG_XEN
-+#include <asm/xen/privop.h>
-+#endif
++#endif /* __KERNEL__ */
 +
-+#ifndef __ASSEMBLY
++#define ARCH_HAS_DEV_MEM
 +
-+#ifndef IA64_PARAVIRTUALIZED
++#endif
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/irq.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/irq.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,38 @@
++#ifndef _ASM_IRQ_H
++#define _ASM_IRQ_H
 +
-+#define ia64_getreg			__ia64_getreg
-+#define ia64_setreg			__ia64_setreg
-+#define ia64_hint			__ia64_hint
-+#define ia64_thash			__ia64_thash
-+#define ia64_itci			__ia64_itci
-+#define ia64_itcd			__ia64_itcd
-+#define ia64_itri			__ia64_itri
-+#define ia64_itrd			__ia64_itrd
-+#define ia64_tpa			__ia64_tpa
-+#define ia64_set_ibr			__ia64_set_ibr
-+#define ia64_set_pkr			__ia64_set_pkr
-+#define ia64_set_pmc			__ia64_set_pmc
-+#define ia64_set_pmd			__ia64_set_pmd
-+#define ia64_set_rr			__ia64_set_rr
-+#define ia64_get_cpuid			__ia64_get_cpuid
-+#define ia64_get_ibr			__ia64_get_ibr
-+#define ia64_get_pkr			__ia64_get_pkr
-+#define ia64_get_pmc			__ia64_get_pmc
-+#define ia64_get_pmd			__ia64_get_pmd
-+#define ia64_get_rr			__ia64_get_rr
-+#define ia64_fc				__ia64_fc
-+#define ia64_ssm			__ia64_ssm
-+#define ia64_rsm			__ia64_rsm
-+#define ia64_ptce			__ia64_ptce
-+#define ia64_ptcga			__ia64_ptcga
-+#define ia64_ptcl			__ia64_ptcl
-+#define ia64_ptri			__ia64_ptri
-+#define ia64_ptrd			__ia64_ptrd
-+#define ia64_get_psr_i			__ia64_get_psr_i
-+#define ia64_intrin_local_irq_restore	__ia64_intrin_local_irq_restore
-+#define ia64_pal_halt_light		__ia64_pal_halt_light
-+#define ia64_leave_kernel		__ia64_leave_kernel
-+#define ia64_leave_syscall		__ia64_leave_syscall
-+#define ia64_trace_syscall		__ia64_trace_syscall
-+#define ia64_ret_from_clone		__ia64_ret_from_clone
-+#define ia64_switch_to			__ia64_switch_to
-+#define ia64_pal_call_static		__ia64_pal_call_static
++/*
++ *	linux/include/asm/irq.h
++ *
++ *	(C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
++ *
++ *	IRQ/IPI changes taken from work by Thomas Radke
++ *	<tomsoft at informatik.tu-chemnitz.de>
++ */
 +
-+#endif /* !IA64_PARAVIRTUALIZED */
++#include <linux/sched.h>
++/* include comes from machine specific directory */
++#include "irq_vectors.h"
++#include <asm/thread_info.h>
 +
-+#endif /* !__ASSEMBLY */
++static __inline__ int irq_canonicalize(int irq)
++{
++	return ((irq == 2) ? 9 : irq);
++}
 +
-+#endif /* _ASM_IA64_PRIVOP_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/processor.h tmp-linux-2.6-xen.patch/include/asm-ia64/processor.h
---- pristine-linux-2.6.18.2/include/asm-ia64/processor.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/processor.h	2007-07-30 16:35:13.000000000 +0200
-@@ -18,6 +18,7 @@
- #include <asm/kregs.h>
- #include <asm/ptrace.h>
- #include <asm/ustack.h>
-+#include <asm/privop.h>
- 
- #define IA64_NUM_DBG_REGS	8
- /*
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/sal.h tmp-linux-2.6-xen.patch/include/asm-ia64/sal.h
---- pristine-linux-2.6.18.2/include/asm-ia64/sal.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/sal.h	2007-09-30 18:06:18.000000000 +0200
-@@ -42,6 +42,9 @@
- #include <asm/pal.h>
- #include <asm/system.h>
- #include <asm/fpu.h>
-+#ifdef CONFIG_XEN
-+#include <asm/xen/xencomm.h>
-+#endif
- 
- extern spinlock_t sal_lock;
- 
-@@ -686,10 +689,28 @@ ia64_sal_clear_state_info (u64 sal_info_
- /* Get the processor and platform information logged by SAL with respect to the machine
-  * state at the time of the MCAs, INITs, CMCs, or CPEs.
-  */
-+#ifdef CONFIG_XEN
-+static inline u64 ia64_sal_get_state_info_size (u64 sal_info_type);
++#ifdef CONFIG_X86_LOCAL_APIC
++#define ARCH_HAS_NMI_WATCHDOG		/* See include/linux/nmi.h */
 +#endif
 +
- static inline u64
- ia64_sal_get_state_info (u64 sal_info_type, u64 *sal_info)
- {
- 	struct ia64_sal_retval isrv;
-+#ifdef CONFIG_XEN
-+	if (is_running_on_xen()) {
-+		struct xencomm_handle *desc;
++#define KDB_VECTOR	0xf9
 +
-+		if (xencomm_create(sal_info,
-+		                   ia64_sal_get_state_info_size(sal_info_type),
-+		                   &desc, GFP_ATOMIC))
-+			return 0;
++# define irq_ctx_init(cpu) do { } while (0)
 +
-+		SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO, sal_info_type, 0,
-+		                   desc, 0, 0, 0, 0);
-+		xencomm_free(desc);
-+	} else
++#ifdef CONFIG_HOTPLUG_CPU
++#include <linux/cpumask.h>
++extern void fixup_irqs(cpumask_t map);
 +#endif
- 	SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO, sal_info_type, 0,
- 	              sal_info, 0, 0, 0, 0);
- 	if (isrv.status)
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/swiotlb.h tmp-linux-2.6-xen.patch/include/asm-ia64/swiotlb.h
---- pristine-linux-2.6.18.2/include/asm-ia64/swiotlb.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/swiotlb.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,41 @@
-+#ifndef _ASM_SWIOTLB_H
-+#define _ASM_SWIOTLB_H 1
-+
-+/* SWIOTLB interface */
 +
-+extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, size_t size,
-+				      int dir);
-+extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-+				  size_t size, int dir);
-+extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
-+					 dma_addr_t dev_addr,
-+					 size_t size, int dir);
-+extern void swiotlb_sync_single_for_device(struct device *hwdev,
-+					    dma_addr_t dev_addr,
-+					    size_t size, int dir);
-+extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
-+				     struct scatterlist *sg, int nelems,
-+				     int dir);
-+extern void swiotlb_sync_sg_for_device(struct device *hwdev,
-+					struct scatterlist *sg, int nelems,
-+					int dir);
-+extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
-+		      int nents, int direction);
-+extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
-+			 int nents, int direction);
-+extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
-+extern dma_addr_t swiotlb_map_page(struct device *hwdev, struct page *page,
-+                                   unsigned long offset, size_t size,
-+                                   enum dma_data_direction direction);
-+extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
-+                               size_t size, enum dma_data_direction direction);
-+extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
-+extern void swiotlb_init(void);
++#define __ARCH_HAS_DO_SOFTIRQ 1
 +
-+#ifdef CONFIG_SWIOTLB
-+extern int swiotlb;
-+#else
-+#define swiotlb 0
-+#endif
++#endif /* _ASM_IRQ_H */
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/irqflags.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/irqflags.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,139 @@
++/*
++ * include/asm-x86_64/irqflags.h
++ *
++ * IRQ flags handling
++ *
++ * This file gets included from lowlevel asm headers too, to provide
++ * wrapped versions of the local_irq_*() APIs, based on the
++ * raw_local_irq_*() functions from the lowlevel headers.
++ */
++#ifndef _ASM_IRQFLAGS_H
++#define _ASM_IRQFLAGS_H
 +
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/synch_bitops.h tmp-linux-2.6-xen.patch/include/asm-ia64/synch_bitops.h
---- pristine-linux-2.6.18.2/include/asm-ia64/synch_bitops.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/synch_bitops.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,61 @@
-+#ifndef __XEN_SYNCH_BITOPS_H__
-+#define __XEN_SYNCH_BITOPS_H__
++#ifndef __ASSEMBLY__
++/*
++ * Interrupt control:
++ */
 +
 +/*
-+ * Copyright 1992, Linus Torvalds.
-+ * Heavily modified to provide guaranteed strong synchronisation
-+ * when communicating with Xen or other guest OSes running on other CPUs.
++ * The use of 'barrier' in the following reflects their use as local-lock
++ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
++ * critical operations are executed. All critical operations must complete
++ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
++ * includes these barriers, for example.
 + */
 +
-+#define ADDR (*(volatile long *) addr)
++#define __raw_local_save_flags() (current_vcpu_info()->evtchn_upcall_mask)
 +
-+static __inline__ void synch_set_bit(int nr, volatile void * addr)
-+{
-+	set_bit(nr, addr);
-+}
++#define raw_local_save_flags(flags) \
++		do { (flags) = __raw_local_save_flags(); } while (0)
++
++#define raw_local_irq_restore(x)					\
++do {									\
++	vcpu_info_t *_vcpu;						\
++	barrier();							\
++	_vcpu = current_vcpu_info();		\
++	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {			\
++		barrier(); /* unmask then check (avoid races) */	\
++		if ( unlikely(_vcpu->evtchn_upcall_pending) )		\
++			force_evtchn_callback();			\
++	}								\
++} while (0)
 +
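Interrupt flags are virtualized here: the saved "flags" word is the vcpu's
evtchn_upcall_mask rather than EFLAGS.IF, and restoring to "enabled" must
recheck evtchn_upcall_pending and force the callback so an event that arrived
while masked is not lost. The usual critical-section shape is unchanged; a
sketch (the function is illustrative only):

    static void percpu_update_example(void)
    {
            unsigned long flags;

            raw_local_irq_save(flags);      /* mask: evtchn_upcall_mask = 1 */
            /* ... touch state shared with event handlers ... */
            raw_local_irq_restore(flags);   /* may force_evtchn_callback() */
    }
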
-+static __inline__ void synch_clear_bit(int nr, volatile void * addr)
-+{
-+	clear_bit(nr, addr);
-+}
++#ifdef CONFIG_X86_VSMP
 +
-+static __inline__ void synch_change_bit(int nr, volatile void * addr)
-+{
-+	change_bit(nr, addr);
-+}
++/*
++ * Interrupt control for the VSMP architecture:
++ */
 +
-+static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
++static inline void raw_local_irq_disable(void)
 +{
-+    return test_and_set_bit(nr, addr);
-+}
++	unsigned long flags = __raw_local_save_flags();
 +
-+static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
-+{
-+    return test_and_clear_bit(nr, addr);
++	raw_local_irq_restore((flags & ~(1 << 9)) | (1 << 18));
 +}
 +
-+static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
++static inline void raw_local_irq_enable(void)
 +{
-+    return test_and_change_bit(nr, addr);
-+}
++	unsigned long flags = __raw_local_save_flags();
 +
-+static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
-+{
-+    return test_bit(nr, addr);
++	raw_local_irq_restore((flags | (1 << 9)) & ~(1 << 18));
 +}
 +
-+static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
++static inline int raw_irqs_disabled_flags(unsigned long flags)
 +{
-+    return test_bit(nr, addr);
++	return !(flags & (1<<9)) || (flags & (1 << 18));
 +}
 +
-+#define synch_cmpxchg	ia64_cmpxchg4_acq
++#else /* CONFIG_X86_VSMP */
 +
-+#define synch_test_bit(nr,addr) \
-+(__builtin_constant_p(nr) ? \
-+ synch_const_test_bit((nr),(addr)) : \
-+ synch_var_test_bit((nr),(addr)))
++#define raw_local_irq_disable()						\
++do {									\
++	current_vcpu_info()->evtchn_upcall_mask = 1;					\
++	barrier();							\
++} while (0)
 +
-+#define synch_cmpxchg_subword synch_cmpxchg
++#define raw_local_irq_enable()						\
++do {									\
++	vcpu_info_t *_vcpu;						\
++	barrier();							\
++	_vcpu = current_vcpu_info();		\
++	_vcpu->evtchn_upcall_mask = 0;					\
++	barrier(); /* unmask then check (avoid races) */		\
++	if ( unlikely(_vcpu->evtchn_upcall_pending) )			\
++		force_evtchn_callback();				\
++} while (0)
 +
-+#endif /* __XEN_SYNCH_BITOPS_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/system.h tmp-linux-2.6-xen.patch/include/asm-ia64/system.h
---- pristine-linux-2.6.18.2/include/asm-ia64/system.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/system.h	2007-07-30 16:35:13.000000000 +0200
-@@ -123,7 +123,7 @@ extern struct ia64_boot_param {
- #define __local_irq_save(x)			\
- do {						\
- 	ia64_stop();				\
--	(x) = ia64_getreg(_IA64_REG_PSR);	\
-+	(x) = ia64_get_psr_i();			\
- 	ia64_stop();				\
- 	ia64_rsm(IA64_PSR_I);			\
- } while (0)
-@@ -171,7 +171,7 @@ do {								\
- #endif /* !CONFIG_IA64_DEBUG_IRQ */
- 
- #define local_irq_enable()	({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
--#define local_save_flags(flags)	({ ia64_stop(); (flags) = ia64_getreg(_IA64_REG_PSR); })
-+#define local_save_flags(flags)	({ ia64_stop(); (flags) = ia64_get_psr_i(); })
- 
- #define irqs_disabled()				\
- ({						\
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/uaccess.h tmp-linux-2.6-xen.patch/include/asm-ia64/uaccess.h
---- pristine-linux-2.6.18.2/include/asm-ia64/uaccess.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/uaccess.h	2007-07-30 16:35:13.000000000 +0200
-@@ -365,6 +365,7 @@ ia64_done_with_exception (struct pt_regs
- }
- 
- #define ARCH_HAS_TRANSLATE_MEM_PTR	1
-+#ifndef CONFIG_XEN
- static __inline__ char *
- xlate_dev_mem_ptr (unsigned long p)
- {
-@@ -379,6 +380,25 @@ xlate_dev_mem_ptr (unsigned long p)
- 
- 	return ptr;
- }
-+#else
-+static __inline__ char *
-+xlate_dev_mem_ptr (unsigned long p, ssize_t sz)
++static inline int raw_irqs_disabled_flags(unsigned long flags)
 +{
-+	unsigned long pfn = p >> PAGE_SHIFT;
-+
-+	if (pfn_valid(pfn) && !PageUncached(pfn_to_page(pfn)))
-+		return __va(p);
-+
-+	return ioremap(p, sz);
++	return (flags != 0);
 +}
 +
-+static __inline__ void
-+xlate_dev_mem_ptr_unmap (char* v)
-+{
-+	if (REGION_NUMBER(v) == RGN_UNCACHED)
-+		iounmap(v);
-+}
 +#endif
- 
- /*
-  * Convert a virtual cached kernel memory pointer to an uncached pointer
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/xen/privop.h tmp-linux-2.6-xen.patch/include/asm-ia64/xen/privop.h
---- pristine-linux-2.6.18.2/include/asm-ia64/xen/privop.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/xen/privop.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,310 @@
-+#ifndef _ASM_IA64_XEN_PRIVOP_H
-+#define _ASM_IA64_XEN_PRIVOP_H
 +
 +/*
-+ * Copyright (C) 2005 Hewlett-Packard Co
-+ *	Dan Magenheimer <dan.magenheimer at hp.com>
-+ *
-+ * Paravirtualizations of privileged operations for Xen/ia64
-+ *
++ * For spinlocks, etc.:
 + */
 +
++#define __raw_local_irq_save()						\
++({									\
++	unsigned long flags = __raw_local_save_flags();			\
++									\
++	raw_local_irq_disable();					\
++									\
++	flags;								\
++})
 +
-+#include <xen/interface/arch-ia64.h>
-+
-+#define IA64_PARAVIRTUALIZED
++#define raw_local_irq_save(flags) \
++		do { (flags) = __raw_local_irq_save(); } while (0)
 +
-+/* At 1 MB, before per-cpu space but still addressable using addl instead
-+   of movl. */
-+#define XSI_BASE				0xfffffffffff00000
++#define raw_irqs_disabled()						\
++({									\
++	unsigned long flags = __raw_local_save_flags();			\
++									\
++	raw_irqs_disabled_flags(flags);					\
++})
 +
-+/* Address of mapped regs.  */
-+#define XMAPPEDREGS_BASE		(XSI_BASE + XSI_SIZE)
++/*
++ * Used in the idle loop; sti takes one instruction cycle
++ * to complete:
++ */
++void raw_safe_halt(void);
 +
-+#ifdef __ASSEMBLY__
-+#define	XEN_HYPER_RFI			break HYPERPRIVOP_RFI
-+#define	XEN_HYPER_RSM_PSR_DT		break HYPERPRIVOP_RSM_DT
-+#define	XEN_HYPER_SSM_PSR_DT		break HYPERPRIVOP_SSM_DT
-+#define	XEN_HYPER_COVER			break HYPERPRIVOP_COVER
-+#define	XEN_HYPER_ITC_D			break HYPERPRIVOP_ITC_D
-+#define	XEN_HYPER_ITC_I			break HYPERPRIVOP_ITC_I
-+#define	XEN_HYPER_SSM_I			break HYPERPRIVOP_SSM_I
-+#define	XEN_HYPER_GET_IVR		break HYPERPRIVOP_GET_IVR
-+#define	XEN_HYPER_GET_TPR		break HYPERPRIVOP_GET_TPR
-+#define	XEN_HYPER_SET_TPR		break HYPERPRIVOP_SET_TPR
-+#define	XEN_HYPER_EOI			break HYPERPRIVOP_EOI
-+#define	XEN_HYPER_SET_ITM		break HYPERPRIVOP_SET_ITM
-+#define	XEN_HYPER_THASH			break HYPERPRIVOP_THASH
-+#define	XEN_HYPER_PTC_GA		break HYPERPRIVOP_PTC_GA
-+#define	XEN_HYPER_ITR_D			break HYPERPRIVOP_ITR_D
-+#define	XEN_HYPER_GET_RR		break HYPERPRIVOP_GET_RR
-+#define	XEN_HYPER_SET_RR		break HYPERPRIVOP_SET_RR
-+#define	XEN_HYPER_SET_KR		break HYPERPRIVOP_SET_KR
-+#define	XEN_HYPER_FC			break HYPERPRIVOP_FC
-+#define	XEN_HYPER_GET_CPUID		break HYPERPRIVOP_GET_CPUID
-+#define	XEN_HYPER_GET_PMD		break HYPERPRIVOP_GET_PMD
-+#define	XEN_HYPER_GET_EFLAG		break HYPERPRIVOP_GET_EFLAG
-+#define	XEN_HYPER_SET_EFLAG		break HYPERPRIVOP_SET_EFLAG
-+#define	XEN_HYPER_GET_PSR		break HYPERPRIVOP_GET_PSR
++/*
++ * Used when interrupts are already enabled or to
++ * shutdown the processor:
++ */
++void halt(void);
 +
-+#define XSI_IFS			(XSI_BASE + XSI_IFS_OFS)
-+#define XSI_PRECOVER_IFS	(XSI_BASE + XSI_PRECOVER_IFS_OFS)
-+#define XSI_IFA			(XSI_BASE + XSI_IFA_OFS)
-+#define XSI_ISR			(XSI_BASE + XSI_ISR_OFS)
-+#define XSI_IIM			(XSI_BASE + XSI_IIM_OFS)
-+#define XSI_ITIR		(XSI_BASE + XSI_ITIR_OFS)
-+#define XSI_PSR_I_ADDR		(XSI_BASE + XSI_PSR_I_ADDR_OFS)
-+#define XSI_PSR_IC		(XSI_BASE + XSI_PSR_IC_OFS)
-+#define XSI_IPSR		(XSI_BASE + XSI_IPSR_OFS)
-+#define XSI_IIP			(XSI_BASE + XSI_IIP_OFS)
-+#define XSI_B1NAT		(XSI_BASE + XSI_B1NATS_OFS)
-+#define XSI_BANK1_R16		(XSI_BASE + XSI_BANK1_R16_OFS)
-+#define XSI_BANKNUM		(XSI_BASE + XSI_BANKNUM_OFS)
-+#define XSI_IHA			(XSI_BASE + XSI_IHA_OFS)
++#else /* __ASSEMBLY__: */
++# ifdef CONFIG_TRACE_IRQFLAGS
++#  define TRACE_IRQS_ON		call trace_hardirqs_on_thunk
++#  define TRACE_IRQS_OFF	call trace_hardirqs_off_thunk
++# else
++#  define TRACE_IRQS_ON
++#  define TRACE_IRQS_OFF
++# endif
 +#endif
 +
-+#ifndef __ASSEMBLY__
-+#define	XEN_HYPER_SSM_I		asm("break %0" : : "i" (HYPERPRIVOP_SSM_I))
-+#define	XEN_HYPER_GET_IVR	asm("break %0" : : "i" (HYPERPRIVOP_GET_IVR))
-+
-+/************************************************/
-+/* Instructions paravirtualized for correctness */
-+/************************************************/
-+
-+/* "fc" and "thash" are privilege-sensitive instructions, meaning they
-+ *  may have different semantics depending on whether they are executed
-+ *  at PL0 vs PL!=0.  When paravirtualized, these instructions mustn't
-+ *  be allowed to execute directly, lest incorrect semantics result. */
-+extern unsigned long xen_fc(unsigned long addr);
-+#define ia64_fc(addr)			xen_fc((unsigned long)(addr))
-+extern unsigned long xen_thash(unsigned long addr);
-+#define ia64_thash(addr)		xen_thash((unsigned long)(addr))
-+/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
-+ * is not currently used (though it may be in a long-format VHPT system!)
-+ * and the semantics of cover only change if psr.ic is off which is very
-+ * rare (and currently non-existent outside of assembly code */
-+
-+/* There are also privilege-sensitive registers.  These registers are
-+ * readable at any privilege level but only writable at PL0. */
-+extern unsigned long xen_get_cpuid(int index);
-+#define	ia64_get_cpuid(i)		xen_get_cpuid(i)
-+extern unsigned long xen_get_pmd(int index);
-+#define	ia64_get_pmd(i)			xen_get_pmd(i)
-+extern unsigned long xen_get_eflag(void);	/* see xen_ia64_getreg */
-+extern void xen_set_eflag(unsigned long);	/* see xen_ia64_setreg */
-+
-+/************************************************/
-+/* Instructions paravirtualized for performance */
-+/************************************************/
++#endif
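
The raw_safe_halt()/halt() pair declared above carries the idle-loop contract: raw_safe_halt() must enable interrupts and halt atomically, since a Xen guest cannot execute sti;hlt itself and the operation ends up as a scheduler hypercall. A minimal sketch of the idle-loop pattern this is written for, assuming need_resched() and the local_irq_*() helpers from the kernel proper (illustrative, not the actual process code):

    /* Sketch of the idle-loop pattern raw_safe_halt() is meant for.
     * Illustrative only; the real idle loop lives in arch code. */
    static void idle_sketch(void)
    {
            local_irq_disable();
            if (need_resched())
                    local_irq_enable();     /* work pending, go schedule */
            else
                    raw_safe_halt();        /* enable IRQs and block atomically */
    }

The atomic enable-and-halt closes the race where an interrupt arrives between the need_resched() check and the halt.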
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/maddr.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/maddr.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,161 @@
++#ifndef _X86_64_MADDR_H
++#define _X86_64_MADDR_H
 +
-+/* Xen uses memory-mapped virtual privileged registers for access to many
-+ * performance-sensitive privileged registers.  Some, like the processor
-+ * status register (psr), are broken up into multiple memory locations.
-+ * Others, like "pend", are abstractions based on privileged registers.
-+ * "Pend" is guaranteed to be set if reading cr.ivr would return a
-+ * (non-spurious) interrupt. */
-+#define XEN_MAPPEDREGS ((struct mapped_regs *)XMAPPEDREGS_BASE)
-+#define XSI_PSR_I			\
-+	(*XEN_MAPPEDREGS->interrupt_mask_addr)
-+#define xen_get_virtual_psr_i()		\
-+	(!XSI_PSR_I)
-+#define xen_set_virtual_psr_i(_val)	\
-+	({ XSI_PSR_I = (uint8_t)(_val) ? 0 : 1; })
-+#define xen_set_virtual_psr_ic(_val)	\
-+	({ XEN_MAPPEDREGS->interrupt_collection_enabled = _val ? 1 : 0; })
-+#define xen_get_virtual_pend()		\
-+	(*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1))
++#include <xen/features.h>
++#include <xen/interface/xen.h>
 +
-+/* Hyperprivops are "break" instructions with a well-defined API.
-+ * In particular, the virtual psr.ic bit must be off; in this way
-+ * it is guaranteed to never conflict with a linux break instruction.
-+ * Normally, this is done in a xen stub but this one is frequent enough
-+ * that we inline it */
-+#define xen_hyper_ssm_i()						\
-+({									\
-+	XEN_HYPER_SSM_I;						\
-+})
++/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
++#define INVALID_P2M_ENTRY	(~0UL)
++#define FOREIGN_FRAME_BIT	(1UL<<63)
++#define FOREIGN_FRAME(m)	((m) | FOREIGN_FRAME_BIT)
 +
-+/* turning off interrupts can be paravirtualized simply by writing
-+ * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool) */
-+#define xen_rsm_i()	xen_set_virtual_psr_i(0)
++/* Definitions for machine and pseudophysical addresses. */
++typedef unsigned long paddr_t;
++typedef unsigned long maddr_t;
 +
-+/* turning on interrupts is a bit more complicated.. write to the
-+ * memory-mapped virtual psr.i bit first (to avoid race condition),
-+ * then if any interrupts were pending, we have to execute a hyperprivop
-+ * to ensure the pending interrupt gets delivered; else we're done! */
-+#define xen_ssm_i()							\
-+({									\
-+	int old = xen_get_virtual_psr_i();				\
-+	if (!old) {							\
-+		if (xen_get_virtual_pend())				\
-+			xen_hyper_ssm_i();				\
-+		else							\
-+			xen_set_virtual_psr_i(1);			\
-+	}								\
-+})
++#ifdef CONFIG_XEN
 +
-+#define xen_ia64_intrin_local_irq_restore(x)				\
-+{									\
-+     if (is_running_on_xen()) {						\
-+	if ((x) & IA64_PSR_I) { xen_ssm_i(); }				\
-+	else { xen_rsm_i(); }						\
-+    }									\
-+    else __ia64_intrin_local_irq_restore((x));				\
-+}
++extern unsigned long *phys_to_machine_mapping;
 +
-+#define	xen_get_psr_i()							\
-+(									\
-+	(is_running_on_xen()) ?						\
-+		(xen_get_virtual_psr_i() ? IA64_PSR_I : 0)		\
-+		: __ia64_get_psr_i()					\
-+)
++#undef machine_to_phys_mapping
++extern unsigned long *machine_to_phys_mapping;
++extern unsigned int   machine_to_phys_order;
 +
-+#define xen_ia64_ssm(mask)						\
-+{									\
-+	if ((mask)==IA64_PSR_I) {					\
-+		if (is_running_on_xen()) { xen_ssm_i(); }			\
-+		else { __ia64_ssm(mask); }				\
-+	}								\
-+	else { __ia64_ssm(mask); }					\
++static inline unsigned long pfn_to_mfn(unsigned long pfn)
++{
++	if (xen_feature(XENFEAT_auto_translated_physmap))
++		return pfn;
++	BUG_ON(end_pfn && pfn >= end_pfn);
++	return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT;
 +}
 +
-+#define xen_ia64_rsm(mask)						\
-+{									\
-+	if ((mask)==IA64_PSR_I) {					\
-+		if (is_running_on_xen()) { xen_rsm_i(); }			\
-+		else { __ia64_rsm(mask); }				\
-+	}								\
-+	else { __ia64_rsm(mask); }					\
++static inline int phys_to_machine_mapping_valid(unsigned long pfn)
++{
++	if (xen_feature(XENFEAT_auto_translated_physmap))
++		return 1;
++	BUG_ON(end_pfn && pfn >= end_pfn);
++	return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
 +}
 +
++static inline unsigned long mfn_to_pfn(unsigned long mfn)
++{
++	unsigned long pfn;
 +
-+/* Although all privileged operations can be left to trap and will
-+ * be properly handled by Xen, some are frequent enough that we use
-+ * hyperprivops for performance. */
-+
-+extern unsigned long xen_get_psr(void);
-+extern unsigned long xen_get_ivr(void);
-+extern unsigned long xen_get_tpr(void);
-+extern void xen_set_itm(unsigned long);
-+extern void xen_set_tpr(unsigned long);
-+extern void xen_eoi(void);
-+extern void xen_set_rr(unsigned long index, unsigned long val);
-+extern unsigned long xen_get_rr(unsigned long index);
-+extern void xen_set_kr(unsigned long index, unsigned long val);
-+extern void xen_ptcga(unsigned long addr, unsigned long size);
-+
-+/* Note: It may look wrong to test for is_running_on_xen() in each case.
-+ * However regnum is always a constant so, as written, the compiler
-+ * eliminates the switch statement, whereas is_running_on_xen() must be
-+ * tested dynamically. */
-+#define xen_ia64_getreg(regnum)						\
-+({									\
-+	__u64 ia64_intri_res;						\
-+									\
-+	switch(regnum) {						\
-+	case _IA64_REG_PSR:						\
-+		ia64_intri_res = (is_running_on_xen()) ?			\
-+			xen_get_psr() :					\
-+			__ia64_getreg(regnum);				\
-+		break;							\
-+	case _IA64_REG_CR_IVR:						\
-+		ia64_intri_res = (is_running_on_xen()) ?			\
-+			xen_get_ivr() :					\
-+			__ia64_getreg(regnum);				\
-+		break;							\
-+	case _IA64_REG_CR_TPR:						\
-+		ia64_intri_res = (is_running_on_xen()) ?			\
-+			xen_get_tpr() :					\
-+			__ia64_getreg(regnum);				\
-+		break;							\
-+	case _IA64_REG_AR_EFLAG:					\
-+		ia64_intri_res = (is_running_on_xen()) ?			\
-+			xen_get_eflag() :				\
-+			__ia64_getreg(regnum);				\
-+		break;							\
-+	default:							\
-+		ia64_intri_res = __ia64_getreg(regnum);			\
-+		break;							\
-+	}								\
-+	ia64_intri_res;							\
-+})
-+
-+#define xen_ia64_setreg(regnum,val)					\
-+({									\
-+	switch(regnum) {						\
-+	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7:			\
-+		(is_running_on_xen()) ?					\
-+			xen_set_kr((regnum-_IA64_REG_AR_KR0), val) :	\
-+			__ia64_setreg(regnum,val);			\
-+		break;							\
-+	case _IA64_REG_CR_ITM:						\
-+		(is_running_on_xen()) ?					\
-+			xen_set_itm(val) :				\
-+			__ia64_setreg(regnum,val);			\
-+		break;							\
-+	case _IA64_REG_CR_TPR:						\
-+		(is_running_on_xen()) ?					\
-+			xen_set_tpr(val) :				\
-+			__ia64_setreg(regnum,val);			\
-+		break;							\
-+	case _IA64_REG_CR_EOI:						\
-+		(is_running_on_xen()) ?					\
-+			xen_eoi() :					\
-+			__ia64_setreg(regnum,val);			\
-+		break;							\
-+	case _IA64_REG_AR_EFLAG:					\
-+		(is_running_on_xen()) ?					\
-+			xen_set_eflag(val) :				\
-+			__ia64_setreg(regnum,val);			\
-+		break;							\
-+	default:							\
-+		__ia64_setreg(regnum,val);				\
-+		break;							\
-+	}								\
-+})
-+
-+#define ia64_ssm			xen_ia64_ssm
-+#define ia64_rsm			xen_ia64_rsm
-+#define ia64_intrin_local_irq_restore	xen_ia64_intrin_local_irq_restore
-+#define	ia64_ptcga			xen_ptcga
-+#define	ia64_set_rr(index,val)		xen_set_rr(index,val)
-+#define	ia64_get_rr(index)		xen_get_rr(index)
-+#define ia64_getreg			xen_ia64_getreg
-+#define ia64_setreg			xen_ia64_setreg
-+#define	ia64_get_psr_i			xen_get_psr_i
++	if (xen_feature(XENFEAT_auto_translated_physmap))
++		return mfn;
 +
-+/* the remainder of these are not performance-sensitive so its
-+ * OK to not paravirtualize and just take a privop trap and emulate */
-+#define ia64_hint			__ia64_hint
-+#define ia64_set_pmd			__ia64_set_pmd
-+#define ia64_itci			__ia64_itci
-+#define ia64_itcd			__ia64_itcd
-+#define ia64_itri			__ia64_itri
-+#define ia64_itrd			__ia64_itrd
-+#define ia64_tpa			__ia64_tpa
-+#define ia64_set_ibr			__ia64_set_ibr
-+#define ia64_set_pkr			__ia64_set_pkr
-+#define ia64_set_pmc			__ia64_set_pmc
-+#define ia64_get_ibr			__ia64_get_ibr
-+#define ia64_get_pkr			__ia64_get_pkr
-+#define ia64_get_pmc			__ia64_get_pmc
-+#define ia64_ptce			__ia64_ptce
-+#define ia64_ptcl			__ia64_ptcl
-+#define ia64_ptri			__ia64_ptri
-+#define ia64_ptrd			__ia64_ptrd
++	if (unlikely((mfn >> machine_to_phys_order) != 0))
++		return end_pfn;
 +
-+#endif /* !__ASSEMBLY__ */
++	/* The array access can fail (e.g., device space beyond end of RAM). */
++	asm (
++		"1:	movq %1,%0\n"
++		"2:\n"
++		".section .fixup,\"ax\"\n"
++		"3:	movq %2,%0\n"
++		"	jmp  2b\n"
++		".previous\n"
++		".section __ex_table,\"a\"\n"
++		"	.align 8\n"
++		"	.quad 1b,3b\n"
++		".previous"
++		: "=r" (pfn)
++		: "m" (machine_to_phys_mapping[mfn]), "m" (end_pfn) );
 +
-+/* these routines utilize privilege-sensitive or performance-sensitive
-+ * privileged instructions so the code must be replaced with
-+ * paravirtualized versions */
-+#define ia64_pal_halt_light		xen_pal_halt_light
-+#define	ia64_leave_kernel		xen_leave_kernel
-+#define	ia64_leave_syscall		xen_leave_syscall
-+#define	ia64_trace_syscall		xen_trace_syscall
-+#define	ia64_ret_from_clone		xen_ret_from_clone
-+#define	ia64_switch_to			xen_switch_to
-+#define	ia64_pal_call_static		xen_pal_call_static
++	return pfn;
++}
 +
-+#endif /* _ASM_IA64_XEN_PRIVOP_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/xen/xcom_hcall.h tmp-linux-2.6-xen.patch/include/asm-ia64/xen/xcom_hcall.h
---- pristine-linux-2.6.18.2/include/asm-ia64/xen/xcom_hcall.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/xen/xcom_hcall.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,88 @@
 +/*
-+ * Copyright (C) 2006 Tristan Gingold <tristan.gingold at bull.net>, Bull SAS
++ * We detect special mappings in one of two ways:
++ *  1. If the MFN is an I/O page then Xen will set the m2p entry
++ *     to be outside our maximum possible pseudophys range.
++ *  2. If the MFN belongs to a different domain then we will certainly
++ *     not have MFN in our p2m table. Conversely, if the page is ours,
++ *     then we'll have p2m(m2p(MFN))==MFN.
++ * If we detect a special mapping then it doesn't have a 'struct page'.
++ * We force !pfn_valid() by returning an out-of-range pointer.
 + *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ * 
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ * 
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
++ * NB. These checks require that, for any MFN that is not in our reservation,
++ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
++ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
++ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
++ *
++ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
++ *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
++ *      require. In all the cases we care about, the FOREIGN_FRAME bit is
++ *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
 + */
++static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
++{
++	unsigned long pfn = mfn_to_pfn(mfn);
++	if ((pfn < end_pfn)
++	    && !xen_feature(XENFEAT_auto_translated_physmap)
++	    && (phys_to_machine_mapping[pfn] != mfn))
++		return end_pfn; /* force !pfn_valid() */
++	return pfn;
++}
 +
-+#ifndef _LINUX_XENCOMM_HCALL_H_
-+#define _LINUX_XENCOMM_HCALL_H_
-+
-+/* These function creates inline descriptor for the parameters and
-+   calls the corresponding xencomm_arch_hypercall_X.
-+   Architectures should defines HYPERVISOR_xxx as xencomm_hypercall_xxx unless
-+   they want to use their own wrapper.  */
-+extern int xencomm_hypercall_console_io(int cmd, int count, char *str);
++static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
++{
++	BUG_ON(end_pfn && pfn >= end_pfn);
++	if (xen_feature(XENFEAT_auto_translated_physmap)) {
++		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
++		return;
++	}
++	phys_to_machine_mapping[pfn] = mfn;
++}
 +
-+extern int xencomm_hypercall_event_channel_op(int cmd, void *op);
++static inline maddr_t phys_to_machine(paddr_t phys)
++{
++	maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
++	machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
++	return machine;
++}
 +
-+extern int xencomm_hypercall_xen_version(int cmd, void *arg);
++static inline paddr_t machine_to_phys(maddr_t machine)
++{
++	paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
++	phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
++	return phys;
++}
 +
-+extern int xencomm_hypercall_physdev_op(int cmd, void *op);
++static inline paddr_t pte_phys_to_machine(paddr_t phys)
++{
++	maddr_t machine;
++	machine = pfn_to_mfn((phys & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT);
++	machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK);
++	return machine;
++}
 +
-+extern int xencomm_hypercall_grant_table_op(unsigned int cmd, void *op,
-+                                            unsigned int count);
++static inline paddr_t pte_machine_to_phys(maddr_t machine)
++{
++	paddr_t phys;
++	phys = mfn_to_pfn((machine & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT);
++	phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK);
++	return phys;
++}
 +
-+extern int xencomm_hypercall_sched_op(int cmd, void *arg);
++#define __pte_ma(x)     ((pte_t) { (x) } )
++#define pfn_pte_ma(pfn, prot)	__pte_ma((((pfn) << PAGE_SHIFT) | pgprot_val(prot)) & __supported_pte_mask)
 +
-+extern int xencomm_hypercall_multicall(void *call_list, int nr_calls);
++#else /* !CONFIG_XEN */
 +
-+extern int xencomm_hypercall_callback_op(int cmd, void *arg);
++#define pfn_to_mfn(pfn) (pfn)
++#define mfn_to_pfn(mfn) (mfn)
++#define mfn_to_local_pfn(mfn) (mfn)
++#define set_phys_to_machine(pfn, mfn) ((void)0)
++#define phys_to_machine_mapping_valid(pfn) (1)
++#define phys_to_machine(phys) ((maddr_t)(phys))
++#define machine_to_phys(mach) ((paddr_t)(mach))
++#define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot)
++#define __pte_ma(x) __pte(x)
 +
-+extern int xencomm_hypercall_memory_op(unsigned int cmd, void *arg);
++#endif /* !CONFIG_XEN */
 +
-+extern unsigned long xencomm_hypercall_hvm_op(int cmd, void *arg);
++/* VIRT <-> MACHINE conversion */
++#define virt_to_machine(v)	(phys_to_machine(__pa(v)))
++#define virt_to_mfn(v)		(pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
++#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
 +
-+extern int xencomm_hypercall_suspend(unsigned long srec);
++#endif /* _X86_64_MADDR_H */
 +
-+extern int xencomm_hypercall_xenoprof_op(int op, void *arg);
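
pfn_to_mfn() and mfn_to_pfn() above are the core pseudophysical/machine translations; phys_to_machine() and machine_to_phys() only reattach the in-page offset around them. A round-trip sketch for a guest-local page, assuming PAGE_SHIFT and BUG_ON() from the usual headers (illustrative only):

    /* Illustrative round trip through the p2m and m2p tables above,
     * for a pfn backed by the guest's own, non-foreign memory. */
    static void maddr_roundtrip_sketch(unsigned long pfn)
    {
            paddr_t phys = (paddr_t)pfn << PAGE_SHIFT;
            maddr_t mach = phys_to_machine(phys);   /* p2m lookup + offset */

            /* For local pages m2p is the exact inverse of p2m. */
            BUG_ON(machine_to_phys(mach) != phys);
    }

For foreign or I/O frames the round trip deliberately fails, which is exactly what mfn_to_local_pfn() uses to force !pfn_valid().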
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/mmu.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/mmu.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,38 @@
++#ifndef __x86_64_MMU_H
++#define __x86_64_MMU_H
 +
-+extern int xencomm_hypercall_perfmon_op(unsigned long cmd, void* arg,
-+                                        unsigned long count);
++#include <linux/spinlock.h>
++#include <asm/semaphore.h>
 +
-+extern long xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg);
++/*
++ * The x86_64 doesn't have an MMU context, but
++ * we put the segment information here.
++ *
++ * cpu_vm_mask is used to optimize ldt flushing.
++ */
++typedef struct { 
++	void *ldt;
++	rwlock_t ldtlock; 
++	int size;
++	struct semaphore sem; 
++#ifdef CONFIG_XEN
++	unsigned pinned:1;
++	unsigned has_foreign_mappings:1;
++	struct list_head unpinned;
++#endif
++} mm_context_t;
 +
-+/* Using mini xencomm.  */
-+extern int xencomm_mini_hypercall_console_io(int cmd, int count, char *str);
++#ifdef CONFIG_XEN
++extern struct list_head mm_unpinned;
++extern spinlock_t mm_unpinned_lock;
 +
-+extern int xencomm_mini_hypercall_event_channel_op(int cmd, void *op);
++/* mm/memory.c:exit_mmap hook */
++extern void _arch_exit_mmap(struct mm_struct *mm);
++#define arch_exit_mmap(_mm) _arch_exit_mmap(_mm)
 +
-+extern int xencomm_mini_hypercall_xen_version(int cmd, void *arg);
++/* kernel/fork.c:dup_mmap hook */
++extern void _arch_dup_mmap(struct mm_struct *mm);
++#define arch_dup_mmap(mm, oldmm) ((void)(oldmm), _arch_dup_mmap(mm))
++#endif
 +
-+extern int xencomm_mini_hypercall_physdev_op(int cmd, void *op);
++#endif
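
The arch_exit_mmap()/arch_dup_mmap() hooks above give Xen a chance to unpin page tables before teardown and to fix up pinning state on fork. A condensed sketch of how a generic call site is expected to invoke the exit hook; the surrounding function is hypothetical, the real caller being mm/memory.c:

    /* Condensed, hypothetical caller of the exit hook defined above. */
    static void exit_mmap_sketch(struct mm_struct *mm)
    {
    #ifdef arch_exit_mmap
            arch_exit_mmap(mm);     /* Xen: unpin the page tables first */
    #endif
            /* ... unmap the vmas and free the page tables ... */
    }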
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/mmu_context.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/mmu_context.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,136 @@
++#ifndef __X86_64_MMU_CONTEXT_H
++#define __X86_64_MMU_CONTEXT_H
 +
-+extern int xencomm_mini_hypercall_grant_table_op(unsigned int cmd, void *op,
-+                                                 unsigned int count);
++#include <asm/desc.h>
++#include <asm/atomic.h>
++#include <asm/pgalloc.h>
++#include <asm/page.h>
++#include <asm/pda.h>
++#include <asm/pgtable.h>
++#include <asm/tlbflush.h>
 +
-+extern int xencomm_mini_hypercall_sched_op(int cmd, void *arg);
++/*
++ * possibly do the LDT unload here?
++ */
++int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
++void destroy_context(struct mm_struct *mm);
 +
-+extern int xencomm_mini_hypercall_multicall(void *call_list, int nr_calls);
++static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
++{
++#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
++	if (read_pda(mmu_state) == TLBSTATE_OK) 
++		write_pda(mmu_state, TLBSTATE_LAZY);
++#endif
++}
 +
-+extern int xencomm_mini_hypercall_callback_op(int cmd, void *arg);
++#define prepare_arch_switch(next)	__prepare_arch_switch()
 +
-+extern int xencomm_mini_hypercall_memory_op(unsigned int cmd, void *arg);
++static inline void __prepare_arch_switch(void)
++{
++	/*
++	 * Save away %es, %ds, %fs and %gs. Must happen before reload
++	 * of cr3/ldt (i.e., not in __switch_to).
++	 */
++	__asm__ __volatile__ (
++		"mov %%es,%0 ; mov %%ds,%1 ; mov %%fs,%2 ; mov %%gs,%3"
++		: "=m" (current->thread.es),
++		  "=m" (current->thread.ds),
++		  "=m" (current->thread.fsindex),
++		  "=m" (current->thread.gsindex) );
 +
-+extern unsigned long xencomm_mini_hypercall_hvm_op(int cmd, void *arg);
++	if (current->thread.ds)
++		__asm__ __volatile__ ( "movl %0,%%ds" : : "r" (0) );
 +
-+extern int xencomm_mini_hypercall_xenoprof_op(int op, void *arg);
++	if (current->thread.es)
++		__asm__ __volatile__ ( "movl %0,%%es" : : "r" (0) );
 +
-+extern int xencomm_mini_hypercall_perfmon_op(unsigned long cmd, void* arg,
-+                                             unsigned long count);
++	if (current->thread.fsindex) {
++		__asm__ __volatile__ ( "movl %0,%%fs" : : "r" (0) );
++		current->thread.fs = 0;
++	}
 +
-+/* For privcmd.  Locally declare argument type to avoid include storm.
-+   Type coherency will be checked within privcmd.c  */
-+struct privcmd_hypercall;
-+extern int privcmd_hypercall(struct privcmd_hypercall *hypercall);
++	if (current->thread.gsindex) {
++		load_gs_index(0);
++		current->thread.gs = 0;
++	}
++}
 +
-+#endif /* _LINUX_XENCOMM_HCALL_H_ */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/xen/xencomm.h tmp-linux-2.6-xen.patch/include/asm-ia64/xen/xencomm.h
---- pristine-linux-2.6.18.2/include/asm-ia64/xen/xencomm.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/xen/xencomm.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,60 @@
-+/*
-+ * Copyright (C) 2006 Hollis Blanchard <hollisb at us.ibm.com>, IBM Corporation
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ * 
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ * 
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
-+ */
++extern void mm_pin(struct mm_struct *mm);
++extern void mm_unpin(struct mm_struct *mm);
++void mm_pin_all(void);
 +
-+#ifndef _LINUX_XENCOMM_H_
-+#define _LINUX_XENCOMM_H_
++static inline void load_cr3(pgd_t *pgd)
++{
++	asm volatile("movq %0,%%cr3" :: "r" (phys_to_machine(__pa(pgd))) :
++		     "memory");
++}
 +
-+#include <xen/interface/xencomm.h>
++static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 
++			     struct task_struct *tsk)
++{
++	unsigned cpu = smp_processor_id();
++	struct mmuext_op _op[3], *op = _op;
 +
-+#define XENCOMM_MINI_ADDRS 3
-+struct xencomm_mini {
-+	struct xencomm_desc _desc;
-+	uint64_t address[XENCOMM_MINI_ADDRS];
-+};
++	if (likely(prev != next)) {
++		BUG_ON(!xen_feature(XENFEAT_writable_page_tables) &&
++		       !next->context.pinned);
 +
-+/* Must be called before any hypercall.  */
-+extern void xencomm_init (void);
++		/* stop flush ipis for the previous mm */
++		cpu_clear(cpu, prev->cpu_vm_mask);
++#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
++		write_pda(mmu_state, TLBSTATE_OK);
++		write_pda(active_mm, next);
++#endif
++		cpu_set(cpu, next->cpu_vm_mask);
 +
-+/* To avoid additionnal virt to phys conversion, an opaque structure is
-+   presented.  */
-+struct xencomm_handle;
++		/* load_cr3(next->pgd) */
++		op->cmd = MMUEXT_NEW_BASEPTR;
++		op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
++		op++;
 +
-+extern int xencomm_create(void *buffer, unsigned long bytes,
-+                          struct xencomm_handle **desc, gfp_t type);
-+extern void xencomm_free(struct xencomm_handle *desc);
++		/* xen_new_user_pt(__pa(__user_pgd(next->pgd))) */
++		op->cmd = MMUEXT_NEW_USER_BASEPTR;
++		op->arg1.mfn = pfn_to_mfn(__pa(__user_pgd(next->pgd)) >> PAGE_SHIFT);
++		op++;
++		
++		if (unlikely(next->context.ldt != prev->context.ldt)) {
++			/* load_LDT_nolock(&next->context, cpu) */
++			op->cmd = MMUEXT_SET_LDT;
++			op->arg1.linear_addr = (unsigned long)next->context.ldt;
++			op->arg2.nr_ents     = next->context.size;
++			op++;
++		}
 +
-+extern int xencomm_create_mini(struct xencomm_mini *area, int *nbr_area,
-+                               void *buffer, unsigned long bytes,
-+                               struct xencomm_handle **ret);
++		BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
++	}
++#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
++	else {
++		write_pda(mmu_state, TLBSTATE_OK);
++		if (read_pda(active_mm) != next)
++			out_of_line_bug();
++		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
++			/* We were in lazy tlb mode and leave_mm disabled
++			 * tlb flush IPI delivery. We must reload CR3
++			 * to make sure we never use freed page tables.
++			 */
++			load_cr3(next->pgd);
++			xen_new_user_pt(__pa(__user_pgd(next->pgd)));
++			load_LDT_nolock(&next->context, cpu);
++		}
++	}
++#endif
++}
 +
-+/* Translate virtual address to physical address.  */
-+extern unsigned long xencomm_vaddr_to_paddr(unsigned long vaddr);
++#define deactivate_mm(tsk,mm)	do { \
++	load_gs_index(0); \
++	asm volatile("movl %0,%%fs"::"r"(0));  \
++} while(0)
 +
-+/* Inline version.  To be used only on linear space (kernel space).  */
-+static inline struct xencomm_handle *
-+xencomm_create_inline(void *buffer)
++static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
 +{
-+	unsigned long paddr;
-+
-+	paddr = xencomm_vaddr_to_paddr((unsigned long)buffer);
-+	return (struct xencomm_handle *)(paddr | XENCOMM_INLINE_FLAG);
++	if (!next->context.pinned)
++		mm_pin(next);
++	switch_mm(prev, next, NULL);
 +}
 +
-+#define xen_guest_handle(hnd)  ((hnd).p)
++#endif
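
switch_mm() above batches the new base pointer, the new user base pointer, and (when it changed) the LDT into one HYPERVISOR_mmuext_op() hypercall instead of taking a trap per privileged update. The batching idiom reduced to its skeleton, assuming the hypercall wrappers and maddr.h helpers from elsewhere in this patch (MMUEXT_TLB_FLUSH_LOCAL stands in for any second operation):

    /* Skeleton of the multi-op batching idiom used by switch_mm(). */
    static void mmuext_batch_sketch(pgd_t *pgd)
    {
            struct mmuext_op ops[2], *op = ops;

            op->cmd = MMUEXT_NEW_BASEPTR;
            op->arg1.mfn = pfn_to_mfn(__pa(pgd) >> PAGE_SHIFT);
            op++;
            op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
            op++;
            /* A single hypercall submits both operations. */
            BUG_ON(HYPERVISOR_mmuext_op(ops, op - ops, NULL, DOMID_SELF));
    }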
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/msr.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/msr.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,399 @@
++#ifndef X86_64_MSR_H
++#define X86_64_MSR_H 1
 +
-+#endif /* _LINUX_XENCOMM_H_ */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-ia64/xenoprof.h tmp-linux-2.6-xen.patch/include/asm-ia64/xenoprof.h
---- pristine-linux-2.6.18.2/include/asm-ia64/xenoprof.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-ia64/xenoprof.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,48 @@
-+/******************************************************************************
-+ * asm-ia64/xenoprof.h
-+ *
-+ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
-+ *                    VA Linux Systems Japan K.K.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-+ *
++#ifndef __ASSEMBLY__
++/*
++ * Access to model-specific registers (available on 586 and better only).
++ * Note: the rd* operations modify the parameters directly (without using
++ * pointer indirection); this allows gcc to optimize better.
 + */
-+#ifndef __ASM_XENOPROF_H__
-+#define __ASM_XENOPROF_H__
-+#ifdef CONFIG_XEN
 +
-+#undef HAVE_XENOPROF_CREATE_FILES
++#define rdmsr(msr,val1,val2) \
++       __asm__ __volatile__("rdmsr" \
++			    : "=a" (val1), "=d" (val2) \
++			    : "c" (msr))
 +
-+struct xenoprof_init;
-+void xenoprof_arch_init_counter(struct xenoprof_init *init);
-+void xenoprof_arch_counter(void);
-+void xenoprof_arch_start(void);
-+void xenoprof_arch_stop(void);
 +
-+struct xenoprof_arch_shared_buffer {
-+	struct resource*	res;
-+};
++#define rdmsrl(msr,val) do { unsigned long a__,b__; \
++       __asm__ __volatile__("rdmsr" \
++			    : "=a" (a__), "=d" (b__) \
++			    : "c" (msr)); \
++       val = a__ | (b__<<32); \
++} while(0)
 +
-+struct xenoprof_shared_buffer;
-+void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer* sbuf);
-+struct xenoprof_get_buffer;
-+int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer* get_buffer,
-+                                    struct xenoprof_shared_buffer* sbuf);
-+struct xenoprof_passive;
-+int xenoprof_arch_set_passive(struct xenoprof_passive* pdomain,
-+                              struct xenoprof_shared_buffer* sbuf);
++#define wrmsr(msr,val1,val2) \
++     __asm__ __volatile__("wrmsr" \
++			  : /* no outputs */ \
++			  : "c" (msr), "a" (val1), "d" (val2))
 +
-+#endif /* CONFIG_XEN */
-+#endif /* __ASM_XENOPROF_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-um/page.h tmp-linux-2.6-xen.patch/include/asm-um/page.h
---- pristine-linux-2.6.18.2/include/asm-um/page.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-um/page.h	2007-07-30 16:35:13.000000000 +0200
-@@ -114,7 +114,7 @@ extern unsigned long uml_physmem;
- extern struct page *arch_validate(struct page *page, gfp_t mask, int order);
- #define HAVE_ARCH_VALIDATE
- 
--extern void arch_free_page(struct page *page, int order);
-+extern int arch_free_page(struct page *page, int order);
- #define HAVE_ARCH_FREE_PAGE
- 
- #include <asm-generic/memory_model.h>
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/apic.h tmp-linux-2.6-xen.patch/include/asm-x86_64/apic.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/apic.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/apic.h	2007-07-30 16:35:13.000000000 +0200
-@@ -98,11 +98,13 @@ extern void setup_APIC_extened_lvt(unsig
- extern int disable_timer_pin_1;
- 
- 
-+#ifndef CONFIG_XEN
- void smp_send_timer_broadcast_ipi(void);
- void switch_APIC_timer_to_ipi(void *cpumask);
- void switch_ipi_to_APIC_timer(void *cpumask);
- 
- #define ARCH_APICTIMER_STOPS_ON_C3	1
-+#endif
- 
- #endif /* CONFIG_X86_LOCAL_APIC */
- 
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/kexec.h tmp-linux-2.6-xen.patch/include/asm-x86_64/kexec.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/kexec.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/kexec.h	2007-07-30 16:35:13.000000000 +0200
-@@ -1,6 +1,27 @@
- #ifndef _X86_64_KEXEC_H
- #define _X86_64_KEXEC_H
- 
-+#define PA_CONTROL_PAGE  0
-+#define VA_CONTROL_PAGE  1
-+#define PA_PGD           2
-+#define VA_PGD           3
-+#define PA_PUD_0         4
-+#define VA_PUD_0         5
-+#define PA_PMD_0         6
-+#define VA_PMD_0         7
-+#define PA_PTE_0         8
-+#define VA_PTE_0         9
-+#define PA_PUD_1         10
-+#define VA_PUD_1         11
-+#define PA_PMD_1         12
-+#define VA_PMD_1         13
-+#define PA_PTE_1         14
-+#define VA_PTE_1         15
-+#define PA_TABLE_PAGE    16
-+#define PAGES_NR         17
++#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32) 
 +
-+#ifndef __ASSEMBLY__
++/* wrmsr with exception handling */
++#define wrmsr_safe(msr,a,b) ({ int ret__;			\
++	asm volatile("2: wrmsr ; xorl %0,%0\n"			\
++		     "1:\n\t"					\
++		     ".section .fixup,\"ax\"\n\t"		\
++		     "3:  movl %4,%0 ; jmp 1b\n\t"		\
++		     ".previous\n\t"				\
++ 		     ".section __ex_table,\"a\"\n"		\
++		     "   .align 8\n\t"				\
++		     "   .quad 	2b,3b\n\t"			\
++		     ".previous"				\
++		     : "=a" (ret__)				\
++		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
++	ret__; })
 +
- #include <linux/string.h>
- 
- #include <asm/page.h>
-@@ -64,4 +85,25 @@ static inline void crash_setup_regs(stru
- 		newregs->rip = (unsigned long)current_text_addr();
- 	}
- }
++#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
 +
-+NORET_TYPE void
-+relocate_kernel(unsigned long indirection_page,
-+		unsigned long page_list,
-+		unsigned long start_address) ATTRIB_NORET;
++#define rdmsr_safe(msr,a,b) \
++	({ int ret__;						\
++	  asm volatile ("1:       rdmsr\n"			\
++                      "2:\n"					\
++                      ".section .fixup,\"ax\"\n"		\
++                      "3:       movl %4,%0\n"			\
++                      " jmp 2b\n"				\
++                      ".previous\n"				\
++                      ".section __ex_table,\"a\"\n"		\
++                      " .align 8\n"				\
++                      " .quad 1b,3b\n"				\
++                      ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b))\
++                      :"c"(msr), "i"(-EIO), "0"(0));		\
++	  ret__; })		
 +
-+/* Under Xen we need to work with machine addresses. These macros give the
-+ * machine address of a certain page to the generic kexec code instead of 
-+ * the pseudo physical address which would be given by the default macros.
-+ */
++#define rdtsc(low,high) \
++     __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
 +
-+#ifdef CONFIG_XEN
-+#define KEXEC_ARCH_HAS_PAGE_MACROS
-+#define kexec_page_to_pfn(page)  pfn_to_mfn(page_to_pfn(page))
-+#define kexec_pfn_to_page(pfn)   pfn_to_page(mfn_to_pfn(pfn))
-+#define kexec_virt_to_phys(addr) virt_to_machine(addr)
-+#define kexec_phys_to_virt(addr) phys_to_virt(machine_to_phys(addr))
-+#endif
++#define rdtscl(low) \
++     __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
 +
-+#endif /* __ASSEMBLY__ */
++#define rdtscll(val) do { \
++     unsigned int __a,__d; \
++     asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
++     (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
++} while(0)
 +
- #endif /* _X86_64_KEXEC_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/agp.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/agp.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/agp.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/agp.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,35 @@
-+#ifndef AGP_H
-+#define AGP_H 1
++#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
 +
-+#include <asm/cacheflush.h>
-+#include <asm/system.h>
++#define rdpmc(counter,low,high) \
++     __asm__ __volatile__("rdpmc" \
++			  : "=a" (low), "=d" (high) \
++			  : "c" (counter))
++
++static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
++			 unsigned int *ecx, unsigned int *edx)
++{
++	__asm__(XEN_CPUID
++		: "=a" (*eax),
++		  "=b" (*ebx),
++		  "=c" (*ecx),
++		  "=d" (*edx)
++		: "0" (op));
++}
++
++/* Some CPUID calls want 'count' to be placed in ecx */
++static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
++	       	int *edx)
++{
++	__asm__(XEN_CPUID
++		: "=a" (*eax),
++		  "=b" (*ebx),
++		  "=c" (*ecx),
++		  "=d" (*edx)
++		: "0" (op), "c" (count));
++}
 +
 +/*
-+ * Functions to keep the agpgart mappings coherent.
-+ * The GART gives the CPU a physical alias of memory. The alias is
-+ * mapped uncacheable. Make sure there are no conflicting mappings
-+ * with different cachability attributes for the same page.
++ * CPUID functions returning a single datum
 + */
++static inline unsigned int cpuid_eax(unsigned int op)
++{
++	unsigned int eax;
 +
-+int map_page_into_agp(struct page *page);
-+int unmap_page_from_agp(struct page *page);
-+#define flush_agp_mappings() global_flush_tlb()
++	__asm__(XEN_CPUID
++		: "=a" (eax)
++		: "0" (op)
++		: "bx", "cx", "dx");
++	return eax;
++}
++static inline unsigned int cpuid_ebx(unsigned int op)
++{
++	unsigned int eax, ebx;
 +
-+/* Could use CLFLUSH here if the cpu supports it. But then it would
-+   need to be called for each cacheline of the whole page so it may not be
-+   worth it. Would need a page for it. */
-+#define flush_agp_cache() wbinvd()
++	__asm__(XEN_CPUID
++		: "=a" (eax), "=b" (ebx)
++		: "0" (op)
++		: "cx", "dx" );
++	return ebx;
++}
++static inline unsigned int cpuid_ecx(unsigned int op)
++{
++	unsigned int eax, ecx;
 +
-+/* Convert a physical address to an address suitable for the GART. */
-+#define phys_to_gart(x) phys_to_machine(x)
-+#define gart_to_phys(x) machine_to_phys(x)
++	__asm__(XEN_CPUID
++		: "=a" (eax), "=c" (ecx)
++		: "0" (op)
++		: "bx", "dx" );
++	return ecx;
++}
++static inline unsigned int cpuid_edx(unsigned int op)
++{
++	unsigned int eax, edx;
++
++	__asm__(XEN_CPUID
++		: "=a" (eax), "=d" (edx)
++		: "0" (op)
++		: "bx", "cx");
++	return edx;
++}
++
++#define MSR_IA32_UCODE_WRITE		0x79
++#define MSR_IA32_UCODE_REV		0x8b
 +
-+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
-+#define alloc_gatt_pages(order)	({                                          \
-+	char *_t; dma_addr_t _d;                                            \
-+	_t = dma_alloc_coherent(NULL,PAGE_SIZE<<(order),&_d,GFP_KERNEL);    \
-+	_t; })
-+#define free_gatt_pages(table, order)	\
-+	dma_free_coherent(NULL,PAGE_SIZE<<(order),(table),virt_to_bus(table))
 +
 +#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/arch_hooks.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/arch_hooks.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/arch_hooks.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/arch_hooks.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,27 @@
-+#ifndef _ASM_ARCH_HOOKS_H
-+#define _ASM_ARCH_HOOKS_H
 +
-+#include <linux/interrupt.h>
++/* AMD/K8 specific MSRs */ 
++#define MSR_EFER 0xc0000080		/* extended feature register */
++#define MSR_STAR 0xc0000081		/* legacy mode SYSCALL target */
++#define MSR_LSTAR 0xc0000082 		/* long mode SYSCALL target */
++#define MSR_CSTAR 0xc0000083		/* compatibility mode SYSCALL target */
++#define MSR_SYSCALL_MASK 0xc0000084	/* EFLAGS mask for syscall */
++#define MSR_FS_BASE 0xc0000100		/* 64bit FS base */
++#define MSR_GS_BASE 0xc0000101		/* 64bit GS base */
++#define MSR_KERNEL_GS_BASE  0xc0000102	/* SwapGS GS shadow (or USER_GS from kernel) */ 
++/* EFER bits: */ 
++#define _EFER_SCE 0  /* SYSCALL/SYSRET */
++#define _EFER_LME 8  /* Long mode enable */
++#define _EFER_LMA 10 /* Long mode active (read-only) */
++#define _EFER_NX 11  /* No execute enable */
 +
-+/*
-+ *	linux/include/asm/arch_hooks.h
-+ *
-+ *	define the architecture specific hooks 
-+ */
++#define EFER_SCE (1<<_EFER_SCE)
++#define EFER_LME (1<<_EFER_LME)
++#define EFER_LMA (1<<_EFER_LMA)
++#define EFER_NX (1<<_EFER_NX)
++
++/* Intel MSRs. Some also available on other CPUs */
++#define MSR_IA32_TSC		0x10
++#define MSR_IA32_PLATFORM_ID	0x17
++
++#define MSR_IA32_PERFCTR0      0xc1
++#define MSR_IA32_PERFCTR1      0xc2
 +
-+/* these aren't arch hooks, they are generic routines
-+ * that can be used by the hooks */
-+extern void init_ISA_irqs(void);
-+extern void apic_intr_init(void);
-+extern void smp_intr_init(void);
-+extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
++#define MSR_MTRRcap		0x0fe
++#define MSR_IA32_BBL_CR_CTL        0x119
 +
-+/* these are the defined hooks */
-+extern void intr_init_hook(void);
-+extern void pre_intr_init_hook(void);
-+extern void pre_setup_arch_hook(void);
-+extern void trap_init_hook(void);
-+extern void time_init_hook(void);
-+extern void mca_nmi_hook(void);
++#define MSR_IA32_SYSENTER_CS	0x174
++#define MSR_IA32_SYSENTER_ESP	0x175
++#define MSR_IA32_SYSENTER_EIP	0x176
 +
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/bootsetup.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/bootsetup.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/bootsetup.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/bootsetup.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,42 @@
++#define MSR_IA32_MCG_CAP       0x179
++#define MSR_IA32_MCG_STATUS        0x17a
++#define MSR_IA32_MCG_CTL       0x17b
 +
-+#ifndef _X86_64_BOOTSETUP_H
-+#define _X86_64_BOOTSETUP_H 1
++#define MSR_IA32_EVNTSEL0      0x186
++#define MSR_IA32_EVNTSEL1      0x187
 +
-+#define BOOT_PARAM_SIZE		4096
-+extern char x86_boot_params[BOOT_PARAM_SIZE];
++#define MSR_IA32_DEBUGCTLMSR       0x1d9
++#define MSR_IA32_LASTBRANCHFROMIP  0x1db
++#define MSR_IA32_LASTBRANCHTOIP        0x1dc
++#define MSR_IA32_LASTINTFROMIP     0x1dd
++#define MSR_IA32_LASTINTTOIP       0x1de
 +
-+/*
-+ * This is set up by the setup-routine at boot-time
-+ */
-+#define PARAM	((unsigned char *)x86_boot_params)
-+#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
-+#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
-+#define ALT_MEM_K (*(unsigned int *) (PARAM+0x1e0))
-+#define E820_MAP_NR (*(char*) (PARAM+E820NR))
-+#define E820_MAP    ((struct e820entry *) (PARAM+E820MAP))
-+#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
-+#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
-+#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
-+#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
-+#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
-+#define SAVED_VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
-+#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
-+#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
-+#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
-+#define KERNEL_START (*(unsigned int *) (PARAM+0x214))
++#define MSR_MTRRfix64K_00000	0x250
++#define MSR_MTRRfix16K_80000	0x258
++#define MSR_MTRRfix16K_A0000	0x259
++#define MSR_MTRRfix4K_C0000	0x268
++#define MSR_MTRRfix4K_C8000	0x269
++#define MSR_MTRRfix4K_D0000	0x26a
++#define MSR_MTRRfix4K_D8000	0x26b
++#define MSR_MTRRfix4K_E0000	0x26c
++#define MSR_MTRRfix4K_E8000	0x26d
++#define MSR_MTRRfix4K_F0000	0x26e
++#define MSR_MTRRfix4K_F8000	0x26f
++#define MSR_MTRRdefType		0x2ff
 +
-+#define INITRD_START (__pa(xen_start_info->mod_start))
-+#define INITRD_SIZE (xen_start_info->mod_len)
-+#define EDID_INFO   (*(struct edid_info *) (PARAM+0x440))
++#define MSR_IA32_MC0_CTL       0x400
++#define MSR_IA32_MC0_STATUS        0x401
++#define MSR_IA32_MC0_ADDR      0x402
++#define MSR_IA32_MC0_MISC      0x403
 +
-+#define EDD_NR     (*(unsigned char *) (PARAM+EDDNR))
-+#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
-+#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
-+#define EDD_BUF     ((struct edd_info *) (PARAM+EDDBUF))
-+#define COMMAND_LINE saved_command_line
++#define MSR_P6_PERFCTR0			0xc1
++#define MSR_P6_PERFCTR1			0xc2
++#define MSR_P6_EVNTSEL0			0x186
++#define MSR_P6_EVNTSEL1			0x187
 +
-+#define RAMDISK_IMAGE_START_MASK  	0x07FF
-+#define RAMDISK_PROMPT_FLAG		0x8000
-+#define RAMDISK_LOAD_FLAG		0x4000	
++/* K7/K8 MSRs. Not complete. See the architecture manual for a more complete list. */
++#define MSR_K7_EVNTSEL0            0xC0010000
++#define MSR_K7_PERFCTR0            0xC0010004
++#define MSR_K7_EVNTSEL1            0xC0010001
++#define MSR_K7_PERFCTR1            0xC0010005
++#define MSR_K7_EVNTSEL2            0xC0010002
++#define MSR_K7_PERFCTR2            0xC0010006
++#define MSR_K7_EVNTSEL3            0xC0010003
++#define MSR_K7_PERFCTR3            0xC0010007
++#define MSR_K8_TOP_MEM1		   0xC001001A
++#define MSR_K8_TOP_MEM2		   0xC001001D
++#define MSR_K8_SYSCFG		   0xC0010010
++#define MSR_K8_HWCR		   0xC0010015
 +
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/desc.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/desc.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/desc.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/desc.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,263 @@
-+/* Written 2000 by Andi Kleen */ 
-+#ifndef __ARCH_DESC_H
-+#define __ARCH_DESC_H
++/* K6 MSRs */
++#define MSR_K6_EFER			0xC0000080
++#define MSR_K6_STAR			0xC0000081
++#define MSR_K6_WHCR			0xC0000082
++#define MSR_K6_UWCCR			0xC0000085
++#define MSR_K6_PSOR			0xC0000087
++#define MSR_K6_PFIR			0xC0000088
 +
-+#include <linux/threads.h>
-+#include <asm/ldt.h>
++/* Centaur-Hauls/IDT defined MSRs. */
++#define MSR_IDT_FCR1			0x107
++#define MSR_IDT_FCR2			0x108
++#define MSR_IDT_FCR3			0x109
++#define MSR_IDT_FCR4			0x10a
 +
-+#ifndef __ASSEMBLY__
++#define MSR_IDT_MCR0			0x110
++#define MSR_IDT_MCR1			0x111
++#define MSR_IDT_MCR2			0x112
++#define MSR_IDT_MCR3			0x113
++#define MSR_IDT_MCR4			0x114
++#define MSR_IDT_MCR5			0x115
++#define MSR_IDT_MCR6			0x116
++#define MSR_IDT_MCR7			0x117
++#define MSR_IDT_MCR_CTRL		0x120
 +
-+#include <linux/string.h>
-+#include <linux/smp.h>
++/* VIA Cyrix defined MSRs*/
++#define MSR_VIA_FCR			0x1107
++#define MSR_VIA_LONGHAUL		0x110a
++#define MSR_VIA_RNG			0x110b
++#define MSR_VIA_BCR2			0x1147
 +
-+#include <asm/segment.h>
-+#include <asm/mmu.h>
++/* Intel defined MSRs. */
++#define MSR_IA32_P5_MC_ADDR		0
++#define MSR_IA32_P5_MC_TYPE		1
++#define MSR_IA32_PLATFORM_ID		0x17
++#define MSR_IA32_EBL_CR_POWERON		0x2a
 +
-+// 8 byte segment descriptor
-+struct desc_struct { 
-+	u16 limit0;
-+	u16 base0;
-+	unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
-+	unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
-+} __attribute__((packed)); 
++#define MSR_IA32_APICBASE               0x1b
++#define MSR_IA32_APICBASE_BSP           (1<<8)
++#define MSR_IA32_APICBASE_ENABLE        (1<<11)
++#define MSR_IA32_APICBASE_BASE          (0xfffff<<12)
 +
-+struct n_desc_struct { 
-+	unsigned int a,b;
-+}; 	
++/* P4/Xeon+ specific */
++#define MSR_IA32_MCG_EAX		0x180
++#define MSR_IA32_MCG_EBX		0x181
++#define MSR_IA32_MCG_ECX		0x182
++#define MSR_IA32_MCG_EDX		0x183
++#define MSR_IA32_MCG_ESI		0x184
++#define MSR_IA32_MCG_EDI		0x185
++#define MSR_IA32_MCG_EBP		0x186
++#define MSR_IA32_MCG_ESP		0x187
++#define MSR_IA32_MCG_EFLAGS		0x188
++#define MSR_IA32_MCG_EIP		0x189
++#define MSR_IA32_MCG_RESERVED		0x18A
 +
-+enum { 
-+	GATE_INTERRUPT = 0xE, 
-+	GATE_TRAP = 0xF, 	
-+	GATE_CALL = 0xC,
-+}; 	
++#define MSR_P6_EVNTSEL0			0x186
++#define MSR_P6_EVNTSEL1			0x187
 +
-+// 16byte gate
-+struct gate_struct {          
-+	u16 offset_low;
-+	u16 segment; 
-+	unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
-+	u16 offset_middle;
-+	u32 offset_high;
-+	u32 zero1; 
-+} __attribute__((packed));
++#define MSR_IA32_PERF_STATUS		0x198
++#define MSR_IA32_PERF_CTL		0x199
 +
-+#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF) 
-+#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
-+#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
++#define MSR_IA32_THERM_CONTROL		0x19a
++#define MSR_IA32_THERM_INTERRUPT	0x19b
++#define MSR_IA32_THERM_STATUS		0x19c
++#define MSR_IA32_MISC_ENABLE		0x1a0
 +
-+enum { 
-+	DESC_TSS = 0x9,
-+	DESC_LDT = 0x2,
-+}; 
++#define MSR_IA32_DEBUGCTLMSR		0x1d9
++#define MSR_IA32_LASTBRANCHFROMIP	0x1db
++#define MSR_IA32_LASTBRANCHTOIP		0x1dc
++#define MSR_IA32_LASTINTFROMIP		0x1dd
++#define MSR_IA32_LASTINTTOIP		0x1de
 +
-+// LDT or TSS descriptor in the GDT. 16 bytes.
-+struct ldttss_desc { 
-+	u16 limit0;
-+	u16 base0;
-+	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
-+	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
-+	u32 base3;
-+	u32 zero1; 
-+} __attribute__((packed)); 
++#define MSR_IA32_MC0_CTL		0x400
++#define MSR_IA32_MC0_STATUS		0x401
++#define MSR_IA32_MC0_ADDR		0x402
++#define MSR_IA32_MC0_MISC		0x403
 +
-+struct desc_ptr {
-+	unsigned short size;
-+	unsigned long address;
-+} __attribute__((packed)) ;
++/* Pentium IV performance counter MSRs */
++#define MSR_P4_BPU_PERFCTR0 		0x300
++#define MSR_P4_BPU_PERFCTR1 		0x301
++#define MSR_P4_BPU_PERFCTR2 		0x302
++#define MSR_P4_BPU_PERFCTR3 		0x303
++#define MSR_P4_MS_PERFCTR0 		0x304
++#define MSR_P4_MS_PERFCTR1 		0x305
++#define MSR_P4_MS_PERFCTR2 		0x306
++#define MSR_P4_MS_PERFCTR3 		0x307
++#define MSR_P4_FLAME_PERFCTR0 		0x308
++#define MSR_P4_FLAME_PERFCTR1 		0x309
++#define MSR_P4_FLAME_PERFCTR2 		0x30a
++#define MSR_P4_FLAME_PERFCTR3 		0x30b
++#define MSR_P4_IQ_PERFCTR0 		0x30c
++#define MSR_P4_IQ_PERFCTR1 		0x30d
++#define MSR_P4_IQ_PERFCTR2 		0x30e
++#define MSR_P4_IQ_PERFCTR3 		0x30f
++#define MSR_P4_IQ_PERFCTR4 		0x310
++#define MSR_P4_IQ_PERFCTR5 		0x311
++#define MSR_P4_BPU_CCCR0 		0x360
++#define MSR_P4_BPU_CCCR1 		0x361
++#define MSR_P4_BPU_CCCR2 		0x362
++#define MSR_P4_BPU_CCCR3 		0x363
++#define MSR_P4_MS_CCCR0 		0x364
++#define MSR_P4_MS_CCCR1 		0x365
++#define MSR_P4_MS_CCCR2 		0x366
++#define MSR_P4_MS_CCCR3 		0x367
++#define MSR_P4_FLAME_CCCR0 		0x368
++#define MSR_P4_FLAME_CCCR1 		0x369
++#define MSR_P4_FLAME_CCCR2 		0x36a
++#define MSR_P4_FLAME_CCCR3 		0x36b
++#define MSR_P4_IQ_CCCR0 		0x36c
++#define MSR_P4_IQ_CCCR1 		0x36d
++#define MSR_P4_IQ_CCCR2 		0x36e
++#define MSR_P4_IQ_CCCR3 		0x36f
++#define MSR_P4_IQ_CCCR4 		0x370
++#define MSR_P4_IQ_CCCR5 		0x371
++#define MSR_P4_ALF_ESCR0 		0x3ca
++#define MSR_P4_ALF_ESCR1 		0x3cb
++#define MSR_P4_BPU_ESCR0 		0x3b2
++#define MSR_P4_BPU_ESCR1 		0x3b3
++#define MSR_P4_BSU_ESCR0 		0x3a0
++#define MSR_P4_BSU_ESCR1 		0x3a1
++#define MSR_P4_CRU_ESCR0 		0x3b8
++#define MSR_P4_CRU_ESCR1 		0x3b9
++#define MSR_P4_CRU_ESCR2 		0x3cc
++#define MSR_P4_CRU_ESCR3 		0x3cd
++#define MSR_P4_CRU_ESCR4 		0x3e0
++#define MSR_P4_CRU_ESCR5 		0x3e1
++#define MSR_P4_DAC_ESCR0 		0x3a8
++#define MSR_P4_DAC_ESCR1 		0x3a9
++#define MSR_P4_FIRM_ESCR0 		0x3a4
++#define MSR_P4_FIRM_ESCR1 		0x3a5
++#define MSR_P4_FLAME_ESCR0 		0x3a6
++#define MSR_P4_FLAME_ESCR1 		0x3a7
++#define MSR_P4_FSB_ESCR0 		0x3a2
++#define MSR_P4_FSB_ESCR1 		0x3a3
++#define MSR_P4_IQ_ESCR0 		0x3ba
++#define MSR_P4_IQ_ESCR1 		0x3bb
++#define MSR_P4_IS_ESCR0 		0x3b4
++#define MSR_P4_IS_ESCR1 		0x3b5
++#define MSR_P4_ITLB_ESCR0 		0x3b6
++#define MSR_P4_ITLB_ESCR1 		0x3b7
++#define MSR_P4_IX_ESCR0 		0x3c8
++#define MSR_P4_IX_ESCR1 		0x3c9
++#define MSR_P4_MOB_ESCR0 		0x3aa
++#define MSR_P4_MOB_ESCR1 		0x3ab
++#define MSR_P4_MS_ESCR0 		0x3c0
++#define MSR_P4_MS_ESCR1 		0x3c1
++#define MSR_P4_PMH_ESCR0 		0x3ac
++#define MSR_P4_PMH_ESCR1 		0x3ad
++#define MSR_P4_RAT_ESCR0 		0x3bc
++#define MSR_P4_RAT_ESCR1 		0x3bd
++#define MSR_P4_SAAT_ESCR0 		0x3ae
++#define MSR_P4_SAAT_ESCR1 		0x3af
++#define MSR_P4_SSU_ESCR0 		0x3be
++#define MSR_P4_SSU_ESCR1 		0x3bf    /* guess: not defined in manual */
++#define MSR_P4_TBPU_ESCR0 		0x3c2
++#define MSR_P4_TBPU_ESCR1 		0x3c3
++#define MSR_P4_TC_ESCR0 		0x3c4
++#define MSR_P4_TC_ESCR1 		0x3c5
++#define MSR_P4_U2L_ESCR0 		0x3b0
++#define MSR_P4_U2L_ESCR1 		0x3b1
 +
-+extern struct desc_ptr idt_descr, cpu_gdt_descr[NR_CPUS];
++#endif
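
The *_safe accessors above route a faulting rdmsr/wrmsr through the exception table and hand back an error code instead of oopsing, which is what makes probing possibly-absent MSRs safe. A short usage sketch, assuming -EIO from errno.h (the wrapper function itself is illustrative):

    /* Illustrative probe built on the exception-safe accessor above. */
    static int read_efer_sketch(unsigned long *efer)
    {
            unsigned int lo, hi;

            if (rdmsr_safe(MSR_EFER, &lo, &hi))
                    return -EIO;    /* the rdmsr faulted */
            *efer = lo | ((unsigned long)hi << 32);
            return 0;
    }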
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/nmi.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/nmi.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,93 @@
++/*
++ *  linux/include/asm-i386/nmi.h
++ */
++#ifndef ASM_NMI_H
++#define ASM_NMI_H
 +
-+extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
++#include <linux/pm.h>
++#include <asm/io.h>
 +
-+#define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
-+#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
++#include <xen/interface/nmi.h>
 +
-+static inline void clear_LDT(void)
-+{
-+	int cpu = get_cpu();
++struct pt_regs;
 +
-+	/*
-+	 * NB. We load the default_ldt for lcall7/27 handling on demand, as
-+	 * it slows down context switching. Noone uses it anyway.
-+	 */
-+	cpu = cpu;              /* XXX avoid compiler warning */
-+	xen_set_ldt(0UL, 0);
-+	put_cpu();
-+}
++typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
 +
-+/*
-+ * This is the ldt that every process will get unless we need
-+ * something other than this.
++/**
++ * set_nmi_callback
++ *
++ * Set a handler for an NMI. Only one handler may be
++ * set. The handler should return 1 if it handled the NMI.
 + */
-+extern struct desc_struct default_ldt[];
-+#ifndef CONFIG_X86_NO_IDT
-+extern struct gate_struct idt_table[]; 
-+#endif
-+extern struct desc_ptr cpu_gdt_descr[];
++void set_nmi_callback(nmi_callback_t callback);
 +
-+/* the cpu gdt accessor */
-+#define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address)
++/**
++ * unset_nmi_callback
++ *
++ * Remove the handler previously set.
++ */
++void unset_nmi_callback(void);
 +
-+static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)  
-+{
-+	struct gate_struct s; 	
-+	s.offset_low = PTR_LOW(func); 
-+	s.segment = __KERNEL_CS;
-+	s.ist = ist; 
-+	s.p = 1;
-+	s.dpl = dpl; 
-+	s.zero0 = 0;
-+	s.zero1 = 0; 
-+	s.type = type; 
-+	s.offset_middle = PTR_MIDDLE(func); 
-+	s.offset_high = PTR_HIGH(func); 
-+	/* does not need to be atomic because it is only done once at setup time */ 
-+	memcpy(adr, &s, 16); 
-+} 
++#ifdef CONFIG_PM
++ 
++/** Replace the PM callback routine for NMI. */
++struct pm_dev * set_nmi_pm_callback(pm_callback callback);
 +
-+#ifndef CONFIG_X86_NO_IDT
-+static inline void set_intr_gate(int nr, void *func) 
-+{ 
-+	BUG_ON((unsigned)nr > 0xFF);
-+	_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0); 
-+} 
++/** Unset the PM callback routine back to the default. */
++void unset_nmi_pm_callback(struct pm_dev * dev);
 +
-+static inline void set_intr_gate_ist(int nr, void *func, unsigned ist) 
-+{ 
-+	BUG_ON((unsigned)nr > 0xFF);
-+	_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist); 
-+} 
++#else
 +
-+static inline void set_system_gate(int nr, void *func) 
-+{ 
-+	BUG_ON((unsigned)nr > 0xFF);
-+	_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0); 
++static inline struct pm_dev * set_nmi_pm_callback(pm_callback callback)
++{
++	return 0;
 +} 
-+
-+static inline void set_system_gate_ist(int nr, void *func, unsigned ist)
++ 
++static inline void unset_nmi_pm_callback(struct pm_dev * dev)
 +{
-+	_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist);
 +}
-+#endif
 +
-+static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type, 
-+					 unsigned size) 
-+{ 
-+	struct ldttss_desc d;
-+	memset(&d,0,sizeof(d)); 
-+	d.limit0 = size & 0xFFFF;
-+	d.base0 = PTR_LOW(tss); 
-+	d.base1 = PTR_MIDDLE(tss) & 0xFF; 
-+	d.type = type;
-+	d.p = 1; 
-+	d.limit1 = (size >> 16) & 0xF;
-+	d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF; 
-+	d.base3 = PTR_HIGH(tss); 
-+	memcpy(ptr, &d, 16); 
-+}
++#endif /* CONFIG_PM */
++ 
++extern void default_do_nmi(struct pt_regs *);
++extern void die_nmi(char *str, struct pt_regs *regs);
 +
-+#ifndef CONFIG_X86_NO_TSS
-+static inline void set_tss_desc(unsigned cpu, void *addr)
-+{ 
-+	/*
-+	 * sizeof(unsigned long) coming from an extra "long" at the end
-+	 * of the iobitmap. See tss_struct definition in processor.h
-+	 *
-+	 * -1? seg base+limit should be pointing to the address of the
-+	 * last valid byte
-+	 */
-+	set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_TSS], 
-+		(unsigned long)addr, DESC_TSS,
-+		IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
-+} 
-+#endif
++static inline unsigned char get_nmi_reason(void)
++{
++        shared_info_t *s = HYPERVISOR_shared_info;
++        unsigned char reason = 0;
 +
-+static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
-+{ 
-+	set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_LDT], (unsigned long)addr,
-+			      DESC_LDT, size * 8 - 1);
-+}
++        /* construct a value which looks like it came from
++         * port 0x61.
++         */
++        if (test_bit(_XEN_NMIREASON_io_error, &s->arch.nmi_reason))
++                reason |= 0x40;
++        if (test_bit(_XEN_NMIREASON_parity_error, &s->arch.nmi_reason))
++                reason |= 0x80;
 +
-+static inline void set_seg_base(unsigned cpu, int entry, void *base)
-+{ 
-+	struct desc_struct *d = &cpu_gdt(cpu)[entry];
-+	u32 addr = (u32)(u64)base;
-+	BUG_ON((u64)base >> 32); 
-+	d->base0 = addr & 0xffff;
-+	d->base1 = (addr >> 16) & 0xff;
-+	d->base2 = (addr >> 24) & 0xff;
-+} 
++        return reason;
++}
 +
-+#define LDT_entry_a(info) \
-+	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
-+/* Don't allow setting of the lm bit. It is useless anyways because 
-+   64bit system calls require __USER_CS. */ 
-+#define LDT_entry_b(info) \
-+	(((info)->base_addr & 0xff000000) | \
-+	(((info)->base_addr & 0x00ff0000) >> 16) | \
-+	((info)->limit & 0xf0000) | \
-+	(((info)->read_exec_only ^ 1) << 9) | \
-+	((info)->contents << 10) | \
-+	(((info)->seg_not_present ^ 1) << 15) | \
-+	((info)->seg_32bit << 22) | \
-+	((info)->limit_in_pages << 23) | \
-+	((info)->useable << 20) | \
-+	/* ((info)->lm << 21) | */ \
-+	0x7000)
++extern int panic_on_timeout;
++extern int unknown_nmi_panic;
 +
-+#define LDT_empty(info) (\
-+	(info)->base_addr	== 0	&& \
-+	(info)->limit		== 0	&& \
-+	(info)->contents	== 0	&& \
-+	(info)->read_exec_only	== 1	&& \
-+	(info)->seg_32bit	== 0	&& \
-+	(info)->limit_in_pages	== 0	&& \
-+	(info)->seg_not_present	== 1	&& \
-+	(info)->useable		== 0	&& \
-+	(info)->lm		== 0)
++extern int check_nmi_watchdog(void);
++ 
++extern void setup_apic_nmi_watchdog (void);
++extern int reserve_lapic_nmi(void);
++extern void release_lapic_nmi(void);
++extern void disable_timer_nmi_watchdog(void);
++extern void enable_timer_nmi_watchdog(void);
++extern void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
 +
-+#if TLS_SIZE != 24
-+# error update this code.
-+#endif
++extern void nmi_watchdog_default(void);
++extern int setup_nmi_watchdog(char *);
 +
-+static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
-+{
-+#if 0
-+	u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN);
-+	gdt[0] = t->tls_array[0];
-+	gdt[1] = t->tls_array[1];
-+	gdt[2] = t->tls_array[2];
-+#endif
-+#define C(i) \
-+	HYPERVISOR_update_descriptor(virt_to_machine(&cpu_gdt(cpu)[GDT_ENTRY_TLS_MIN + i]), t->tls_array[i])
++extern unsigned int nmi_watchdog;
++#define NMI_DEFAULT	-1
++#define NMI_NONE	0
++#define NMI_IO_APIC	1
++#define NMI_LOCAL_APIC	2
++#define NMI_INVALID	3
 +
-+	C(0); C(1); C(2);
-+#undef C
-+} 
++#endif /* ASM_NMI_H */
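For context on the nmi.h additions above: a Xen guest cannot read the legacy
NMI status port 0x61, so get_nmi_reason() rebuilds the same byte layout from
the _XEN_NMIREASON_* bits in the shared info page (bit 6 for an I/O check
error, bit 7 for a memory parity error). A minimal sketch of how a consumer
could decode that byte (example_decode_nmi() is hypothetical, not part of the
patch, and assumes normal kernel context):

static void example_decode_nmi(unsigned char reason)
{
	/* Bit layout matches legacy port 0x61, as synthesized above. */
	if (reason & 0x80)
		printk(KERN_EMERG "NMI: memory parity error\n");
	if (reason & 0x40)
		printk(KERN_EMERG "NMI: I/O check error\n");
}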
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/page.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/page.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,212 @@
++#ifndef _X86_64_PAGE_H
++#define _X86_64_PAGE_H
++
++/* #include <linux/string.h> */
++#ifndef __ASSEMBLY__
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <asm/bug.h>
++#endif
++#include <xen/interface/xen.h> 
 +
 +/*
-+ * load one particular LDT into the current CPU
++ * Need to repeat this here in order to not include pgtable.h (which in turn
++ * depends on definitions made here), but to be able to use the symbolic
++ * constants below. The preprocessor will warn if the two definitions aren't identical.
 + */
-+static inline void load_LDT_nolock (mm_context_t *pc, int cpu)
-+{
-+	void *segments = pc->ldt;
-+	int count = pc->size;
-+
-+	if (likely(!count))
-+		segments = NULL;
++#define _PAGE_PRESENT	0x001
++#define _PAGE_IO	0x200
 +
-+	xen_set_ldt((unsigned long)segments, count);
-+}
++/* PAGE_SHIFT determines the page size */
++#define PAGE_SHIFT	12
++#ifdef __ASSEMBLY__
++#define PAGE_SIZE	(0x1 << PAGE_SHIFT)
++#else
++#define PAGE_SIZE	(1UL << PAGE_SHIFT)
++#endif
++#define PAGE_MASK	(~(PAGE_SIZE-1))
 +
-+static inline void load_LDT(mm_context_t *pc)
-+{
-+	int cpu = get_cpu();
-+	load_LDT_nolock(pc, cpu);
-+	put_cpu();
-+}
++/* See Documentation/x86_64/mm.txt for a description of the memory map. */
++#define __PHYSICAL_MASK_SHIFT	46
++#define __PHYSICAL_MASK		((1UL << __PHYSICAL_MASK_SHIFT) - 1)
++#define __VIRTUAL_MASK_SHIFT	48
++#define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)
 +
-+extern struct desc_ptr idt_descr;
++#define PHYSICAL_PAGE_MASK	(~(PAGE_SIZE-1) & __PHYSICAL_MASK)
 +
-+#endif /* !__ASSEMBLY__ */
++#define THREAD_ORDER 1 
++#define THREAD_SIZE  (PAGE_SIZE << THREAD_ORDER)
++#define CURRENT_MASK (~(THREAD_SIZE-1))
 +
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/dma-mapping.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/dma-mapping.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/dma-mapping.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/dma-mapping.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,207 @@
-+#ifndef _X8664_DMA_MAPPING_H
-+#define _X8664_DMA_MAPPING_H 1
++#define EXCEPTION_STACK_ORDER 0
++#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
 +
-+/*
-+ * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
-+ * documentation.
-+ */
++#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
++#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
 +
++#define IRQSTACK_ORDER 2
++#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
 +
-+#include <asm/scatterlist.h>
-+#include <asm/io.h>
-+#include <asm/swiotlb.h>
++#define STACKFAULT_STACK 1
++#define DOUBLEFAULT_STACK 2
++#define NMI_STACK 3
++#define DEBUG_STACK 4
++#define MCE_STACK 5
++#define N_EXCEPTION_STACKS 5  /* hw limit: 7 */
 +
-+struct dma_mapping_ops {
-+	int             (*mapping_error)(dma_addr_t dma_addr);
-+	void*           (*alloc_coherent)(struct device *dev, size_t size,
-+                                dma_addr_t *dma_handle, gfp_t gfp);
-+	void            (*free_coherent)(struct device *dev, size_t size,
-+                                void *vaddr, dma_addr_t dma_handle);
-+	dma_addr_t      (*map_single)(struct device *hwdev, void *ptr,
-+                                size_t size, int direction);
-+	/* like map_single, but doesn't check the device mask */
-+	dma_addr_t      (*map_simple)(struct device *hwdev, char *ptr,
-+                                size_t size, int direction);
-+	void            (*unmap_single)(struct device *dev, dma_addr_t addr,
-+		                size_t size, int direction);
-+	void            (*sync_single_for_cpu)(struct device *hwdev,
-+		                dma_addr_t dma_handle, size_t size,
-+				int direction);
-+	void            (*sync_single_for_device)(struct device *hwdev,
-+                                dma_addr_t dma_handle, size_t size,
-+				int direction);
-+	void            (*sync_single_range_for_cpu)(struct device *hwdev,
-+                                dma_addr_t dma_handle, unsigned long offset,
-+		                size_t size, int direction);
-+	void            (*sync_single_range_for_device)(struct device *hwdev,
-+				dma_addr_t dma_handle, unsigned long offset,
-+		                size_t size, int direction);
-+	void            (*sync_sg_for_cpu)(struct device *hwdev,
-+                                struct scatterlist *sg, int nelems,
-+				int direction);
-+	void            (*sync_sg_for_device)(struct device *hwdev,
-+				struct scatterlist *sg, int nelems,
-+				int direction);
-+	int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
-+		                int nents, int direction);
-+	void            (*unmap_sg)(struct device *hwdev,
-+				struct scatterlist *sg, int nents,
-+				int direction);
-+	int             (*dma_supported)(struct device *hwdev, u64 mask);
-+	int		is_phys;
-+};
++#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
++#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
 +
-+extern dma_addr_t bad_dma_address;
-+extern struct dma_mapping_ops* dma_ops;
-+extern int iommu_merge;
++#define HPAGE_SHIFT PMD_SHIFT
++#define HPAGE_SIZE	((1UL) << HPAGE_SHIFT)
++#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
++#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
 +
-+static inline int valid_dma_direction(int dma_direction)
-+{
-+	return ((dma_direction == DMA_BIDIRECTIONAL) ||
-+		(dma_direction == DMA_TO_DEVICE) ||
-+		(dma_direction == DMA_FROM_DEVICE));
-+}
++#ifdef __KERNEL__
++#ifndef __ASSEMBLY__
 +
-+#if 0
-+static inline int dma_mapping_error(dma_addr_t dma_addr)
-+{
-+	if (dma_ops->mapping_error)
-+		return dma_ops->mapping_error(dma_addr);
++extern unsigned long end_pfn;
 +
-+	return (dma_addr == bad_dma_address);
-+}
++#include <asm/maddr.h>
 +
-+extern void *dma_alloc_coherent(struct device *dev, size_t size,
-+				dma_addr_t *dma_handle, gfp_t gfp);
-+extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-+			      dma_addr_t dma_handle);
++void clear_page(void *);
++void copy_page(void *, void *);
 +
-+static inline dma_addr_t
-+dma_map_single(struct device *hwdev, void *ptr, size_t size,
-+	       int direction)
-+{
-+	BUG_ON(!valid_dma_direction(direction));
-+	return dma_ops->map_single(hwdev, ptr, size, direction);
-+}
++#define clear_user_page(page, vaddr, pg)	clear_page(page)
++#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
 +
-+static inline void
-+dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
-+		 int direction)
-+{
-+	BUG_ON(!valid_dma_direction(direction));
-+	dma_ops->unmap_single(dev, addr, size, direction);
-+}
++#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
++#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 +
-+#define dma_map_page(dev,page,offset,size,dir) \
-+	dma_map_single((dev), page_address(page)+(offset), (size), (dir))
++/*
++ * These are used to make use of C type-checking..
++ */
++typedef struct { unsigned long pte; } pte_t;
++typedef struct { unsigned long pmd; } pmd_t;
++typedef struct { unsigned long pud; } pud_t;
++typedef struct { unsigned long pgd; } pgd_t;
++#define PTE_MASK	PHYSICAL_PAGE_MASK
 +
-+#define dma_unmap_page dma_unmap_single
++typedef struct { unsigned long pgprot; } pgprot_t;
 +
-+static inline void
-+dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-+			size_t size, int direction)
-+{
-+	BUG_ON(!valid_dma_direction(direction));
-+	if (dma_ops->sync_single_for_cpu)
-+		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
-+					     direction);
-+	flush_write_buffers();
-+}
++#define __pte_val(x) ((x).pte)
++#define pte_val(x) ((__pte_val(x) & (_PAGE_PRESENT|_PAGE_IO))	\
++		    == _PAGE_PRESENT ?				\
++		    pte_machine_to_phys(__pte_val(x)) :		\
++		    __pte_val(x))
 +
-+static inline void
-+dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
-+			   size_t size, int direction)
++#define __pmd_val(x) ((x).pmd)
++static inline unsigned long pmd_val(pmd_t x)
 +{
-+	BUG_ON(!valid_dma_direction(direction));
-+	if (dma_ops->sync_single_for_device)
-+		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
-+						direction);
-+	flush_write_buffers();
++	unsigned long ret = __pmd_val(x);
++#if CONFIG_XEN_COMPAT <= 0x030002
++	if (ret) ret = pte_machine_to_phys(ret) | _PAGE_PRESENT;
++#else
++	if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
++#endif
++	return ret;
 +}
 +
-+static inline void
-+dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-+			      unsigned long offset, size_t size, int direction)
++#define __pud_val(x) ((x).pud)
++static inline unsigned long pud_val(pud_t x)
 +{
-+	BUG_ON(!valid_dma_direction(direction));
-+	if (dma_ops->sync_single_range_for_cpu) {
-+		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction);
-+	}
-+
-+	flush_write_buffers();
++	unsigned long ret = __pud_val(x);
++	if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
++	return ret;
 +}
 +
-+static inline void
-+dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
-+				 unsigned long offset, size_t size, int direction)
++#define __pgd_val(x) ((x).pgd)
++static inline unsigned long pgd_val(pgd_t x)
 +{
-+	BUG_ON(!valid_dma_direction(direction));
-+	if (dma_ops->sync_single_range_for_device)
-+		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
-+						      offset, size, direction);
-+
-+	flush_write_buffers();
++	unsigned long ret = __pgd_val(x);
++	if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
++	return ret;
 +}
 +
-+static inline void
-+dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-+		    int nelems, int direction)
-+{
-+	BUG_ON(!valid_dma_direction(direction));
-+	if (dma_ops->sync_sg_for_cpu)
-+		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
-+	flush_write_buffers();
-+}
++#define pgprot_val(x)	((x).pgprot)
 +
-+static inline void
-+dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-+		       int nelems, int direction)
++static inline pte_t __pte(unsigned long x)
 +{
-+	BUG_ON(!valid_dma_direction(direction));
-+	if (dma_ops->sync_sg_for_device) {
-+		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
-+	}
-+
-+	flush_write_buffers();
++	if ((x & (_PAGE_PRESENT|_PAGE_IO)) == _PAGE_PRESENT)
++		x = pte_phys_to_machine(x);
++	return ((pte_t) { (x) });
 +}
 +
-+static inline int
-+dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
++static inline pmd_t __pmd(unsigned long x)
 +{
-+	BUG_ON(!valid_dma_direction(direction));
-+	return dma_ops->map_sg(hwdev, sg, nents, direction);
++	if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
++	return ((pmd_t) { (x) });
 +}
 +
-+static inline void
-+dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-+	     int direction)
++static inline pud_t __pud(unsigned long x)
 +{
-+	BUG_ON(!valid_dma_direction(direction));
-+	dma_ops->unmap_sg(hwdev, sg, nents, direction);
++	if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
++	return ((pud_t) { (x) });
 +}
 +
-+extern int dma_supported(struct device *hwdev, u64 mask);
-+
-+/* same for gart, swiotlb, and nommu */
-+static inline int dma_get_cache_alignment(void)
++static inline pgd_t __pgd(unsigned long x)
 +{
-+	return boot_cpu_data.x86_clflush_size;
++	if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
++	return ((pgd_t) { (x) });
 +}
 +
-+#define dma_is_consistent(h) 1
++#define __pgprot(x)	((pgprot_t) { (x) } )
 +
-+extern int dma_set_mask(struct device *dev, u64 mask);
++#define __PHYSICAL_START	((unsigned long)CONFIG_PHYSICAL_START)
++#define __START_KERNEL		(__START_KERNEL_map + __PHYSICAL_START)
++#define __START_KERNEL_map	0xffffffff80000000UL
++#define __PAGE_OFFSET           0xffff880000000000UL	
 +
-+static inline void
-+dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
-+{
-+	flush_write_buffers();
-+}
++#else
++#define __PHYSICAL_START	CONFIG_PHYSICAL_START
++#define __START_KERNEL		(__START_KERNEL_map + __PHYSICAL_START)
++#define __START_KERNEL_map	0xffffffff80000000
++#define __PAGE_OFFSET           0xffff880000000000
++#endif /* !__ASSEMBLY__ */
 +
-+extern struct device fallback_dev;
-+extern int panic_on_overflow;
++#if CONFIG_XEN_COMPAT <= 0x030002
++#undef LOAD_OFFSET
++#define LOAD_OFFSET		0
 +#endif
 +
-+#endif /* _X8664_DMA_MAPPING_H */
-+
-+#include <asm-i386/mach-xen/asm/dma-mapping.h>
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/e820.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/e820.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/e820.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/e820.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,66 @@
-+/*
-+ * structures and definitions for the int 15, ax=e820 memory map
-+ * scheme.
-+ *
-+ * In a nutshell, setup.S populates a scratch table in the
-+ * empty_zero_block that contains a list of usable address/size
-+ * duples.  setup.c, this information is transferred into the e820map,
-+ * and in init.c/numa.c, that new information is used to mark pages
-+ * reserved or not.
-+ */
-+#ifndef __E820_HEADER
-+#define __E820_HEADER
-+
-+#include <linux/mmzone.h>
-+
-+#define E820MAP	0x2d0		/* our map */
-+#define E820MAX	128		/* number of entries in E820MAP */
-+#define E820NR	0x1e8		/* # entries in E820MAP */
++/* to align the pointer to the (next) page boundary */
++#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
 +
-+#define E820_RAM	1
-+#define E820_RESERVED	2
-+#define E820_ACPI	3 /* usable as RAM once ACPI tables have been read */
-+#define E820_NVS	4
++#define KERNEL_TEXT_SIZE  (40UL*1024*1024)
++#define KERNEL_TEXT_START 0xffffffff80000000UL 
 +
-+#define HIGH_MEMORY	(1024*1024)
++#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)
 +
-+#define LOWMEMSIZE()	(0x9f000)
++/* Note: __pa(&symbol_visible_to_c) should always be replaced with __pa_symbol.
++   Otherwise you risk miscompilation. */ 
++#define __pa(x)			(((unsigned long)(x)>=__START_KERNEL_map)?(unsigned long)(x) - (unsigned long)__START_KERNEL_map:(unsigned long)(x) - PAGE_OFFSET)
++/* __pa_symbol should be used for C visible symbols.
++   This seems to be the official gcc blessed way to do such arithmetic. */ 
++#define __pa_symbol(x)		\
++	({unsigned long v;  \
++	  asm("" : "=r" (v) : "0" (x)); \
++	  __pa(v); })
 +
-+#ifndef __ASSEMBLY__
-+struct e820entry {
-+	u64 addr;	/* start of memory segment */
-+	u64 size;	/* size of memory segment */
-+	u32 type;	/* type of memory segment */
-+} __attribute__((packed));
++#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
++#define __boot_va(x)		__va(x)
++#define __boot_pa(x)		__pa(x)
++#ifdef CONFIG_FLATMEM
++#define pfn_valid(pfn)		((pfn) < end_pfn)
++#endif
 +
-+struct e820map {
-+    int nr_map;
-+	struct e820entry map[E820MAX];
-+};
++#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
++#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
++#define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
 +
-+extern unsigned long find_e820_area(unsigned long start, unsigned long end, 
-+				    unsigned size);
-+extern void add_memory_region(unsigned long start, unsigned long size, 
-+			      int type);
-+extern void setup_memory_region(void);
-+extern void contig_e820_setup(void); 
-+extern unsigned long e820_end_of_ram(void);
-+extern void e820_reserve_resources(struct e820entry *e820, int nr_map);
-+extern void e820_print_map(char *who);
-+extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type);
-+extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type);
++#define VM_DATA_DEFAULT_FLAGS \
++	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
++	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 +
-+extern void e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end);
-+extern void e820_setup_gap(struct e820entry *e820, int nr_map);
-+extern unsigned long e820_hole_size(unsigned long start_pfn,
-+				    unsigned long end_pfn);
++#define __HAVE_ARCH_GATE_AREA 1	
 +
-+extern void __init parse_memopt(char *p, char **end);
-+extern void __init parse_memmapopt(char *p, char **end);
++#include <asm-generic/memory_model.h>
++#include <asm-generic/page.h>
 +
-+extern struct e820map e820;
++#endif /* __KERNEL__ */
 +
-+extern unsigned ebda_addr, ebda_size;
-+#endif/*!__ASSEMBLY__*/
++#endif /* _X86_64_PAGE_H */
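The pte accessors in page.h above encode Xen's pseudo-physical memory model:
__pte() translates a pseudo-physical address to a machine address when
building a present, non-I/O entry, and pte_val() undoes the translation on
the way back out. A sketch of that round trip, built only from the
definitions above (the helper name is illustrative, and it assumes the frame
has a machine mapping):

/* Sketch: phys -> machine on construction, machine -> phys on read-back. */
static unsigned long pte_round_trip(unsigned long phys)
{
	pte_t pte = __pte(phys | _PAGE_PRESENT | _PAGE_RW);
	return pte_val(pte);	/* recovers phys plus the flag bits */
}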
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/pci.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/pci.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,168 @@
++#ifndef __x8664_PCI_H
++#define __x8664_PCI_H
 +
-+#endif/*__E820_HEADER*/
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/fixmap.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/fixmap.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/fixmap.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/fixmap.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,112 @@
-+/*
-+ * fixmap.h: compile-time virtual memory allocation
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ *
-+ * Copyright (C) 1998 Ingo Molnar
-+ */
++#include <asm/io.h>
 +
-+#ifndef _ASM_FIXMAP_H
-+#define _ASM_FIXMAP_H
++#ifdef __KERNEL__
 +
-+#include <linux/kernel.h>
-+#include <asm/apicdef.h>
-+#include <asm/page.h>
-+#include <asm/vsyscall.h>
-+#include <asm/vsyscall32.h>
-+#include <asm/acpi.h>
++#include <linux/mm.h> /* for struct page */
 +
-+/*
-+ * Here we define all the compile-time 'special' virtual
-+ * addresses. The point is to have a constant address at
-+ * compile time, but to set the physical address only
-+ * in the boot process.
-+ *
-+ * these 'compile-time allocated' memory buffers are
-+ * fixed-size 4k pages. (or larger if used with an increment
-+ * highger than 1) use fixmap_set(idx,phys) to associate
-+ * physical memory with fixmap indices.
-+ *
-+ * TLB entries of such buffers will not be flushed across
-+ * task switches.
-+ */
++/* Can be used to override the logic in pci_scan_bus for skipping
++   already-configured bus numbers - to be used for buggy BIOSes
++   or architectures with incomplete PCI setup by the loader */
 +
-+enum fixed_addresses {
-+	VSYSCALL_LAST_PAGE,
-+	VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
-+	VSYSCALL_HPET,
-+	FIX_HPET_BASE,
-+#ifdef CONFIG_X86_LOCAL_APIC
-+	FIX_APIC_BASE,	/* local (CPU) APIC) -- required for SMP or not */
-+#endif
-+#ifdef CONFIG_X86_IO_APIC
-+	FIX_IO_APIC_BASE_0,
-+	FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
-+#endif
-+#ifdef CONFIG_ACPI
-+	FIX_ACPI_BEGIN,
-+	FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
++#ifdef CONFIG_PCI
++extern unsigned int pcibios_assign_all_busses(void);
++#else
++#define pcibios_assign_all_busses()	0
 +#endif
-+	FIX_SHARED_INFO,
-+#define NR_FIX_ISAMAPS	256
-+	FIX_ISAMAP_END,
-+	FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
-+	__end_of_permanent_fixed_addresses,
-+	/* temporary boot-time mappings, used before ioremap() is functional */
-+#define NR_FIX_BTMAPS	16
-+	FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
-+	FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
-+	__end_of_fixed_addresses
-+};
-+
-+extern void __set_fixmap (enum fixed_addresses idx,
-+					unsigned long phys, pgprot_t flags);
 +
-+#define set_fixmap(idx, phys) \
-+		__set_fixmap(idx, phys, PAGE_KERNEL)
-+/*
-+ * Some hardware wants to get fixmapped without caching.
-+ */
-+#define set_fixmap_nocache(idx, phys) \
-+		__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
-+
-+#define clear_fixmap(idx) \
-+                __set_fixmap(idx, 0, __pgprot(0))
-+
-+#define FIXADDR_TOP	(VSYSCALL_END-PAGE_SIZE)
-+#define FIXADDR_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
-+#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)
++#include <asm/hypervisor.h>
++#define pcibios_scan_all_fns(a, b)	(!is_initial_xendomain())
 +
-+/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
-+#define FIXADDR_USER_START	((unsigned long)VSYSCALL32_VSYSCALL)
-+#define FIXADDR_USER_END	(FIXADDR_USER_START + PAGE_SIZE)
++extern unsigned long pci_mem_start;
++#define PCIBIOS_MIN_IO		0x1000
++#define PCIBIOS_MIN_MEM		(pci_mem_start)
 +
-+#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
++#define PCIBIOS_MIN_CARDBUS_IO	0x4000
 +
-+extern void __this_fixmap_does_not_exist(void);
++void pcibios_config_init(void);
++struct pci_bus * pcibios_scan_root(int bus);
++extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
++extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
 +
-+/*
-+ * 'index to address' translation. If anyone tries to use the idx
-+ * directly without translation, we catch the bug with a NULL-deference
-+ * kernel oops. Illegal ranges of incoming indices are caught too.
-+ */
-+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
-+{
-+	/*
-+	 * this branch gets completely eliminated after inlining,
-+	 * except when someone tries to use fixaddr indices in an
-+	 * illegal way. (such as mixing up address types or using
-+	 * out-of-range indices).
-+	 *
-+	 * If it doesn't get removed, the linker will complain
-+	 * loudly with a reasonably clear error message..
-+	 */
-+	if (idx >= __end_of_fixed_addresses)
-+		__this_fixmap_does_not_exist();
++void pcibios_set_master(struct pci_dev *dev);
++void pcibios_penalize_isa_irq(int irq, int active);
++struct irq_routing_table *pcibios_get_irq_routing_table(void);
++int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
 +
-+        return __fix_to_virt(idx);
-+}
++#include <linux/types.h>
++#include <linux/slab.h>
++#include <asm/scatterlist.h>
++#include <linux/string.h>
++#include <asm/page.h>
 +
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/floppy.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/floppy.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/floppy.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/floppy.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,206 @@
-+/*
-+ * Architecture specific parts of the Floppy driver
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ *
-+ * Copyright (C) 1995
++extern void pci_iommu_alloc(void);
++extern int iommu_setup(char *opt);
++
++/* The PCI address space does equal the physical memory
++ * address space.  The networking and block device layers use
++ * this boolean for bounce buffer decisions.
 + *
-+ * Modifications for Xen are Copyright (c) 2004, Keir Fraser.
++ * On AMD64 it mostly equals, but we set it to zero if a hardware
++ * IOMMU (gart) or software IOMMU (swiotlb) is available.
 + */
-+#ifndef __ASM_XEN_X86_64_FLOPPY_H
-+#define __ASM_XEN_X86_64_FLOPPY_H
++#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
 +
-+#include <linux/vmalloc.h>
++#if defined(CONFIG_IOMMU) || defined(CONFIG_CALGARY_IOMMU)
 +
 +/*
-+ * The DMA channel used by the floppy controller cannot access data at
-+ * addresses >= 16MB
-+ *
-+ * Went back to the 1MB limit, as some people had problems with the floppy
-+ * driver otherwise. It doesn't matter much for performance anyway, as most
-+ * floppy accesses go through the track buffer.
++ * x86-64 always supports DAC, but sometimes it is useful to force
++ * devices through the IOMMU to get automatic sg list merging.
++ * Optional right now.
 + */
-+#define _CROSS_64KB(a,s,vdma) \
-+(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
++extern int iommu_sac_force;
++#define pci_dac_dma_supported(pci_dev, mask)	(!iommu_sac_force)
 +
-+/* XEN: Hit DMA paths on the head. This trick from asm-m68k/floppy.h. */
-+#include <asm/dma.h>
-+#undef MAX_DMA_ADDRESS
-+#define MAX_DMA_ADDRESS 0
-+#define CROSS_64KB(a,s) (0)
-+
-+#define fd_inb(port)			inb_p(port)
-+#define fd_outb(value,port)		outb_p(value,port)
-+
-+#define fd_request_dma()        (0)
-+#define fd_free_dma()           ((void)0)
-+#define fd_enable_irq()         enable_irq(FLOPPY_IRQ)
-+#define fd_disable_irq()        disable_irq(FLOPPY_IRQ)
-+#define fd_free_irq()		free_irq(FLOPPY_IRQ, NULL)
-+#define fd_get_dma_residue()    vdma_get_dma_residue(FLOPPY_DMA)
-+/*
-+ * Do not use vmalloc/vfree: floppy_release_irq_and_dma() gets called from
-+ * softirq context via motor_off_callback. A generic bug we happen to trigger.
-+ */
-+#define fd_dma_mem_alloc(size)	__get_free_pages(GFP_KERNEL|__GFP_NORETRY, get_order(size))
-+#define fd_dma_mem_free(addr, size) free_pages(addr, get_order(size))
-+#define fd_dma_setup(addr, size, mode, io) vdma_dma_setup(addr, size, mode, io)
-+
-+static int virtual_dma_count;
-+static int virtual_dma_residue;
-+static char *virtual_dma_addr;
-+static int virtual_dma_mode;
-+static int doing_pdma;
-+
-+static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
-+{
-+	register unsigned char st;
-+
-+#undef TRACE_FLPY_INT
-+
-+#ifdef TRACE_FLPY_INT
-+	static int calls=0;
-+	static int bytes=0;
-+	static int dma_wait=0;
-+#endif
-+	if (!doing_pdma)
-+		return floppy_interrupt(irq, dev_id, regs);
-+
-+#ifdef TRACE_FLPY_INT
-+	if(!calls)
-+		bytes = virtual_dma_count;
-+#endif
++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
++	dma_addr_t ADDR_NAME;
++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
++	__u32 LEN_NAME;
++#define pci_unmap_addr(PTR, ADDR_NAME)			\
++	((PTR)->ADDR_NAME)
++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)		\
++	(((PTR)->ADDR_NAME) = (VAL))
++#define pci_unmap_len(PTR, LEN_NAME)			\
++	((PTR)->LEN_NAME)
++#define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
++	(((PTR)->LEN_NAME) = (VAL))
 +
-+	{
-+		register int lcount;
-+		register char *lptr;
++#elif defined(CONFIG_SWIOTLB)
 +
-+		st = 1;
-+		for(lcount=virtual_dma_count, lptr=virtual_dma_addr; 
-+		    lcount; lcount--, lptr++) {
-+			st=inb(virtual_dma_port+4) & 0xa0 ;
-+			if(st != 0xa0) 
-+				break;
-+			if(virtual_dma_mode)
-+				outb_p(*lptr, virtual_dma_port+5);
-+			else
-+				*lptr = inb_p(virtual_dma_port+5);
-+		}
-+		virtual_dma_count = lcount;
-+		virtual_dma_addr = lptr;
-+		st = inb(virtual_dma_port+4);
-+	}
++#define pci_dac_dma_supported(pci_dev, mask)    1
++
++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
++	dma_addr_t ADDR_NAME;
++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
++	__u32 LEN_NAME;
++#define pci_unmap_addr(PTR, ADDR_NAME)			\
++	((PTR)->ADDR_NAME)
++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)		\
++	(((PTR)->ADDR_NAME) = (VAL))
++#define pci_unmap_len(PTR, LEN_NAME)			\
++	((PTR)->LEN_NAME)
++#define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
++	(((PTR)->LEN_NAME) = (VAL))
++
++#else
++/* No IOMMU */
++
++#define pci_dac_dma_supported(pci_dev, mask)    1
++
++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
++#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
++#define pci_unmap_len(PTR, LEN_NAME)		(0)
++#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
 +
-+#ifdef TRACE_FLPY_INT
-+	calls++;
-+#endif
-+	if(st == 0x20)
-+		return IRQ_HANDLED;
-+	if(!(st & 0x20)) {
-+		virtual_dma_residue += virtual_dma_count;
-+		virtual_dma_count=0;
-+#ifdef TRACE_FLPY_INT
-+		printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n", 
-+		       virtual_dma_count, virtual_dma_residue, calls, bytes,
-+		       dma_wait);
-+		calls = 0;
-+		dma_wait=0;
-+#endif
-+		doing_pdma = 0;
-+		floppy_interrupt(irq, dev_id, regs);
-+		return IRQ_HANDLED;
-+	}
-+#ifdef TRACE_FLPY_INT
-+	if(!virtual_dma_count)
-+		dma_wait++;
 +#endif
-+	return IRQ_HANDLED;
-+}
 +
-+static void fd_disable_dma(void)
++#include <asm-generic/pci-dma-compat.h>
++
++static inline dma64_addr_t
++pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
 +{
-+	doing_pdma = 0;
-+	virtual_dma_residue += virtual_dma_count;
-+	virtual_dma_count=0;
++	return ((dma64_addr_t) page_to_phys(page) +
++		(dma64_addr_t) offset);
 +}
 +
-+static int vdma_get_dma_residue(unsigned int dummy)
++static inline struct page *
++pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
 +{
-+	return virtual_dma_count + virtual_dma_residue;
++	return virt_to_page(__va(dma_addr)); 	
 +}
 +
-+
-+static int fd_request_irq(void)
++static inline unsigned long
++pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
 +{
-+	return request_irq(FLOPPY_IRQ, floppy_hardint,
-+			   IRQF_DISABLED, "floppy", NULL);
++	return (dma_addr & ~PAGE_MASK);
 +}
 +
-+#if 0
-+static unsigned long vdma_mem_alloc(unsigned long size)
++static inline void
++pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
 +{
-+	return (unsigned long) vmalloc(size);
-+
 +}
 +
-+static void vdma_mem_free(unsigned long addr, unsigned long size)
++static inline void
++pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
 +{
-+	vfree((void *)addr);
++	flush_write_buffers();
 +}
-+#endif
 +
-+static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
++#ifdef CONFIG_PCI
++static inline void pci_dma_burst_advice(struct pci_dev *pdev,
++					enum pci_dma_burst_strategy *strat,
++					unsigned long *strategy_parameter)
 +{
-+	doing_pdma = 1;
-+	virtual_dma_port = io;
-+	virtual_dma_mode = (mode  == DMA_MODE_WRITE);
-+	virtual_dma_addr = addr;
-+	virtual_dma_count = size;
-+	virtual_dma_residue = 0;
-+	return 0;
++	*strat = PCI_DMA_BURST_INFINITY;
++	*strategy_parameter = ~0UL;
 +}
++#endif
 +
-+/* XEN: This trick to force 'virtual DMA' is from include/asm-m68k/floppy.h. */
-+#define FDC1 xen_floppy_init()
-+static int FDC2 = -1;
++#define HAVE_PCI_MMAP
++extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
++			       enum pci_mmap_state mmap_state, int write_combine);
 +
-+static int xen_floppy_init(void)
++static inline void pcibios_add_platform_entries(struct pci_dev *dev)
 +{
-+	use_virtual_dma = 1;
-+	can_use_virtual_dma = 1;
-+	return 0x3f0;
 +}
 +
-+/*
-+ * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
-+ * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
-+ * coincides with another rtc CMOS user.		Paul G.
-+ */
-+#define FLOPPY0_TYPE	({				\
-+	unsigned long flags;				\
-+	unsigned char val;				\
-+	spin_lock_irqsave(&rtc_lock, flags);		\
-+	val = (CMOS_READ(0x10) >> 4) & 15;		\
-+	spin_unlock_irqrestore(&rtc_lock, flags);	\
-+	val;						\
-+})
-+
-+#define FLOPPY1_TYPE	({				\
-+	unsigned long flags;				\
-+	unsigned char val;				\
-+	spin_lock_irqsave(&rtc_lock, flags);		\
-+	val = CMOS_READ(0x10) & 15;			\
-+	spin_unlock_irqrestore(&rtc_lock, flags);	\
-+	val;						\
-+})
-+
-+#define N_FDC 2
-+#define N_DRIVE 8
-+
-+#define FLOPPY_MOTOR_MASK 0xf0
-+
-+#define EXTRA_FLOPPY_PARAMS
-+
-+#endif /* __ASM_XEN_X86_64_FLOPPY_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/hw_irq.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/hw_irq.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/hw_irq.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/hw_irq.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,136 @@
-+#ifndef _ASM_HW_IRQ_H
-+#define _ASM_HW_IRQ_H
-+
-+/*
-+ *	linux/include/asm/hw_irq.h
-+ *
-+ *	(C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
-+ *
-+ *	moved some of the old arch/i386/kernel/irq.h to here. VY
-+ *
-+ *	IRQ/IPI changes taken from work by Thomas Radke
-+ *	<tomsoft at informatik.tu-chemnitz.de>
-+ *
-+ *	hacked by Andi Kleen for x86-64.
-+ */
-+
-+#ifndef __ASSEMBLY__
-+#include <asm/atomic.h>
-+#include <asm/irq.h>
-+#include <linux/profile.h>
-+#include <linux/smp.h>
++#endif /* __KERNEL__ */
 +
-+struct hw_interrupt_type;
++/* generic pci stuff */
++#ifdef CONFIG_PCI
++#include <asm-generic/pci.h>
 +#endif
 +
-+#define NMI_VECTOR		0x02
-+/*
-+ * IDT vectors usable for external interrupt sources start
-+ * at 0x20:
-+ */
-+#define FIRST_EXTERNAL_VECTOR	0x20
++#endif /* __x8664_PCI_H */
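The DECLARE_PCI_UNMAP_* family above gives drivers per-mapping bookkeeping
that costs nothing when no IOMMU or swiotlb is configured: in the no-IOMMU
branch the fields and accessors compile away entirely. The usual driver-side
pattern, sketched with an illustrative ring-entry struct (pci_map_single()
and pci_unmap_single() come from the pci-dma-compat.h included above):

struct example_rx_entry {
	void *buf;
	DECLARE_PCI_UNMAP_ADDR(mapping)		/* empty without an IOMMU */
	DECLARE_PCI_UNMAP_LEN(len)
};

static void example_map(struct pci_dev *pdev, struct example_rx_entry *e,
			size_t size)
{
	dma_addr_t dma = pci_map_single(pdev, e->buf, size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(e, mapping, dma);
	pci_unmap_len_set(e, len, size);
}

static void example_unmap(struct pci_dev *pdev, struct example_rx_entry *e)
{
	pci_unmap_single(pdev, pci_unmap_addr(e, mapping),
			 pci_unmap_len(e, len), PCI_DMA_FROMDEVICE);
}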
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/pgalloc.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/pgalloc.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,204 @@
++#ifndef _X86_64_PGALLOC_H
++#define _X86_64_PGALLOC_H
 +
-+#define IA32_SYSCALL_VECTOR	0x80
++#include <asm/fixmap.h>
++#include <asm/pda.h>
++#include <linux/threads.h>
++#include <linux/mm.h>
++#include <asm/io.h>		/* for phys_to_virt and page_to_pseudophys */
 +
++#include <xen/features.h>
++void make_page_readonly(void *va, unsigned int feature);
++void make_page_writable(void *va, unsigned int feature);
++void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
++void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
 +
-+/*
-+ * Vectors 0x20-0x2f are used for ISA interrupts.
-+ */
++#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
 +
-+/*
-+ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
-+ *
-+ *  some of the following vectors are 'rare', they are merged
-+ *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
-+ *  TLB, reschedule and local APIC vectors are performance-critical.
-+ */
-+#ifndef CONFIG_XEN
-+#define SPURIOUS_APIC_VECTOR	0xff
-+#define ERROR_APIC_VECTOR	0xfe
-+#define RESCHEDULE_VECTOR	0xfd
-+#define CALL_FUNCTION_VECTOR	0xfc
-+/* fb free - please don't readd KDB here because it's useless
-+   (hint - think what a NMI bit does to a vector) */
-+#define THERMAL_APIC_VECTOR	0xfa
-+#define THRESHOLD_APIC_VECTOR   0xf9
-+/* f8 free */
-+#define INVALIDATE_TLB_VECTOR_END	0xf7
-+#define INVALIDATE_TLB_VECTOR_START	0xf0	/* f0-f7 used for TLB flush */
++static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
++{
++	set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
++}
 +
-+#define NUM_INVALIDATE_TLB_VECTORS	8
-+#endif
++static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
++{
++	if (unlikely((mm)->context.pinned)) {
++		BUG_ON(HYPERVISOR_update_va_mapping(
++			       (unsigned long)__va(page_to_pfn(pte) << PAGE_SHIFT),
++			       pfn_pte(page_to_pfn(pte), PAGE_KERNEL_RO), 0));
++		set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
++	} else {
++		*(pmd) = __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT));
++	}
++}
 +
-+/*
-+ * Local APIC timer IRQ vector is on a different priority level,
-+ * to work around the 'lost local interrupt if more than 2 IRQ
-+ * sources per level' errata.
-+ */
-+#define LOCAL_TIMER_VECTOR	0xef
++static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++	if (unlikely((mm)->context.pinned)) {
++		BUG_ON(HYPERVISOR_update_va_mapping(
++			       (unsigned long)pmd,
++			       pfn_pte(virt_to_phys(pmd)>>PAGE_SHIFT, 
++				       PAGE_KERNEL_RO), 0));
++		set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
++	} else {
++		*(pud) =  __pud(_PAGE_TABLE | __pa(pmd));
++	}
++}
 +
 +/*
-+ * First APIC vector available to drivers: (vectors 0x30-0xee)
-+ * we start at 0x31 to spread out vectors evenly between priority
-+ * levels. (0x80 is the syscall vector)
++ * We need to use the batch mode here, but pgd_populate() won't
++ * be called frequently.
 + */
-+#define FIRST_DEVICE_VECTOR	0x31
-+#define FIRST_SYSTEM_VECTOR	0xef   /* duplicated in irq.h */
++static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
++{
++	if (unlikely((mm)->context.pinned)) {
++		BUG_ON(HYPERVISOR_update_va_mapping(
++			       (unsigned long)pud,
++			       pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT, 
++				       PAGE_KERNEL_RO), 0));
++		set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
++		set_pgd(__user_pgd(pgd), __pgd(_PAGE_TABLE | __pa(pud)));
++	} else {
++		*(pgd) =  __pgd(_PAGE_TABLE | __pa(pud));
++		*(__user_pgd(pgd)) = *(pgd);
++	}
++}
 +
++extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
++extern void pte_free(struct page *pte);
 +
-+#ifndef __ASSEMBLY__
-+extern u8 irq_vector[NR_IRQ_VECTORS];
-+#define IO_APIC_VECTOR(irq)	(irq_vector[irq])
-+#define AUTO_ASSIGN		-1
++static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
++{
++	struct page *pg;
 +
-+/*
-+ * Various low-level irq details needed by irq.c, process.c,
-+ * time.c, io_apic.c and smp.c
-+ *
-+ * Interrupt entry/exit code at both C and assembly level
-+ */
++	pg = pte_alloc_one(mm, addr);
++	return pg ? page_address(pg) : NULL;
++}
 +
-+extern void disable_8259A_irq(unsigned int irq);
-+extern void enable_8259A_irq(unsigned int irq);
-+extern int i8259A_irq_pending(unsigned int irq);
-+extern void make_8259A_irq(unsigned int irq);
-+extern void init_8259A(int aeoi);
-+extern void FASTCALL(send_IPI_self(int vector));
-+extern void init_VISWS_APIC_irqs(void);
-+extern void setup_IO_APIC(void);
-+extern void disable_IO_APIC(void);
-+extern void print_IO_APIC(void);
-+extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
-+extern void send_IPI(int dest, int vector);
-+extern void setup_ioapic_dest(void);
++static inline void pmd_free(pmd_t *pmd)
++{
++	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
++	pte_free(virt_to_page(pmd));
++}
 +
-+extern unsigned long io_apic_irqs;
++static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
++{
++	struct page *pg;
 +
-+extern atomic_t irq_err_count;
-+extern atomic_t irq_mis_count;
++	pg = pte_alloc_one(mm, addr);
++	return pg ? page_address(pg) : NULL;
++}
 +
-+#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
++static inline void pud_free(pud_t *pud)
++{
++	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
++	pte_free(virt_to_page(pud));
++}
 +
-+#define __STR(x) #x
-+#define STR(x) __STR(x)
++static inline void pgd_list_add(pgd_t *pgd)
++{
++	struct page *page = virt_to_page(pgd);
 +
-+#include <asm/ptrace.h>
++	spin_lock(&pgd_lock);
++	page->index = (pgoff_t)pgd_list;
++	if (pgd_list)
++		pgd_list->private = (unsigned long)&page->index;
++	pgd_list = page;
++	page->private = (unsigned long)&pgd_list;
++	spin_unlock(&pgd_lock);
++}
 +
-+#define IRQ_NAME2(nr) nr##_interrupt(void)
-+#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
++static inline void pgd_list_del(pgd_t *pgd)
++{
++	struct page *next, **pprev, *page = virt_to_page(pgd);
 +
-+/*
-+ *	SMP has a few special interrupts for IPI messages
-+ */
++	spin_lock(&pgd_lock);
++	next = (struct page *)page->index;
++	pprev = (struct page **)page->private;
++	*pprev = next;
++	if (next)
++		next->private = (unsigned long)pprev;
++	spin_unlock(&pgd_lock);
++}
 +
-+#define BUILD_IRQ(nr) \
-+asmlinkage void IRQ_NAME(nr); \
-+__asm__( \
-+"\n.p2align\n" \
-+"IRQ" #nr "_interrupt:\n\t" \
-+	"push $~(" #nr ") ; " \
-+	"jmp common_interrupt");
++static inline pgd_t *pgd_alloc(struct mm_struct *mm)
++{
++	/*
++	 * We allocate two contiguous pages for kernel and user.
++	 */
++	unsigned boundary;
++	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 1);
++	if (!pgd)
++		return NULL;
++	pgd_list_add(pgd);
++	/*
++	 * Copy kernel pointers in from init.
++	 * Could keep a freelist or slab cache of those because the kernel
++	 * part never changes.
++	 */
++	boundary = pgd_index(__PAGE_OFFSET);
++	memset(pgd, 0, boundary * sizeof(pgd_t));
++	memcpy(pgd + boundary,
++	       init_level4_pgt + boundary,
++	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
 +
-+#define platform_legacy_irq(irq)	((irq) < 16)
++	memset(__user_pgd(pgd), 0, PAGE_SIZE); /* clean up user pgd */
++	/*
++	 * Set level3_user_pgt for vsyscall area
++	 */
++	__user_pgd(pgd)[pgd_index(VSYSCALL_START)] =
++		__pgd(__pa_symbol(level3_user_pgt) | _PAGE_TABLE);
++	return pgd;
++}
 +
-+#endif
++static inline void pgd_free(pgd_t *pgd)
++{
++	pte_t *ptep = virt_to_ptep(pgd);
 +
-+#endif /* _ASM_HW_IRQ_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/hypercall.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/hypercall.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/hypercall.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/hypercall.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,406 @@
-+/******************************************************************************
-+ * hypercall.h
-+ * 
-+ * Linux-specific hypervisor handling.
-+ * 
-+ * Copyright (c) 2002-2004, K A Fraser
-+ * 
-+ * 64-bit updates:
-+ *   Benjamin Liu <benjamin.liu at intel.com>
-+ *   Jun Nakajima <jun.nakajima at intel.com>
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
++	if (!pte_write(*ptep)) {
++		xen_pgd_unpin(__pa(pgd));
++		BUG_ON(HYPERVISOR_update_va_mapping(
++			       (unsigned long)pgd,
++			       pfn_pte(virt_to_phys(pgd)>>PAGE_SHIFT, PAGE_KERNEL),
++			       0));
++	}
 +
-+#ifndef __HYPERCALL_H__
-+#define __HYPERCALL_H__
++	ptep = virt_to_ptep(__user_pgd(pgd));
 +
-+#include <linux/string.h> /* memcpy() */
++	if (!pte_write(*ptep)) {
++		xen_pgd_unpin(__pa(__user_pgd(pgd)));
++		BUG_ON(HYPERVISOR_update_va_mapping(
++			       (unsigned long)__user_pgd(pgd),
++			       pfn_pte(virt_to_phys(__user_pgd(pgd))>>PAGE_SHIFT, 
++				       PAGE_KERNEL),
++			       0));
++	}
 +
-+#ifndef __HYPERVISOR_H__
-+# error "please don't include this file directly"
-+#endif
++	pgd_list_del(pgd);
++	free_pages((unsigned long)pgd, 1);
++}
 +
-+#define __STR(x) #x
-+#define STR(x) __STR(x)
++static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
++{
++	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
++	if (pte)
++		make_page_readonly(pte, XENFEAT_writable_page_tables);
 +
-+#ifdef CONFIG_XEN
-+#define HYPERCALL_STR(name)					\
-+	"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"
-+#else
-+#define HYPERCALL_STR(name)					\
-+	"mov hypercall_stubs,%%rax; "				\
-+	"add $("STR(__HYPERVISOR_##name)" * 32),%%rax; "	\
-+	"call *%%rax"
-+#endif
++	return pte;
++}
 +
-+#define _hypercall0(type, name)			\
-+({						\
-+	long __res;				\
-+	asm volatile (				\
-+		HYPERCALL_STR(name)		\
-+		: "=a" (__res)			\
-+		:				\
-+		: "memory" );			\
-+	(type)__res;				\
-+})
++/* Should really implement gc for free page table pages. This could be
++   done with a reference count in struct page. */
 +
-+#define _hypercall1(type, name, a1)				\
-+({								\
-+	long __res, __ign1;					\
-+	asm volatile (						\
-+		HYPERCALL_STR(name)				\
-+		: "=a" (__res), "=D" (__ign1)			\
-+		: "1" ((long)(a1))				\
-+		: "memory" );					\
-+	(type)__res;						\
-+})
++static inline void pte_free_kernel(pte_t *pte)
++{
++	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
++	make_page_writable(pte, XENFEAT_writable_page_tables);
++	free_page((unsigned long)pte); 
++}
 +
-+#define _hypercall2(type, name, a1, a2)				\
-+({								\
-+	long __res, __ign1, __ign2;				\
-+	asm volatile (						\
-+		HYPERCALL_STR(name)				\
-+		: "=a" (__res), "=D" (__ign1), "=S" (__ign2)	\
-+		: "1" ((long)(a1)), "2" ((long)(a2))		\
-+		: "memory" );					\
-+	(type)__res;						\
-+})
++#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
++#define __pmd_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
++#define __pud_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
 +
-+#define _hypercall3(type, name, a1, a2, a3)			\
-+({								\
-+	long __res, __ign1, __ign2, __ign3;			\
-+	asm volatile (						\
-+		HYPERCALL_STR(name)				\
-+		: "=a" (__res), "=D" (__ign1), "=S" (__ign2), 	\
-+		"=d" (__ign3)					\
-+		: "1" ((long)(a1)), "2" ((long)(a2)),		\
-+		"3" ((long)(a3))				\
-+		: "memory" );					\
-+	(type)__res;						\
-+})
++#endif /* _X86_64_PGALLOC_H */
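The branches in pmd_populate(), pud_populate() and pgd_populate() above all
follow Xen's page-table pinning protocol: once an mm's tables are pinned, a
page about to be linked in must first be remapped read-only so the hypervisor
can validate and track it, and the link itself goes through the
hypercall-backed set_*() path rather than a direct store. The pattern,
distilled into a schematic helper (example_link_pmd() is hypothetical; it
just restates the pmd_populate() logic above):

static void example_link_pmd(struct mm_struct *mm, pmd_t *pmd, unsigned long pfn)
{
	unsigned long va = (unsigned long)__va(pfn << PAGE_SHIFT);

	if (mm->context.pinned) {
		/* 1. Demote the new table page to read-only in the kernel map. */
		BUG_ON(HYPERVISOR_update_va_mapping(va,
			pfn_pte(pfn, PAGE_KERNEL_RO), 0));
		/* 2. Install the entry via the validated, hypercall-backed path. */
		set_pmd(pmd, __pmd(_PAGE_TABLE | (pfn << PAGE_SHIFT)));
	} else {
		/* Unpinned tables are ordinary memory: a direct store suffices. */
		*pmd = __pmd(_PAGE_TABLE | (pfn << PAGE_SHIFT));
	}
}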
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/pgtable.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/pgtable.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,583 @@
++#ifndef _X86_64_PGTABLE_H
++#define _X86_64_PGTABLE_H
++
++/*
++ * This file contains the functions and defines necessary to modify and use
++ * the x86-64 page table tree.
++ */
++#include <asm/processor.h>
++#include <asm/fixmap.h>
++#include <asm/bitops.h>
++#include <linux/threads.h>
++#include <linux/sched.h>
++#include <asm/pda.h>
++#ifdef CONFIG_XEN
++#include <asm/hypervisor.h>
 +
-+#define _hypercall4(type, name, a1, a2, a3, a4)			\
-+({								\
-+	long __res, __ign1, __ign2, __ign3;			\
-+	asm volatile (						\
-+		"movq %7,%%r10; "				\
-+		HYPERCALL_STR(name)				\
-+		: "=a" (__res), "=D" (__ign1), "=S" (__ign2),	\
-+		"=d" (__ign3)					\
-+		: "1" ((long)(a1)), "2" ((long)(a2)),		\
-+		"3" ((long)(a3)), "g" ((long)(a4))		\
-+		: "memory", "r10" );				\
-+	(type)__res;						\
-+})
++extern pud_t level3_user_pgt[512];
 +
-+#define _hypercall5(type, name, a1, a2, a3, a4, a5)		\
-+({								\
-+	long __res, __ign1, __ign2, __ign3;			\
-+	asm volatile (						\
-+		"movq %7,%%r10; movq %8,%%r8; "			\
-+		HYPERCALL_STR(name)				\
-+		: "=a" (__res), "=D" (__ign1), "=S" (__ign2),	\
-+		"=d" (__ign3)					\
-+		: "1" ((long)(a1)), "2" ((long)(a2)),		\
-+		"3" ((long)(a3)), "g" ((long)(a4)),		\
-+		"g" ((long)(a5))				\
-+		: "memory", "r10", "r8" );			\
-+	(type)__res;						\
++extern void xen_init_pt(void);
++
++extern pte_t *lookup_address(unsigned long address);
++
++#define virt_to_ptep(va)						\
++({									\
++	pte_t *__ptep = lookup_address((unsigned long)(va));		\
++	BUG_ON(!__ptep || !pte_present(*__ptep));			\
++	__ptep;								\
 +})
 +
-+static inline int
-+HYPERVISOR_set_trap_table(
-+	trap_info_t *table)
-+{
-+	return _hypercall1(int, set_trap_table, table);
-+}
++#define arbitrary_virt_to_machine(va)					\
++	(((maddr_t)pte_mfn(*virt_to_ptep(va)) << PAGE_SHIFT)		\
++	 | ((unsigned long)(va) & (PAGE_SIZE - 1)))
++#endif
 +
-+static inline int
-+HYPERVISOR_mmu_update(
-+	mmu_update_t *req, int count, int *success_count, domid_t domid)
-+{
-+	return _hypercall4(int, mmu_update, req, count, success_count, domid);
-+}
++extern pud_t level3_kernel_pgt[512];
++extern pud_t level3_physmem_pgt[512];
++extern pud_t level3_ident_pgt[512];
++extern pmd_t level2_kernel_pgt[512];
++extern pgd_t init_level4_pgt[];
++extern pgd_t boot_level4_pgt[];
++extern unsigned long __supported_pte_mask;
 +
-+static inline int
-+HYPERVISOR_mmuext_op(
-+	struct mmuext_op *op, int count, int *success_count, domid_t domid)
-+{
-+	return _hypercall4(int, mmuext_op, op, count, success_count, domid);
-+}
++#define swapper_pg_dir init_level4_pgt
 +
-+static inline int
-+HYPERVISOR_set_gdt(
-+	unsigned long *frame_list, int entries)
-+{
-+	return _hypercall2(int, set_gdt, frame_list, entries);
-+}
++extern int nonx_setup(char *str);
++extern void paging_init(void);
++extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
 +
-+static inline int
-+HYPERVISOR_stack_switch(
-+	unsigned long ss, unsigned long esp)
-+{
-+	return _hypercall2(int, stack_switch, ss, esp);
-+}
++extern unsigned long pgkern_mask;
 +
-+static inline int
-+HYPERVISOR_set_callbacks(
-+	unsigned long event_address, unsigned long failsafe_address, 
-+	unsigned long syscall_address)
-+{
-+	return _hypercall3(int, set_callbacks,
-+			   event_address, failsafe_address, syscall_address);
-+}
++/*
++ * ZERO_PAGE is a global shared page that is always zero: used
++ * for zero-mapped memory areas etc.
++ */
++extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
++#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 +
-+static inline int
-+HYPERVISOR_fpu_taskswitch(
-+	int set)
-+{
-+	return _hypercall1(int, fpu_taskswitch, set);
-+}
++/*
++ * PGDIR_SHIFT determines what a top-level page table entry can map
++ */
++#define PGDIR_SHIFT	39
++#define PTRS_PER_PGD	512
 +
-+static inline int
-+HYPERVISOR_sched_op_compat(
-+	int cmd, unsigned long arg)
-+{
-+	return _hypercall2(int, sched_op_compat, cmd, arg);
-+}
++/*
++ * 3rd level page
++ */
++#define PUD_SHIFT	30
++#define PTRS_PER_PUD	512
 +
-+static inline int
-+HYPERVISOR_sched_op(
-+	int cmd, void *arg)
-+{
-+	return _hypercall2(int, sched_op, cmd, arg);
-+}
++/*
++ * PMD_SHIFT determines the size of the area a middle-level
++ * page table can map
++ */
++#define PMD_SHIFT	21
++#define PTRS_PER_PMD	512
 +
-+static inline long
-+HYPERVISOR_set_timer_op(
-+	u64 timeout)
-+{
-+	return _hypercall1(long, set_timer_op, timeout);
-+}
++/*
++ * entries per page directory level
++ */
++#define PTRS_PER_PTE	512
 +
-+static inline int
-+HYPERVISOR_platform_op(
-+	struct xen_platform_op *platform_op)
-+{
-+	platform_op->interface_version = XENPF_INTERFACE_VERSION;
-+	return _hypercall1(int, platform_op, platform_op);
-+}
++#define pte_ERROR(e) \
++	printk("%s:%d: bad pte %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
++	       &(e), __pte_val(e), pte_pfn(e))
++#define pmd_ERROR(e) \
++	printk("%s:%d: bad pmd %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
++	       &(e), __pmd_val(e), pmd_pfn(e))
++#define pud_ERROR(e) \
++	printk("%s:%d: bad pud %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
++	       &(e), __pud_val(e), (pud_val(e) & __PHYSICAL_MASK) >> PAGE_SHIFT)
++#define pgd_ERROR(e) \
++	printk("%s:%d: bad pgd %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
++	       &(e), __pgd_val(e), (pgd_val(e) & __PHYSICAL_MASK) >> PAGE_SHIFT)
 +
-+static inline int
-+HYPERVISOR_set_debugreg(
-+	int reg, unsigned long value)
-+{
-+	return _hypercall2(int, set_debugreg, reg, value);
-+}
++#define pgd_none(x)	(!__pgd_val(x))
++#define pud_none(x)	(!__pud_val(x))
 +
-+static inline unsigned long
-+HYPERVISOR_get_debugreg(
-+	int reg)
++static inline void set_pte(pte_t *dst, pte_t val)
 +{
-+	return _hypercall1(unsigned long, get_debugreg, reg);
++	*dst = val;
 +}
 +
-+static inline int
-+HYPERVISOR_update_descriptor(
-+	unsigned long ma, unsigned long word)
-+{
-+	return _hypercall2(int, update_descriptor, ma, word);
-+}
++#define set_pmd(pmdptr, pmdval) xen_l2_entry_update(pmdptr, (pmdval))
++#define set_pud(pudptr, pudval) xen_l3_entry_update(pudptr, (pudval))
++#define set_pgd(pgdptr, pgdval) xen_l4_entry_update(pgdptr, (pgdval))
 +
-+static inline int
-+HYPERVISOR_memory_op(
-+	unsigned int cmd, void *arg)
++static inline void pud_clear (pud_t * pud)
 +{
-+	return _hypercall2(int, memory_op, cmd, arg);
++	set_pud(pud, __pud(0));
 +}
 +
-+static inline int
-+HYPERVISOR_multicall(
-+	multicall_entry_t *call_list, int nr_calls)
-+{
-+	return _hypercall2(int, multicall, call_list, nr_calls);
-+}
++#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
 +
-+static inline int
-+HYPERVISOR_update_va_mapping(
-+	unsigned long va, pte_t new_val, unsigned long flags)
++static inline void pgd_clear (pgd_t * pgd)
 +{
-+	return _hypercall3(int, update_va_mapping, va, new_val.pte, flags);
++        set_pgd(pgd, __pgd(0));
++        set_pgd(__user_pgd(pgd), __pgd(0));
 +}
 +
-+static inline int
-+HYPERVISOR_event_channel_op(
-+	int cmd, void *arg)
-+{
-+	int rc = _hypercall2(int, event_channel_op, cmd, arg);
++#define pud_page(pud) \
++    ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
 +
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+	if (unlikely(rc == -ENOSYS)) {
-+		struct evtchn_op op;
-+		op.cmd = cmd;
-+		memcpy(&op.u, arg, sizeof(op.u));
-+		rc = _hypercall1(int, event_channel_op_compat, &op);
-+		memcpy(arg, &op.u, sizeof(op.u));
-+	}
-+#endif
++#define pte_same(a, b)		((a).pte == (b).pte)
 +
-+	return rc;
-+}
++#define pte_pgprot(a)	(__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))
 +
-+static inline int
-+HYPERVISOR_acm_op(
-+	int cmd, void *arg)
-+{
-+	return _hypercall2(int, acm_op, cmd, arg);
-+}
++#define PMD_SIZE	(1UL << PMD_SHIFT)
++#define PMD_MASK	(~(PMD_SIZE-1))
++#define PUD_SIZE	(1UL << PUD_SHIFT)
++#define PUD_MASK	(~(PUD_SIZE-1))
++#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
++#define PGDIR_MASK	(~(PGDIR_SIZE-1))
 +
-+static inline int
-+HYPERVISOR_xen_version(
-+	int cmd, void *arg)
-+{
-+	return _hypercall2(int, xen_version, cmd, arg);
-+}
++#define USER_PTRS_PER_PGD	((TASK_SIZE-1)/PGDIR_SIZE+1)
++#define FIRST_USER_ADDRESS	0
 +
-+static inline int
-+HYPERVISOR_console_io(
-+	int cmd, int count, char *str)
-+{
-+	return _hypercall3(int, console_io, cmd, count, str);
-+}
++#ifndef __ASSEMBLY__
++#define MAXMEM		 0x3fffffffffffUL
++#define VMALLOC_START    0xffffc20000000000UL
++#define VMALLOC_END      0xffffe1ffffffffffUL
++#define MODULES_VADDR    0xffffffff88000000UL
++#define MODULES_END      0xfffffffffff00000UL
++#define MODULES_LEN   (MODULES_END - MODULES_VADDR)
 +
-+static inline int
-+HYPERVISOR_physdev_op(
-+	int cmd, void *arg)
-+{
-+	int rc = _hypercall2(int, physdev_op, cmd, arg);
++#define _PAGE_BIT_PRESENT	0
++#define _PAGE_BIT_RW		1
++#define _PAGE_BIT_USER		2
++#define _PAGE_BIT_PWT		3
++#define _PAGE_BIT_PCD		4
++#define _PAGE_BIT_ACCESSED	5
++#define _PAGE_BIT_DIRTY		6
++#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
++#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
++#define _PAGE_BIT_NX           63       /* No execute: only valid after cpuid check */
++
++#define _PAGE_PRESENT	0x001
++#define _PAGE_RW	0x002
++#define _PAGE_USER	0x004
++#define _PAGE_PWT	0x008
++#define _PAGE_PCD	0x010
++#define _PAGE_ACCESSED	0x020
++#define _PAGE_DIRTY	0x040
++#define _PAGE_PSE	0x080	/* 2MB page */
++#define _PAGE_FILE	0x040	/* nonlinear file mapping, saved PTE; unset:swap */
++#define _PAGE_GLOBAL	0x100	/* Global TLB entry */
++
++#define _PAGE_PROTNONE	0x080	/* If not present */
++#define _PAGE_NX        (1UL<<_PAGE_BIT_NX)
++
++/* Mapped page is I/O or foreign and has no associated page struct. */
++#define _PAGE_IO	0x200
 +
 +#if CONFIG_XEN_COMPAT <= 0x030002
-+	if (unlikely(rc == -ENOSYS)) {
-+		struct physdev_op op;
-+		op.cmd = cmd;
-+		memcpy(&op.u, arg, sizeof(op.u));
-+		rc = _hypercall1(int, physdev_op_compat, &op);
-+		memcpy(arg, &op.u, sizeof(op.u));
-+	}
++extern unsigned int __kernel_page_user;
++#else
++#define __kernel_page_user 0
 +#endif
 +
-+	return rc;
-+}
++#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
++#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY | __kernel_page_user)
 +
-+static inline int
-+HYPERVISOR_grant_table_op(
-+	unsigned int cmd, void *uop, unsigned int count)
-+{
-+	return _hypercall3(int, grant_table_op, cmd, uop, count);
-+}
++#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_IO)
 +
-+static inline int
-+HYPERVISOR_update_va_mapping_otherdomain(
-+	unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
-+{
-+	return _hypercall4(int, update_va_mapping_otherdomain, va,
-+			   new_val.pte, flags, domid);
-+}
++#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
++#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
++#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
++#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
++#define PAGE_COPY PAGE_COPY_NOEXEC
++#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
++#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++#define __PAGE_KERNEL \
++	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
++#define __PAGE_KERNEL_EXEC \
++	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | __kernel_page_user)
++#define __PAGE_KERNEL_NOCACHE \
++	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
++#define __PAGE_KERNEL_RO \
++	(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
++#define __PAGE_KERNEL_VSYSCALL \
++	(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++#define __PAGE_KERNEL_VSYSCALL_NOCACHE \
++	(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD)
++#define __PAGE_KERNEL_LARGE \
++	(__PAGE_KERNEL | _PAGE_PSE)
++#define __PAGE_KERNEL_LARGE_EXEC \
++	(__PAGE_KERNEL_EXEC | _PAGE_PSE)
 +
-+static inline int
-+HYPERVISOR_vm_assist(
-+	unsigned int cmd, unsigned int type)
-+{
-+	return _hypercall2(int, vm_assist, cmd, type);
-+}
++/*
++ * We don't support the GLOBAL page flag in xenolinux64.
++ */
++#define MAKE_GLOBAL(x) __pgprot((x))
 +
-+static inline int
-+HYPERVISOR_vcpu_op(
-+	int cmd, int vcpuid, void *extra_args)
-+{
-+	return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
-+}
++#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
++#define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
++#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
++#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
++#define PAGE_KERNEL_VSYSCALL32 __pgprot(__PAGE_KERNEL_VSYSCALL)
++#define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
++#define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
++#define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)
 +
-+static inline int
-+HYPERVISOR_set_segment_base(
-+	int reg, unsigned long value)
++/*         xwr */
++#define __P000	PAGE_NONE
++#define __P001	PAGE_READONLY
++#define __P010	PAGE_COPY
++#define __P011	PAGE_COPY
++#define __P100	PAGE_READONLY_EXEC
++#define __P101	PAGE_READONLY_EXEC
++#define __P110	PAGE_COPY_EXEC
++#define __P111	PAGE_COPY_EXEC
++
++#define __S000	PAGE_NONE
++#define __S001	PAGE_READONLY
++#define __S010	PAGE_SHARED
++#define __S011	PAGE_SHARED
++#define __S100	PAGE_READONLY_EXEC
++#define __S101	PAGE_READONLY_EXEC
++#define __S110	PAGE_SHARED_EXEC
++#define __S111	PAGE_SHARED_EXEC
++
++static inline unsigned long pgd_bad(pgd_t pgd)
 +{
-+	return _hypercall2(int, set_segment_base, reg, value);
++       unsigned long val = __pgd_val(pgd);
++       val &= ~PTE_MASK;
++       val &= ~(_PAGE_USER | _PAGE_DIRTY);
++       return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
 +}
 +
-+static inline int
-+HYPERVISOR_suspend(
-+	unsigned long srec)
-+{
-+	struct sched_shutdown sched_shutdown = {
-+		.reason = SHUTDOWN_suspend
-+	};
++static inline unsigned long pud_bad(pud_t pud) 
++{ 
++       unsigned long val = __pud_val(pud);
++       val &= ~PTE_MASK; 
++       val &= ~(_PAGE_USER | _PAGE_DIRTY); 
++       return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);      
++} 
 +
-+	int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown,
-+			     &sched_shutdown, srec);
++#define set_pte_at(_mm,addr,ptep,pteval) do {				\
++	if (((_mm) != current->mm && (_mm) != &init_mm) ||		\
++	    HYPERVISOR_update_va_mapping((addr), (pteval), 0))		\
++		set_pte((ptep), (pteval));				\
++} while (0)
 +
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+	if (rc == -ENOSYS)
-+		rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown,
-+				 SHUTDOWN_suspend, srec);
-+#endif
++#define pte_none(x)	(!(x).pte)
++#define pte_present(x)	((x).pte & (_PAGE_PRESENT | _PAGE_PROTNONE))
++#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 +
-+	return rc;
-+}
++#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
 +
-+static inline int
-+HYPERVISOR_nmi_op(
-+	unsigned long op, void *arg)
-+{
-+	return _hypercall2(int, nmi_op, op, arg);
-+}
++#define __pte_mfn(_pte) (((_pte).pte & PTE_MASK) >> PAGE_SHIFT)
++#define pte_mfn(_pte) ((_pte).pte & _PAGE_PRESENT ? \
++	__pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte)))
++#define pte_pfn(_pte) ((_pte).pte & _PAGE_IO ? end_pfn :	\
++		       (_pte).pte & _PAGE_PRESENT ?		\
++		       mfn_to_local_pfn(__pte_mfn(_pte)) :	\
++		       __pte_mfn(_pte))
 +
-+static inline unsigned long
-+HYPERVISOR_hvm_op(
-+    int op, void *arg)
-+{
-+    return _hypercall2(unsigned long, hvm_op, op, arg);
-+}
++#define pte_page(x)	pfn_to_page(pte_pfn(x))
 +
-+static inline int
-+HYPERVISOR_callback_op(
-+	int cmd, void *arg)
++static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
 +{
-+	return _hypercall2(int, callback_op, cmd, arg);
++	unsigned long pte = page_nr << PAGE_SHIFT;
++	pte |= pgprot_val(pgprot);
++	pte &= __supported_pte_mask;
++	return __pte(pte);
 +}
 +
-+static inline int
-+HYPERVISOR_xenoprof_op(
-+	int op, void *arg)
++static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 +{
-+	return _hypercall2(int, xenoprof_op, op, arg);
++	pte_t pte = *ptep;
++	if (!pte_none(pte)) {
++		if ((mm != &init_mm) ||
++		    HYPERVISOR_update_va_mapping(addr, __pte(0), 0))
++			pte = __pte_ma(xchg(&ptep->pte, 0));
++	}
++	return pte;
 +}
 +
-+static inline int
-+HYPERVISOR_kexec_op(
-+	unsigned long op, void *args)
++static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
 +{
-+	return _hypercall2(int, kexec_op, op, args);
++	if (full) {
++		pte_t pte = *ptep;
++		if (mm->context.pinned)
++			xen_l1_entry_update(ptep, __pte(0));
++		else
++			*ptep = __pte(0);
++		return pte;
++	}
++	return ptep_get_and_clear(mm, addr, ptep);
 +}
 +
-+#endif /* __HYPERCALL_H__ */
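The removed wrappers above share one idiom: on hypervisors older than interface version 0x030002, the unified event_channel_op and physdev_op hypercalls do not exist, so a -ENOSYS return is retried through the legacy *_compat entry points with the command and payload packed into one struct. A minimal userspace sketch of that retry shape follows; hypercall_new, hypercall_compat and struct compat_op are hypothetical stand-ins for the real _hypercall wrappers, not the actual interface:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the real hypercall entry points. */
struct compat_op { int cmd; unsigned char u[32]; };

static int hypercall_new(int cmd, void *arg)       /* unified entry point */
{
    (void)cmd; (void)arg;
    return -ENOSYS;                                /* pretend: old hypervisor */
}

static int hypercall_compat(struct compat_op *op)  /* legacy entry point */
{
    op->u[0] = 42;                                 /* pretend result */
    return 0;
}

/* Same retry shape as HYPERVISOR_event_channel_op()/HYPERVISOR_physdev_op(). */
static int do_op(int cmd, void *arg, size_t len)
{
    int rc = hypercall_new(cmd, arg);

    if (rc == -ENOSYS) {            /* fall back to the pre-3.0.2 ABI */
        struct compat_op op;
        op.cmd = cmd;
        memcpy(op.u, arg, len);     /* pack command + payload together */
        rc = hypercall_compat(&op);
        memcpy(arg, op.u, len);     /* copy any results back out */
    }
    return rc;
}

int main(void)
{
    unsigned char payload[32] = { 0 };
    int rc = do_op(1, payload, sizeof(payload));
    printf("rc=%d result=%d\n", rc, payload[0]);
    return 0;
}
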
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/hypervisor.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/hypervisor.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/hypervisor.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/hypervisor.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,2 @@
-+
-+#include <asm-i386/mach-xen/asm/hypervisor.h>
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/io.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/io.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/io.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/io.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,330 @@
-+#ifndef _ASM_IO_H
-+#define _ASM_IO_H
-+
-+#include <asm/fixmap.h>
++#define ptep_clear_flush(vma, addr, ptep)			\
++({								\
++	pte_t *__ptep = (ptep);					\
++	pte_t __res = *__ptep;					\
++	if (!pte_none(__res) &&					\
++	    ((vma)->vm_mm != current->mm ||			\
++	     HYPERVISOR_update_va_mapping(addr,	__pte(0), 	\
++			(unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
++				UVMF_INVLPG|UVMF_MULTI))) {	\
++		__ptep->pte = 0;				\
++		flush_tlb_page(vma, addr);			\
++	}							\
++	__res;							\
++})
 +
 +/*
-+ * This file contains the definitions for the x86 IO instructions
-+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
-+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
-+ * versions of the single-IO instructions (inb_p/inw_p/..).
-+ *
-+ * This file is not meant to be obfuscating: it's just complicated
-+ * to (a) handle it all in a way that makes gcc able to optimize it
-+ * as well as possible and (b) trying to avoid writing the same thing
-+ * over and over again with slight variations and possibly making a
-+ * mistake somewhere.
++ * The following only work if pte_present() is true.
++ * Undefined behaviour if not..
 + */
++#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
++static inline int pte_user(pte_t pte)		{ return __pte_val(pte) & _PAGE_USER; }
++static inline int pte_read(pte_t pte)		{ return __pte_val(pte) & _PAGE_USER; }
++static inline int pte_exec(pte_t pte)		{ return __pte_val(pte) & _PAGE_USER; }
++static inline int pte_dirty(pte_t pte)		{ return __pte_val(pte) & _PAGE_DIRTY; }
++static inline int pte_young(pte_t pte)		{ return __pte_val(pte) & _PAGE_ACCESSED; }
++static inline int pte_write(pte_t pte)		{ return __pte_val(pte) & _PAGE_RW; }
++static inline int pte_file(pte_t pte)		{ return __pte_val(pte) & _PAGE_FILE; }
++static inline int pte_huge(pte_t pte)		{ return __pte_val(pte) & _PAGE_PSE; }
 +
-+/*
-+ * Thanks to James van Artsdalen for a better timing-fix than
-+ * the two short jumps: using outb's to a nonexistent port seems
-+ * to guarantee better timings even on fast machines.
-+ *
-+ * On the other hand, I'd like to be sure of a non-existent port:
-+ * I feel a bit unsafe about using 0x80 (should be safe, though)
-+ *
-+ *		Linus
-+ */
++static inline pte_t pte_rdprotect(pte_t pte)	{ __pte_val(pte) &= ~_PAGE_USER; return pte; }
++static inline pte_t pte_exprotect(pte_t pte)	{ __pte_val(pte) &= ~_PAGE_USER; return pte; }
++static inline pte_t pte_mkclean(pte_t pte)	{ __pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
++static inline pte_t pte_mkold(pte_t pte)	{ __pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
++static inline pte_t pte_wrprotect(pte_t pte)	{ __pte_val(pte) &= ~_PAGE_RW; return pte; }
++static inline pte_t pte_mkread(pte_t pte)	{ __pte_val(pte) |= _PAGE_USER; return pte; }
++static inline pte_t pte_mkexec(pte_t pte)	{ __pte_val(pte) |= _PAGE_USER; return pte; }
++static inline pte_t pte_mkdirty(pte_t pte)	{ __pte_val(pte) |= _PAGE_DIRTY; return pte; }
++static inline pte_t pte_mkyoung(pte_t pte)	{ __pte_val(pte) |= _PAGE_ACCESSED; return pte; }
++static inline pte_t pte_mkwrite(pte_t pte)	{ __pte_val(pte) |= _PAGE_RW; return pte; }
++static inline pte_t pte_mkhuge(pte_t pte)	{ __pte_val(pte) |= _PAGE_PSE; return pte; }
 +
-+ /*
-+  *  Bit simplified and optimized by Jan Hubicka
-+  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
-+  *
-+  *  isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
-+  *  isa_read[wl] and isa_write[wl] fixed
-+  *  - Arnaldo Carvalho de Melo <acme at conectiva.com.br>
-+  */
++#define ptep_test_and_clear_dirty(vma, addr, ptep)			\
++({									\
++	pte_t __pte = *(ptep);						\
++	int __ret = pte_dirty(__pte);					\
++	if (__ret)							\
++		set_pte_at((vma)->vm_mm, addr, ptep, pte_mkclean(__pte)); \
++	__ret;								\
++})
 +
-+#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
++#define ptep_test_and_clear_young(vma, addr, ptep)			\
++({									\
++	pte_t __pte = *(ptep);						\
++	int __ret = pte_young(__pte);					\
++	if (__ret)							\
++		set_pte_at((vma)->vm_mm, addr, ptep, pte_mkold(__pte)); \
++	__ret;								\
++})
 +
-+#ifdef REALLY_SLOW_IO
-+#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
-+#else
-+#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
-+#endif
++static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++	pte_t pte = *ptep;
++	if (pte_write(pte))
++		set_pte_at(mm, addr, ptep, pte_wrprotect(pte));
++}
 +
 +/*
-+ * Talk about misusing macros..
++ * Macro to mark a page protection value as "uncacheable".
 + */
-+#define __OUT1(s,x) \
-+static inline void out##s(unsigned x value, unsigned short port) {
++#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
 +
-+#define __OUT2(s,s1,s2) \
-+__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
++static inline int pmd_large(pmd_t pte) { 
++	return (__pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
++} 	
 +
-+#define __OUT(s,s1,x) \
-+__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
-+__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
 +
-+#define __IN1(s) \
-+static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
++/*
++ * Conversion functions: convert a page and protection to a page entry,
++ * and a page entry and page directory to the page they refer to.
++ */
 +
-+#define __IN2(s,s1,s2) \
-+__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
++/*
++ * Level 4 access.
++ * Never use these in the common code.
++ */
++#define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & PTE_MASK))
++#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
++#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
++#define pgd_offset_k(address) (init_level4_pgt + pgd_index(address))
++#define pgd_present(pgd) (__pgd_val(pgd) & _PAGE_PRESENT)
++#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
 +
-+#define __IN(s,s1,i...) \
-+__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
-+__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
++/* PUD - Level3 access */
++/* to find an entry in a page-table-directory. */
++#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
++#define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address))
++#define pud_present(pud) (__pud_val(pud) & _PAGE_PRESENT)
 +
-+#define __INS(s) \
-+static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
-+{ __asm__ __volatile__ ("rep ; ins" #s \
-+: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
++/* PMD  - Level 2 access */
++#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
++#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
 +
-+#define __OUTS(s) \
-+static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
-+{ __asm__ __volatile__ ("rep ; outs" #s \
-+: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
++#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
++#define pmd_offset(dir, address) ((pmd_t *) pud_page(*(dir)) + \
++                                  pmd_index(address))
++#define pmd_none(x)	(!__pmd_val(x))
++#if CONFIG_XEN_COMPAT <= 0x030002
++/* pmd_present doesn't just test the _PAGE_PRESENT bit, since writable
++   page tables (wr.p.t.) can temporarily clear it. */
++#define pmd_present(x)	(__pmd_val(x))
++#else
++#define pmd_present(x)	(__pmd_val(x) & _PAGE_PRESENT)
++#endif
++#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
++#define pmd_bad(x) ((__pmd_val(x) & ~(PTE_MASK | _PAGE_USER | _PAGE_PRESENT)) \
++		    != (_KERNPG_TABLE & ~(_PAGE_USER | _PAGE_PRESENT)))
++#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
++#define pmd_pfn(x)  ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
 +
-+#define RETURN_TYPE unsigned char
-+__IN(b,"")
-+#undef RETURN_TYPE
-+#define RETURN_TYPE unsigned short
-+__IN(w,"")
-+#undef RETURN_TYPE
-+#define RETURN_TYPE unsigned int
-+__IN(l,"")
-+#undef RETURN_TYPE
++#define pte_to_pgoff(pte) ((__pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
++#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
++#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
 +
-+__OUT(b,"b",char)
-+__OUT(w,"w",short)
-+__OUT(l,,int)
++/* PTE - Level 1 access. */
 +
-+__INS(b)
-+__INS(w)
-+__INS(l)
++/* page, protection -> pte */
++#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
++#define mk_pte_huge(entry) (__pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)
++ 
++/* physical address -> PTE */
++static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
++{ 
++	unsigned long pteval;
++	pteval = physpage | pgprot_val(pgprot);
++	return __pte(pteval);
++}
++ 
++/* Change flags of a PTE */
++static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
++{ 
++	/*
++	 * Since this might change the present bit (which controls whether
++	 * a pte_t object has undergone p2m translation), we must use
++	 * pte_val() on the input pte and __pte() for the return value.
++	 */
++	unsigned long pteval = pte_val(pte);
 +
-+__OUTS(b)
-+__OUTS(w)
-+__OUTS(l)
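For reference, the __OUT(b,"b",char) and __IN(b,"") instantiations above expand to roughly the inline assembly below (the pausing _p variants only append an extra dummy write to port 0x80). The _sketch names are illustrative; the snippet compiles with GCC on x86, but actually executing port I/O from user space additionally requires ioperm() or iopl():

static inline void outb_sketch(unsigned char value, unsigned short port)
{
	/* "out" #s with s1 == "b": write AL to the 16-bit port number in DX. */
	__asm__ __volatile__("outb %b0,%w1" : : "a"(value), "Nd"(port));
}

static inline unsigned char inb_sketch(unsigned short port)
{
	unsigned char v;
	/* Read one byte from the port into AL. */
	__asm__ __volatile__("inb %w1,%0" : "=a"(v) : "Nd"(port));
	return v;
}
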
++	pteval &= _PAGE_CHG_MASK;
++	pteval |= pgprot_val(newprot);
++	pteval &= __supported_pte_mask;
++	return __pte(pteval);
++}
 +
-+#define IO_SPACE_LIMIT 0xffff
++#define pte_index(address) \
++		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
++#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \
++			pte_index(address))
 +
-+#if defined(__KERNEL__) && __x86_64__
++/* x86-64 always has all page tables mapped. */
++#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
++#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address)
++#define pte_unmap(pte) /* NOP */
++#define pte_unmap_nested(pte) /* NOP */ 
 +
-+#include <linux/vmalloc.h>
++#define update_mmu_cache(vma,address,pte) do { } while (0)
 +
-+#ifndef __i386__
 +/*
-+ * Change virtual addresses to physical addresses and vv.
-+ * These are pretty trivial
++ * Rules for using ptep_establish: the pte MUST be a user pte, and
++ * must be a present->present transition.
 + */
-+static inline unsigned long virt_to_phys(volatile void * address)
-+{
-+	return __pa(address);
-+}
-+
-+static inline void * phys_to_virt(unsigned long address)
-+{
-+	return __va(address);
-+}
++#define __HAVE_ARCH_PTEP_ESTABLISH
++#define ptep_establish(vma, address, ptep, pteval)			\
++	do {								\
++		if ( likely((vma)->vm_mm == current->mm) ) {		\
++			BUG_ON(HYPERVISOR_update_va_mapping(address,	\
++				pteval,					\
++				(unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
++					UVMF_INVLPG|UVMF_MULTI));	\
++		} else {						\
++			xen_l1_entry_update(ptep, pteval);		\
++			flush_tlb_page(vma, address);			\
++		}							\
++	} while (0)
 +
-+#define virt_to_bus(_x) phys_to_machine(__pa(_x))
-+#define bus_to_virt(_x) __va(machine_to_phys(_x))
-+#endif
++/* We only update the dirty/accessed state if we set
++ * the dirty bit by hand in the kernel, since the hardware
++ * will do the accessed bit for us, and we don't want to
++ * race with other CPUs that might be updating the dirty
++ * bit at the same time. */
++#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
++#define ptep_set_access_flags(vma, address, ptep, entry, dirty)		\
++	do {								\
++		if (dirty)						\
++			ptep_establish(vma, address, ptep, entry);	\
++	} while (0)
 +
-+/*
-+ * Change "struct page" to physical address.
-+ */
-+#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-+#define page_to_phys(page)	 (phys_to_machine(page_to_pseudophys(page)))
-+#define page_to_bus(page)	 (phys_to_machine(page_to_pseudophys(page)))
++/* Encode and de-code a swap entry */
++#define __swp_type(x)			(((x).val >> 1) & 0x3f)
++#define __swp_offset(x)			((x).val >> 8)
++#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
++#define __pte_to_swp_entry(pte)		((swp_entry_t) { __pte_val(pte) })
++#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
 +
-+#define bio_to_pseudophys(bio)	 (page_to_pseudophys(bio_page((bio))) + \
-+				  (unsigned long) bio_offset((bio)))
-+#define bvec_to_pseudophys(bv)	 (page_to_pseudophys((bv)->bv_page) + \
-+				  (unsigned long) (bv)->bv_offset)
++extern spinlock_t pgd_lock;
++extern struct page *pgd_list;
++void vmalloc_sync_all(void);
 +
-+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
-+	(((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
-+	 ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
-+	  bvec_to_pseudophys((vec2))))
++#endif /* !__ASSEMBLY__ */
 +
-+#include <asm-generic/iomap.h>
++extern int kern_addr_valid(unsigned long addr); 
 +
-+extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags);
++#define DOMID_LOCAL (0xFFFFU)
 +
-+static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
-+{
-+	return __ioremap(offset, size, 0);
-+}
++struct vm_area_struct;
 +
-+extern void *bt_ioremap(unsigned long addr, unsigned long size);
-+extern void bt_iounmap(void *addr, unsigned long size);
-+#define early_ioremap bt_ioremap
-+#define early_iounmap bt_iounmap
++int direct_remap_pfn_range(struct vm_area_struct *vma,
++                            unsigned long address,
++                            unsigned long mfn,
++                            unsigned long size,
++                            pgprot_t prot,
++                            domid_t  domid);
 +
-+/*
-+ * This one maps high address device memory and turns off caching for that area.
-+ * it's useful if some control registers are in such an area and write combining
-+ * or read caching is not desirable:
-+ */
-+extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
-+extern void iounmap(volatile void __iomem *addr);
++int direct_kernel_remap_pfn_range(unsigned long address, 
++				  unsigned long mfn,
++				  unsigned long size, 
++				  pgprot_t prot,
++				  domid_t  domid);
 +
-+/*
-+ * ISA I/O bus memory addresses are 1:1 with the physical address.
-+ */
++int create_lookup_pte_addr(struct mm_struct *mm,
++                           unsigned long address,
++                           uint64_t *ptep);
 +
-+#define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
-+#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
-+#define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))
++int touch_pte_range(struct mm_struct *mm,
++                    unsigned long address,
++                    unsigned long size);
 +
-+/*
-+ * However PCI ones are not necessarily 1:1 and therefore these interfaces
-+ * are forbidden in portable PCI drivers.
-+ *
-+ * Allow them on x86 for legacy drivers, though.
-+ */
-+#define virt_to_bus(_x) phys_to_machine(__pa(_x))
-+#define bus_to_virt(_x) __va(machine_to_phys(_x))
++int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd,
++		unsigned long addr, unsigned long end, pgprot_t newprot);
 +
-+/*
-+ * readX/writeX() are used to access memory mapped devices. On some
-+ * architectures the memory mapped IO stuff needs to be accessed
-+ * differently. On the x86 architecture, we just read/write the
-+ * memory location directly.
-+ */
++#define arch_change_pte_range(mm, pmd, addr, end, newprot)	\
++		xen_change_pte_range(mm, pmd, addr, end, newprot)
 +
-+static inline __u8 __readb(const volatile void __iomem *addr)
-+{
-+	return *(__force volatile __u8 *)addr;
-+}
-+static inline __u16 __readw(const volatile void __iomem *addr)
-+{
-+	return *(__force volatile __u16 *)addr;
-+}
-+static __always_inline __u32 __readl(const volatile void __iomem *addr)
-+{
-+	return *(__force volatile __u32 *)addr;
-+}
-+static inline __u64 __readq(const volatile void __iomem *addr)
-+{
-+	return *(__force volatile __u64 *)addr;
-+}
-+#define readb(x) __readb(x)
-+#define readw(x) __readw(x)
-+#define readl(x) __readl(x)
-+#define readq(x) __readq(x)
-+#define readb_relaxed(a) readb(a)
-+#define readw_relaxed(a) readw(a)
-+#define readl_relaxed(a) readl(a)
-+#define readq_relaxed(a) readq(a)
-+#define __raw_readb readb
-+#define __raw_readw readw
-+#define __raw_readl readl
-+#define __raw_readq readq
++#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
++		direct_remap_pfn_range(vma,vaddr,pfn,size,prot,DOMID_IO)
 +
-+#define mmiowb()
++#define MK_IOSPACE_PFN(space, pfn)	(pfn)
++#define GET_IOSPACE(pfn)		0
++#define GET_PFN(pfn)			(pfn)
 +
-+static inline void __writel(__u32 b, volatile void __iomem *addr)
-+{
-+	*(__force volatile __u32 *)addr = b;
-+}
-+static inline void __writeq(__u64 b, volatile void __iomem *addr)
-+{
-+	*(__force volatile __u64 *)addr = b;
-+}
-+static inline void __writeb(__u8 b, volatile void __iomem *addr)
-+{
-+	*(__force volatile __u8 *)addr = b;
-+}
-+static inline void __writew(__u16 b, volatile void __iomem *addr)
-+{
-+	*(__force volatile __u16 *)addr = b;
-+}
-+#define writeq(val,addr) __writeq((val),(addr))
-+#define writel(val,addr) __writel((val),(addr))
-+#define writew(val,addr) __writew((val),(addr))
-+#define writeb(val,addr) __writeb((val),(addr))
-+#define __raw_writeb writeb
-+#define __raw_writew writew
-+#define __raw_writel writel
-+#define __raw_writeq writeq
++#define HAVE_ARCH_UNMAPPED_AREA
 +
-+void __memcpy_fromio(void*,unsigned long,unsigned);
-+void __memcpy_toio(unsigned long,const void*,unsigned);
++#define pgtable_cache_init()   do { } while (0)
++#define check_pgt_cache()      do { } while (0)
 +
-+static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len)
-+{
-+	__memcpy_fromio(to,(unsigned long)from,len);
-+}
-+static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len)
-+{
-+	__memcpy_toio((unsigned long)to,from,len);
-+}
++#define PAGE_AGP    PAGE_KERNEL_NOCACHE
++#define HAVE_PAGE_AGP 1
 +
-+void memset_io(volatile void __iomem *a, int b, size_t c);
++/* fs/proc/kcore.c */
++#define	kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
++#define	kc_offset_to_vaddr(o) \
++   (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
 +
-+/*
-+ * ISA space is 'always mapped' on a typical x86 system, no need to
-+ * explicitly ioremap() it. The fact that the ISA IO space is mapped
-+ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
-+ * are physical addresses. The following constant pointer can be
-+ * used as the IO-area pointer (it can be iounmapped as well, so the
-+ * analogy with PCI is quite large):
-+ */
-+#define __ISA_IO_base ((char __iomem *)(fix_to_virt(FIX_ISAMAP_BEGIN)))
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
++#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
++#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
++#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
++#define __HAVE_ARCH_PTEP_SET_WRPROTECT
++#define __HAVE_ARCH_PTE_SAME
++#include <asm-generic/pgtable.h>
 +
++#endif /* _X86_64_PGTABLE_H */
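A pattern worth noting in the pte accessors above (set_pte_at, ptep_get_and_clear, ptep_clear_flush): when the mapping belongs to the currently loaded page tables, a single HYPERVISOR_update_va_mapping hypercall both updates the PTE and handles the TLB; otherwise the code falls back to a direct write, which Xen validates by trapping. A minimal sketch of that decision, assuming hypothetical stubs in place of the hypercall and the trapped write:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in: returns 0 if Xen applied the update, nonzero if the
 * virtual address is not mapped in the currently loaded page tables. */
static int update_va_mapping_stub(unsigned long va, unsigned long pte)
{
    (void)va; (void)pte;
    return 0;
}

/* Direct write; under Xen the store traps and is validated by the hypervisor. */
static void direct_pte_write(unsigned long *ptep, unsigned long pte)
{
    *ptep = pte;
}

/* Mirrors the set_pte_at() shape above: hypercall first for the live mm,
 * direct (trapped) write otherwise. */
static void set_pte_at_sketch(bool live_mm, unsigned long va,
                              unsigned long *ptep, unsigned long pte)
{
    if (!live_mm || update_va_mapping_stub(va, pte))
        direct_pte_write(ptep, pte);
}

int main(void)
{
    unsigned long slot = 0;
    set_pte_at_sketch(false, 0x1000, &slot, 0x2063);  /* foreign mm: direct path */
    printf("pte=%#lx\n", slot);
    return 0;
}
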
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/processor.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/processor.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,502 @@
 +/*
-+ * Again, x86-64 does not require mem IO specific function.
-+ */
-+
-+#define eth_io_copy_and_sum(a,b,c,d)		eth_copy_and_sum((a),(void *)(b),(c),(d))
-+
-+/**
-+ *	check_signature		-	find BIOS signatures
-+ *	@io_addr: mmio address to check 
-+ *	@signature:  signature block
-+ *	@length: length of signature
++ * include/asm-x86_64/processor.h
 + *
-+ *	Perform a signature comparison with the mmio address io_addr. This
-+ *	address should have been obtained by ioremap.
-+ *	Returns 1 on a match.
++ * Copyright (C) 1994 Linus Torvalds
 + */
-+ 
-+static inline int check_signature(void __iomem *io_addr,
-+	const unsigned char *signature, int length)
-+{
-+	int retval = 0;
-+	do {
-+		if (readb(io_addr) != *signature)
-+			goto out;
-+		io_addr++;
-+		signature++;
-+		length--;
-+	} while (length);
-+	retval = 1;
-+out:
-+	return retval;
-+}
 +
-+/* Nothing to do */
++#ifndef __ASM_X86_64_PROCESSOR_H
++#define __ASM_X86_64_PROCESSOR_H
 +
-+#define dma_cache_inv(_start,_size)		do { } while (0)
-+#define dma_cache_wback(_start,_size)		do { } while (0)
-+#define dma_cache_wback_inv(_start,_size)	do { } while (0)
++#include <asm/segment.h>
++#include <asm/page.h>
++#include <asm/types.h>
++#include <asm/sigcontext.h>
++#include <asm/cpufeature.h>
++#include <linux/threads.h>
++#include <asm/msr.h>
++#include <asm/current.h>
++#include <asm/system.h>
++#include <asm/mmsegment.h>
++#include <asm/percpu.h>
++#include <linux/personality.h>
++#include <linux/cpumask.h>
 +
-+#define flush_write_buffers() 
++#define TF_MASK		0x00000100
++#define IF_MASK		0x00000200
++#define IOPL_MASK	0x00003000
++#define NT_MASK		0x00004000
++#define VM_MASK		0x00020000
++#define AC_MASK		0x00040000
++#define VIF_MASK	0x00080000	/* virtual interrupt flag */
++#define VIP_MASK	0x00100000	/* virtual interrupt pending */
++#define ID_MASK		0x00200000
 +
-+extern int iommu_bio_merge;
-+#define BIO_VMERGE_BOUNDARY iommu_bio_merge
++#define desc_empty(desc) \
++               (!((desc)->a | (desc)->b))
++
++#define desc_equal(desc1, desc2) \
++               (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
 +
 +/*
-+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
-+ * access
++ * Default implementation of macro that returns current
++ * instruction pointer ("program counter").
 + */
-+#define xlate_dev_mem_ptr(p, sz)	ioremap(p, sz)
-+#define xlate_dev_mem_ptr_unmap(p)	iounmap(p)
++#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
 +
 +/*
-+ * Convert a virtual cached pointer to an uncached pointer
++ *  CPU type and hardware bug flags. Kept separately for each CPU.
 + */
-+#define xlate_dev_kmem_ptr(p)	p
 +
-+#endif /* __KERNEL__ */
++struct cpuinfo_x86 {
++	__u8	x86;		/* CPU family */
++	__u8	x86_vendor;	/* CPU vendor */
++	__u8	x86_model;
++	__u8	x86_mask;
++	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
++	__u32	x86_capability[NCAPINTS];
++	char	x86_vendor_id[16];
++	char	x86_model_id[64];
++	int 	x86_cache_size;  /* in KB */
++	int	x86_clflush_size;
++	int	x86_cache_alignment;
++	int	x86_tlbsize;	/* number of 4K pages in DTLB/ITLB combined(in pages)*/
++        __u8    x86_virt_bits, x86_phys_bits;
++	__u8	x86_max_cores;	/* cpuid returned max cores value */
++        __u32   x86_power; 	
++	__u32   extended_cpuid_level;	/* Max extended CPUID function supported */
++	unsigned long loops_per_jiffy;
++#ifdef CONFIG_SMP
++	cpumask_t llc_shared_map;	/* cpus sharing the last level cache */
++#endif
++	__u8	apicid;
++#ifdef CONFIG_SMP
++	__u8	booted_cores;	/* number of cores as seen by OS */
++	__u8	phys_proc_id;	/* Physical Processor id. */
++	__u8	cpu_core_id;	/* Core id. */
++#endif
++} ____cacheline_aligned;
 +
-+#define ARCH_HAS_DEV_MEM
++#define X86_VENDOR_INTEL 0
++#define X86_VENDOR_CYRIX 1
++#define X86_VENDOR_AMD 2
++#define X86_VENDOR_UMC 3
++#define X86_VENDOR_NEXGEN 4
++#define X86_VENDOR_CENTAUR 5
++#define X86_VENDOR_RISE 6
++#define X86_VENDOR_TRANSMETA 7
++#define X86_VENDOR_NUM 8
++#define X86_VENDOR_UNKNOWN 0xff
 +
++#ifdef CONFIG_SMP
++extern struct cpuinfo_x86 cpu_data[];
++#define current_cpu_data cpu_data[smp_processor_id()]
++#else
++#define cpu_data (&boot_cpu_data)
++#define current_cpu_data boot_cpu_data
 +#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/irqflags.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/irqflags.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/irqflags.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/irqflags.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,139 @@
-+/*
-+ * include/asm-x86_64/irqflags.h
-+ *
-+ * IRQ flags handling
-+ *
-+ * This file gets included from lowlevel asm headers too, to provide
-+ * wrapped versions of the local_irq_*() APIs, based on the
-+ * raw_local_irq_*() functions from the lowlevel headers.
-+ */
-+#ifndef _ASM_IRQFLAGS_H
-+#define _ASM_IRQFLAGS_H
 +
-+#ifndef __ASSEMBLY__
++extern char ignore_irq13;
++
++extern void identify_cpu(struct cpuinfo_x86 *);
++extern void print_cpu_info(struct cpuinfo_x86 *);
++extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
++extern unsigned short num_cache_leaves;
++
 +/*
-+ * Interrupt control:
++ * EFLAGS bits
 + */
++#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
++#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
++#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
++#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
++#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
++#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
++#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
++#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
++#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
++#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
++#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
++#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
++#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
++#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
++#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
++#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
++#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
 +
 +/*
-+ * The use of 'barrier' in the following reflects their use as local-lock
-+ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
-+ * critical operations are executed. All critical operations must complete
-+ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
-+ * includes these barriers, for example.
++ * Intel CPU features in CR4
 + */
-+
-+#define __raw_local_save_flags() (current_vcpu_info()->evtchn_upcall_mask)
-+
-+#define raw_local_save_flags(flags) \
-+		do { (flags) = __raw_local_save_flags(); } while (0)
-+
-+#define raw_local_irq_restore(x)					\
-+do {									\
-+	vcpu_info_t *_vcpu;						\
-+	barrier();							\
-+	_vcpu = current_vcpu_info();		\
-+	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {			\
-+		barrier(); /* unmask then check (avoid races) */	\
-+		if ( unlikely(_vcpu->evtchn_upcall_pending) )		\
-+			force_evtchn_callback();			\
-+	}								\
-+} while (0)
-+
-+#ifdef CONFIG_X86_VSMP
++#define X86_CR4_VME		0x0001	/* enable vm86 extensions */
++#define X86_CR4_PVI		0x0002	/* virtual interrupts flag enable */
++#define X86_CR4_TSD		0x0004	/* disable time stamp at ipl 3 */
++#define X86_CR4_DE		0x0008	/* enable debugging extensions */
++#define X86_CR4_PSE		0x0010	/* enable page size extensions */
++#define X86_CR4_PAE		0x0020	/* enable physical address extensions */
++#define X86_CR4_MCE		0x0040	/* Machine check enable */
++#define X86_CR4_PGE		0x0080	/* enable global pages */
++#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
++#define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
++#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */
 +
 +/*
-+ * Interrupt control for the VSMP architecture:
++ * Save the cr4 feature set we're using (ie
++ * Pentium 4MB enable and PPro Global page
++ * enable), so that any CPU's that boot up
++ * after us can get the correct flags.
 + */
++extern unsigned long mmu_cr4_features;
 +
-+static inline void raw_local_irq_disable(void)
-+{
-+	unsigned long flags = __raw_local_save_flags();
-+
-+	raw_local_irq_restore((flags & ~(1 << 9)) | (1 << 18));
-+}
-+
-+static inline void raw_local_irq_enable(void)
-+{
-+	unsigned long flags = __raw_local_save_flags();
-+
-+	raw_local_irq_restore((flags | (1 << 9)) & ~(1 << 18));
-+}
-+
-+static inline int raw_irqs_disabled_flags(unsigned long flags)
++static inline void set_in_cr4 (unsigned long mask)
 +{
-+	return !(flags & (1<<9)) || (flags & (1 << 18));
++	mmu_cr4_features |= mask;
++	__asm__("movq %%cr4,%%rax\n\t"
++		"orq %0,%%rax\n\t"
++		"movq %%rax,%%cr4\n"
++		: : "irg" (mask)
++		:"ax");
 +}
 +
-+#else /* CONFIG_X86_VSMP */
-+
-+#define raw_local_irq_disable()						\
-+do {									\
-+	current_vcpu_info()->evtchn_upcall_mask = 1;					\
-+	barrier();							\
-+} while (0)
-+
-+#define raw_local_irq_enable()						\
-+do {									\
-+	vcpu_info_t *_vcpu;						\
-+	barrier();							\
-+	_vcpu = current_vcpu_info();		\
-+	_vcpu->evtchn_upcall_mask = 0;					\
-+	barrier(); /* unmask then check (avoid races) */		\
-+	if ( unlikely(_vcpu->evtchn_upcall_pending) )			\
-+		force_evtchn_callback();				\
-+} while (0)
-+
-+static inline int raw_irqs_disabled_flags(unsigned long flags)
++static inline void clear_in_cr4 (unsigned long mask)
 +{
-+	return (flags != 0);
++	mmu_cr4_features &= ~mask;
++	__asm__("movq %%cr4,%%rax\n\t"
++		"andq %0,%%rax\n\t"
++		"movq %%rax,%%cr4\n"
++		: : "irg" (~mask)
++		:"ax");
 +}
 +
-+#endif
 +
 +/*
-+ * For spinlocks, etc.:
++ * User space process size. 47bits minus one guard page.
 + */
++#define TASK_SIZE64	(0x800000000000UL - 4096)
 +
-+#define __raw_local_irq_save()						\
-+({									\
-+	unsigned long flags = __raw_local_save_flags();			\
-+									\
-+	raw_local_irq_disable();					\
-+									\
-+	flags;								\
-+})
++/* This decides where the kernel will search for a free chunk of vm
++ * space during mmap's.
++ */
++#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)
 +
-+#define raw_local_irq_save(flags) \
-+		do { (flags) = __raw_local_irq_save(); } while (0)
++#define TASK_SIZE 		(test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64)
++#define TASK_SIZE_OF(child) 	((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64)
 +
-+#define raw_irqs_disabled()						\
-+({									\
-+	unsigned long flags = __raw_local_save_flags();			\
-+									\
-+	raw_irqs_disabled_flags(flags);					\
-+})
++#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE/3)
 +
 +/*
-+ * Used in the idle loop; sti takes one instruction cycle
-+ * to complete:
++ * Size of io_bitmap.
 + */
-+void raw_safe_halt(void);
++#define IO_BITMAP_BITS  65536
++#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
++#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
++#ifndef CONFIG_X86_NO_TSS
++#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
++#endif
++#define INVALID_IO_BITMAP_OFFSET 0x8000
 +
-+/*
-+ * Used when interrupts are already enabled or to
-+ * shutdown the processor:
-+ */
-+void halt(void);
++struct i387_fxsave_struct {
++	u16	cwd;
++	u16	swd;
++	u16	twd;
++	u16	fop;
++	u64	rip;
++	u64	rdp; 
++	u32	mxcsr;
++	u32	mxcsr_mask;
++	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
++	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 128 bytes */
++	u32	padding[24];
++} __attribute__ ((aligned (16)));
 +
-+#else /* __ASSEMBLY__: */
-+# ifdef CONFIG_TRACE_IRQFLAGS
-+#  define TRACE_IRQS_ON		call trace_hardirqs_on_thunk
-+#  define TRACE_IRQS_OFF	call trace_hardirqs_off_thunk
-+# else
-+#  define TRACE_IRQS_ON
-+#  define TRACE_IRQS_OFF
-+# endif
-+#endif
++union i387_union {
++	struct i387_fxsave_struct	fxsave;
++};
 +
-+#endif
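The removed irqflags.h above implements interrupt masking without cli/sti: the mask is just the evtchn_upcall_mask byte in the shared vcpu_info page, and the enable path must unmask first and only then re-check evtchn_upcall_pending, with a barrier in between, or an event arriving in that window could be lost. A small sketch of that unmask-then-check shape, using a hypothetical vcpu_info_sketch struct in place of the real shared page:

#include <stdio.h>

/* Hypothetical stand-in for the shared-info vcpu_info fields used above. */
struct vcpu_info_sketch {
    volatile unsigned char evtchn_upcall_pending;
    volatile unsigned char evtchn_upcall_mask;
};

static void force_evtchn_callback_sketch(void)
{
    puts("delivering event that arrived while masked");
}

/* Same shape as raw_local_irq_enable(): clear the mask, then re-check
 * pending behind a compiler barrier, so an event raised between the two
 * steps is still delivered. */
static void irq_enable_sketch(struct vcpu_info_sketch *v)
{
    v->evtchn_upcall_mask = 0;
    __asm__ __volatile__("" ::: "memory");   /* unmask *then* check */
    if (v->evtchn_upcall_pending)
        force_evtchn_callback_sketch();
}

int main(void)
{
    struct vcpu_info_sketch v = { 1, 1 };    /* event became pending while masked */
    irq_enable_sketch(&v);
    return 0;
}
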
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/irq.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/irq.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/irq.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/irq.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,38 @@
-+#ifndef _ASM_IRQ_H
-+#define _ASM_IRQ_H
++#ifndef CONFIG_X86_NO_TSS
++struct tss_struct {
++	u32 reserved1;
++	u64 rsp0;	
++	u64 rsp1;
++	u64 rsp2;
++	u64 reserved2;
++	u64 ist[7];
++	u32 reserved3;
++	u32 reserved4;
++	u16 reserved5;
++	u16 io_bitmap_base;
++	/*
++	 * The extra 1 is there because the CPU will access an
++	 * additional byte beyond the end of the IO permission
++	 * bitmap. The extra byte must be all 1 bits, and must
++	 * be within the limit. Thus we have:
++	 *
++	 * 128 bytes, the bitmap itself, for ports 0..0x3ff
++	 * 8 bytes, for an extra "long" of ~0UL
++	 */
++	unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
++} __attribute__((packed)) ____cacheline_aligned;
 +
-+/*
-+ *	linux/include/asm/irq.h
-+ *
-+ *	(C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
-+ *
-+ *	IRQ/IPI changes taken from work by Thomas Radke
-+ *	<tomsoft at informatik.tu-chemnitz.de>
-+ */
++DECLARE_PER_CPU(struct tss_struct,init_tss);
++#endif
 +
-+#include <linux/sched.h>
-+/* include comes from machine specific directory */
-+#include "irq_vectors.h"
-+#include <asm/thread_info.h>
 +
-+static __inline__ int irq_canonicalize(int irq)
-+{
-+	return ((irq == 2) ? 9 : irq);
-+}
++extern struct cpuinfo_x86 boot_cpu_data;
++#ifndef CONFIG_X86_NO_TSS
++/* Save the original ist values for checking stack pointers during debugging */
++struct orig_ist {
++	unsigned long ist[7];
++};
++DECLARE_PER_CPU(struct orig_ist, orig_ist);
++#endif
 +
-+#ifdef CONFIG_X86_LOCAL_APIC
-+#define ARCH_HAS_NMI_WATCHDOG		/* See include/linux/nmi.h */
++#ifdef CONFIG_X86_VSMP
++#define ARCH_MIN_TASKALIGN	(1 << INTERNODE_CACHE_SHIFT)
++#define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
++#else
++#define ARCH_MIN_TASKALIGN	16
++#define ARCH_MIN_MMSTRUCT_ALIGN	0
 +#endif
 +
-+#define KDB_VECTOR	0xf9
++struct thread_struct {
++	unsigned long	rsp0;
++	unsigned long	rsp;
++	unsigned long 	userrsp;	/* Copy from PDA */ 
++	unsigned long	fs;
++	unsigned long	gs;
++	unsigned short	es, ds, fsindex, gsindex;	
++/* Hardware debugging registers */
++	unsigned long	debugreg0;  
++	unsigned long	debugreg1;  
++	unsigned long	debugreg2;  
++	unsigned long	debugreg3;  
++	unsigned long	debugreg6;  
++	unsigned long	debugreg7;  
++/* fault info */
++	unsigned long	cr2, trap_no, error_code;
++/* floating point info */
++	union i387_union	i387  __attribute__((aligned(16)));
++/* IO permissions. The bitmap could be moved into the GDT, which would make
++   switching faster for a limited number of ioperm-using tasks. -AK */
++	int		ioperm;
++	unsigned long	*io_bitmap_ptr;
++	unsigned io_bitmap_max;
++/* cached TLS descriptors. */
++	u64 tls_array[GDT_ENTRY_TLS_ENTRIES];
++	unsigned int	iopl;
++} __attribute__((aligned(16)));
 +
-+# define irq_ctx_init(cpu) do { } while (0)
++#define INIT_THREAD  { \
++	.rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
++}
 +
-+#ifdef CONFIG_HOTPLUG_CPU
-+#include <linux/cpumask.h>
-+extern void fixup_irqs(cpumask_t map);
++#ifndef CONFIG_X86_NO_TSS
++#define INIT_TSS  { \
++	.rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
++}
 +#endif
 +
-+#define __ARCH_HAS_DO_SOFTIRQ 1
-+
-+#endif /* _ASM_IRQ_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/maddr.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/maddr.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/maddr.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/maddr.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,161 @@
-+#ifndef _X86_64_MADDR_H
-+#define _X86_64_MADDR_H
++#define INIT_MMAP \
++{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
 +
-+#include <xen/features.h>
-+#include <xen/interface/xen.h>
++#define start_thread(regs,new_rip,new_rsp) do { \
++	asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0));	 \
++	load_gs_index(0);							\
++	(regs)->rip = (new_rip);						 \
++	(regs)->rsp = (new_rsp);						 \
++	write_pda(oldrsp, (new_rsp));						 \
++	(regs)->cs = __USER_CS;							 \
++	(regs)->ss = __USER_DS;							 \
++	(regs)->eflags = 0x200;							 \
++	set_fs(USER_DS);							 \
++} while(0) 
 +
-+/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
-+#define INVALID_P2M_ENTRY	(~0UL)
-+#define FOREIGN_FRAME_BIT	(1UL<<63)
-+#define FOREIGN_FRAME(m)	((m) | FOREIGN_FRAME_BIT)
++#define get_debugreg(var, register)				\
++	var = HYPERVISOR_get_debugreg(register)
++#define set_debugreg(value, register) do {			\
++	if (HYPERVISOR_set_debugreg(register, value))		\
++		BUG();						\
++} while (0)
 +
-+/* Definitions for machine and pseudophysical addresses. */
-+typedef unsigned long paddr_t;
-+typedef unsigned long maddr_t;
++struct task_struct;
++struct mm_struct;
 +
-+#ifdef CONFIG_XEN
++/* Free all resources held by a thread. */
++extern void release_thread(struct task_struct *);
 +
-+extern unsigned long *phys_to_machine_mapping;
++/* Prepare to copy thread state - unlazy all lazy status */
++extern void prepare_to_copy(struct task_struct *tsk);
 +
-+#undef machine_to_phys_mapping
-+extern unsigned long *machine_to_phys_mapping;
-+extern unsigned int   machine_to_phys_order;
++/*
++ * create a kernel thread without removing it from tasklists
++ */
++extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 +
-+static inline unsigned long pfn_to_mfn(unsigned long pfn)
-+{
-+	if (xen_feature(XENFEAT_auto_translated_physmap))
-+		return pfn;
-+	BUG_ON(end_pfn && pfn >= end_pfn);
-+	return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT;
-+}
++/*
++ * Return saved PC of a blocked thread.
++ * What is this good for? It will always be the scheduler or ret_from_fork.
++ */
++#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))
 +
-+static inline int phys_to_machine_mapping_valid(unsigned long pfn)
-+{
-+	if (xen_feature(XENFEAT_auto_translated_physmap))
-+		return 1;
-+	BUG_ON(end_pfn && pfn >= end_pfn);
-+	return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
-+}
++extern unsigned long get_wchan(struct task_struct *p);
++#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1)
++#define KSTK_EIP(tsk) (task_pt_regs(tsk)->rip)
++#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
 +
-+static inline unsigned long mfn_to_pfn(unsigned long mfn)
-+{
-+	unsigned long pfn;
 +
-+	if (xen_feature(XENFEAT_auto_translated_physmap))
-+		return mfn;
++struct microcode_header {
++	unsigned int hdrver;
++	unsigned int rev;
++	unsigned int date;
++	unsigned int sig;
++	unsigned int cksum;
++	unsigned int ldrver;
++	unsigned int pf;
++	unsigned int datasize;
++	unsigned int totalsize;
++	unsigned int reserved[3];
++};
 +
-+	if (unlikely((mfn >> machine_to_phys_order) != 0))
-+		return end_pfn;
++struct microcode {
++	struct microcode_header hdr;
++	unsigned int bits[0];
++};
 +
-+	/* The array access can fail (e.g., device space beyond end of RAM). */
-+	asm (
-+		"1:	movq %1,%0\n"
-+		"2:\n"
-+		".section .fixup,\"ax\"\n"
-+		"3:	movq %2,%0\n"
-+		"	jmp  2b\n"
-+		".previous\n"
-+		".section __ex_table,\"a\"\n"
-+		"	.align 8\n"
-+		"	.quad 1b,3b\n"
-+		".previous"
-+		: "=r" (pfn)
-+		: "m" (machine_to_phys_mapping[mfn]), "m" (end_pfn) );
++typedef struct microcode microcode_t;
++typedef struct microcode_header microcode_header_t;
 +
-+	return pfn;
-+}
++/* The microcode format is extended from Prescott processors onward. */
++struct extended_signature {
++	unsigned int sig;
++	unsigned int pf;
++	unsigned int cksum;
++};
 +
-+/*
-+ * We detect special mappings in one of two ways:
-+ *  1. If the MFN is an I/O page then Xen will set the m2p entry
-+ *     to be outside our maximum possible pseudophys range.
-+ *  2. If the MFN belongs to a different domain then we will certainly
-+ *     not have MFN in our p2m table. Conversely, if the page is ours,
-+ *     then we'll have p2m(m2p(MFN))==MFN.
-+ * If we detect a special mapping then it doesn't have a 'struct page'.
-+ * We force !pfn_valid() by returning an out-of-range pointer.
-+ *
-+ * NB. These checks require that, for any MFN that is not in our reservation,
-+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
-+ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
-+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
-+ *
-+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
-+ *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
-+ *      require. In all the cases we care about, the FOREIGN_FRAME bit is
-+ *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
-+ */
-+static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
-+{
-+	unsigned long pfn = mfn_to_pfn(mfn);
-+	if ((pfn < end_pfn)
-+	    && !xen_feature(XENFEAT_auto_translated_physmap)
-+	    && (phys_to_machine_mapping[pfn] != mfn))
-+		return end_pfn; /* force !pfn_valid() */
-+	return pfn;
-+}
++struct extended_sigtable {
++	unsigned int count;
++	unsigned int cksum;
++	unsigned int reserved[3];
++	struct extended_signature sigs[0];
++};
 +
-+static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-+{
-+	BUG_ON(end_pfn && pfn >= end_pfn);
-+	if (xen_feature(XENFEAT_auto_translated_physmap)) {
-+		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
-+		return;
-+	}
-+	phys_to_machine_mapping[pfn] = mfn;
-+}
 +
-+static inline maddr_t phys_to_machine(paddr_t phys)
-+{
-+	maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
-+	machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
-+	return machine;
-+}
++#define ASM_NOP1 K8_NOP1
++#define ASM_NOP2 K8_NOP2
++#define ASM_NOP3 K8_NOP3
++#define ASM_NOP4 K8_NOP4
++#define ASM_NOP5 K8_NOP5
++#define ASM_NOP6 K8_NOP6
++#define ASM_NOP7 K8_NOP7
++#define ASM_NOP8 K8_NOP8
 +
-+static inline paddr_t machine_to_phys(maddr_t machine)
-+{
-+	paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
-+	phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
-+	return phys;
-+}
++/* Opteron nops */
++#define K8_NOP1 ".byte 0x90\n"
++#define K8_NOP2	".byte 0x66,0x90\n" 
++#define K8_NOP3	".byte 0x66,0x66,0x90\n" 
++#define K8_NOP4	".byte 0x66,0x66,0x66,0x90\n" 
++#define K8_NOP5	K8_NOP3 K8_NOP2 
++#define K8_NOP6	K8_NOP3 K8_NOP3
++#define K8_NOP7	K8_NOP4 K8_NOP3
++#define K8_NOP8	K8_NOP4 K8_NOP4
 +
-+static inline paddr_t pte_phys_to_machine(paddr_t phys)
-+{
-+	maddr_t machine;
-+	machine = pfn_to_mfn((phys & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT);
-+	machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK);
-+	return machine;
-+}
++#define ASM_NOP_MAX 8
 +
-+static inline paddr_t pte_machine_to_phys(maddr_t machine)
++/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
++static inline void rep_nop(void)
 +{
-+	paddr_t phys;
-+	phys = mfn_to_pfn((machine & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT);
-+	phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK);
-+	return phys;
++	__asm__ __volatile__("rep;nop": : :"memory");
 +}
 +
-+#define __pte_ma(x)     ((pte_t) { (x) } )
-+#define pfn_pte_ma(pfn, prot)	__pte_ma((((pfn) << PAGE_SHIFT) | pgprot_val(prot)) & __supported_pte_mask)
++/* Stop speculative execution */
++static inline void sync_core(void)
++{ 
++	int tmp;
++	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
++} 
 +
-+#else /* !CONFIG_XEN */
++#define cpu_has_fpu 1
 +
-+#define pfn_to_mfn(pfn) (pfn)
-+#define mfn_to_pfn(mfn) (mfn)
-+#define mfn_to_local_pfn(mfn) (mfn)
-+#define set_phys_to_machine(pfn, mfn) ((void)0)
-+#define phys_to_machine_mapping_valid(pfn) (1)
-+#define phys_to_machine(phys) ((maddr_t)(phys))
-+#define machine_to_phys(mach) ((paddr_t)(mach))
-+#define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot)
-+#define __pte_ma(x) __pte(x)
++#define ARCH_HAS_PREFETCH
++static inline void prefetch(void *x) 
++{ 
++	asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
++} 
 +
-+#endif /* !CONFIG_XEN */
++#define ARCH_HAS_PREFETCHW 1
++static inline void prefetchw(void *x) 
++{ 
++	alternative_input("prefetcht0 (%1)",
++			  "prefetchw (%1)",
++			  X86_FEATURE_3DNOW,
++			  "r" (x));
++} 
 +
-+/* VIRT <-> MACHINE conversion */
-+#define virt_to_machine(v)	(phys_to_machine(__pa(v)))
-+#define virt_to_mfn(v)		(pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
-+#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
++#define ARCH_HAS_SPINLOCK_PREFETCH 1
 +
-+#endif /* _X86_64_MADDR_H */
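The conversion helpers in the removed maddr.h above all follow the same arithmetic: split the address into a frame number and an in-page offset, translate only the frame through the p2m (or m2p) table, and reassemble. A standalone sketch with a toy four-entry p2m table, purely for illustration:

#include <stdio.h>

#define PAGE_SHIFT_S 12
#define PAGE_SIZE_S  (1UL << PAGE_SHIFT_S)

/* Toy p2m table, assumed for illustration: pseudo-physical frame -> machine frame. */
static const unsigned long p2m_sketch[] = { 7, 42, 3, 19 };

static unsigned long phys_to_machine_sketch(unsigned long phys)
{
    unsigned long mfn = p2m_sketch[phys >> PAGE_SHIFT_S];      /* translate the frame */
    return (mfn << PAGE_SHIFT_S) | (phys & (PAGE_SIZE_S - 1)); /* keep the offset */
}

int main(void)
{
    unsigned long phys = (1UL << PAGE_SHIFT_S) + 0x123;        /* pfn 1, offset 0x123 */
    printf("phys %#lx -> machine %#lx\n", phys, phys_to_machine_sketch(phys));
    return 0;  /* pfn 1 maps to mfn 42, offset preserved: 0x2a123 */
}
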
++#define spin_lock_prefetch(x)  prefetchw(x)
 +
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/mmu_context.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/mmu_context.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/mmu_context.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/mmu_context.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,136 @@
-+#ifndef __X86_64_MMU_CONTEXT_H
-+#define __X86_64_MMU_CONTEXT_H
++#define cpu_relax()   rep_nop()
 +
-+#include <asm/desc.h>
-+#include <asm/atomic.h>
-+#include <asm/pgalloc.h>
-+#include <asm/page.h>
-+#include <asm/pda.h>
-+#include <asm/pgtable.h>
-+#include <asm/tlbflush.h>
++/*
++ *      NSC/Cyrix CPU configuration register indexes
++ */
++#define CX86_CCR0 0xc0
++#define CX86_CCR1 0xc1
++#define CX86_CCR2 0xc2
++#define CX86_CCR3 0xc3
++#define CX86_CCR4 0xe8
++#define CX86_CCR5 0xe9
++#define CX86_CCR6 0xea
++#define CX86_CCR7 0xeb
++#define CX86_DIR0 0xfe
++#define CX86_DIR1 0xff
++#define CX86_ARR_BASE 0xc4
++#define CX86_RCR_BASE 0xdc
 +
 +/*
-+ * possibly do the LDT unload here?
++ *      NSC/Cyrix CPU indexed register access macros
 + */
-+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-+void destroy_context(struct mm_struct *mm);
 +
-+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-+{
-+#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
-+	if (read_pda(mmu_state) == TLBSTATE_OK) 
-+		write_pda(mmu_state, TLBSTATE_LAZY);
-+#endif
-+}
++#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
 +
-+#define prepare_arch_switch(next)	__prepare_arch_switch()
++#define setCx86(reg, data) do { \
++	outb((reg), 0x22); \
++	outb((data), 0x23); \
++} while (0)
 +
-+static inline void __prepare_arch_switch(void)
++static inline void serialize_cpu(void)
 +{
-+	/*
-+	 * Save away %es, %ds, %fs and %gs. Must happen before reload
-+	 * of cr3/ldt (i.e., not in __switch_to).
-+	 */
-+	__asm__ __volatile__ (
-+		"mov %%es,%0 ; mov %%ds,%1 ; mov %%fs,%2 ; mov %%gs,%3"
-+		: "=m" (current->thread.es),
-+		  "=m" (current->thread.ds),
-+		  "=m" (current->thread.fsindex),
-+		  "=m" (current->thread.gsindex) );
-+
-+	if (current->thread.ds)
-+		__asm__ __volatile__ ( "movl %0,%%ds" : : "r" (0) );
-+
-+	if (current->thread.es)
-+		__asm__ __volatile__ ( "movl %0,%%es" : : "r" (0) );
-+
-+	if (current->thread.fsindex) {
-+		__asm__ __volatile__ ( "movl %0,%%fs" : : "r" (0) );
-+		current->thread.fs = 0;
-+	}
-+
-+	if (current->thread.gsindex) {
-+		load_gs_index(0);
-+		current->thread.gs = 0;
-+	}
++	__asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx");
 +}
 +
-+extern void mm_pin(struct mm_struct *mm);
-+extern void mm_unpin(struct mm_struct *mm);
-+void mm_pin_all(void);
-+
-+static inline void load_cr3(pgd_t *pgd)
++static inline void __monitor(const void *eax, unsigned long ecx,
++		unsigned long edx)
 +{
-+	asm volatile("movq %0,%%cr3" :: "r" (phys_to_machine(__pa(pgd))) :
-+		     "memory");
++	/* "monitor %eax,%ecx,%edx;" */
++	asm volatile(
++		".byte 0x0f,0x01,0xc8;"
++		: :"a" (eax), "c" (ecx), "d"(edx));
 +}
 +
-+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 
-+			     struct task_struct *tsk)
++static inline void __mwait(unsigned long eax, unsigned long ecx)
 +{
-+	unsigned cpu = smp_processor_id();
-+	struct mmuext_op _op[3], *op = _op;
++	/* "mwait %eax,%ecx;" */
++	asm volatile(
++		".byte 0x0f,0x01,0xc9;"
++		: :"a" (eax), "c" (ecx));
++}
 +
-+	if (likely(prev != next)) {
-+		BUG_ON(!xen_feature(XENFEAT_writable_page_tables) &&
-+		       !next->context.pinned);
++#define stack_current() \
++({								\
++	struct thread_info *ti;					\
++	asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
++	ti->task;					\
++})
 +
-+		/* stop flush ipis for the previous mm */
-+		cpu_clear(cpu, prev->cpu_vm_mask);
-+#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
-+		write_pda(mmu_state, TLBSTATE_OK);
-+		write_pda(active_mm, next);
-+#endif
-+		cpu_set(cpu, next->cpu_vm_mask);
++#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
 +
-+		/* load_cr3(next->pgd) */
-+		op->cmd = MMUEXT_NEW_BASEPTR;
-+		op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
-+		op++;
++extern unsigned long boot_option_idle_override;
++/* Boot loader type from the setup header */
++extern int bootloader_type;
 +
-+		/* xen_new_user_pt(__pa(__user_pgd(next->pgd))) */
-+		op->cmd = MMUEXT_NEW_USER_BASEPTR;
-+		op->arg1.mfn = pfn_to_mfn(__pa(__user_pgd(next->pgd)) >> PAGE_SHIFT);
-+		op++;
-+		
-+		if (unlikely(next->context.ldt != prev->context.ldt)) {
-+			/* load_LDT_nolock(&next->context, cpu) */
-+			op->cmd = MMUEXT_SET_LDT;
-+			op->arg1.linear_addr = (unsigned long)next->context.ldt;
-+			op->arg2.nr_ents     = next->context.size;
-+			op++;
-+		}
++#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
 +
-+		BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
-+	}
-+#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
-+	else {
-+		write_pda(mmu_state, TLBSTATE_OK);
-+		if (read_pda(active_mm) != next)
-+			out_of_line_bug();
-+		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
-+			/* We were in lazy tlb mode and leave_mm disabled 
-+			 * tlb flush IPI delivery. We must reload CR3
-+			 * to make sure to use no freed page tables.
-+			 */
-+                        load_cr3(next->pgd);
-+                        xen_new_user_pt(__pa(__user_pgd(next->pgd)));		
-+			load_LDT_nolock(&next->context, cpu);
-+		}
-+	}
-+#endif
-+}
++#endif /* __ASM_X86_64_PROCESSOR_H */
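The stack_current() macro in the hunk above recovers the current task by masking %rsp down to the base of the kernel stack; CURRENT_MASK is derived from THREAD_SIZE in the page.h hunk further down. A minimal user-space sketch of the same masking trick, assuming the order-1 (8 KiB) stack size used there — the alignment guarantee only holds for real kernel stacks, so this only illustrates the arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    #define THREAD_SIZE  (4096UL << 1)          /* PAGE_SIZE << THREAD_ORDER */
    #define CURRENT_MASK (~(THREAD_SIZE - 1))   /* round down to the stack base */

    int main(void)
    {
        int dummy;                              /* lives on this thread's stack */
        uintptr_t sp   = (uintptr_t)&dummy;
        uintptr_t base = sp & CURRENT_MASK;     /* thread_info sits at the base */

        printf("sp=%#lx base=%#lx\n", (unsigned long)sp, (unsigned long)base);
        return 0;
    }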
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/ptrace.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/ptrace.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,127 @@
++#ifndef _X86_64_PTRACE_H
++#define _X86_64_PTRACE_H
 +
-+#define deactivate_mm(tsk,mm)	do { \
-+	load_gs_index(0); \
-+	asm volatile("movl %0,%%fs"::"r"(0));  \
-+} while(0)
++#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS) 
++#define R15 0
++#define R14 8
++#define R13 16
++#define R12 24
++#define RBP 32
++#define RBX 40
++/* arguments: interrupts/non-tracing syscalls only save up to here */
++#define R11 48
++#define R10 56	
++#define R9 64
++#define R8 72
++#define RAX 80
++#define RCX 88
++#define RDX 96
++#define RSI 104
++#define RDI 112
++#define ORIG_RAX 120       /* = ERROR */ 
++/* end of arguments */ 	
++/* cpu exception frame or undefined in case of fast syscall. */
++#define RIP 128
++#define CS 136
++#define EFLAGS 144
++#define RSP 152
++#define SS 160
++#define ARGOFFSET R11
++#endif /* __ASSEMBLY__ */
 +
-+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
-+{
-+	if (!next->context.pinned)
-+		mm_pin(next);
-+	switch_mm(prev, next, NULL);
-+}
++/* top of stack page */ 
++#define FRAME_SIZE 168
 +
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/mmu.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/mmu.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/mmu.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/mmu.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,38 @@
-+#ifndef __x86_64_MMU_H
-+#define __x86_64_MMU_H
++#define PTRACE_OLDSETOPTIONS         21
 +
-+#include <linux/spinlock.h>
-+#include <asm/semaphore.h>
++#ifndef __ASSEMBLY__ 
++
++struct pt_regs {
++	unsigned long r15;
++	unsigned long r14;
++	unsigned long r13;
++	unsigned long r12;
++	unsigned long rbp;
++	unsigned long rbx;
++/* arguments: non-interrupt/non-tracing syscalls only save up to here */
++ 	unsigned long r11;
++	unsigned long r10;	
++	unsigned long r9;
++	unsigned long r8;
++	unsigned long rax;
++	unsigned long rcx;
++	unsigned long rdx;
++	unsigned long rsi;
++	unsigned long rdi;
++	unsigned long orig_rax;
++/* end of arguments */ 	
++/* cpu exception frame or undefined */
++	unsigned long rip;
++	unsigned long cs;
++	unsigned long eflags; 
++	unsigned long rsp; 
++	unsigned long ss;
++/* top of stack page */ 
++};
 +
-+/*
-+ * The x86_64 doesn't have a mmu context, but
-+ * we put the segment information here.
-+ *
-+ * cpu_vm_mask is used to optimize ldt flushing.
-+ */
-+typedef struct { 
-+	void *ldt;
-+	rwlock_t ldtlock; 
-+	int size;
-+	struct semaphore sem; 
-+#ifdef CONFIG_XEN
-+	unsigned pinned:1;
-+	unsigned has_foreign_mappings:1;
-+	struct list_head unpinned;
 +#endif
-+} mm_context_t;
 +
-+#ifdef CONFIG_XEN
-+extern struct list_head mm_unpinned;
-+extern spinlock_t mm_unpinned_lock;
++/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
++#define PTRACE_GETREGS            12
++#define PTRACE_SETREGS            13
++#define PTRACE_GETFPREGS          14
++#define PTRACE_SETFPREGS          15
++#define PTRACE_GETFPXREGS         18
++#define PTRACE_SETFPXREGS         19
 +
-+/* mm/memory.c:exit_mmap hook */
-+extern void _arch_exit_mmap(struct mm_struct *mm);
-+#define arch_exit_mmap(_mm) _arch_exit_mmap(_mm)
++/* only useful for accessing 32-bit programs */
++#define PTRACE_GET_THREAD_AREA    25
++#define PTRACE_SET_THREAD_AREA    26
 +
-+/* kernel/fork.c:dup_mmap hook */
-+extern void _arch_dup_mmap(struct mm_struct *mm);
-+#define arch_dup_mmap(mm, oldmm) ((void)(oldmm), _arch_dup_mmap(mm))
-+#endif
++#define PTRACE_ARCH_PRCTL	  30	/* arch_prctl for child */
 +
++#if defined(__KERNEL__) && !defined(__ASSEMBLY__) 
++#define user_mode(regs) (!!((regs)->cs & 3))
++#define user_mode_vm(regs) user_mode(regs)
++#define instruction_pointer(regs) ((regs)->rip)
++#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
++extern unsigned long profile_pc(struct pt_regs *regs);
++#else
++#define profile_pc(regs) instruction_pointer(regs)
 +#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/msr.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/msr.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/msr.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/msr.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,399 @@
-+#ifndef X86_64_MSR_H
-+#define X86_64_MSR_H 1
 +
-+#ifndef __ASSEMBLY__
-+/*
-+ * Access to machine-specific registers (available on 586 and better only)
-+ * Note: the rd* operations modify the parameters directly (without using
-+ * pointer indirection), this allows gcc to optimize better
-+ */
++#include <linux/compiler.h>
 +
-+#define rdmsr(msr,val1,val2) \
-+       __asm__ __volatile__("rdmsr" \
-+			    : "=a" (val1), "=d" (val2) \
-+			    : "c" (msr))
++void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
 +
++struct task_struct;
 +
-+#define rdmsrl(msr,val) do { unsigned long a__,b__; \
-+       __asm__ __volatile__("rdmsr" \
-+			    : "=a" (a__), "=d" (b__) \
-+			    : "c" (msr)); \
-+       val = a__ | (b__<<32); \
-+} while(0)
++extern unsigned long
++convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs);
 +
-+#define wrmsr(msr,val1,val2) \
-+     __asm__ __volatile__("wrmsr" \
-+			  : /* no outputs */ \
-+			  : "c" (msr), "a" (val1), "d" (val2))
++enum {
++        EF_CF   = 0x00000001,
++        EF_PF   = 0x00000004,
++        EF_AF   = 0x00000010,
++        EF_ZF   = 0x00000040,
++        EF_SF   = 0x00000080,
++        EF_TF   = 0x00000100,
++        EF_IE   = 0x00000200,
++        EF_DF   = 0x00000400,
++        EF_OF   = 0x00000800,
++        EF_IOPL = 0x00003000,
++        EF_IOPL_RING0 = 0x00000000,
++        EF_IOPL_RING1 = 0x00001000,
++        EF_IOPL_RING2 = 0x00002000,
++        EF_NT   = 0x00004000,   /* nested task */
++        EF_RF   = 0x00010000,   /* resume */
++        EF_VM   = 0x00020000,   /* virtual mode */
++        EF_AC   = 0x00040000,   /* alignment */
++        EF_VIF  = 0x00080000,   /* virtual interrupt */
++        EF_VIP  = 0x00100000,   /* virtual interrupt pending */
++        EF_ID   = 0x00200000,   /* id */
++};
 +
-+#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32) 
++#endif
 +
-+/* wrmsr with exception handling */
-+#define wrmsr_safe(msr,a,b) ({ int ret__;			\
-+	asm volatile("2: wrmsr ; xorl %0,%0\n"			\
-+		     "1:\n\t"					\
-+		     ".section .fixup,\"ax\"\n\t"		\
-+		     "3:  movl %4,%0 ; jmp 1b\n\t"		\
-+		     ".previous\n\t"				\
-+ 		     ".section __ex_table,\"a\"\n"		\
-+		     "   .align 8\n\t"				\
-+		     "   .quad 	2b,3b\n\t"			\
-+		     ".previous"				\
-+		     : "=a" (ret__)				\
-+		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
-+	ret__; })
++#endif
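user_mode(regs) in the ptrace.h hunk above tests the requested-privilege-level bits of the saved %cs selector: ring-3 selectors have the low two bits set, ring-0 selectors have them clear. A standalone sketch of the check, with a cut-down stand-in for struct pt_regs (the selector values are illustrative only):

    #include <stdio.h>

    struct fake_regs { unsigned long rip, cs; };    /* stand-in for pt_regs */

    /* mirrors: #define user_mode(regs) (!!((regs)->cs & 3)) */
    static int is_user_mode(const struct fake_regs *regs)
    {
        return (regs->cs & 3) != 0;
    }

    int main(void)
    {
        struct fake_regs k = { .rip = 0, .cs = 0x10 };  /* RPL 0: kernel */
        struct fake_regs u = { .rip = 0, .cs = 0x33 };  /* RPL 3: user */

        printf("kernel=%d user=%d\n", is_user_mode(&k), is_user_mode(&u));
        return 0;
    }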
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/smp.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/smp.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,150 @@
++#ifndef __ASM_SMP_H
++#define __ASM_SMP_H
 +
-+#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
++/*
++ * We need the APIC definitions automatically as part of 'smp.h'
++ */
++#ifndef __ASSEMBLY__
++#include <linux/threads.h>
++#include <linux/cpumask.h>
++#include <linux/bitops.h>
++extern int disable_apic;
++#endif
 +
-+#define rdmsr_safe(msr,a,b) \
-+	({ int ret__;						\
-+	  asm volatile ("1:       rdmsr\n"			\
-+                      "2:\n"					\
-+                      ".section .fixup,\"ax\"\n"		\
-+                      "3:       movl %4,%0\n"			\
-+                      " jmp 2b\n"				\
-+                      ".previous\n"				\
-+                      ".section __ex_table,\"a\"\n"		\
-+                      " .align 8\n"				\
-+                      " .quad 1b,3b\n"				\
-+                      ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b))\
-+                      :"c"(msr), "i"(-EIO), "0"(0));		\
-+	  ret__; })		
++#ifdef CONFIG_X86_LOCAL_APIC
++#ifndef __ASSEMBLY__
++#include <asm/fixmap.h>
++#include <asm/mpspec.h>
++#ifdef CONFIG_X86_IO_APIC
++#include <asm/io_apic.h>
++#endif
++#include <asm/apic.h>
++#include <asm/thread_info.h>
++#endif
++#endif
 +
-+#define rdtsc(low,high) \
-+     __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
++#ifdef CONFIG_SMP
++#ifndef ASSEMBLY
 +
-+#define rdtscl(low) \
-+     __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
++#include <asm/pda.h>
 +
-+#define rdtscll(val) do { \
-+     unsigned int __a,__d; \
-+     asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
-+     (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
-+} while(0)
++struct pt_regs;
 +
-+#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
++extern cpumask_t cpu_present_mask;
++extern cpumask_t cpu_possible_map;
++extern cpumask_t cpu_online_map;
++extern cpumask_t cpu_initialized;
 +
-+#define rdpmc(counter,low,high) \
-+     __asm__ __volatile__("rdpmc" \
-+			  : "=a" (low), "=d" (high) \
-+			  : "c" (counter))
++/*
++ * Private routines/data
++ */
++ 
++extern void smp_alloc_memory(void);
++extern volatile unsigned long smp_invalidate_needed;
++extern int pic_mode;
++extern void lock_ipi_call_lock(void);
++extern void unlock_ipi_call_lock(void);
++extern int smp_num_siblings;
++extern void smp_send_reschedule(int cpu);
++void smp_stop_cpu(void);
++extern int smp_call_function_single(int cpuid, void (*func) (void *info),
++				void *info, int retry, int wait);
 +
-+static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
-+			 unsigned int *ecx, unsigned int *edx)
-+{
-+	__asm__(XEN_CPUID
-+		: "=a" (*eax),
-+		  "=b" (*ebx),
-+		  "=c" (*ecx),
-+		  "=d" (*edx)
-+		: "0" (op));
-+}
++extern cpumask_t cpu_sibling_map[NR_CPUS];
++extern cpumask_t cpu_core_map[NR_CPUS];
++extern u8 cpu_llc_id[NR_CPUS];
 +
-+/* Some CPUID calls want 'count' to be placed in ecx */
-+static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
-+	       	int *edx)
-+{
-+	__asm__(XEN_CPUID
-+		: "=a" (*eax),
-+		  "=b" (*ebx),
-+		  "=c" (*ecx),
-+		  "=d" (*edx)
-+		: "0" (op), "c" (count));
-+}
++#define SMP_TRAMPOLINE_BASE 0x6000
 +
 +/*
-+ * CPUID functions returning a single datum
++ * On x86 all CPUs are mapped 1:1 to the APIC space.
++ * This simplifies scheduling and IPI sending and
++ * compresses data structures.
 + */
-+static inline unsigned int cpuid_eax(unsigned int op)
-+{
-+	unsigned int eax;
 +
-+	__asm__(XEN_CPUID
-+		: "=a" (eax)
-+		: "0" (op)
-+		: "bx", "cx", "dx");
-+	return eax;
-+}
-+static inline unsigned int cpuid_ebx(unsigned int op)
++static inline int num_booting_cpus(void)
 +{
-+	unsigned int eax, ebx;
-+
-+	__asm__(XEN_CPUID
-+		: "=a" (eax), "=b" (ebx)
-+		: "0" (op)
-+		: "cx", "dx" );
-+	return ebx;
++	return cpus_weight(cpu_possible_map);
 +}
-+static inline unsigned int cpuid_ecx(unsigned int op)
-+{
-+	unsigned int eax, ecx;
 +
-+	__asm__(XEN_CPUID
-+		: "=a" (eax), "=c" (ecx)
-+		: "0" (op)
-+		: "bx", "dx" );
-+	return ecx;
-+}
-+static inline unsigned int cpuid_edx(unsigned int op)
-+{
-+	unsigned int eax, edx;
++#define raw_smp_processor_id() read_pda(cpunumber)
 +
-+	__asm__(XEN_CPUID
-+		: "=a" (eax), "=d" (edx)
-+		: "0" (op)
-+		: "bx", "cx");
-+	return edx;
++#ifdef CONFIG_X86_LOCAL_APIC
++static inline int hard_smp_processor_id(void)
++{
++	/* we don't want to mark this access volatile - bad code generation */
++	return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
 +}
++#endif
++
++extern int safe_smp_processor_id(void);
++extern int __cpu_disable(void);
++extern void __cpu_die(unsigned int cpu);
++extern void prefill_possible_map(void);
++extern unsigned num_processors;
++extern unsigned disabled_cpus;
 +
-+#define MSR_IA32_UCODE_WRITE		0x79
-+#define MSR_IA32_UCODE_REV		0x8b
++#endif /* !ASSEMBLY */
 +
++#define NO_PROC_ID		0xFF		/* No processor magic marker */
 +
 +#endif
 +
-+/* AMD/K8 specific MSRs */ 
-+#define MSR_EFER 0xc0000080		/* extended feature register */
-+#define MSR_STAR 0xc0000081		/* legacy mode SYSCALL target */
-+#define MSR_LSTAR 0xc0000082 		/* long mode SYSCALL target */
-+#define MSR_CSTAR 0xc0000083		/* compatibility mode SYSCALL target */
-+#define MSR_SYSCALL_MASK 0xc0000084	/* EFLAGS mask for syscall */
-+#define MSR_FS_BASE 0xc0000100		/* 64bit GS base */
-+#define MSR_GS_BASE 0xc0000101		/* 64bit FS base */
-+#define MSR_KERNEL_GS_BASE  0xc0000102	/* SwapGS GS shadow (or USER_GS from kernel) */ 
-+/* EFER bits: */ 
-+#define _EFER_SCE 0  /* SYSCALL/SYSRET */
-+#define _EFER_LME 8  /* Long mode enable */
-+#define _EFER_LMA 10 /* Long mode active (read-only) */
-+#define _EFER_NX 11  /* No execute enable */
++#ifndef ASSEMBLY
++/*
++ * Some lowlevel functions might want to know about
++ * the real APIC ID <-> CPU # mapping.
++ */
++extern u8 x86_cpu_to_apicid[NR_CPUS];	/* physical ID */
++extern u8 x86_cpu_to_log_apicid[NR_CPUS];
++extern u8 bios_cpu_apicid[];
 +
-+#define EFER_SCE (1<<_EFER_SCE)
-+#define EFER_LME (1<<_EFER_LME)
-+#define EFER_LMA (1<<_EFER_LMA)
-+#define EFER_NX (1<<_EFER_NX)
++#ifdef CONFIG_X86_LOCAL_APIC
++static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
++{
++	return cpus_addr(cpumask)[0];
++}
 +
-+/* Intel MSRs. Some also available on other CPUs */
-+#define MSR_IA32_TSC		0x10
-+#define MSR_IA32_PLATFORM_ID	0x17
++static inline int cpu_present_to_apicid(int mps_cpu)
++{
++	if (mps_cpu < NR_CPUS)
++		return (int)bios_cpu_apicid[mps_cpu];
++	else
++		return BAD_APICID;
++}
++#endif
 +
-+#define MSR_IA32_PERFCTR0      0xc1
-+#define MSR_IA32_PERFCTR1      0xc2
++#endif /* !ASSEMBLY */
 +
-+#define MSR_MTRRcap		0x0fe
-+#define MSR_IA32_BBL_CR_CTL        0x119
++#ifndef CONFIG_SMP
++#define stack_smp_processor_id() 0
++#define safe_smp_processor_id() 0
++#define cpu_logical_map(x) (x)
++#else
++#include <asm/thread_info.h>
++#define stack_smp_processor_id() \
++({ 								\
++	struct thread_info *ti;					\
++	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
++	ti->cpu;						\
++})
++#endif
 +
-+#define MSR_IA32_SYSENTER_CS	0x174
-+#define MSR_IA32_SYSENTER_ESP	0x175
-+#define MSR_IA32_SYSENTER_EIP	0x176
++#ifndef __ASSEMBLY__
++#ifdef CONFIG_X86_LOCAL_APIC
++static __inline int logical_smp_processor_id(void)
++{
++	/* we don't want to mark this access volatile - bad code generation */
++	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
++}
++#endif
++#endif
 +
-+#define MSR_IA32_MCG_CAP       0x179
-+#define MSR_IA32_MCG_STATUS        0x17a
-+#define MSR_IA32_MCG_CTL       0x17b
++#ifdef CONFIG_SMP
++#define cpu_physical_id(cpu)		x86_cpu_to_apicid[cpu]
++#else
++#define cpu_physical_id(cpu)		boot_cpu_id
++#endif
 +
-+#define MSR_IA32_EVNTSEL0      0x186
-+#define MSR_IA32_EVNTSEL1      0x187
++#endif
 +
-+#define MSR_IA32_DEBUGCTLMSR       0x1d9
-+#define MSR_IA32_LASTBRANCHFROMIP  0x1db
-+#define MSR_IA32_LASTBRANCHTOIP        0x1dc
-+#define MSR_IA32_LASTINTFROMIP     0x1dd
-+#define MSR_IA32_LASTINTTOIP       0x1de
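num_booting_cpus() in the smp.h hunk above is just a population count over cpu_possible_map. A small sketch of the same idea with a plain 64-bit word standing in for cpumask_t, which is what cpus_weight() reduces to on configurations of at most 64 CPUs:

    #include <stdio.h>

    /* stand-in for cpus_weight() on a <= 64-CPU mask */
    static int mask_weight(unsigned long long mask)
    {
        int n = 0;
        while (mask) {
            mask &= mask - 1;   /* clear the lowest set bit */
            n++;
        }
        return n;
    }

    int main(void)
    {
        unsigned long long cpu_possible = 0xfULL;   /* CPUs 0-3 possible */
        printf("booting cpus: %d\n", mask_weight(cpu_possible));
        return 0;
    }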
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/synch_bitops.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/synch_bitops.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,2 @@
 +
-+#define MSR_MTRRfix64K_00000	0x250
-+#define MSR_MTRRfix16K_80000	0x258
-+#define MSR_MTRRfix16K_A0000	0x259
-+#define MSR_MTRRfix4K_C0000	0x268
-+#define MSR_MTRRfix4K_C8000	0x269
-+#define MSR_MTRRfix4K_D0000	0x26a
-+#define MSR_MTRRfix4K_D8000	0x26b
-+#define MSR_MTRRfix4K_E0000	0x26c
-+#define MSR_MTRRfix4K_E8000	0x26d
-+#define MSR_MTRRfix4K_F0000	0x26e
-+#define MSR_MTRRfix4K_F8000	0x26f
-+#define MSR_MTRRdefType		0x2ff
++#include <asm-i386/mach-xen/asm/synch_bitops.h>
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/system.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/system.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,256 @@
++#ifndef __ASM_SYSTEM_H
++#define __ASM_SYSTEM_H
 +
-+#define MSR_IA32_MC0_CTL       0x400
-+#define MSR_IA32_MC0_STATUS        0x401
-+#define MSR_IA32_MC0_ADDR      0x402
-+#define MSR_IA32_MC0_MISC      0x403
++#include <linux/kernel.h>
++#include <asm/segment.h>
++#include <asm/alternative.h>
 +
-+#define MSR_P6_PERFCTR0			0xc1
-+#define MSR_P6_PERFCTR1			0xc2
-+#define MSR_P6_EVNTSEL0			0x186
-+#define MSR_P6_EVNTSEL1			0x187
++#include <asm/synch_bitops.h>
++#include <asm/hypervisor.h>
++#include <xen/interface/arch-x86_64.h>
 +
-+/* K7/K8 MSRs. Not complete. See the architecture manual for a more complete list. */
-+#define MSR_K7_EVNTSEL0            0xC0010000
-+#define MSR_K7_PERFCTR0            0xC0010004
-+#define MSR_K7_EVNTSEL1            0xC0010001
-+#define MSR_K7_PERFCTR1            0xC0010005
-+#define MSR_K7_EVNTSEL2            0xC0010002
-+#define MSR_K7_PERFCTR2            0xC0010006
-+#define MSR_K7_EVNTSEL3            0xC0010003
-+#define MSR_K7_PERFCTR3            0xC0010007
-+#define MSR_K8_TOP_MEM1		   0xC001001A
-+#define MSR_K8_TOP_MEM2		   0xC001001D
-+#define MSR_K8_SYSCFG		   0xC0010010
-+#define MSR_K8_HWCR		   0xC0010015
++#ifdef __KERNEL__
 +
-+/* K6 MSRs */
-+#define MSR_K6_EFER			0xC0000080
-+#define MSR_K6_STAR			0xC0000081
-+#define MSR_K6_WHCR			0xC0000082
-+#define MSR_K6_UWCCR			0xC0000085
-+#define MSR_K6_PSOR			0xC0000087
-+#define MSR_K6_PFIR			0xC0000088
++#define __STR(x) #x
++#define STR(x) __STR(x)
 +
-+/* Centaur-Hauls/IDT defined MSRs. */
-+#define MSR_IDT_FCR1			0x107
-+#define MSR_IDT_FCR2			0x108
-+#define MSR_IDT_FCR3			0x109
-+#define MSR_IDT_FCR4			0x10a
++#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
++#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
 +
-+#define MSR_IDT_MCR0			0x110
-+#define MSR_IDT_MCR1			0x111
-+#define MSR_IDT_MCR2			0x112
-+#define MSR_IDT_MCR3			0x113
-+#define MSR_IDT_MCR4			0x114
-+#define MSR_IDT_MCR5			0x115
-+#define MSR_IDT_MCR6			0x116
-+#define MSR_IDT_MCR7			0x117
-+#define MSR_IDT_MCR_CTRL		0x120
++/* frame pointer must be last for get_wchan */
++#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
++#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\n\t"
 +
-+/* VIA Cyrix defined MSRs*/
-+#define MSR_VIA_FCR			0x1107
-+#define MSR_VIA_LONGHAUL		0x110a
-+#define MSR_VIA_RNG			0x110b
-+#define MSR_VIA_BCR2			0x1147
++#define __EXTRA_CLOBBER  \
++	,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
 +
-+/* Intel defined MSRs. */
-+#define MSR_IA32_P5_MC_ADDR		0
-+#define MSR_IA32_P5_MC_TYPE		1
-+#define MSR_IA32_PLATFORM_ID		0x17
-+#define MSR_IA32_EBL_CR_POWERON		0x2a
++#define switch_to(prev,next,last) \
++	asm volatile(SAVE_CONTEXT						    \
++		     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
++		     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
++		     "call __switch_to\n\t"					  \
++		     ".globl thread_return\n"					\
++		     "thread_return:\n\t"					    \
++		     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"			  \
++		     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
++		     LOCK_PREFIX "btr  %[tif_fork],%P[ti_flags](%%r8)\n\t"	  \
++		     "movq %%rax,%%rdi\n\t" 					  \
++		     "jc   ret_from_fork\n\t"					  \
++		     RESTORE_CONTEXT						    \
++		     : "=a" (last)					  	  \
++		     : [next] "S" (next), [prev] "D" (prev),			  \
++		       [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
++		       [ti_flags] "i" (offsetof(struct thread_info, flags)),\
++		       [tif_fork] "i" (TIF_FORK),			  \
++		       [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
++		       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))   \
++		     : "memory", "cc" __EXTRA_CLOBBER)
++    
++extern void load_gs_index(unsigned); 
 +
-+#define MSR_IA32_APICBASE               0x1b
-+#define MSR_IA32_APICBASE_BSP           (1<<8)
-+#define MSR_IA32_APICBASE_ENABLE        (1<<11)
-+#define MSR_IA32_APICBASE_BASE          (0xfffff<<12)
++/*
++ * Load a segment. Fall back on loading the zero
++ * segment if something goes wrong..
++ */
++#define loadsegment(seg,value)	\
++	asm volatile("\n"			\
++		"1:\t"				\
++		"movl %k0,%%" #seg "\n"		\
++		"2:\n"				\
++		".section .fixup,\"ax\"\n"	\
++		"3:\t"				\
++		"movl %1,%%" #seg "\n\t" 	\
++		"jmp 2b\n"			\
++		".previous\n"			\
++		".section __ex_table,\"a\"\n\t"	\
++		".align 8\n\t"			\
++		".quad 1b,3b\n"			\
++		".previous"			\
++		: :"r" (value), "r" (0))
 +
-+/* P4/Xeon+ specific */
-+#define MSR_IA32_MCG_EAX		0x180
-+#define MSR_IA32_MCG_EBX		0x181
-+#define MSR_IA32_MCG_ECX		0x182
-+#define MSR_IA32_MCG_EDX		0x183
-+#define MSR_IA32_MCG_ESI		0x184
-+#define MSR_IA32_MCG_EDI		0x185
-+#define MSR_IA32_MCG_EBP		0x186
-+#define MSR_IA32_MCG_ESP		0x187
-+#define MSR_IA32_MCG_EFLAGS		0x188
-+#define MSR_IA32_MCG_EIP		0x189
-+#define MSR_IA32_MCG_RESERVED		0x18A
++/*
++ * Clear and set 'TS' bit respectively
++ */
++#define clts() (HYPERVISOR_fpu_taskswitch(0))
 +
-+#define MSR_P6_EVNTSEL0			0x186
-+#define MSR_P6_EVNTSEL1			0x187
++static inline unsigned long read_cr0(void)
++{ 
++	unsigned long cr0;
++	asm volatile("movq %%cr0,%0" : "=r" (cr0));
++	return cr0;
++} 
 +
-+#define MSR_IA32_PERF_STATUS		0x198
-+#define MSR_IA32_PERF_CTL		0x199
++static inline void write_cr0(unsigned long val) 
++{ 
++	asm volatile("movq %0,%%cr0" :: "r" (val));
++} 
 +
-+#define MSR_IA32_THERM_CONTROL		0x19a
-+#define MSR_IA32_THERM_INTERRUPT	0x19b
-+#define MSR_IA32_THERM_STATUS		0x19c
-+#define MSR_IA32_MISC_ENABLE		0x1a0
++#define read_cr3() ({ \
++	unsigned long __dummy; \
++	asm("movq %%cr3,%0" : "=r" (__dummy)); \
++	machine_to_phys(__dummy); \
++})
 +
-+#define MSR_IA32_DEBUGCTLMSR		0x1d9
-+#define MSR_IA32_LASTBRANCHFROMIP	0x1db
-+#define MSR_IA32_LASTBRANCHTOIP		0x1dc
-+#define MSR_IA32_LASTINTFROMIP		0x1dd
-+#define MSR_IA32_LASTINTTOIP		0x1de
++static inline unsigned long read_cr4(void)
++{ 
++	unsigned long cr4;
++	asm("movq %%cr4,%0" : "=r" (cr4));
++	return cr4;
++} 
 +
-+#define MSR_IA32_MC0_CTL		0x400
-+#define MSR_IA32_MC0_STATUS		0x401
-+#define MSR_IA32_MC0_ADDR		0x402
-+#define MSR_IA32_MC0_MISC		0x403
++static inline void write_cr4(unsigned long val)
++{ 
++	asm volatile("movq %0,%%cr4" :: "r" (val));
++} 
 +
-+/* Pentium IV performance counter MSRs */
-+#define MSR_P4_BPU_PERFCTR0 		0x300
-+#define MSR_P4_BPU_PERFCTR1 		0x301
-+#define MSR_P4_BPU_PERFCTR2 		0x302
-+#define MSR_P4_BPU_PERFCTR3 		0x303
-+#define MSR_P4_MS_PERFCTR0 		0x304
-+#define MSR_P4_MS_PERFCTR1 		0x305
-+#define MSR_P4_MS_PERFCTR2 		0x306
-+#define MSR_P4_MS_PERFCTR3 		0x307
-+#define MSR_P4_FLAME_PERFCTR0 		0x308
-+#define MSR_P4_FLAME_PERFCTR1 		0x309
-+#define MSR_P4_FLAME_PERFCTR2 		0x30a
-+#define MSR_P4_FLAME_PERFCTR3 		0x30b
-+#define MSR_P4_IQ_PERFCTR0 		0x30c
-+#define MSR_P4_IQ_PERFCTR1 		0x30d
-+#define MSR_P4_IQ_PERFCTR2 		0x30e
-+#define MSR_P4_IQ_PERFCTR3 		0x30f
-+#define MSR_P4_IQ_PERFCTR4 		0x310
-+#define MSR_P4_IQ_PERFCTR5 		0x311
-+#define MSR_P4_BPU_CCCR0 		0x360
-+#define MSR_P4_BPU_CCCR1 		0x361
-+#define MSR_P4_BPU_CCCR2 		0x362
-+#define MSR_P4_BPU_CCCR3 		0x363
-+#define MSR_P4_MS_CCCR0 		0x364
-+#define MSR_P4_MS_CCCR1 		0x365
-+#define MSR_P4_MS_CCCR2 		0x366
-+#define MSR_P4_MS_CCCR3 		0x367
-+#define MSR_P4_FLAME_CCCR0 		0x368
-+#define MSR_P4_FLAME_CCCR1 		0x369
-+#define MSR_P4_FLAME_CCCR2 		0x36a
-+#define MSR_P4_FLAME_CCCR3 		0x36b
-+#define MSR_P4_IQ_CCCR0 		0x36c
-+#define MSR_P4_IQ_CCCR1 		0x36d
-+#define MSR_P4_IQ_CCCR2 		0x36e
-+#define MSR_P4_IQ_CCCR3 		0x36f
-+#define MSR_P4_IQ_CCCR4 		0x370
-+#define MSR_P4_IQ_CCCR5 		0x371
-+#define MSR_P4_ALF_ESCR0 		0x3ca
-+#define MSR_P4_ALF_ESCR1 		0x3cb
-+#define MSR_P4_BPU_ESCR0 		0x3b2
-+#define MSR_P4_BPU_ESCR1 		0x3b3
-+#define MSR_P4_BSU_ESCR0 		0x3a0
-+#define MSR_P4_BSU_ESCR1 		0x3a1
-+#define MSR_P4_CRU_ESCR0 		0x3b8
-+#define MSR_P4_CRU_ESCR1 		0x3b9
-+#define MSR_P4_CRU_ESCR2 		0x3cc
-+#define MSR_P4_CRU_ESCR3 		0x3cd
-+#define MSR_P4_CRU_ESCR4 		0x3e0
-+#define MSR_P4_CRU_ESCR5 		0x3e1
-+#define MSR_P4_DAC_ESCR0 		0x3a8
-+#define MSR_P4_DAC_ESCR1 		0x3a9
-+#define MSR_P4_FIRM_ESCR0 		0x3a4
-+#define MSR_P4_FIRM_ESCR1 		0x3a5
-+#define MSR_P4_FLAME_ESCR0 		0x3a6
-+#define MSR_P4_FLAME_ESCR1 		0x3a7
-+#define MSR_P4_FSB_ESCR0 		0x3a2
-+#define MSR_P4_FSB_ESCR1 		0x3a3
-+#define MSR_P4_IQ_ESCR0 		0x3ba
-+#define MSR_P4_IQ_ESCR1 		0x3bb
-+#define MSR_P4_IS_ESCR0 		0x3b4
-+#define MSR_P4_IS_ESCR1 		0x3b5
-+#define MSR_P4_ITLB_ESCR0 		0x3b6
-+#define MSR_P4_ITLB_ESCR1 		0x3b7
-+#define MSR_P4_IX_ESCR0 		0x3c8
-+#define MSR_P4_IX_ESCR1 		0x3c9
-+#define MSR_P4_MOB_ESCR0 		0x3aa
-+#define MSR_P4_MOB_ESCR1 		0x3ab
-+#define MSR_P4_MS_ESCR0 		0x3c0
-+#define MSR_P4_MS_ESCR1 		0x3c1
-+#define MSR_P4_PMH_ESCR0 		0x3ac
-+#define MSR_P4_PMH_ESCR1 		0x3ad
-+#define MSR_P4_RAT_ESCR0 		0x3bc
-+#define MSR_P4_RAT_ESCR1 		0x3bd
-+#define MSR_P4_SAAT_ESCR0 		0x3ae
-+#define MSR_P4_SAAT_ESCR1 		0x3af
-+#define MSR_P4_SSU_ESCR0 		0x3be
-+#define MSR_P4_SSU_ESCR1 		0x3bf    /* guess: not defined in manual */
-+#define MSR_P4_TBPU_ESCR0 		0x3c2
-+#define MSR_P4_TBPU_ESCR1 		0x3c3
-+#define MSR_P4_TC_ESCR0 		0x3c4
-+#define MSR_P4_TC_ESCR1 		0x3c5
-+#define MSR_P4_U2L_ESCR0 		0x3b0
-+#define MSR_P4_U2L_ESCR1 		0x3b1
++#define stts() (HYPERVISOR_fpu_taskswitch(1))
++
++#define wbinvd() \
++	__asm__ __volatile__ ("wbinvd": : :"memory");
 +
-+#endif
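The rdtsc/rdtscll accessors in the msr.h hunk being dropped above read the timestamp counter into %edx:%eax and splice the halves together. Unlike rdmsr/wrmsr, rdtsc is normally executable from user space, so the idiom can be tried directly; a sketch mirroring the rdtscll macro (x86-64, GCC inline asm):

    #include <stdio.h>

    static unsigned long long rdtscll(void)
    {
        unsigned int lo, hi;

        __asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));
        return ((unsigned long long)hi << 32) | lo;
    }

    int main(void)
    {
        unsigned long long t0 = rdtscll();
        unsigned long long t1 = rdtscll();

        printf("delta: %llu cycles\n", t1 - t0);
        return 0;
    }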
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/nmi.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/nmi.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/nmi.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/nmi.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,93 @@
 +/*
-+ *  linux/include/asm-i386/nmi.h
++ * On SMP systems, when the scheduler does migration-cost autodetection,
++ * it needs a way to flush as much of the CPU's caches as possible.
 + */
-+#ifndef ASM_NMI_H
-+#define ASM_NMI_H
-+
-+#include <linux/pm.h>
-+#include <asm/io.h>
-+
-+#include <xen/interface/nmi.h>
++static inline void sched_cacheflush(void)
++{
++	wbinvd();
++}
 +
-+struct pt_regs;
++#endif	/* __KERNEL__ */
 +
-+typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
++#define nop() __asm__ __volatile__ ("nop")
 +
-+/**
-+ * set_nmi_callback
-+ *
-+ * Set a handler for an NMI. Only one handler may be
-+ * set. Return 1 if the NMI was handled.
-+ */
-+void set_nmi_callback(nmi_callback_t callback);
++#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
 +
-+/**
-+ * unset_nmi_callback
-+ *
-+ * Remove the handler previously set.
-+ */
-+void unset_nmi_callback(void);
++#define tas(ptr) (xchg((ptr),1))
 +
-+#ifdef CONFIG_PM
-+ 
-+/** Replace the PM callback routine for NMI. */
-+struct pm_dev * set_nmi_pm_callback(pm_callback callback);
++#define __xg(x) ((volatile long *)(x))
 +
-+/** Unset the PM callback routine back to the default. */
-+void unset_nmi_pm_callback(struct pm_dev * dev);
++static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
++{
++	*ptr = val;
++}
 +
-+#else
++#define _set_64bit set_64bit
 +
-+static inline struct pm_dev * set_nmi_pm_callback(pm_callback callback)
-+{
-+	return 0;
-+} 
-+ 
-+static inline void unset_nmi_pm_callback(struct pm_dev * dev)
++/*
++ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
++ * Note 2: xchg has side effect, so that attribute volatile is necessary,
++ *	  but generally the primitive is invalid, *ptr is output argument. --ANK
++ */
++static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
 +{
++	switch (size) {
++		case 1:
++			__asm__ __volatile__("xchgb %b0,%1"
++				:"=q" (x)
++				:"m" (*__xg(ptr)), "0" (x)
++				:"memory");
++			break;
++		case 2:
++			__asm__ __volatile__("xchgw %w0,%1"
++				:"=r" (x)
++				:"m" (*__xg(ptr)), "0" (x)
++				:"memory");
++			break;
++		case 4:
++			__asm__ __volatile__("xchgl %k0,%1"
++				:"=r" (x)
++				:"m" (*__xg(ptr)), "0" (x)
++				:"memory");
++			break;
++		case 8:
++			__asm__ __volatile__("xchgq %0,%1"
++				:"=r" (x)
++				:"m" (*__xg(ptr)), "0" (x)
++				:"memory");
++			break;
++	}
++	return x;
 +}
 +
-+#endif /* CONFIG_PM */
-+ 
-+extern void default_do_nmi(struct pt_regs *);
-+extern void die_nmi(char *str, struct pt_regs *regs);
++/*
++ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
++ * store NEW in MEM.  Return the initial value in MEM.  Success is
++ * indicated by comparing RETURN with OLD.
++ */
 +
-+static inline unsigned char get_nmi_reason(void)
++#define __HAVE_ARCH_CMPXCHG 1
++
++static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
++				      unsigned long new, int size)
 +{
-+        shared_info_t *s = HYPERVISOR_shared_info;
-+        unsigned char reason = 0;
++	unsigned long prev;
++	switch (size) {
++	case 1:
++		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
++				     : "=a"(prev)
++				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
++				     : "memory");
++		return prev;
++	case 2:
++		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
++				     : "=a"(prev)
++				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
++				     : "memory");
++		return prev;
++	case 4:
++		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
++				     : "=a"(prev)
++				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
++				     : "memory");
++		return prev;
++	case 8:
++		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
++				     : "=a"(prev)
++				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
++				     : "memory");
++		return prev;
++	}
++	return old;
++}
 +
-+        /* construct a value which looks like it came from
-+         * port 0x61.
-+         */
-+        if (test_bit(_XEN_NMIREASON_io_error, &s->arch.nmi_reason))
-+                reason |= 0x40;
-+        if (test_bit(_XEN_NMIREASON_parity_error, &s->arch.nmi_reason))
-+                reason |= 0x80;
++#define cmpxchg(ptr,o,n)\
++	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
++					(unsigned long)(n),sizeof(*(ptr))))
++
++#ifdef CONFIG_SMP
++#define smp_mb()	mb()
++#define smp_rmb()	rmb()
++#define smp_wmb()	wmb()
++#define smp_read_barrier_depends()	do {} while(0)
++#else
++#define smp_mb()	barrier()
++#define smp_rmb()	barrier()
++#define smp_wmb()	barrier()
++#define smp_read_barrier_depends()	do {} while(0)
++#endif
 +
-+        return reason;
-+}
++    
++/*
++ * Force strict CPU ordering.
++ * And yes, this is required on UP too when we're talking
++ * to devices.
++ */
++#define mb() 	asm volatile("mfence":::"memory")
++#define rmb()	asm volatile("lfence":::"memory")
 +
-+extern int panic_on_timeout;
-+extern int unknown_nmi_panic;
++#ifdef CONFIG_UNORDERED_IO
++#define wmb()	asm volatile("sfence" ::: "memory")
++#else
++#define wmb()	asm volatile("" ::: "memory")
++#endif
++#define read_barrier_depends()	do {} while(0)
++#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
 +
-+extern int check_nmi_watchdog(void);
-+ 
-+extern void setup_apic_nmi_watchdog (void);
-+extern int reserve_lapic_nmi(void);
-+extern void release_lapic_nmi(void);
-+extern void disable_timer_nmi_watchdog(void);
-+extern void enable_timer_nmi_watchdog(void);
-+extern void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
++#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
 +
-+extern void nmi_watchdog_default(void);
-+extern int setup_nmi_watchdog(char *);
++#include <linux/irqflags.h>
 +
-+extern unsigned int nmi_watchdog;
-+#define NMI_DEFAULT	-1
-+#define NMI_NONE	0
-+#define NMI_IO_APIC	1
-+#define NMI_LOCAL_APIC	2
-+#define NMI_INVALID	3
++void cpu_idle_wait(void);
 +
-+#endif /* ASM_NMI_H */
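get_nmi_reason() in the nmi.h hunk above synthesizes the legacy port-0x61 status byte from the Xen shared-info NMI reason bits: 0x40 for an I/O check error, 0x80 for a memory parity error. A freestanding sketch of that bit assembly, with plain flags standing in for the test_bit() calls on s->arch.nmi_reason:

    #include <stdio.h>

    /* stand-ins for the shared-info reason bits */
    static int io_error     = 1;
    static int parity_error = 0;

    static unsigned char fake_port_61(void)
    {
        unsigned char reason = 0;

        if (io_error)
            reason |= 0x40;     /* I/O check */
        if (parity_error)
            reason |= 0x80;     /* parity/SERR */
        return reason;
    }

    int main(void)
    {
        printf("reason byte: %#x\n", fake_port_61());
        return 0;
    }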
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/page.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/page.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/page.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/page.h	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,217 @@
-+#ifndef _X86_64_PAGE_H
-+#define _X86_64_PAGE_H
++extern unsigned long arch_align_stack(unsigned long sp);
++extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 +
-+/* #include <linux/string.h> */
-+#ifndef __ASSEMBLY__
-+#include <linux/kernel.h>
-+#include <linux/types.h>
-+#include <asm/bug.h>
 +#endif
-+#include <xen/interface/xen.h> 
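The __cmpxchg() helper in the system.h hunk above follows the compare-and-swap contract spelled out in its comment: the value previously in memory is returned, and the caller knows the store happened iff that value equals the expected old one. A user-space sketch of the classic retry loop built on that contract, using GCC's __sync_val_compare_and_swap builtin as a stand-in for the kernel's cmpxchg():

    #include <stdio.h>

    static unsigned long counter;

    static void atomic_inc_cas(unsigned long *p)
    {
        unsigned long old, seen;

        do {
            old  = *p;
            /* stand-in for: seen = cmpxchg(p, old, old + 1); */
            seen = __sync_val_compare_and_swap(p, old, old + 1);
        } while (seen != old);      /* lost a race: reload and retry */
    }

    int main(void)
    {
        atomic_inc_cas(&counter);
        printf("counter = %lu\n", counter);
        return 0;
    }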
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/timer.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/timer.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,67 @@
++#ifndef _ASMi386_TIMER_H
++#define _ASMi386_TIMER_H
++#include <linux/init.h>
 +
-+/*
-+ * Need to repeat this here in order to not include pgtable.h (which in turn
-+ * depends on definitions made here), but to be able to use the symbolic
-+ * below. The preprocessor will warn if the two definitions aren't identical.
++/**
++ * struct timer_ops - used to define a timer source
++ *
++ * @name: name of the timer.
++ * @init: Probes and initializes the timer. Takes clock= override 
++ *        string as an argument. Returns 0 on success, anything else
++ *        on failure.
++ * @mark_offset: called by the timer interrupt.
++ * @get_offset:  called by gettimeofday(). Returns the number of microseconds
++ *               since the last timer interrupt.
++ * @monotonic_clock: returns the number of nanoseconds since the init of the
++ *                   timer.
++ * @delay: delays this many clock cycles.
 + */
-+#define _PAGE_PRESENT	0x001
++struct timer_opts {
++	char* name;
++	void (*mark_offset)(void);
++	unsigned long (*get_offset)(void);
++	unsigned long long (*monotonic_clock)(void);
++	void (*delay)(unsigned long);
++	unsigned long (*read_timer)(void);
++	int (*suspend)(pm_message_t state);
++	int (*resume)(void);
++};
 +
-+#define arch_free_page(_page,_order)		\
-+({	int foreign = PageForeign(_page);	\
-+	if (foreign)				\
-+		PageForeignDestructor(_page);	\
-+	foreign;				\
-+})
-+#define HAVE_ARCH_FREE_PAGE
++struct init_timer_opts {
++	int (*init)(char *override);
++	struct timer_opts *opts;
++};
 +
-+/* PAGE_SHIFT determines the page size */
-+#define PAGE_SHIFT	12
-+#ifdef __ASSEMBLY__
-+#define PAGE_SIZE	(0x1 << PAGE_SHIFT)
-+#else
-+#define PAGE_SIZE	(1UL << PAGE_SHIFT)
-+#endif
-+#define PAGE_MASK	(~(PAGE_SIZE-1))
++#define TICK_SIZE (tick_nsec / 1000)
 +
-+/* See Documentation/x86_64/mm.txt for a description of the memory map. */
-+#define __PHYSICAL_MASK_SHIFT	46
-+#define __PHYSICAL_MASK		((1UL << __PHYSICAL_MASK_SHIFT) - 1)
-+#define __VIRTUAL_MASK_SHIFT	48
-+#define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)
++extern struct timer_opts* __init select_timer(void);
++extern void clock_fallback(void);
++void setup_pit_timer(void);
 +
-+#define PHYSICAL_PAGE_MASK	(~(PAGE_SIZE-1) & __PHYSICAL_MASK)
++/* Modifiers for buggy PIT handling */
 +
-+#define THREAD_ORDER 1 
-+#define THREAD_SIZE  (PAGE_SIZE << THREAD_ORDER)
-+#define CURRENT_MASK (~(THREAD_SIZE-1))
++extern int pit_latch_buggy;
 +
-+#define EXCEPTION_STACK_ORDER 0
-+#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
++extern struct timer_opts *cur_timer;
++extern int timer_ack;
 +
-+#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
-+#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
++/* list of externed timers */
++extern struct timer_opts timer_none;
++extern struct timer_opts timer_pit;
++extern struct init_timer_opts timer_pit_init;
++extern struct init_timer_opts timer_tsc_init;
++#ifdef CONFIG_X86_CYCLONE_TIMER
++extern struct init_timer_opts timer_cyclone_init;
++#endif
 +
-+#define IRQSTACK_ORDER 2
-+#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
++extern unsigned long calibrate_tsc(void);
++extern void init_cpu_khz(void);
++#ifdef CONFIG_HPET_TIMER
++extern struct init_timer_opts timer_hpet_init;
++extern unsigned long calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr);
++#endif
 +
-+#define STACKFAULT_STACK 1
-+#define DOUBLEFAULT_STACK 2
-+#define NMI_STACK 3
-+#define DEBUG_STACK 4
-+#define MCE_STACK 5
-+#define N_EXCEPTION_STACKS 5  /* hw limit: 7 */
++#ifdef CONFIG_X86_PM_TIMER
++extern struct init_timer_opts timer_pmtmr_init;
++#endif
++#endif
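timer.h above keeps each clock source behind a struct timer_opts function table, and select_timer() is expected to walk the init_timer_opts entries until one probe returns 0. A minimal sketch of that probe-then-dispatch pattern; the tsc_* names and return values here are illustrative, not taken from the patch:

    #include <stdio.h>

    struct timer_ops {                      /* trimmed-down timer_opts */
        const char *name;
        unsigned long long (*monotonic_clock)(void);
    };

    static unsigned long long tsc_clock(void) { return 42; }
    static struct timer_ops timer_tsc = { "tsc", tsc_clock };

    /* illustrative probe, shaped like the init() hook: 0 on success */
    static int tsc_init(char *override) { (void)override; return 0; }

    int main(void)
    {
        struct timer_ops *cur = NULL;

        if (tsc_init(NULL) == 0)            /* select_timer() in miniature */
            cur = &timer_tsc;
        if (cur)
            printf("%s: %llu\n", cur->name, cur->monotonic_clock());
        return 0;
    }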
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/tlbflush.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/tlbflush.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,103 @@
++#ifndef _X8664_TLBFLUSH_H
++#define _X8664_TLBFLUSH_H
 +
-+#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
-+#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
++#include <linux/mm.h>
++#include <asm/processor.h>
 +
-+#define HPAGE_SHIFT PMD_SHIFT
-+#define HPAGE_SIZE	((1UL) << HPAGE_SHIFT)
-+#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
-+#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
++#define __flush_tlb()	xen_tlb_flush()
 +
-+#ifdef __KERNEL__
-+#ifndef __ASSEMBLY__
++/*
++ * Global pages have to be flushed a bit differently. Not a real
++ * performance problem because this does not happen often.
++ */
++#define __flush_tlb_global()	xen_tlb_flush()
 +
-+extern unsigned long end_pfn;
 +
-+#include <asm/maddr.h>
++extern unsigned long pgkern_mask;
 +
-+void clear_page(void *);
-+void copy_page(void *, void *);
++#define __flush_tlb_all() __flush_tlb_global()
 +
-+#define clear_user_page(page, vaddr, pg)	clear_page(page)
-+#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
++#define __flush_tlb_one(addr)	xen_invlpg((unsigned long)addr)
 +
-+#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
-+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 +
 +/*
-+ * These are used to make use of C type-checking..
++ * TLB flushing:
++ *
++ *  - flush_tlb() flushes the current mm struct TLBs
++ *  - flush_tlb_all() flushes all processes TLBs
++ *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
++ *  - flush_tlb_page(vma, vmaddr) flushes one page
++ *  - flush_tlb_range(vma, start, end) flushes a range of pages
++ *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
++ *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
++ *
++ * x86-64 can only flush individual pages or full VMs. For a range flush
++ * we always do the full VM. Might be worth trying if for a small
++ * range a few INVLPGs in a row are a win.
 + */
-+typedef struct { unsigned long pte; } pte_t;
-+typedef struct { unsigned long pmd; } pmd_t;
-+typedef struct { unsigned long pud; } pud_t;
-+typedef struct { unsigned long pgd; } pgd_t;
-+#define PTE_MASK	PHYSICAL_PAGE_MASK
 +
-+typedef struct { unsigned long pgprot; } pgprot_t;
++#ifndef CONFIG_SMP
 +
-+#define __pte_val(x) ((x).pte)
-+#define pte_val(x) ((__pte_val(x) & _PAGE_PRESENT) ? \
-+                    pte_machine_to_phys(__pte_val(x)) : \
-+                    __pte_val(x))
++#define flush_tlb() __flush_tlb()
++#define flush_tlb_all() __flush_tlb_all()
++#define local_flush_tlb() __flush_tlb()
 +
-+#define __pmd_val(x) ((x).pmd)
-+static inline unsigned long pmd_val(pmd_t x)
++static inline void flush_tlb_mm(struct mm_struct *mm)
 +{
-+	unsigned long ret = __pmd_val(x);
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+	if (ret) ret = pte_machine_to_phys(ret) | _PAGE_PRESENT;
-+#else
-+	if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
-+#endif
-+	return ret;
++	if (mm == current->active_mm)
++		__flush_tlb();
 +}
 +
-+#define __pud_val(x) ((x).pud)
-+static inline unsigned long pud_val(pud_t x)
++static inline void flush_tlb_page(struct vm_area_struct *vma,
++	unsigned long addr)
 +{
-+	unsigned long ret = __pud_val(x);
-+	if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
-+	return ret;
++	if (vma->vm_mm == current->active_mm)
++		__flush_tlb_one(addr);
 +}
 +
-+#define __pgd_val(x) ((x).pgd)
-+static inline unsigned long pgd_val(pgd_t x)
++static inline void flush_tlb_range(struct vm_area_struct *vma,
++	unsigned long start, unsigned long end)
 +{
-+	unsigned long ret = __pgd_val(x);
-+	if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
-+	return ret;
++	if (vma->vm_mm == current->active_mm)
++		__flush_tlb();
 +}
 +
-+#define pgprot_val(x)	((x).pgprot)
++#else
 +
-+static inline pte_t __pte(unsigned long x)
-+{
-+	if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
-+	return ((pte_t) { (x) });
-+}
++#include <asm/smp.h>
 +
-+static inline pmd_t __pmd(unsigned long x)
-+{
-+	if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
-+	return ((pmd_t) { (x) });
-+}
++#define local_flush_tlb() \
++	__flush_tlb()
 +
-+static inline pud_t __pud(unsigned long x)
-+{
-+	if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
-+	return ((pud_t) { (x) });
-+}
++#define flush_tlb_all xen_tlb_flush_all
++#define flush_tlb_current_task() xen_tlb_flush_mask(&current->mm->cpu_vm_mask)
++#define flush_tlb_mm(mm) xen_tlb_flush_mask(&(mm)->cpu_vm_mask)
++#define flush_tlb_page(vma, va) xen_invlpg_mask(&(vma)->vm_mm->cpu_vm_mask, va)
 +
-+static inline pgd_t __pgd(unsigned long x)
++#define flush_tlb()	flush_tlb_current_task()
++
++static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
 +{
-+	if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
-+	return ((pgd_t) { (x) });
++	flush_tlb_mm(vma->vm_mm);
 +}
 +
-+#define __pgprot(x)	((pgprot_t) { (x) } )
-+
-+#define __PHYSICAL_START	((unsigned long)CONFIG_PHYSICAL_START)
-+#define __START_KERNEL		(__START_KERNEL_map + __PHYSICAL_START)
-+#define __START_KERNEL_map	0xffffffff80000000UL
-+#define __PAGE_OFFSET           0xffff880000000000UL	
++#define TLBSTATE_OK	1
++#define TLBSTATE_LAZY	2
 +
-+#else
-+#define __PHYSICAL_START	CONFIG_PHYSICAL_START
-+#define __START_KERNEL		(__START_KERNEL_map + __PHYSICAL_START)
-+#define __START_KERNEL_map	0xffffffff80000000
-+#define __PAGE_OFFSET           0xffff880000000000
-+#endif /* !__ASSEMBLY__ */
++/* Roughly an IPI every 20MB with 4k pages for freeing page table
++   ranges. Cost is about 42k of memory for each CPU. */
++#define ARCH_FREE_PTE_NR 5350	
 +
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+#undef LOAD_OFFSET
-+#define LOAD_OFFSET		0
 +#endif
 +
-+/* to align the pointer to the (next) page boundary */
-+#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
-+
-+#define KERNEL_TEXT_SIZE  (40UL*1024*1024)
-+#define KERNEL_TEXT_START 0xffffffff80000000UL 
++#define flush_tlb_kernel_range(start, end) flush_tlb_all()
 +
-+#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)
++static inline void flush_tlb_pgtables(struct mm_struct *mm,
++				      unsigned long start, unsigned long end)
++{
++	/* x86_64 does not keep any page table caches in a software TLB.
++	   The CPUs do in their hardware TLBs, but they are handled
++	   by the normal TLB flushing algorithms. */
++}
 +
-+/* Note: __pa(&symbol_visible_to_c) should be always replaced with __pa_symbol.
-+   Otherwise you risk miscompilation. */ 
-+#define __pa(x)			(((unsigned long)(x)>=__START_KERNEL_map)?(unsigned long)(x) - (unsigned long)__START_KERNEL_map:(unsigned long)(x) - PAGE_OFFSET)
-+/* __pa_symbol should be used for C visible symbols.
-+   This seems to be the official gcc blessed way to do such arithmetic. */ 
-+#define __pa_symbol(x)		\
-+	({unsigned long v;  \
-+	  asm("" : "=r" (v) : "0" (x)); \
-+	  __pa(v); })
++#endif /* _X8664_TLBFLUSH_H */
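As the comment in the tlbflush.h hunk notes, this port only flushes a single page or the whole address space, so flush_tlb_range() degrades to a full flush regardless of the range. A sketch of that policy in isolation — the helpers and the one-page special case are illustrative; the hunk itself always takes the full-flush path for ranges:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static void flush_one(unsigned long va) { printf("invlpg %#lx\n", va); }
    static void flush_all(void)             { printf("full TLB flush\n"); }

    static void flush_range(unsigned long start, unsigned long end)
    {
        if (end - start <= PAGE_SIZE)
            flush_one(start);       /* small range: single INVLPG */
        else
            flush_all();            /* anything bigger: flush everything */
    }

    int main(void)
    {
        flush_range(0x400000, 0x400000 + PAGE_SIZE);
        flush_range(0x400000, 0x800000);
        return 0;
    }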
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/vga.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/vga.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,20 @@
++/*
++ *	Access to VGA videoram
++ *
++ *	(c) 1998 Martin Mares <mj at ucw.cz>
++ */
 +
-+#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
-+#define __boot_va(x)		__va(x)
-+#define __boot_pa(x)		__pa(x)
-+#ifdef CONFIG_FLATMEM
-+#define pfn_valid(pfn)		((pfn) < end_pfn)
-+#endif
++#ifndef _LINUX_ASM_VGA_H_
++#define _LINUX_ASM_VGA_H_
 +
-+#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-+#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-+#define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
++/*
++ *	On the PC, we can just recalculate addresses and then
++ *	access the videoram directly without any black magic.
++ */
 +
-+#define VM_DATA_DEFAULT_FLAGS \
-+	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
-+	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VGA_MAP_MEM(x,s) (unsigned long)isa_bus_to_virt(x)
 +
-+#define __HAVE_ARCH_GATE_AREA 1	
++#define vga_readb(x) (*(x))
++#define vga_writeb(x,y) (*(y) = (x))
 +
-+#include <asm-generic/memory_model.h>
-+#include <asm-generic/page.h>
++#endif
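Note the argument order in the vga.h hunk above: vga_writeb(x, y) stores the value x through the pointer y. Once VGA_MAP_MEM() has produced a virtual address the accessors are plain dereferences, so the order is easy to demonstrate over an ordinary buffer standing in for the mapped videoram:

    #include <stdio.h>

    #define vga_readb(x)    (*(x))
    #define vga_writeb(x,y) (*(y) = (x))

    int main(void)
    {
        unsigned char fake_vram[4] = { 0 };   /* stand-in for videoram */

        vga_writeb(0x41, &fake_vram[0]);      /* value first, pointer second */
        printf("%c\n", vga_readb(&fake_vram[0]));
        return 0;
    }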
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/xenoprof.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/xenoprof.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,1 @@
++#include <asm-i386/mach-xen/asm/xenoprof.h>
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/asm/xor.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/asm/xor.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,328 @@
++/*
++ * x86-64 changes / gcc fixes from Andi Kleen. 
++ * Copyright 2002 Andi Kleen, SuSE Labs.
++ *
++ * This hasn't been optimized for the hammer yet, but there are likely
++ * no advantages to be gotten from x86-64 here anyways.
++ */
 +
-+#endif /* __KERNEL__ */
++typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t;
 +
-+#endif /* _X86_64_PAGE_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/pci.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/pci.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/pci.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/pci.h	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,168 @@
-+#ifndef __x8664_PCI_H
-+#define __x8664_PCI_H
++/* Doesn't use gcc to save the XMM registers, because there is no easy way to 
++   tell it to do a clts before the register saving. */
++#define XMMS_SAVE do {				\
++	preempt_disable();			\
++	if (!(current_thread_info()->status & TS_USEDFPU))	\
++		clts();				\
++	__asm__ __volatile__ ( 			\
++		"movups %%xmm0,(%1)	;\n\t"	\
++		"movups %%xmm1,0x10(%1)	;\n\t"	\
++		"movups %%xmm2,0x20(%1)	;\n\t"	\
++		"movups %%xmm3,0x30(%1)	;\n\t"	\
++		: "=&r" (cr0)			\
++		: "r" (xmm_save) 		\
++		: "memory");			\
++} while(0)
 +
-+#include <asm/io.h>
++#define XMMS_RESTORE do {			\
++	asm volatile (				\
++		"sfence			;\n\t"	\
++		"movups (%1),%%xmm0	;\n\t"	\
++		"movups 0x10(%1),%%xmm1	;\n\t"	\
++		"movups 0x20(%1),%%xmm2	;\n\t"	\
++		"movups 0x30(%1),%%xmm3	;\n\t"	\
++		:				\
++		: "r" (cr0), "r" (xmm_save)	\
++		: "memory");			\
++	if (!(current_thread_info()->status & TS_USEDFPU))	\
++		stts();				\
++	preempt_enable();			\
++} while(0)
 +
-+#ifdef __KERNEL__
++#define OFFS(x)		"16*("#x")"
++#define PF_OFFS(x)	"256+16*("#x")"
++#define	PF0(x)		"	prefetchnta "PF_OFFS(x)"(%[p1])		;\n"
++#define LD(x,y)		"       movaps   "OFFS(x)"(%[p1]), %%xmm"#y"	;\n"
++#define ST(x,y)		"       movaps %%xmm"#y",   "OFFS(x)"(%[p1])	;\n"
++#define PF1(x)		"	prefetchnta "PF_OFFS(x)"(%[p2])		;\n"
++#define PF2(x)		"	prefetchnta "PF_OFFS(x)"(%[p3])		;\n"
++#define PF3(x)		"	prefetchnta "PF_OFFS(x)"(%[p4])		;\n"
++#define PF4(x)		"	prefetchnta "PF_OFFS(x)"(%[p5])		;\n"
++#define PF5(x)		"	prefetchnta "PF_OFFS(x)"(%[p6])		;\n"
++#define XO1(x,y)	"       xorps   "OFFS(x)"(%[p2]), %%xmm"#y"	;\n"
++#define XO2(x,y)	"       xorps   "OFFS(x)"(%[p3]), %%xmm"#y"	;\n"
++#define XO3(x,y)	"       xorps   "OFFS(x)"(%[p4]), %%xmm"#y"	;\n"
++#define XO4(x,y)	"       xorps   "OFFS(x)"(%[p5]), %%xmm"#y"	;\n"
++#define XO5(x,y)	"       xorps   "OFFS(x)"(%[p6]), %%xmm"#y"	;\n"
 +
-+#include <linux/mm.h> /* for struct page */
 +
-+/* Can be used to override the logic in pci_scan_bus for skipping
-+   already-configured bus numbers - to be used for buggy BIOSes
-+   or architectures with incomplete PCI setup by the loader */
++static void
++xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
++{
++        unsigned int lines = bytes >> 8;
++	unsigned long cr0;
++	xmm_store_t xmm_save[4];
 +
-+#ifdef CONFIG_PCI
-+extern unsigned int pcibios_assign_all_busses(void);
-+#else
-+#define pcibios_assign_all_busses()	0
-+#endif
++	XMMS_SAVE;
 +
-+#include <asm/hypervisor.h>
-+#define pcibios_scan_all_fns(a, b)	(!is_initial_xendomain())
++        asm volatile (
++#undef BLOCK
++#define BLOCK(i) \
++		LD(i,0)					\
++			LD(i+1,1)			\
++		PF1(i)					\
++				PF1(i+2)		\
++				LD(i+2,2)		\
++					LD(i+3,3)	\
++		PF0(i+4)				\
++				PF0(i+6)		\
++		XO1(i,0)				\
++			XO1(i+1,1)			\
++				XO1(i+2,2)		\
++					XO1(i+3,3)	\
++		ST(i,0)					\
++			ST(i+1,1)			\
++				ST(i+2,2)		\
++					ST(i+3,3)	\
 +
-+extern unsigned long pci_mem_start;
-+#define PCIBIOS_MIN_IO		0x1000
-+#define PCIBIOS_MIN_MEM		(pci_mem_start)
 +
-+#define PCIBIOS_MIN_CARDBUS_IO	0x4000
++		PF0(0)
++				PF0(2)
 +
-+void pcibios_config_init(void);
-+struct pci_bus * pcibios_scan_root(int bus);
-+extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
-+extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
++	" .align 32			;\n"
++        " 1:                            ;\n"
 +
-+void pcibios_set_master(struct pci_dev *dev);
-+void pcibios_penalize_isa_irq(int irq, int active);
-+struct irq_routing_table *pcibios_get_irq_routing_table(void);
-+int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
++		BLOCK(0)
++		BLOCK(4)
++		BLOCK(8)
++		BLOCK(12)
 +
-+#include <linux/types.h>
-+#include <linux/slab.h>
-+#include <asm/scatterlist.h>
-+#include <linux/string.h>
-+#include <asm/page.h>
++        "       addq %[inc], %[p1]           ;\n"
++        "       addq %[inc], %[p2]           ;\n"
++		"		decl %[cnt] ; jnz 1b"
++	: [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines)
++	: [inc] "r" (256UL) 
++        : "memory");
 +
-+extern void pci_iommu_alloc(void);
-+extern int iommu_setup(char *opt);
++	XMMS_RESTORE;
++}
 +
-+/* The PCI address space does equal the physical memory
-+ * address space.  The networking and block device layers use
-+ * this boolean for bounce buffer decisions
-+ *
-+ * On AMD64 it mostly equals, but we set it to zero if a hardware
-+ * IOMMU (gart) of sotware IOMMU (swiotlb) is available.
-+ */
-+#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
++static void
++xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
++	  unsigned long *p3)
++{
++	unsigned int lines = bytes >> 8;
++	xmm_store_t xmm_save[4];
++	unsigned long cr0;
 +
-+#if defined(CONFIG_IOMMU) || defined(CONFIG_CALGARY_IOMMU)
++	XMMS_SAVE;
 +
-+/*
-+ * x86-64 always supports DAC, but sometimes it is useful to force
-+ * devices through the IOMMU to get automatic sg list merging.
-+ * Optional right now.
-+ */
-+extern int iommu_sac_force;
-+#define pci_dac_dma_supported(pci_dev, mask)	(!iommu_sac_force)
++        __asm__ __volatile__ (
++#undef BLOCK
++#define BLOCK(i) \
++		PF1(i)					\
++				PF1(i+2)		\
++		LD(i,0)					\
++			LD(i+1,1)			\
++				LD(i+2,2)		\
++					LD(i+3,3)	\
++		PF2(i)					\
++				PF2(i+2)		\
++		PF0(i+4)				\
++				PF0(i+6)		\
++		XO1(i,0)				\
++			XO1(i+1,1)			\
++				XO1(i+2,2)		\
++					XO1(i+3,3)	\
++		XO2(i,0)				\
++			XO2(i+1,1)			\
++				XO2(i+2,2)		\
++					XO2(i+3,3)	\
++		ST(i,0)					\
++			ST(i+1,1)			\
++				ST(i+2,2)		\
++					ST(i+3,3)	\
 +
-+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
-+	dma_addr_t ADDR_NAME;
-+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
-+	__u32 LEN_NAME;
-+#define pci_unmap_addr(PTR, ADDR_NAME)			\
-+	((PTR)->ADDR_NAME)
-+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)		\
-+	(((PTR)->ADDR_NAME) = (VAL))
-+#define pci_unmap_len(PTR, LEN_NAME)			\
-+	((PTR)->LEN_NAME)
-+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
-+	(((PTR)->LEN_NAME) = (VAL))
 +
-+#elif defined(CONFIG_SWIOTLB)
++		PF0(0)
++				PF0(2)
 +
-+#define pci_dac_dma_supported(pci_dev, mask)    1
++	" .align 32			;\n"
++        " 1:                            ;\n"
 +
-+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
-+	dma_addr_t ADDR_NAME;
-+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
-+	__u32 LEN_NAME;
-+#define pci_unmap_addr(PTR, ADDR_NAME)			\
-+	((PTR)->ADDR_NAME)
-+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)		\
-+	(((PTR)->ADDR_NAME) = (VAL))
-+#define pci_unmap_len(PTR, LEN_NAME)			\
-+	((PTR)->LEN_NAME)
-+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
-+	(((PTR)->LEN_NAME) = (VAL))
++		BLOCK(0)
++		BLOCK(4)
++		BLOCK(8)
++		BLOCK(12)
 +
-+#else
-+/* No IOMMU */
++        "       addq %[inc], %[p1]           ;\n"
++        "       addq %[inc], %[p2]          ;\n"
++        "       addq %[inc], %[p3]           ;\n"
++		"		decl %[cnt] ; jnz 1b"
++	: [cnt] "+r" (lines),
++	  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
++	: [inc] "r" (256UL)
++	: "memory"); 
++	XMMS_RESTORE;
++}
 +
-+#define pci_dac_dma_supported(pci_dev, mask)    1
++static void
++xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
++	  unsigned long *p3, unsigned long *p4)
++{
++	unsigned int lines = bytes >> 8;
++	xmm_store_t xmm_save[4]; 
++	unsigned long cr0;
 +
-+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
-+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
-+#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
-+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
-+#define pci_unmap_len(PTR, LEN_NAME)		(0)
-+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
++	XMMS_SAVE;
 +
-+#endif
++        __asm__ __volatile__ (
++#undef BLOCK
++#define BLOCK(i) \
++		PF1(i)					\
++				PF1(i+2)		\
++		LD(i,0)					\
++			LD(i+1,1)			\
++				LD(i+2,2)		\
++					LD(i+3,3)	\
++		PF2(i)					\
++				PF2(i+2)		\
++		XO1(i,0)				\
++			XO1(i+1,1)			\
++				XO1(i+2,2)		\
++					XO1(i+3,3)	\
++		PF3(i)					\
++				PF3(i+2)		\
++		PF0(i+4)				\
++				PF0(i+6)		\
++		XO2(i,0)				\
++			XO2(i+1,1)			\
++				XO2(i+2,2)		\
++					XO2(i+3,3)	\
++		XO3(i,0)				\
++			XO3(i+1,1)			\
++				XO3(i+2,2)		\
++					XO3(i+3,3)	\
++		ST(i,0)					\
++			ST(i+1,1)			\
++				ST(i+2,2)		\
++					ST(i+3,3)	\
 +
-+#include <asm-generic/pci-dma-compat.h>
 +
-+static inline dma64_addr_t
-+pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
-+{
-+	return ((dma64_addr_t) page_to_phys(page) +
-+		(dma64_addr_t) offset);
-+}
++		PF0(0)
++				PF0(2)
 +
-+static inline struct page *
-+pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
-+{
-+	return virt_to_page(__va(dma_addr)); 	
-+}
++	" .align 32			;\n"
++        " 1:                            ;\n"
 +
-+static inline unsigned long
-+pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
-+{
-+	return (dma_addr & ~PAGE_MASK);
-+}
++		BLOCK(0)
++		BLOCK(4)
++		BLOCK(8)
++		BLOCK(12)
 +
-+static inline void
-+pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-+{
-+}
++        "       addq %[inc], %[p1]           ;\n"
++        "       addq %[inc], %[p2]           ;\n"
++        "       addq %[inc], %[p3]           ;\n"
++        "       addq %[inc], %[p4]           ;\n"
++	"	decl %[cnt] ; jnz 1b"
++	: [cnt] "+c" (lines),
++	  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
++	: [inc] "r" (256UL)
++        : "memory" );
 +
-+static inline void
-+pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-+{
-+	flush_write_buffers();
++	XMMS_RESTORE;
 +}
 +
-+#ifdef CONFIG_PCI
-+static inline void pci_dma_burst_advice(struct pci_dev *pdev,
-+					enum pci_dma_burst_strategy *strat,
-+					unsigned long *strategy_parameter)
++static void
++xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
++	  unsigned long *p3, unsigned long *p4, unsigned long *p5)
 +{
-+	*strat = PCI_DMA_BURST_INFINITY;
-+	*strategy_parameter = ~0UL;
-+}
-+#endif
-+
-+#define HAVE_PCI_MMAP
-+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-+			       enum pci_mmap_state mmap_state, int write_combine);
++        unsigned int lines = bytes >> 8;
++	xmm_store_t xmm_save[4];
++	unsigned long cr0;
 +
-+static inline void pcibios_add_platform_entries(struct pci_dev *dev)
-+{
-+}
++	XMMS_SAVE;
 +
-+#endif /* __KERNEL__ */
++        __asm__ __volatile__ (
++#undef BLOCK
++#define BLOCK(i) \
++		PF1(i)					\
++				PF1(i+2)		\
++		LD(i,0)					\
++			LD(i+1,1)			\
++				LD(i+2,2)		\
++					LD(i+3,3)	\
++		PF2(i)					\
++				PF2(i+2)		\
++		XO1(i,0)				\
++			XO1(i+1,1)			\
++				XO1(i+2,2)		\
++					XO1(i+3,3)	\
++		PF3(i)					\
++				PF3(i+2)		\
++		XO2(i,0)				\
++			XO2(i+1,1)			\
++				XO2(i+2,2)		\
++					XO2(i+3,3)	\
++		PF4(i)					\
++				PF4(i+2)		\
++		PF0(i+4)				\
++				PF0(i+6)		\
++		XO3(i,0)				\
++			XO3(i+1,1)			\
++				XO3(i+2,2)		\
++					XO3(i+3,3)	\
++		XO4(i,0)				\
++			XO4(i+1,1)			\
++				XO4(i+2,2)		\
++					XO4(i+3,3)	\
++		ST(i,0)					\
++			ST(i+1,1)			\
++				ST(i+2,2)		\
++					ST(i+3,3)	\
 +
-+/* generic pci stuff */
-+#ifdef CONFIG_PCI
-+#include <asm-generic/pci.h>
-+#endif
 +
-+#endif /* __x8664_PCI_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/pgalloc.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/pgalloc.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/pgalloc.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/pgalloc.h	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,204 @@
-+#ifndef _X86_64_PGALLOC_H
-+#define _X86_64_PGALLOC_H
++		PF0(0)
++				PF0(2)
 +
-+#include <asm/fixmap.h>
-+#include <asm/pda.h>
-+#include <linux/threads.h>
-+#include <linux/mm.h>
-+#include <asm/io.h>		/* for phys_to_virt and page_to_pseudophys */
++	" .align 32			;\n"
++        " 1:                            ;\n"
 +
-+#include <xen/features.h>
-+void make_page_readonly(void *va, unsigned int feature);
-+void make_page_writable(void *va, unsigned int feature);
-+void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
-+void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
++		BLOCK(0)
++		BLOCK(4)
++		BLOCK(8)
++		BLOCK(12)
 +
-+#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
++        "       addq %[inc], %[p1]           ;\n"
++        "       addq %[inc], %[p2]           ;\n"
++        "       addq %[inc], %[p3]           ;\n"
++        "       addq %[inc], %[p4]           ;\n"
++        "       addq %[inc], %[p5]           ;\n"
++	"	decl %[cnt] ; jnz 1b"
++	: [cnt] "+c" (lines),
++  	  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4), 
++	  [p5] "+r" (p5)
++	: [inc] "r" (256UL)
++	: "memory");
 +
-+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
-+{
-+	set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
++	XMMS_RESTORE;
 +}
 +
-+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
-+{
-+	if (unlikely((mm)->context.pinned)) {
-+		BUG_ON(HYPERVISOR_update_va_mapping(
-+			       (unsigned long)__va(page_to_pfn(pte) << PAGE_SHIFT),
-+			       pfn_pte(page_to_pfn(pte), PAGE_KERNEL_RO), 0));
-+		set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
-+	} else {
-+		*(pmd) = __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT));
-+	}
-+}
++static struct xor_block_template xor_block_sse = {
++        .name = "generic_sse",
++        .do_2 = xor_sse_2,
++        .do_3 = xor_sse_3,
++        .do_4 = xor_sse_4,
++        .do_5 = xor_sse_5,
++};
 +
-+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
-+{
-+	if (unlikely((mm)->context.pinned)) {
-+		BUG_ON(HYPERVISOR_update_va_mapping(
-+			       (unsigned long)pmd,
-+			       pfn_pte(virt_to_phys(pmd)>>PAGE_SHIFT, 
-+				       PAGE_KERNEL_RO), 0));
-+		set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
-+	} else {
-+		*(pud) =  __pud(_PAGE_TABLE | __pa(pmd));
-+	}
-+}
++#undef XOR_TRY_TEMPLATES
++#define XOR_TRY_TEMPLATES				\
++	do {						\
++		xor_speed(&xor_block_sse);	\
++	} while (0)
 +
++/* We force the use of the SSE xor block because it can write around L2.
++   We may also be able to load into the L1 only, depending on how the cpu
++   deals with a load to a line that is being prefetched.  */
++#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
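
For reference, the SSE routines above all compute the same result as a
plain C XOR loop over 256-byte lines; the assembly only adds prefetching
and streaming stores. A minimal userspace sketch of the two-source case,
assuming ordinary heap buffers rather than kernel pages:

/* Hypothetical reference for what xor_sse_2() computes: p1 ^= p2 over
 * `bytes` bytes, walked in 256-byte lines exactly like `lines = bytes >> 8`
 * above. Builds and runs with any C compiler. */
#include <assert.h>
#include <stdlib.h>

static void xor_2_ref(unsigned long bytes, unsigned long *p1,
		      const unsigned long *p2)
{
	unsigned int lines = bytes >> 8;		/* 256 bytes per line */
	size_t words = 256 / sizeof(unsigned long);	/* words per line */

	while (lines--) {
		size_t i;

		for (i = 0; i < words; i++)
			p1[i] ^= p2[i];
		p1 += words;
		p2 += words;
	}
}

int main(void)
{
	size_t n = 512 / sizeof(unsigned long);
	unsigned long *a = calloc(n, sizeof(*a));
	unsigned long *b = calloc(n, sizeof(*b));

	a[0] = 0xff00;
	b[0] = 0x00ff;
	xor_2_ref(512, a, b);			/* two 256-byte lines */
	assert(a[0] == 0xffff);			/* 0xff00 ^ 0x00ff */
	free(a);
	free(b);
	return 0;
}
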
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/irq_vectors.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/irq_vectors.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,123 @@
 +/*
-+ * We need to use the batch mode here, but pgd_populate() won't
-+ * be called frequently.
-+ */
-+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
-+{
-+	if (unlikely((mm)->context.pinned)) {
-+		BUG_ON(HYPERVISOR_update_va_mapping(
-+			       (unsigned long)pud,
-+			       pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT, 
-+				       PAGE_KERNEL_RO), 0));
-+		set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
-+		set_pgd(__user_pgd(pgd), __pgd(_PAGE_TABLE | __pa(pud)));
-+	} else {
-+		*(pgd) =  __pgd(_PAGE_TABLE | __pa(pud));
-+		*(__user_pgd(pgd)) = *(pgd);
-+	}
-+}
++ * This file should contain #defines for all of the interrupt vector
++ * numbers used by this architecture.
++ *
++ * In addition, there are some standard defines:
++ *
++ *	FIRST_EXTERNAL_VECTOR:
++ *		The first free place for external interrupts
++ *
++ *	SYSCALL_VECTOR:
++ *		The IRQ vector used by syscalls for the
++ *		user-to-kernel transition.
++ *
++ *	TIMER_IRQ:
++ *		The IRQ number the timer interrupt comes in at.
++ *
++ *	NR_IRQS:
++ *		The total number of interrupt vectors (including all the
++ *		architecture specific interrupts) needed.
++ *
++ */			
++#ifndef _ASM_IRQ_VECTORS_H
++#define _ASM_IRQ_VECTORS_H
 +
-+extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
-+extern void pte_free(struct page *pte);
++/*
++ * IDT vectors usable for external interrupt sources start
++ * at 0x20:
++ */
++#define FIRST_EXTERNAL_VECTOR	0x20
 +
-+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
-+{
-+	struct page *pg;
++#define SYSCALL_VECTOR		0x80
 +
-+	pg = pte_alloc_one(mm, addr);
-+	return pg ? page_address(pg) : NULL;
-+}
++/*
++ * Vectors 0x20-0x2f are used for ISA interrupts.
++ */
 +
-+static inline void pmd_free(pmd_t *pmd)
-+{
-+	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
-+	pte_free(virt_to_page(pmd));
-+}
++#if 0
++/*
++ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
++ *
++ *  Some of the following vectors are 'rare'; they are merged
++ *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
++ *  TLB, reschedule and local APIC vectors are performance-critical.
++ *
++ *  Vectors 0xf0-0xfa are free (reserved for future Linux use).
++ */
++#define INVALIDATE_TLB_VECTOR	0xfd
++#define RESCHEDULE_VECTOR	0xfc
++#define CALL_FUNCTION_VECTOR	0xfb
 +
-+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
-+{
-+	struct page *pg;
++#define THERMAL_APIC_VECTOR	0xf0
++/*
++ * Local APIC timer IRQ vector is on a different priority level,
++ * to work around the 'lost local interrupt if more than 2 IRQ
++ * sources per level' errata.
++ */
++#define LOCAL_TIMER_VECTOR	0xef
++#endif
 +
-+	pg = pte_alloc_one(mm, addr);
-+	return pg ? page_address(pg) : NULL;
-+}
++#define SPURIOUS_APIC_VECTOR	0xff
++#define ERROR_APIC_VECTOR	0xfe
 +
-+static inline void pud_free(pud_t *pud)
-+{
-+	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
-+	pte_free(virt_to_page(pud));
-+}
++/*
++ * First APIC vector available to drivers: (vectors 0x30-0xee)
++ * we start at 0x31 to spread out vectors evenly between priority
++ * levels. (0x80 is the syscall vector)
++ */
++#define FIRST_DEVICE_VECTOR	0x31
++#define FIRST_SYSTEM_VECTOR	0xef
 +
-+static inline void pgd_list_add(pgd_t *pgd)
-+{
-+	struct page *page = virt_to_page(pgd);
++/*
++ * 16 8259A IRQ's, 208 potential APIC interrupt sources.
++ * Right now the APIC is mostly only used for SMP.
++ * 256 vectors is an architectural limit. (we can have
++ * more than 256 devices theoretically, but they will
++ * have to use shared interrupts)
++ * Since vectors 0x00-0x1f are used/reserved for the CPU,
++ * the usable vector space is 0x20-0xff (224 vectors)
++ */
 +
-+	spin_lock(&pgd_lock);
-+	page->index = (pgoff_t)pgd_list;
-+	if (pgd_list)
-+		pgd_list->private = (unsigned long)&page->index;
-+	pgd_list = page;
-+	page->private = (unsigned long)&pgd_list;
-+	spin_unlock(&pgd_lock);
-+}
++#define RESCHEDULE_VECTOR	0
++#define CALL_FUNCTION_VECTOR	1
++#define NR_IPIS			2
 +
-+static inline void pgd_list_del(pgd_t *pgd)
-+{
-+	struct page *next, **pprev, *page = virt_to_page(pgd);
++/*
++ * The maximum number of vectors supported by i386 processors
++ * is limited to 256. For processors other than i386, NR_VECTORS
++ * should be changed accordingly.
++ */
++#define NR_VECTORS 256
 +
-+	spin_lock(&pgd_lock);
-+	next = (struct page *)page->index;
-+	pprev = (struct page **)page->private;
-+	*pprev = next;
-+	if (next)
-+		next->private = (unsigned long)pprev;
-+	spin_unlock(&pgd_lock);
-+}
++#define FPU_IRQ			13
 +
-+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-+{
-+	/*
-+	 * We allocate two contiguous pages for kernel and user.
-+	 */
-+	unsigned boundary;
-+	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 1);
-+	if (!pgd)
-+		return NULL;
-+	pgd_list_add(pgd);
-+	/*
-+	 * Copy kernel pointers in from init.
-+	 * Could keep a freelist or slab cache of those because the kernel
-+	 * part never changes.
-+	 */
-+	boundary = pgd_index(__PAGE_OFFSET);
-+	memset(pgd, 0, boundary * sizeof(pgd_t));
-+	memcpy(pgd + boundary,
-+	       init_level4_pgt + boundary,
-+	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
++#define	FIRST_VM86_IRQ		3
++#define LAST_VM86_IRQ		15
++#define invalid_vm86_irq(irq)	((irq) < 3 || (irq) > 15)
 +
-+	memset(__user_pgd(pgd), 0, PAGE_SIZE); /* clean up user pgd */
-+	/*
-+	 * Set level3_user_pgt for vsyscall area
-+	 */
-+	set_pgd(__user_pgd(pgd) + pgd_index(VSYSCALL_START),
-+		__pgd(__pa_symbol(level3_user_pgt) | _PAGE_TABLE));
-+	return pgd;
-+}
++/*
++ * The flat IRQ space is divided into two regions:
++ *  1. A one-to-one mapping of real physical IRQs. This space is only used
++ *     if we have physical device-access privilege. This region is at the 
++ *     start of the IRQ space so that existing device drivers do not need
++ *     to be modified to translate physical IRQ numbers into our IRQ space.
++ *  2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
++ *     are bound using the provided bind/unbind functions.
++ */
 +
-+static inline void pgd_free(pgd_t *pgd)
-+{
-+	pte_t *ptep = virt_to_ptep(pgd);
++#define PIRQ_BASE		0
++#define NR_PIRQS		256
 +
-+	if (!pte_write(*ptep)) {
-+		xen_pgd_unpin(__pa(pgd));
-+		BUG_ON(HYPERVISOR_update_va_mapping(
-+			       (unsigned long)pgd,
-+			       pfn_pte(virt_to_phys(pgd)>>PAGE_SHIFT, PAGE_KERNEL),
-+			       0));
-+	}
++#define DYNIRQ_BASE		(PIRQ_BASE + NR_PIRQS)
++#define NR_DYNIRQS		256
 +
-+	ptep = virt_to_ptep(__user_pgd(pgd));
++#define NR_IRQS			(NR_PIRQS + NR_DYNIRQS)
++#define NR_IRQ_VECTORS		NR_IRQS
 +
-+	if (!pte_write(*ptep)) {
-+		xen_pgd_unpin(__pa(__user_pgd(pgd)));
-+		BUG_ON(HYPERVISOR_update_va_mapping(
-+			       (unsigned long)__user_pgd(pgd),
-+			       pfn_pte(virt_to_phys(__user_pgd(pgd))>>PAGE_SHIFT, 
-+				       PAGE_KERNEL),
-+			       0));
-+	}
++#define pirq_to_irq(_x)		((_x) + PIRQ_BASE)
++#define irq_to_pirq(_x)		((_x) - PIRQ_BASE)
 +
-+	pgd_list_del(pgd);
-+	free_pages((unsigned long)pgd, 1);
-+}
++#define dynirq_to_irq(_x)	((_x) + DYNIRQ_BASE)
++#define irq_to_dynirq(_x)	((_x) - DYNIRQ_BASE)
 +
-+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-+{
-+	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-+	if (pte)
-+		make_page_readonly(pte, XENFEAT_writable_page_tables);
++#endif /* _ASM_IRQ_VECTORS_H */
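
The net effect of the layout above is a flat 512-entry IRQ space:
physical IRQs occupy 0-255 and map one-to-one, while dynamically bound
(event-channel) IRQs occupy 256-511. A small sketch of the conversion
arithmetic, restating the constants above so it compiles standalone:

/* Hypothetical standalone check of the pirq/dynirq mapping macros. */
#include <assert.h>

#define PIRQ_BASE		0
#define NR_PIRQS		256
#define DYNIRQ_BASE		(PIRQ_BASE + NR_PIRQS)
#define NR_DYNIRQS		256
#define NR_IRQS			(NR_PIRQS + NR_DYNIRQS)

#define pirq_to_irq(_x)		((_x) + PIRQ_BASE)
#define dynirq_to_irq(_x)	((_x) + DYNIRQ_BASE)
#define irq_to_dynirq(_x)	((_x) - DYNIRQ_BASE)

int main(void)
{
	assert(pirq_to_irq(9) == 9);		/* physical IRQs map 1:1 */
	assert(dynirq_to_irq(0) == 256);	/* first dynamic IRQ */
	assert(irq_to_dynirq(300) == 44);	/* and back again */
	assert(NR_IRQS == 512);
	return 0;
}
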
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/mach_time.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/mach_time.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,111 @@
++/*
++ *  include/asm-i386/mach-default/mach_time.h
++ *
++ *  Machine specific set RTC function for generic.
++ *  Split out from time.c by Osamu Tomita <tomita at cinet.co.jp>
++ */
++#ifndef _MACH_TIME_H
++#define _MACH_TIME_H
 +
-+	return pte;
-+}
++#include <asm-i386/mc146818rtc.h>
 +
-+/* Should really implement gc for free page table pages. This could be
-+   done with a reference count in struct page. */
++/* for check timing call set_rtc_mmss() 500ms     */
++/* used in arch/i386/time.c::do_timer_interrupt() */
++#define USEC_AFTER	500000
++#define USEC_BEFORE	500000
 +
-+static inline void pte_free_kernel(pte_t *pte)
++/*
++ * In order to set the CMOS clock precisely, set_rtc_mmss has to be
++ * called 500 ms after the second nowtime has started, because when
++ * nowtime is written into the registers of the CMOS clock, it will
++ * jump to the next second precisely 500 ms later. Check the Motorola
++ * MC146818A or Dallas DS12887 data sheet for details.
++ *
++ * BUG: This routine does not handle hour overflow properly; it just
++ *      sets the minutes. Usually you'll only notice that after reboot!
++ */
++static inline int mach_set_rtc_mmss(unsigned long nowtime)
 +{
-+	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
-+	make_page_writable(pte, XENFEAT_writable_page_tables);
-+	free_page((unsigned long)pte); 
-+}
-+
-+#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
-+#define __pmd_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
-+#define __pud_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
++	int retval = 0;
++	int real_seconds, real_minutes, cmos_minutes;
++	unsigned char save_control, save_freq_select;
 +
-+#endif /* _X86_64_PGALLOC_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/pgtable.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/pgtable.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/pgtable.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/pgtable.h	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,575 @@
-+#ifndef _X86_64_PGTABLE_H
-+#define _X86_64_PGTABLE_H
++	save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
++	CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
 +
-+/*
-+ * This file contains the functions and defines necessary to modify and use
-+ * the x86-64 page table tree.
-+ */
-+#include <asm/processor.h>
-+#include <asm/fixmap.h>
-+#include <asm/bitops.h>
-+#include <linux/threads.h>
-+#include <linux/sched.h>
-+#include <asm/pda.h>
-+#ifdef CONFIG_XEN
-+#include <asm/hypervisor.h>
++	save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
++	CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
 +
-+extern pud_t level3_user_pgt[512];
-+extern pud_t init_level4_user_pgt[];
++	cmos_minutes = CMOS_READ(RTC_MINUTES);
++	if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
++		BCD_TO_BIN(cmos_minutes);
 +
-+extern void xen_init_pt(void);
++	/*
++	 * since we're only adjusting minutes and seconds,
++	 * don't interfere with hour overflow. This avoids
++	 * messing with unknown time zones but requires your
++	 * RTC not to be off by more than 15 minutes
++	 */
++	real_seconds = nowtime % 60;
++	real_minutes = nowtime / 60;
++	if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
++		real_minutes += 30;		/* correct for half hour time zone */
++	real_minutes %= 60;
 +
-+#define virt_to_ptep(__va)						\
-+({									\
-+	pgd_t *__pgd = pgd_offset_k((unsigned long)(__va));		\
-+	pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va));	\
-+	pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va));	\
-+	pte_offset_kernel(__pmd, (unsigned long)(__va));		\
-+})
++	if (abs(real_minutes - cmos_minutes) < 30) {
++		if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
++			BIN_TO_BCD(real_seconds);
++			BIN_TO_BCD(real_minutes);
++		}
++		CMOS_WRITE(real_seconds,RTC_SECONDS);
++		CMOS_WRITE(real_minutes,RTC_MINUTES);
++	} else {
++		printk(KERN_WARNING
++		       "set_rtc_mmss: can't update from %d to %d\n",
++		       cmos_minutes, real_minutes);
++		retval = -1;
++	}
 +
-+#define arbitrary_virt_to_machine(__va)					\
-+({									\
-+	maddr_t m = (maddr_t)pte_mfn(*virt_to_ptep(__va)) << PAGE_SHIFT;\
-+	m | ((unsigned long)(__va) & (PAGE_SIZE-1));			\
-+})
-+#endif
++	/* The following flags have to be released exactly in this order,
++	 * otherwise the DS12887 (popular MC146818A clone with integrated
++	 * battery and quartz) will not reset the oscillator and will not
++	 * update precisely 500 ms later. You won't find this mentioned in
++	 * the Dallas Semiconductor data sheets, but who believes data
++	 * sheets anyway ...                           -- Markus Kuhn
++	 */
++	CMOS_WRITE(save_control, RTC_CONTROL);
++	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
 +
-+extern pud_t level3_kernel_pgt[512];
-+extern pud_t level3_physmem_pgt[512];
-+extern pud_t level3_ident_pgt[512];
-+extern pmd_t level2_kernel_pgt[512];
-+extern pgd_t init_level4_pgt[];
-+extern pgd_t boot_level4_pgt[];
-+extern unsigned long __supported_pte_mask;
++	return retval;
++}
 +
-+#define swapper_pg_dir init_level4_pgt
++static inline unsigned long mach_get_cmos_time(void)
++{
++	unsigned int year, mon, day, hour, min, sec;
 +
-+extern int nonx_setup(char *str);
-+extern void paging_init(void);
-+extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
++	do {
++		sec = CMOS_READ(RTC_SECONDS);
++		min = CMOS_READ(RTC_MINUTES);
++		hour = CMOS_READ(RTC_HOURS);
++		day = CMOS_READ(RTC_DAY_OF_MONTH);
++		mon = CMOS_READ(RTC_MONTH);
++		year = CMOS_READ(RTC_YEAR);
++	} while (sec != CMOS_READ(RTC_SECONDS));
 +
-+extern unsigned long pgkern_mask;
++	if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
++		BCD_TO_BIN(sec);
++		BCD_TO_BIN(min);
++		BCD_TO_BIN(hour);
++		BCD_TO_BIN(day);
++		BCD_TO_BIN(mon);
++		BCD_TO_BIN(year);
++	}
 +
-+/*
-+ * ZERO_PAGE is a global shared page that is always zero: used
-+ * for zero-mapped memory areas etc..
-+ */
-+extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
-+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
++	year += 1900;
++	if (year < 1970)
++		year += 100;
 +
-+/*
-+ * PGDIR_SHIFT determines what a top-level page table entry can map
-+ */
-+#define PGDIR_SHIFT	39
-+#define PTRS_PER_PGD	512
++	return mktime(year, mon, day, hour, min, sec);
++}
 +
++#endif /* !_MACH_TIME_H */
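
The half-hour correction in mach_set_rtc_mmss() above is easy to
misread: it shifts the target minutes by 30 when the RTC appears to be
running on a half-hour time zone, so the final difference stays under
the 30-minute update limit. A small sketch with hypothetical values:

/* Hypothetical illustration of the half-hour-timezone rounding in
 * mach_set_rtc_mmss(). An RTC kept on a +30min zone reads 50 when local
 * wall time says 20; the rounding shifts the target to 50. */
#include <assert.h>
#include <stdlib.h>

int main(void)
{
	int cmos_minutes = 50;	/* what the RTC currently shows */
	int real_minutes = 20;	/* what we want to set */

	if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
		real_minutes += 30;	/* correct for half hour time zone */
	real_minutes %= 60;

	assert(real_minutes == 50);
	assert(abs(real_minutes - cmos_minutes) < 30);	/* update allowed */
	return 0;
}
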
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/mach_timer.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/mach_timer.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,50 @@
 +/*
-+ * 3rd level page
++ *  include/asm-i386/mach-default/mach_timer.h
++ *
++ *  Machine specific calibrate_tsc() for generic.
++ *  Split out from timer_tsc.c by Osamu Tomita <tomita at cinet.co.jp>
 + */
-+#define PUD_SHIFT	30
-+#define PTRS_PER_PUD	512
-+
-+/*
-+ * PMD_SHIFT determines the size of the area a middle-level
-+ * page table can map
++/* ------ Calibrate the TSC ------- 
++ * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset().
++ * Too much 64-bit arithmetic here to do this cleanly in C, and for
++ * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2)
++ * output busy loop as low as possible. We avoid reading the CTC registers
++ * directly because of the awkward 8-bit access mechanism of the 82C54
++ * device.
 + */
-+#define PMD_SHIFT	21
-+#define PTRS_PER_PMD	512
++#ifndef _MACH_TIMER_H
++#define _MACH_TIMER_H
 +
-+/*
-+ * entries per page directory level
-+ */
-+#define PTRS_PER_PTE	512
++#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */
++#define CALIBRATE_LATCH	\
++	((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000)
 +
-+#define pte_ERROR(e) \
-+	printk("%s:%d: bad pte %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
-+	       &(e), __pte_val(e), pte_pfn(e))
-+#define pmd_ERROR(e) \
-+	printk("%s:%d: bad pmd %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
-+	       &(e), __pmd_val(e), pmd_pfn(e))
-+#define pud_ERROR(e) \
-+	printk("%s:%d: bad pud %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
-+	       &(e), __pud_val(e), (pud_val(e) & __PHYSICAL_MASK) >> PAGE_SHIFT)
-+#define pgd_ERROR(e) \
-+	printk("%s:%d: bad pgd %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
-+	       &(e), __pgd_val(e), (pgd_val(e) & __PHYSICAL_MASK) >> PAGE_SHIFT)
++static inline void mach_prepare_counter(void)
++{
++	/* Set the Gate high, disable speaker */
++	outb((inb(0x61) & ~0x02) | 0x01, 0x61);
 +
-+#define pgd_none(x)	(!__pgd_val(x))
-+#define pud_none(x)	(!__pud_val(x))
++	/*
++	 * Now let's take care of CTC channel 2
++	 *
++	 * Set the Gate high, program CTC channel 2 for mode 0,
++	 * (interrupt on terminal count mode), binary count,
++	 * load 5 * LATCH count, (LSB and MSB) to begin countdown.
++	 *
++	 * Some devices need a delay here.
++	 */
++	outb(0xb0, 0x43);			/* binary, mode 0, LSB/MSB, Ch 2 */
++	outb_p(CALIBRATE_LATCH & 0xff, 0x42);	/* LSB of count */
++	outb_p(CALIBRATE_LATCH >> 8, 0x42);       /* MSB of count */
++}
 +
-+static inline void set_pte(pte_t *dst, pte_t val)
++static inline void mach_countup(unsigned long *count_p)
 +{
-+	*dst = val;
++	unsigned long count = 0;
++	do {
++		count++;
++	} while ((inb_p(0x61) & 0x20) == 0);
++	*count_p = count;
 +}
 +
-+#define set_pmd(pmdptr, pmdval) xen_l2_entry_update(pmdptr, (pmdval))
-+#define set_pud(pudptr, pudval) xen_l3_entry_update(pudptr, (pudval))
-+#define set_pgd(pgdptr, pgdval) xen_l4_entry_update(pgdptr, (pgdval))
++#endif /* !_MACH_TIMER_H */
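
Plugging in the PC value of CLOCK_TICK_RATE (the 1193182 Hz PIT input
clock) shows what CALIBRATE_LATCH works out to; a hedged sketch of the
arithmetic, restating the macro so it compiles standalone:

/* Hypothetical check of the CALIBRATE_LATCH arithmetic, assuming the
 * standard PC PIT input clock of 1193182 Hz. */
#include <assert.h>

#define CLOCK_TICK_RATE		1193182	/* PIT input clock, Hz */
#define CALIBRATE_TIME_MSEC	30	/* 30 msecs */
#define CALIBRATE_LATCH	\
	((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000)

int main(void)
{
	/* 1193182 * 30 = 35795460; + 500, / 1000 => 35795 PIT ticks */
	assert(CALIBRATE_LATCH == 35795);
	assert(CALIBRATE_LATCH <= 0xffff);	/* fits the 16-bit counter */
	return 0;
}
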
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/setup_arch_post.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/setup_arch_post.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,63 @@
++/**
++ * machine_specific_* - Hooks for machine specific setup.
++ *
++ * Description:
++ *	This is included late in kernel/setup.c so that it can make
++ *	use of all of the static functions.
++ **/
 +
-+static inline void pud_clear (pud_t * pud)
-+{
-+	set_pud(pud, __pud(0));
-+}
++#include <xen/interface/callback.h>
 +
-+#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
++extern void hypervisor_callback(void);
++extern void failsafe_callback(void);
++extern void nmi(void);
 +
-+static inline void pgd_clear (pgd_t * pgd)
++static void __init machine_specific_arch_setup(void)
 +{
-+        set_pgd(pgd, __pgd(0));
-+        set_pgd(__user_pgd(pgd), __pgd(0));
-+}
++	int ret;
++	static struct callback_register __initdata event = {
++		.type = CALLBACKTYPE_event,
++		.address = (unsigned long) hypervisor_callback,
++	};
++	static struct callback_register __initdata failsafe = {
++		.type = CALLBACKTYPE_failsafe,
++		.address = (unsigned long)failsafe_callback,
++	};
++	static struct callback_register __initdata syscall = {
++		.type = CALLBACKTYPE_syscall,
++		.address = (unsigned long)system_call,
++	};
++#ifdef CONFIG_X86_LOCAL_APIC
++	static struct callback_register __initdata nmi_cb = {
++		.type = CALLBACKTYPE_nmi,
++		.address = (unsigned long)nmi,
++	};
++#endif
 +
-+#define pud_page(pud) \
-+    ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
++	ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
++	if (ret == 0)
++		ret = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
++	if (ret == 0)
++		ret = HYPERVISOR_callback_op(CALLBACKOP_register, &syscall);
++#if CONFIG_XEN_COMPAT <= 0x030002
++	if (ret == -ENOSYS)
++		ret = HYPERVISOR_set_callbacks(
++			event.address,
++			failsafe.address,
++			syscall.address);
++#endif
++	BUG_ON(ret);
 +
-+#define pte_same(a, b)		((a).pte == (b).pte)
++#ifdef CONFIG_X86_LOCAL_APIC
++	ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb);
++#if CONFIG_XEN_COMPAT <= 0x030002
++	if (ret == -ENOSYS) {
++		static struct xennmi_callback __initdata cb = {
++			.handler_address = (unsigned long)nmi
++		};
 +
-+#define pte_pgprot(a)	(__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))
++		HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
++	}
++#endif
++#endif
++}
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/mach-xen/setup_arch_pre.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/asm-x86_64/mach-xen/setup_arch_pre.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,5 @@
++/* Hook to call BIOS initialisation function */
 +
-+#define PMD_SIZE	(1UL << PMD_SHIFT)
-+#define PMD_MASK	(~(PMD_SIZE-1))
-+#define PUD_SIZE	(1UL << PUD_SHIFT)
-+#define PUD_MASK	(~(PUD_SIZE-1))
-+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
-+#define PGDIR_MASK	(~(PGDIR_SIZE-1))
++#define ARCH_SETUP machine_specific_arch_setup();
 +
-+#define USER_PTRS_PER_PGD	((TASK_SIZE-1)/PGDIR_SIZE+1)
-+#define FIRST_USER_ADDRESS	0
++static void __init machine_specific_arch_setup(void);
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/msi.h
+--- a/include/asm-x86_64/msi.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-x86_64/msi.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -7,14 +7,21 @@
+ #define ASM_MSI_H
+ 
+ #include <asm/desc.h>
++#ifndef CONFIG_XEN
+ #include <asm/mach_apic.h>
++#endif
+ #include <asm/smp.h>
+ 
++#ifndef CONFIG_XEN
+ #define LAST_DEVICE_VECTOR	(FIRST_SYSTEM_VECTOR - 1)
++#else
++#define LAST_DYNAMIC_VECTOR 0xdf
++#define LAST_DEVICE_VECTOR	(LAST_DYNAMIC_VECTOR)
++#endif
 +
-+#ifndef __ASSEMBLY__
-+#define MAXMEM		 0x3fffffffffffUL
-+#define VMALLOC_START    0xffffc20000000000UL
-+#define VMALLOC_END      0xffffe1ffffffffffUL
-+#define MODULES_VADDR    0xffffffff88000000UL
-+#define MODULES_END      0xfffffffffff00000UL
-+#define MODULES_LEN   (MODULES_END - MODULES_VADDR)
+ #define MSI_TARGET_CPU_SHIFT	12
+ 
+ extern struct msi_ops msi_apic_ops;
+-
+ static inline int msi_arch_init(void)
+ {
+ 	msi_register(&msi_apic_ops);
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/signal.h
+--- a/include/asm-x86_64/signal.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-x86_64/signal.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -22,10 +22,6 @@
+ typedef struct {
+ 	unsigned long sig[_NSIG_WORDS];
+ } sigset_t;
+-
+-
+-struct pt_regs; 
+-asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
+ 
+ 
+ #else
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/thread_info.h
+--- a/include/asm-x86_64/thread_info.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-x86_64/thread_info.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -114,6 +114,7 @@
+ #define TIF_IRET		5	/* force IRET */
+ #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
+ #define TIF_SECCOMP		8	/* secure computing */
++#define TIF_RESTORE_SIGMASK	9	/* restore signal mask in do_signal */
+ /* 16 free */
+ #define TIF_IA32		17	/* 32bit process */ 
+ #define TIF_FORK		18	/* ret_from_fork */
+@@ -128,6 +129,7 @@
+ #define _TIF_IRET		(1<<TIF_IRET)
+ #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
+ #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
++#define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
+ #define _TIF_IA32		(1<<TIF_IA32)
+ #define _TIF_FORK		(1<<TIF_FORK)
+ #define _TIF_ABI_PENDING	(1<<TIF_ABI_PENDING)
+diff -r d894e36cfc30 -r 0aa021803deb include/asm-x86_64/unistd.h
+--- a/include/asm-x86_64/unistd.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/asm-x86_64/unistd.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -600,9 +600,9 @@
+ #define __NR_faccessat		269
+ __SYSCALL(__NR_faccessat, sys_faccessat)
+ #define __NR_pselect6		270
+-__SYSCALL(__NR_pselect6, sys_ni_syscall)	/* for now */
++__SYSCALL(__NR_pselect6, sys_pselect6)
+ #define __NR_ppoll		271
+-__SYSCALL(__NR_ppoll,	sys_ni_syscall)		/* for now */
++__SYSCALL(__NR_ppoll,	sys_ppoll)
+ #define __NR_unshare		272
+ __SYSCALL(__NR_unshare,	sys_unshare)
+ #define __NR_set_robust_list	273
+@@ -658,6 +658,7 @@
+ #define __ARCH_WANT_SYS_SIGPENDING
+ #define __ARCH_WANT_SYS_SIGPROCMASK
+ #define __ARCH_WANT_SYS_RT_SIGACTION
++#define __ARCH_WANT_SYS_RT_SIGSUSPEND
+ #define __ARCH_WANT_SYS_TIME
+ #define __ARCH_WANT_COMPAT_SYS_TIME
+ 
+diff -r d894e36cfc30 -r 0aa021803deb include/linux/aio.h
+--- a/include/linux/aio.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/linux/aio.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -191,6 +191,11 @@
+ 	struct aio_ring_info	ring_info;
+ 
+ 	struct work_struct	wq;
++#ifdef CONFIG_EPOLL
++	/* poll integration */
++	wait_queue_head_t       poll_wait;
++	struct file		*file;
++#endif
+ };
+ 
+ /* prototypes */
+diff -r d894e36cfc30 -r 0aa021803deb include/linux/bio.h
+--- a/include/linux/bio.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/linux/bio.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -172,12 +172,27 @@
+ #define bio_offset(bio)		bio_iovec((bio))->bv_offset
+ #define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
+ #define bio_sectors(bio)	((bio)->bi_size >> 9)
+-#define bio_cur_sectors(bio)	(bio_iovec(bio)->bv_len >> 9)
+-#define bio_data(bio)		(page_address(bio_page((bio))) + bio_offset((bio)))
+ #define bio_barrier(bio)	((bio)->bi_rw & (1 << BIO_RW_BARRIER))
+ #define bio_sync(bio)		((bio)->bi_rw & (1 << BIO_RW_SYNC))
+ #define bio_failfast(bio)	((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
+ #define bio_rw_ahead(bio)	((bio)->bi_rw & (1 << BIO_RW_AHEAD))
++#define bio_empty_barrier(bio)	(bio_barrier(bio) && !(bio)->bi_size)
 +
-+#define _PAGE_BIT_PRESENT	0
-+#define _PAGE_BIT_RW		1
-+#define _PAGE_BIT_USER		2
-+#define _PAGE_BIT_PWT		3
-+#define _PAGE_BIT_PCD		4
-+#define _PAGE_BIT_ACCESSED	5
-+#define _PAGE_BIT_DIRTY		6
-+#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
-+#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
-+#define _PAGE_BIT_NX           63       /* No execute: only valid after cpuid check */
++static inline unsigned int bio_cur_sectors(struct bio *bio)
++{
++	if (bio->bi_vcnt)
++		return bio_iovec(bio)->bv_len >> 9;
 +
-+#define _PAGE_PRESENT	0x001
-+#define _PAGE_RW	0x002
-+#define _PAGE_USER	0x004
-+#define _PAGE_PWT	0x008
-+#define _PAGE_PCD	0x010
-+#define _PAGE_ACCESSED	0x020
-+#define _PAGE_DIRTY	0x040
-+#define _PAGE_PSE	0x080	/* 2MB page */
-+#define _PAGE_FILE	0x040	/* nonlinear file mapping, saved PTE; unset:swap */
-+#define _PAGE_GLOBAL	0x100	/* Global TLB entry */
++	return 0;
++}
 +
-+#define _PAGE_PROTNONE	0x080	/* If not present */
-+#define _PAGE_NX        (1UL<<_PAGE_BIT_NX)
++static inline void *bio_data(struct bio *bio)
++{
++	if (bio->bi_vcnt)
++		return page_address(bio_page(bio)) + bio_offset(bio);
 +
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+extern unsigned int __kernel_page_user;
-+#else
-+#define __kernel_page_user 0
++	return NULL;
++}
+ 
+ /*
+  * will die
+diff -r d894e36cfc30 -r 0aa021803deb include/linux/blkdev.h
+--- a/include/linux/blkdev.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/linux/blkdev.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -506,6 +506,8 @@
+ #define blk_barrier_rq(rq)	((rq)->flags & REQ_HARDBARRIER)
+ #define blk_fua_rq(rq)		((rq)->flags & REQ_FUA)
+ 
++#define blk_empty_barrier(rq)   (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
++
+ #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
+ 
+ #define rq_data_dir(rq)		((rq)->flags & 1)
+diff -r d894e36cfc30 -r 0aa021803deb include/linux/crash_dump.h
+--- a/include/linux/crash_dump.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/linux/crash_dump.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -14,5 +14,13 @@
+ extern const struct file_operations proc_vmcore_operations;
+ extern struct proc_dir_entry *proc_vmcore;
+ 
++/* Architecture code defines this if there are other possible ELF
++ * machine types, e.g. on bi-arch capable hardware. */
++#ifndef vmcore_elf_check_arch_cross
++#define vmcore_elf_check_arch_cross(x) 0
 +#endif
 +
-+#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-+#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY | __kernel_page_user)
++#define vmcore_elf_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x))
++
+ #endif /* CONFIG_CRASH_DUMP */
+ #endif /* LINUX_CRASHDUMP_H */
+diff -r d894e36cfc30 -r 0aa021803deb include/linux/elfnote.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/linux/elfnote.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,104 @@
++#ifndef _LINUX_ELFNOTE_H
++#define _LINUX_ELFNOTE_H
++/*
++ * Helper macros to generate ELF Note structures, which are put into a
++ * PT_NOTE segment of the final vmlinux image.  These are useful for
++ * including name-value pairs of metadata into the kernel binary (or
++ * modules?) for use by external programs.
++ *
++ * Each note has three parts: a name, a type and a desc.  The name is
++ * intended to distinguish the note's originator, so it would be a
++ * company, project, subsystem, etc; it must be in a suitable form for
++ * use in a section name.  The type is an integer which is used to tag
++ * the data, and is considered to be within the "name" namespace (so
++ * "FooCo"'s type 42 is distinct from "BarProj"'s type 42).  The
++ * "desc" field is the actual data.  There are no constraints on the
++ * desc field's contents, though typically they're fairly small.
++ *
++ * All notes from a given NAME are put into a section named
++ * .note.NAME.  When the kernel image is finally linked, all the notes
++ * are packed into a single .notes section, which is mapped into the
++ * PT_NOTE segment.  Because notes for a given name are grouped into
++ * the same section, they'll all be adjacent in the output file.
++ *
++ * This file defines macros for both C and assembler use.  Their
++ * syntax is slightly different, but they're semantically similar.
++ *
++ * See the ELF specification for more detail about ELF notes.
++ */
 +
-+#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
++#ifdef __ASSEMBLER__
++/*
++ * Generate a structure with the same shape as Elf{32,64}_Nhdr (which
++ * turn out to be the same size and shape), followed by the name and
++ * desc data with appropriate padding.  The 'desctype' argument is the
++ * assembler pseudo op defining the type of the data e.g. .asciz while
++ * 'descdata' is the data itself e.g.  "hello, world".
++ *
++ * e.g. ELFNOTE(XYZCo, 42, .asciz, "forty-two")
++ *      ELFNOTE(XYZCo, 12, .long, 0xdeadbeef)
++ */
++#ifdef __STDC__
++#define ELFNOTE(name, type, desctype, descdata...) \
++.pushsection .note.name			;	\
++  .align 4				;	\
++  .long 2f - 1f		/* namesz */	;	\
++  .long 4f - 3f		/* descsz */	;	\
++  .long type				;	\
++1:.asciz #name				;	\
++2:.align 4				;	\
++3:desctype descdata			;	\
++4:.align 4				;	\
++.popsection
++#else /* !__STDC__, i.e. -traditional */
++#define ELFNOTE(name, type, desctype, descdata) \
++.pushsection .note.name			;	\
++  .align 4				;	\
++  .long 2f - 1f		/* namesz */	;	\
++  .long 4f - 3f		/* descsz */	;	\
++  .long type				;	\
++1:.asciz "name"				;	\
++2:.align 4				;	\
++3:desctype descdata			;	\
++4:.align 4				;	\
++.popsection
++#endif /* __STDC__ */
++#else	/* !__ASSEMBLER__ */
++#include <linux/elf.h>
++/*
++ * Use an anonymous structure which matches the shape of
++ * Elf{32,64}_Nhdr, but includes the name and desc data.  The size and
++ * type of name and desc depend on the macro arguments.  "name" must
++ * be a literal string, and "desc" must be passed by value.  You may
++ * only define one note per line, since __LINE__ is used to generate
++ * unique symbols.
++ */
++#define _ELFNOTE_PASTE(a,b)	a##b
++#define _ELFNOTE(size, name, unique, type, desc)			\
++	static const struct {						\
++		struct elf##size##_note _nhdr;				\
++		unsigned char _name[sizeof(name)]			\
++		__attribute__((aligned(sizeof(Elf##size##_Word))));	\
++		typeof(desc) _desc					\
++			     __attribute__((aligned(sizeof(Elf##size##_Word)))); \
++	} _ELFNOTE_PASTE(_note_, unique)				\
++		__attribute_used__					\
++		__attribute__((section(".note." name),			\
++			       aligned(sizeof(Elf##size##_Word)),	\
++			       unused)) = {				\
++		{							\
++			sizeof(name),					\
++			sizeof(desc),					\
++			type,						\
++		},							\
++		name,							\
++		desc							\
++	}
++#define ELFNOTE(size, name, type, desc)		\
++	_ELFNOTE(size, name, __LINE__, type, desc)
 +
-+#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-+#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-+#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-+#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-+#define PAGE_COPY PAGE_COPY_NOEXEC
-+#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-+#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-+#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-+#define __PAGE_KERNEL \
-+	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
-+#define __PAGE_KERNEL_EXEC \
-+	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | __kernel_page_user)
-+#define __PAGE_KERNEL_NOCACHE \
-+	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
-+#define __PAGE_KERNEL_RO \
-+	(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
-+#define __PAGE_KERNEL_VSYSCALL \
-+	(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-+#define __PAGE_KERNEL_VSYSCALL_NOCACHE \
-+	(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD)
-+#define __PAGE_KERNEL_LARGE \
-+	(__PAGE_KERNEL | _PAGE_PSE)
-+#define __PAGE_KERNEL_LARGE_EXEC \
-+	(__PAGE_KERNEL_EXEC | _PAGE_PSE)
++#define ELFNOTE32(name, type, desc) ELFNOTE(32, name, type, desc)
++#define ELFNOTE64(name, type, desc) ELFNOTE(64, name, type, desc)
++#endif	/* __ASSEMBLER__ */
 +
++#endif /* _LINUX_ELFNOTE_H */
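
On the C side, one ELFNOTE32()/ELFNOTE64() per line is all a caller
needs; the macro expands to the anonymous structure below. A standalone
sketch of that expansion, with a made-up note name and type (the real
users in this tree are the Xen guest notes):

/* Hypothetical hand-expanded equivalent of
 *	ELFNOTE32("XYZCo", 42, 0xdeadbeef);
 * an Elf32_Nhdr-shaped header followed by padded name and desc data,
 * placed in a .note.XYZCo section. `readelf -n` shows the result. */
#include <stdint.h>

static const struct {
	uint32_t namesz, descsz, type;	/* Elf32_Nhdr layout */
	char name[8];			/* "XYZCo" plus padding */
	uint32_t desc;
} example_note
	__attribute__((used, section(".note.XYZCo"), aligned(4))) = {
	sizeof("XYZCo"),		/* namesz, including the NUL */
	sizeof(uint32_t),		/* descsz */
	42,				/* type, within the XYZCo namespace */
	"XYZCo",
	0xdeadbeef
};

int main(void) { return 0; }
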
+diff -r d894e36cfc30 -r 0aa021803deb include/linux/eventpoll.h
+--- a/include/linux/eventpoll.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/linux/eventpoll.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -90,6 +90,12 @@
+ 	eventpoll_release_file(file);
+ }
+ 
 +/*
-+ * We don't support GLOBAL page in xenolinux64
++ * called by aio code to create an fd that can poll the aio event queue
 + */
-+#define MAKE_GLOBAL(x) __pgprot((x))
-+
-+#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
-+#define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
-+#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
-+#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
-+#define PAGE_KERNEL_VSYSCALL32 __pgprot(__PAGE_KERNEL_VSYSCALL)
-+#define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
-+#define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
-+#define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)
++struct eventpoll;
++int ep_getfd(int *efd, struct inode **einode, struct file **efile,
++             struct eventpoll *ep, const struct file_operations *fops);
+ #else
+ 
+ static inline void eventpoll_init_file(struct file *file) {}
+diff -r d894e36cfc30 -r 0aa021803deb include/linux/highmem.h
+--- a/include/linux/highmem.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/linux/highmem.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -24,10 +24,16 @@
+ 
+ /* declarations for linux/mm/highmem.c */
+ unsigned int nr_free_highpages(void);
++#ifdef CONFIG_XEN
++void kmap_flush_unused(void);
++#endif
+ 
+ #else /* CONFIG_HIGHMEM */
+ 
+ static inline unsigned int nr_free_highpages(void) { return 0; }
++#ifdef CONFIG_XEN
++static inline void kmap_flush_unused(void) { }
++#endif
+ 
+ static inline void *kmap(struct page *page)
+ {
+diff -r d894e36cfc30 -r 0aa021803deb include/linux/interrupt.h
+--- a/include/linux/interrupt.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/linux/interrupt.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -166,6 +166,12 @@
+ 
+ #endif /* CONFIG_GENERIC_HARDIRQS */
+ 
++#ifdef CONFIG_HAVE_IRQ_IGNORE_UNHANDLED
++int irq_ignore_unhandled(unsigned int irq);
++#else
++#define irq_ignore_unhandled(irq) 0
++#endif
 +
-+/*         xwr */
-+#define __P000	PAGE_NONE
-+#define __P001	PAGE_READONLY
-+#define __P010	PAGE_COPY
-+#define __P011	PAGE_COPY
-+#define __P100	PAGE_READONLY_EXEC
-+#define __P101	PAGE_READONLY_EXEC
-+#define __P110	PAGE_COPY_EXEC
-+#define __P111	PAGE_COPY_EXEC
+ #ifndef __ARCH_SET_SOFTIRQ_PENDING
+ #define set_softirq_pending(x) (local_softirq_pending() = (x))
+ #define or_softirq_pending(x)  (local_softirq_pending() |= (x))
+diff -r d894e36cfc30 -r 0aa021803deb include/linux/ioport.h
+--- a/include/linux/ioport.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/linux/ioport.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -93,6 +93,9 @@
+ /* PC/ISA/whatever - the normal PC address spaces: IO and memory */
+ extern struct resource ioport_resource;
+ extern struct resource iomem_resource;
++#ifdef CONFIG_PROC_IOMEM_MACHINE
++extern struct resource iomem_machine_resource;
++#endif
+ 
+ extern int request_resource(struct resource *root, struct resource *new);
+ extern struct resource * ____request_resource(struct resource *root, struct resource *new);
+diff -r d894e36cfc30 -r 0aa021803deb include/linux/kexec.h
+--- a/include/linux/kexec.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/linux/kexec.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -29,6 +29,13 @@
+ 
+ #ifndef KEXEC_ARCH
+ #error KEXEC_ARCH not defined
++#endif
 +
-+#define __S000	PAGE_NONE
-+#define __S001	PAGE_READONLY
-+#define __S010	PAGE_SHARED
-+#define __S011	PAGE_SHARED
-+#define __S100	PAGE_READONLY_EXEC
-+#define __S101	PAGE_READONLY_EXEC
-+#define __S110	PAGE_SHARED_EXEC
-+#define __S111	PAGE_SHARED_EXEC
++#ifndef KEXEC_ARCH_HAS_PAGE_MACROS
++#define kexec_page_to_pfn(page)  page_to_pfn(page)
++#define kexec_pfn_to_page(pfn)   pfn_to_page(pfn)
++#define kexec_virt_to_phys(addr) virt_to_phys(addr)
++#define kexec_phys_to_virt(addr) phys_to_virt(addr)
+ #endif
+ 
+ /*
+@@ -91,6 +98,12 @@
+ extern NORET_TYPE void machine_kexec(struct kimage *image) ATTRIB_NORET;
+ extern int machine_kexec_prepare(struct kimage *image);
+ extern void machine_kexec_cleanup(struct kimage *image);
++#ifdef CONFIG_XEN
++extern int xen_machine_kexec_load(struct kimage *image);
++extern void xen_machine_kexec_unload(struct kimage *image);
++extern void xen_machine_kexec_setup_resources(void);
++extern void xen_machine_kexec_register_resources(struct resource *res);
++#endif
+ extern asmlinkage long sys_kexec_load(unsigned long entry,
+ 					unsigned long nr_segments,
+ 					struct kexec_segment __user *segments,
+@@ -107,6 +120,10 @@
+ int kexec_should_crash(struct task_struct *);
+ extern struct kimage *kexec_image;
+ extern struct kimage *kexec_crash_image;
++
++#ifndef kexec_flush_icache_page
++#define kexec_flush_icache_page(page)
++#endif
+ 
+ #define KEXEC_ON_CRASH  0x00000001
+ #define KEXEC_ARCH_MASK 0xffff0000
+@@ -131,6 +148,7 @@
+ typedef u32 note_buf_t[MAX_NOTE_BYTES/4];
+ extern note_buf_t *crash_notes;
+ 
++
+ #else /* !CONFIG_KEXEC */
+ struct pt_regs;
+ struct task_struct;
+diff -r d894e36cfc30 -r 0aa021803deb include/linux/mm.h
+--- a/include/linux/mm.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/linux/mm.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -164,6 +164,10 @@
+ #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
+ #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
+ #define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */
++#ifdef CONFIG_XEN
++#define VM_FOREIGN	0x04000000	/* Has pages belonging to another VM */
++#endif
++#define VM_ALWAYSDUMP	0x08000000	/* Always include in core dumps */
+ 
+ #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
+ #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
+@@ -202,6 +206,10 @@
+ 	/* notification that a previously read-only page is about to become
+ 	 * writable, if an error is returned it will cause a SIGBUS */
+ 	int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
++	/* Area-specific function for clearing the PTE at @ptep. Returns the
++	 * original value of @ptep. */
++	pte_t (*zap_pte)(struct vm_area_struct *vma, 
++			 unsigned long addr, pte_t *ptep, int is_fullmm);
+ #ifdef CONFIG_NUMA
+ 	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
+ 	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
+@@ -1027,6 +1035,13 @@
+ #define FOLL_GET	0x04	/* do get_page on page */
+ #define FOLL_ANON	0x08	/* give ZERO_PAGE if no pgtable */
+ 
++#ifdef CONFIG_XEN
++typedef int (*pte_fn_t)(pte_t *pte, struct page *pmd_page, unsigned long addr,
++			void *data);
++extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
++			       unsigned long size, pte_fn_t fn, void *data);
++#endif
 +
-+static inline unsigned long pgd_bad(pgd_t pgd)
-+{
-+       unsigned long val = __pgd_val(pgd);
-+       val &= ~PTE_MASK;
-+       val &= ~(_PAGE_USER | _PAGE_DIRTY);
-+       return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
-+}
+ #ifdef CONFIG_PROC_FS
+ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
+ #else
+diff -r d894e36cfc30 -r 0aa021803deb include/linux/moduleparam.h
+--- a/include/linux/moduleparam.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/linux/moduleparam.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -58,13 +58,23 @@
+ 	void *elem;
+ };
+ 
++/* On alpha, ia64 and ppc64 relocations to global data cannot go into
++   read-only sections (which is part of respective UNIX ABI on these
++   platforms). So 'const' makes no sense and even causes compile failures
++   with some compilers. */
++#if defined(CONFIG_ALPHA) || defined(CONFIG_IA64) || defined(CONFIG_PPC64)
++#define __moduleparam_const
++#else
++#define __moduleparam_const const
++#endif
++
+ /* This is the fundamental function for registering boot/module
+    parameters.  perm sets the visibility in driverfs: 000 means it's
+    not there, read bits mean it's readable, write bits mean it's
+    writable. */
+ #define __module_param_call(prefix, name, set, get, arg, perm)		\
+ 	static char __param_str_##name[] = prefix #name;		\
+-	static struct kernel_param const __param_##name			\
++	static struct kernel_param __moduleparam_const __param_##name	\
+ 	__attribute_used__						\
+     __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \
+ 	= { __param_str_##name, perm, set, get, arg }
+diff -r d894e36cfc30 -r 0aa021803deb include/linux/netfilter_bridge.h
+--- a/include/linux/netfilter_bridge.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/linux/netfilter_bridge.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -7,6 +7,7 @@
+ #include <linux/netfilter.h>
+ #if defined(__KERNEL__) && defined(CONFIG_BRIDGE_NETFILTER)
+ #include <linux/if_ether.h>
++#include <linux/if_vlan.h>
+ #endif
+ 
+ /* Bridge Hooks */
+@@ -54,16 +55,19 @@
+ 
+ 	if (skb->nf_bridge) {
+ 		if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
+-			err = skb_cow(skb, 18);
++			int header_size = ETH_HLEN + VLAN_HLEN;
++
++			err = skb_cow(skb, header_size);
+ 			if (err)
+ 				return err;
+-			memcpy(skb->data - 18, skb->nf_bridge->data, 18);
+-			skb_push(skb, 4);
++			memcpy(skb->data - header_size, skb->nf_bridge->data,
++			       header_size);
++			skb_push(skb, VLAN_HLEN);
+ 		} else {
+-			err = skb_cow(skb, 16);
++			err = skb_cow(skb, ETH_HLEN);
+ 			if (err)
+ 				return err;
+-			memcpy(skb->data - 16, skb->nf_bridge->data, 16);
++			memcpy(skb->data - ETH_HLEN, skb->nf_bridge->data, ETH_HLEN);
+ 		}
+ 	}
+ 	return 0;
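
The magic numbers being replaced here are plain header sizes: an
Ethernet header is 14 bytes and an 802.1Q VLAN tag adds 4, so the VLAN
path's old constant 18 was ETH_HLEN + VLAN_HLEN all along. A tiny check
of that arithmetic, assuming the standard values:

/* Hypothetical check that the named constants match the old literals. */
#include <assert.h>

#define ETH_HLEN	14	/* dest MAC + src MAC + ethertype */
#define VLAN_HLEN	4	/* 802.1Q TPID + TCI */

int main(void)
{
	assert(ETH_HLEN + VLAN_HLEN == 18);	/* old 802.1Q constant */
	return 0;
}
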
+diff -r d894e36cfc30 -r 0aa021803deb include/linux/oprofile.h
+--- a/include/linux/oprofile.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/linux/oprofile.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -16,6 +16,8 @@
+ #include <linux/types.h>
+ #include <linux/spinlock.h>
+ #include <asm/atomic.h>
 +
-+static inline unsigned long pud_bad(pud_t pud) 
-+{ 
-+       unsigned long val = __pud_val(pud);
-+       val &= ~PTE_MASK; 
-+       val &= ~(_PAGE_USER | _PAGE_DIRTY); 
-+       return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);      
-+} 
++#include <xen/interface/xenoprof.h>
+  
+ struct super_block;
+ struct dentry;
+@@ -27,6 +29,11 @@
+ 	/* create any necessary configuration files in the oprofile fs.
+ 	 * Optional. */
+ 	int (*create_files)(struct super_block * sb, struct dentry * root);
++	/* setup active domains with Xen */
++	int (*set_active)(int *active_domains, unsigned int adomains);
++	/* setup passive domains with Xen */
++	int (*set_passive)(int *passive_domains, unsigned int pdomains);
++
+ 	/* Do any necessary interrupt setup. Optional. */
+ 	int (*setup)(void);
+ 	/* Do any necessary interrupt shutdown. Optional. */
+@@ -78,6 +85,8 @@
+ /* add a backtrace entry, to be called from the ->backtrace callback */
+ void oprofile_add_trace(unsigned long eip);
+ 
++/* add a domain switch entry */
++int oprofile_add_domain_switch(int32_t domain_id);
+ 
+ /**
+  * Create a file of the given name as a child of the given root, with
+diff -r d894e36cfc30 -r 0aa021803deb include/linux/page-flags.h
+--- a/include/linux/page-flags.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/linux/page-flags.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -97,6 +97,8 @@
+  */
+ #define PG_uncached		31	/* Page has been mapped as uncached */
+ #endif
 +
-+#define set_pte_at(_mm,addr,ptep,pteval) do {				\
-+	if (((_mm) != current->mm && (_mm) != &init_mm) ||		\
-+	    HYPERVISOR_update_va_mapping((addr), (pteval), 0))		\
-+		set_pte((ptep), (pteval));				\
++#define PG_foreign		20	/* Page is owned by foreign allocator. */
+ 
+ /*
+  * Manipulation of page state flags
+@@ -247,6 +249,19 @@
+ #define SetPageUncached(page)	set_bit(PG_uncached, &(page)->flags)
+ #define ClearPageUncached(page)	clear_bit(PG_uncached, &(page)->flags)
+ 
++#define PageForeign(page)	test_bit(PG_foreign, &(page)->flags)
++#define SetPageForeign(_page, dtor) do {		\
++	set_bit(PG_foreign, &(_page)->flags);		\
++	BUG_ON((dtor) == (void (*)(struct page *))0);	\
++	(_page)->index = (long)(dtor);			\
 +} while (0)
++#define ClearPageForeign(page) do {			\
++	clear_bit(PG_foreign, &(page)->flags);		\
++	(page)->index = 0;				\
++} while (0)
++#define PageForeignDestructor(_page)			\
++	((void (*)(struct page *))(_page)->index)(_page)
 +
-+#define pte_none(x)	(!(x).pte)
-+#define pte_present(x)	((x).pte & (_PAGE_PRESENT | _PAGE_PROTNONE))
-+#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
-+
-+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-+
-+#define __pte_mfn(_pte) (((_pte).pte & PTE_MASK) >> PAGE_SHIFT)
-+#define pte_mfn(_pte) ((_pte).pte & _PAGE_PRESENT ? \
-+	__pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte)))
-+#define pte_pfn(_pte) ((_pte).pte & _PAGE_PRESENT ? \
-+	mfn_to_local_pfn(__pte_mfn(_pte)) : __pte_mfn(_pte))
-+
-+#define pte_page(x)	pfn_to_page(pte_pfn(x))
-+
-+static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
-+{
-+	unsigned long pte = page_nr << PAGE_SHIFT;
-+	pte |= pgprot_val(pgprot);
-+	pte &= __supported_pte_mask;
-+	return __pte(pte);
-+}
-+
-+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-+{
-+	pte_t pte = *ptep;
-+	if (!pte_none(pte)) {
-+		if (mm != &init_mm)
-+			pte = __pte_ma(xchg(&ptep->pte, 0));
-+		else
-+			HYPERVISOR_update_va_mapping(addr, __pte(0), 0);
-+	}
-+	return pte;
-+}
-+
-+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
+ struct page;	/* forward declaration */
+ 
+ int test_clear_page_dirty(struct page *page);
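
The PG_foreign macros above overload page->index to stash a per-page
destructor, which the free path is expected to invoke instead of
returning the page to the allocator. A standalone sketch of that
protocol, with a hypothetical driver callback (the real users are the
Xen grant-table and backend drivers):

/* Hypothetical userspace analogue of the PG_foreign protocol: a stub
 * struct page stands in for the kernel's, and the function-pointer
 * cast mirrors what PageForeignDestructor() does with page->index. */
#include <assert.h>

struct page {
	unsigned long flags;
	long index;			/* doubles as the destructor slot */
};

#define PG_foreign 20

static int dtor_ran;

static void my_backend_dtor(struct page *page)	/* hypothetical */
{
	(void)page;
	dtor_ran = 1;
}

int main(void)
{
	struct page pg = { 0, 0 };

	/* SetPageForeign(&pg, my_backend_dtor) */
	pg.flags |= 1UL << PG_foreign;
	pg.index = (long)my_backend_dtor;

	/* free path: if (PageForeign(page)) PageForeignDestructor(page); */
	if (pg.flags & (1UL << PG_foreign))
		((void (*)(struct page *))pg.index)(&pg);

	assert(dtor_ran);
	return 0;
}
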
+diff -r d894e36cfc30 -r 0aa021803deb include/linux/pci.h
+--- a/include/linux/pci.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/linux/pci.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -152,6 +152,9 @@
+ 	 * directly, use the values stored here. They might be different!
+ 	 */
+ 	unsigned int	irq;
++#ifdef CONFIG_XEN
++	unsigned int    irq_old;
++#endif
+ 	struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
+ 
+ 	/* These fields are used by common fixups */
+@@ -596,6 +599,10 @@
+ 	struct msix_entry *entries, int nvec) {return -1;}
+ static inline void pci_disable_msix(struct pci_dev *dev) {}
+ static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) {}
++#ifdef CONFIG_XEN
++#define register_msi_get_owner(func) 0
++#define unregister_msi_get_owner(func) 0
++#endif
+ #else
+ extern void pci_scan_msi_device(struct pci_dev *dev);
+ extern int pci_enable_msi(struct pci_dev *dev);
+@@ -604,6 +611,10 @@
+ 	struct msix_entry *entries, int nvec);
+ extern void pci_disable_msix(struct pci_dev *dev);
+ extern void msi_remove_pci_irq_vectors(struct pci_dev *dev);
++#ifdef CONFIG_XEN
++extern int register_msi_get_owner(int (*func)(struct pci_dev *dev));
++extern int unregister_msi_get_owner(int (*func)(struct pci_dev *dev));
++#endif
+ #endif
+ 
+ extern void pci_block_user_cfg_access(struct pci_dev *dev);
+diff -r d894e36cfc30 -r 0aa021803deb include/linux/sched.h
+--- a/include/linux/sched.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/linux/sched.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -211,10 +211,15 @@
+ extern void scheduler_tick(void);
+ 
+ #ifdef CONFIG_DETECT_SOFTLOCKUP
++extern unsigned long softlockup_get_next_event(void);
+ extern void softlockup_tick(void);
+ extern void spawn_softlockup_task(void);
+ extern void touch_softlockup_watchdog(void);
+ #else
++static inline unsigned long softlockup_get_next_event(void)
 +{
-+	if (full) {
-+		pte_t pte = *ptep;
-+		if (mm->context.pinned)
-+			xen_l1_entry_update(ptep, __pte(0));
-+		else
-+			*ptep = __pte(0);
-+		return pte;
-+	}
-+	return ptep_get_and_clear(mm, addr, ptep);
++	return MAX_JIFFY_OFFSET;
 +}
-+
-+#define ptep_clear_flush(vma, addr, ptep)			\
-+({								\
-+	pte_t *__ptep = (ptep);					\
-+	pte_t __res = *__ptep;					\
-+	if (!pte_none(__res) &&					\
-+	    ((vma)->vm_mm != current->mm ||			\
-+	     HYPERVISOR_update_va_mapping(addr,	__pte(0), 	\
-+			(unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
-+				UVMF_INVLPG|UVMF_MULTI))) {	\
-+		__ptep->pte = 0;				\
-+		flush_tlb_page(vma, addr);			\
-+	}							\
-+	__res;							\
-+})
-+
-+/*
-+ * The following only work if pte_present() is true.
-+ * Undefined behaviour if not..
+ static inline void softlockup_tick(void)
+ {
+ }
+diff -r d894e36cfc30 -r 0aa021803deb include/linux/skbuff.h
+--- a/include/linux/skbuff.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/linux/skbuff.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -203,6 +203,8 @@
+  *	@local_df: allow local fragmentation
+  *	@cloned: Head may be cloned (check refcnt to be sure)
+  *	@nohdr: Payload reference only, must not modify header
++ *	@proto_data_valid: Protocol data validated since arriving at localhost
++ *	@proto_csum_blank: Protocol csum must be added before leaving localhost
+  *	@pkt_type: Packet class
+  *	@fclone: skbuff clone status
+  *	@ip_summed: Driver fed us an IP checksum
+@@ -282,7 +284,13 @@
+ 				nfctinfo:3;
+ 	__u8			pkt_type:3,
+ 				fclone:2,
++#ifndef CONFIG_XEN
+ 				ipvs_property:1;
++#else
++				ipvs_property:1,
++				proto_data_valid:1,
++				proto_csum_blank:1;
++#endif
+ 	__be16			protocol;
+ 
+ 	void			(*destructor)(struct sk_buff *skb);
+diff -r d894e36cfc30 -r 0aa021803deb include/linux/sysctl.h
+--- a/include/linux/sysctl.h	Tue Sep 09 11:37:38 2008 +0200
++++ b/include/linux/sysctl.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -6,10 +6,17 @@
+  ****************************************************************
+  ****************************************************************
+  **
++ **  WARNING:
+  **  The values in this file are exported to user space via 
+- **  the sysctl() binary interface.  However this interface
+- **  is unstable and deprecated and will be removed in the future. 
+- **  For a stable interface use /proc/sys.
++ **  the sysctl() binary interface.  Do *NOT* change the
++ **  numbering of any existing values here, and do not change
+ **  any numbers within any one set of values.  If you have
+ **  to redefine an existing interface, use a new number for it.
++ **  The kernel will then return -ENOTDIR to any application using
++ **  the old binary interface.
++ **
+ **  For new interfaces, unless you really need a binary number,
+ **  please use CTL_UNNUMBERED.
+  **
+  ****************************************************************
+  ****************************************************************
+@@ -48,6 +55,7 @@
+ #ifdef __KERNEL__
+ #define CTL_ANY		-1	/* Matches any name */
+ #define CTL_NONE	0
++#define CTL_UNNUMBERED	CTL_NONE	/* sysctl without a binary number */
+ #endif
+ 
+ enum
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/balloon.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/balloon.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,57 @@
++/******************************************************************************
++ * balloon.h
++ *
++ * Xen balloon driver - enables returning/claiming memory to/from Xen.
++ *
++ * Copyright (c) 2003, B Dragovic
++ * Copyright (c) 2003-2004, M Williamson, K Fraser
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
 + */
-+#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
-+static inline int pte_user(pte_t pte)		{ return __pte_val(pte) & _PAGE_USER; }
-+static inline int pte_read(pte_t pte)		{ return __pte_val(pte) & _PAGE_USER; }
-+static inline int pte_exec(pte_t pte)		{ return __pte_val(pte) & _PAGE_USER; }
-+static inline int pte_dirty(pte_t pte)		{ return __pte_val(pte) & _PAGE_DIRTY; }
-+static inline int pte_young(pte_t pte)		{ return __pte_val(pte) & _PAGE_ACCESSED; }
-+static inline int pte_write(pte_t pte)		{ return __pte_val(pte) & _PAGE_RW; }
-+static inline int pte_file(pte_t pte)		{ return __pte_val(pte) & _PAGE_FILE; }
-+static inline int pte_huge(pte_t pte)		{ return __pte_val(pte) & _PAGE_PSE; }
-+
-+static inline pte_t pte_rdprotect(pte_t pte)	{ __pte_val(pte) &= ~_PAGE_USER; return pte; }
-+static inline pte_t pte_exprotect(pte_t pte)	{ __pte_val(pte) &= ~_PAGE_USER; return pte; }
-+static inline pte_t pte_mkclean(pte_t pte)	{ __pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
-+static inline pte_t pte_mkold(pte_t pte)	{ __pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-+static inline pte_t pte_wrprotect(pte_t pte)	{ __pte_val(pte) &= ~_PAGE_RW; return pte; }
-+static inline pte_t pte_mkread(pte_t pte)	{ __pte_val(pte) |= _PAGE_USER; return pte; }
-+static inline pte_t pte_mkexec(pte_t pte)	{ __pte_val(pte) |= _PAGE_USER; return pte; }
-+static inline pte_t pte_mkdirty(pte_t pte)	{ __pte_val(pte) |= _PAGE_DIRTY; return pte; }
-+static inline pte_t pte_mkyoung(pte_t pte)	{ __pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-+static inline pte_t pte_mkwrite(pte_t pte)	{ __pte_val(pte) |= _PAGE_RW; return pte; }
-+static inline pte_t pte_mkhuge(pte_t pte)	{ __pte_val(pte) |= _PAGE_PSE; return pte; }
-+
-+#define ptep_test_and_clear_dirty(vma, addr, ptep)			\
-+({									\
-+	pte_t __pte = *(ptep);						\
-+	int __ret = pte_dirty(__pte);					\
-+	if (__ret)							\
-+		set_pte_at((vma)->vm_mm, addr, ptep, pte_mkclean(__pte)); \
-+	__ret;								\
-+})
-+
-+#define ptep_test_and_clear_young(vma, addr, ptep)			\
-+({									\
-+	pte_t __pte = *(ptep);						\
-+	int __ret = pte_young(__pte);					\
-+	if (__ret)							\
-+		set_pte_at((vma)->vm_mm, addr, ptep, pte_mkold(__pte)); \
-+	__ret;								\
-+})
 +
-+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-+{
-+	pte_t pte = *ptep;
-+	if (pte_write(pte))
-+		set_pte_at(mm, addr, ptep, pte_wrprotect(pte));
-+}
++#ifndef __ASM_BALLOON_H__
++#define __ASM_BALLOON_H__
 +
 +/*
-+ * Macro to mark a page protection value as "uncacheable".
++ * Inform the balloon driver that it should allow some slop for device-driver
++ * memory activities.
 + */
-+#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
++void balloon_update_driver_allowance(long delta);
 +
-+static inline int pmd_large(pmd_t pte) { 
-+	return (__pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
-+} 	
++/* Allocate/free a set of empty pages in low memory (i.e., no RAM mapped). */
++struct page **alloc_empty_pages_and_pagevec(int nr_pages);
++void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages);
 +
++void balloon_release_driver_page(struct page *page);
 +
 +/*
-+ * Conversion functions: convert a page and protection to a page entry,
-+ * and a page entry and page directory to the page they refer to.
++ * Prevent the balloon driver from changing the memory reservation during
++ * a driver critical region.
 + */
++extern spinlock_t balloon_lock;
++#define balloon_lock(__flags)   spin_lock_irqsave(&balloon_lock, __flags)
++#define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags)
 +
-+/*
-+ * Level 4 access.
-+ * Never use these in the common code.
++#endif /* __ASM_BALLOON_H__ */
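
balloon_lock()/balloon_unlock() are thin wrappers around
spin_lock_irqsave()/spin_unlock_irqrestore() on the shared balloon_lock, so
a driver simply brackets any sequence that must not race with the balloon
resizing the reservation. A minimal sketch:

    #include <xen/balloon.h>

    /* Sketch: keep the memory reservation stable across a
     * multi-step driver operation. */
    static void driver_critical_section(void)
    {
            unsigned long flags;

            balloon_lock(flags);
            /* ... the reservation cannot change here ... */
            balloon_unlock(flags);
    }
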
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/blkif.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/blkif.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,123 @@
++/* 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
 + */
-+#define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & PTE_MASK))
-+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-+#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
-+#define pgd_offset_k(address) (pgd_t *)(init_level4_pgt + pgd_index(address))
-+#define pgd_present(pgd) (__pgd_val(pgd) & _PAGE_PRESENT)
-+#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
-+
-+/* PUD - Level3 access */
-+/* to find an entry in a page-table-directory. */
-+#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-+#define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address))
-+#define pud_present(pud) (__pud_val(pud) & _PAGE_PRESENT)
 +
-+/* PMD  - Level 2 access */
-+#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
-+#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
++#ifndef __XEN_BLKIF_H__
++#define __XEN_BLKIF_H__
 +
-+#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-+#define pmd_offset(dir, address) ((pmd_t *) pud_page(*(dir)) + \
-+                                  pmd_index(address))
-+#define pmd_none(x)	(!__pmd_val(x))
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+/* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t.
-+   can temporarily clear it. */
-+#define pmd_present(x)	(__pmd_val(x))
-+#else
-+#define pmd_present(x)	(__pmd_val(x) & _PAGE_PRESENT)
-+#endif
-+#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
-+#define pmd_bad(x) ((__pmd_val(x) & ~(PTE_MASK | _PAGE_USER | _PAGE_PRESENT)) \
-+		    != (_KERNPG_TABLE & ~(_PAGE_USER | _PAGE_PRESENT)))
-+#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
-+#define pmd_pfn(x)  ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
++#include <xen/interface/io/ring.h>
++#include <xen/interface/io/blkif.h>
++#include <xen/interface/io/protocols.h>
 +
-+#define pte_to_pgoff(pte) ((__pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
-+#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
-+#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
++/* Not a real protocol.  Used to generate ring structs which contain
++ * only the elements common to all protocols.  This way we get a
++ * compiler-checkable way to use common struct elements, so we can
++ * avoid using switch(protocol) in a number of places.  */
++struct blkif_common_request {
++	char dummy;
++};
++struct blkif_common_response {
++	char dummy;
++};
 +
-+/* PTE - Level 1 access. */
++/* i386 protocol version */
++#pragma pack(push, 4)
++struct blkif_x86_32_request {
++	uint8_t        operation;    /* BLKIF_OP_???                         */
++	uint8_t        nr_segments;  /* number of segments                   */
++	blkif_vdev_t   handle;       /* only for read/write requests         */
++	uint64_t       id;           /* private guest value, echoed in resp  */
++	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
++	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++};
++struct blkif_x86_32_response {
++	uint64_t        id;              /* copied from request */
++	uint8_t         operation;       /* copied from request */
++	int16_t         status;          /* BLKIF_RSP_???       */
++};
++typedef struct blkif_x86_32_request blkif_x86_32_request_t;
++typedef struct blkif_x86_32_response blkif_x86_32_response_t;
++#pragma pack(pop)
 +
-+/* page, protection -> pte */
-+#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
-+#define mk_pte_huge(entry) (__pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)
-+ 
-+/* physical address -> PTE */
-+static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
-+{ 
-+	unsigned long pteval;
-+	pteval = physpage | pgprot_val(pgprot);
-+	return __pte(pteval);
-+}
-+ 
-+/* Change flags of a PTE */
-+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-+{ 
-+	/*
-+	 * Since this might change the present bit (which controls whether
-+	 * a pte_t object has undergone p2m translation), we must use
-+	 * pte_val() on the input pte and __pte() for the return value.
-+	 */
-+	unsigned long pteval = pte_val(pte);
++/* x86_64 protocol version */
++struct blkif_x86_64_request {
++	uint8_t        operation;    /* BLKIF_OP_???                         */
++	uint8_t        nr_segments;  /* number of segments                   */
++	blkif_vdev_t   handle;       /* only for read/write requests         */
++	uint64_t       __attribute__((__aligned__(8))) id;
++	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
++	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++};
++struct blkif_x86_64_response {
++	uint64_t       __attribute__((__aligned__(8))) id;
++	uint8_t         operation;       /* copied from request */
++	int16_t         status;          /* BLKIF_RSP_???       */
++};
++typedef struct blkif_x86_64_request blkif_x86_64_request_t;
++typedef struct blkif_x86_64_response blkif_x86_64_response_t;
 +
-+	pteval &= _PAGE_CHG_MASK;
-+	pteval |= pgprot_val(newprot);
-+	pteval &= __supported_pte_mask;
-+	return __pte(pteval);
-+}
++DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, struct blkif_common_response);
++DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, struct blkif_x86_32_response);
++DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, struct blkif_x86_64_response);
 +
-+#define pte_index(address) \
-+		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-+#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \
-+			pte_index(address))
++union blkif_back_rings {
++	blkif_back_ring_t        native;
++	blkif_common_back_ring_t common;
++	blkif_x86_32_back_ring_t x86_32;
++	blkif_x86_64_back_ring_t x86_64;
++};
++typedef union blkif_back_rings blkif_back_rings_t;
 +
-+/* x86-64 always has all page tables mapped. */
-+#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
-+#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address)
-+#define pte_unmap(pte) /* NOP */
-+#define pte_unmap_nested(pte) /* NOP */ 
++enum blkif_protocol {
++	BLKIF_PROTOCOL_NATIVE = 1,
++	BLKIF_PROTOCOL_X86_32 = 2,
++	BLKIF_PROTOCOL_X86_64 = 3,
++};
 +
-+#define update_mmu_cache(vma,address,pte) do { } while (0)
++static inline void blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_request_t *src)
++{
++	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
++	dst->operation = src->operation;
++	dst->nr_segments = src->nr_segments;
++	dst->handle = src->handle;
++	dst->id = src->id;
++	dst->sector_number = src->sector_number;
++	barrier();
++	if (n > dst->nr_segments)
++		n = dst->nr_segments;
++	for (i = 0; i < n; i++)
++		dst->seg[i] = src->seg[i];
++}
++
++static inline void blkif_get_x86_64_req(blkif_request_t *dst, blkif_x86_64_request_t *src)
++{
++	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
++	dst->operation = src->operation;
++	dst->nr_segments = src->nr_segments;
++	dst->handle = src->handle;
++	dst->id = src->id;
++	dst->sector_number = src->sector_number;
++	barrier();
++	if (n > dst->nr_segments)
++		n = dst->nr_segments;
++	for (i = 0; i < n; i++)
++		dst->seg[i] = src->seg[i];
++}
 +
++#endif /* __XEN_BLKIF_H__ */
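
The three DEFINE_RING_TYPES instantiations and the blkif_back_rings union
let a backend keep a single ring handle and defer the guest-ABI choice to
the copy-in step; the barrier()-guarded clamp of nr_segments in the helpers
above keeps a frontend from growing the segment count after validation. A
hedged sketch of the dispatch, modelled on how blkback consumes these types
(RING_GET_REQUEST comes from xen/interface/io/ring.h):

    #include <linux/string.h>
    #include <xen/blkif.h>

    /* Sketch: fetch request 'rc' from the shared ring in whatever
     * ABI was negotiated, normalising it to the native layout. */
    static void fetch_request(blkif_request_t *req,
                              union blkif_back_rings *rings,
                              enum blkif_protocol proto, RING_IDX rc)
    {
            switch (proto) {
            case BLKIF_PROTOCOL_NATIVE:
                    memcpy(req, RING_GET_REQUEST(&rings->native, rc),
                           sizeof(*req));
                    break;
            case BLKIF_PROTOCOL_X86_32:
                    blkif_get_x86_32_req(req,
                            RING_GET_REQUEST(&rings->x86_32, rc));
                    break;
            case BLKIF_PROTOCOL_X86_64:
                    blkif_get_x86_64_req(req,
                            RING_GET_REQUEST(&rings->x86_64, rc));
                    break;
            }
    }
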
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/compat_ioctl.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/compat_ioctl.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,45 @@
 +/*
-+ * Rules for using ptep_establish: the pte MUST be a user pte, and
-+ * must be a present->present transition.
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
++ *
++ * Copyright IBM Corp. 2007
++ *
++ * Authors: Jimi Xenidis <jimix at watson.ibm.com>
++ *          Hollis Blanchard <hollisb at us.ibm.com>
 + */
-+#define __HAVE_ARCH_PTEP_ESTABLISH
-+#define ptep_establish(vma, address, ptep, pteval)			\
-+	do {								\
-+		if ( likely((vma)->vm_mm == current->mm) ) {		\
-+			BUG_ON(HYPERVISOR_update_va_mapping(address,	\
-+				pteval,					\
-+				(unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
-+					UVMF_INVLPG|UVMF_MULTI));	\
-+		} else {						\
-+			xen_l1_entry_update(ptep, pteval);		\
-+			flush_tlb_page(vma, address);			\
-+		}							\
-+	} while (0)
-+
-+/* We only update the dirty/accessed state if we set
-+ * the dirty bit by hand in the kernel, since the hardware
-+ * will do the accessed bit for us, and we don't want to
-+ * race with other CPU's that might be updating the dirty
-+ * bit at the same time. */
-+#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-+#define ptep_set_access_flags(vma, address, ptep, entry, dirty)		\
-+	do {								\
-+		if (dirty)						\
-+			ptep_establish(vma, address, ptep, entry);	\
-+	} while (0)
 +
-+/* Encode and de-code a swap entry */
-+#define __swp_type(x)			(((x).val >> 1) & 0x3f)
-+#define __swp_offset(x)			((x).val >> 8)
-+#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
-+#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
-+#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
++#ifndef __LINUX_XEN_COMPAT_H__ 
++#define __LINUX_XEN_COMPAT_H__ 
 +
-+extern spinlock_t pgd_lock;
-+extern struct page *pgd_list;
-+void vmalloc_sync_all(void);
++#include <linux/compat.h>
 +
-+#endif /* !__ASSEMBLY__ */
++extern int privcmd_ioctl_32(int fd, unsigned int cmd, unsigned long arg);
++struct privcmd_mmap_32 {
++	int num;
++	domid_t dom;
++	compat_uptr_t entry;
++};
 +
-+extern int kern_addr_valid(unsigned long addr); 
++struct privcmd_mmapbatch_32 {
++	int num;     /* number of pages to populate */
++	domid_t dom; /* target domain */
++	__u64 addr;  /* virtual address */
++	compat_uptr_t arr; /* array of mfns - top nibble set on err */
++};
++#define IOCTL_PRIVCMD_MMAP_32                   \
++	_IOC(_IOC_NONE, 'P', 2, sizeof(struct privcmd_mmap_32))
++#define IOCTL_PRIVCMD_MMAPBATCH_32                  \
++	_IOC(_IOC_NONE, 'P', 3, sizeof(struct privcmd_mmapbatch_32))
++
++#endif /* __LINUX_XEN_COMPAT_H__ */
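
Each *_32 struct mirrors its native privcmd counterpart except that user
pointers shrink to compat_uptr_t, so the 64-bit ioctl path only has to
widen those members. A hedged sketch of the widening step (the native
struct here is a stand-in; the real one lives in the privcmd driver):

    #include <linux/compat.h>
    #include <xen/compat_ioctl.h>

    struct privcmd_mmapbatch_native {       /* stand-in for the real struct */
            int num;
            domid_t dom;
            __u64 addr;
            void __user *arr;
    };

    /* Sketch: rebuild a native mmapbatch argument from the 32-bit
     * layout; compat_ptr() widens the user pointer. */
    static void widen_mmapbatch(struct privcmd_mmapbatch_native *dst,
                                const struct privcmd_mmapbatch_32 *src)
    {
            dst->num  = src->num;
            dst->dom  = src->dom;
            dst->addr = src->addr;
            dst->arr  = compat_ptr(src->arr);
    }
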
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/cpu_hotplug.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/cpu_hotplug.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,41 @@
++#ifndef __XEN_CPU_HOTPLUG_H__
++#define __XEN_CPU_HOTPLUG_H__
 +
-+#define DOMID_LOCAL (0xFFFFU)
++#include <linux/kernel.h>
++#include <linux/cpumask.h>
 +
-+struct vm_area_struct;
++#if defined(CONFIG_X86) && defined(CONFIG_SMP)
++extern cpumask_t cpu_initialized_map;
++#endif
 +
-+int direct_remap_pfn_range(struct vm_area_struct *vma,
-+                            unsigned long address,
-+                            unsigned long mfn,
-+                            unsigned long size,
-+                            pgprot_t prot,
-+                            domid_t  domid);
++#if defined(CONFIG_HOTPLUG_CPU)
 +
-+int direct_kernel_remap_pfn_range(unsigned long address, 
-+				  unsigned long mfn,
-+				  unsigned long size, 
-+				  pgprot_t prot,
-+				  domid_t  domid);
++int cpu_up_check(unsigned int cpu);
++void init_xenbus_allowed_cpumask(void);
++int smp_suspend(void);
++void smp_resume(void);
 +
-+int create_lookup_pte_addr(struct mm_struct *mm,
-+                           unsigned long address,
-+                           uint64_t *ptep);
++void cpu_bringup(void);
 +
-+int touch_pte_range(struct mm_struct *mm,
-+                    unsigned long address,
-+                    unsigned long size);
++#else /* !defined(CONFIG_HOTPLUG_CPU) */
 +
-+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
-+		direct_remap_pfn_range(vma,vaddr,pfn,size,prot,DOMID_IO)
++#define cpu_up_check(cpu)		(0)
++#define init_xenbus_allowed_cpumask()	((void)0)
 +
-+#define MK_IOSPACE_PFN(space, pfn)	(pfn)
-+#define GET_IOSPACE(pfn)		0
-+#define GET_PFN(pfn)			(pfn)
++static inline int smp_suspend(void)
++{
++	if (num_online_cpus() > 1) {
++		printk(KERN_WARNING "Can't suspend SMP guests "
++		       "without CONFIG_HOTPLUG_CPU\n");
++		return -EOPNOTSUPP;
++	}
++	return 0;
++}
 +
-+#define HAVE_ARCH_UNMAPPED_AREA
++static inline void smp_resume(void)
++{
++}
 +
-+#define pgtable_cache_init()   do { } while (0)
-+#define check_pgt_cache()      do { } while (0)
++#endif /* !defined(CONFIG_HOTPLUG_CPU) */
 +
-+#define PAGE_AGP    PAGE_KERNEL_NOCACHE
-+#define HAVE_PAGE_AGP 1
++#endif /* __XEN_CPU_HOTPLUG_H__ */
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/driver_util.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/driver_util.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,14 @@
 +
-+/* fs/proc/kcore.c */
-+#define	kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
-+#define	kc_offset_to_vaddr(o) \
-+   (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
++#ifndef __ASM_XEN_DRIVER_UTIL_H__
++#define __ASM_XEN_DRIVER_UTIL_H__
 +
-+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-+#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
-+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-+#define __HAVE_ARCH_PTE_SAME
-+#include <asm-generic/pgtable.h>
++#include <linux/vmalloc.h>
++#include <linux/device.h>
 +
-+#endif /* _X86_64_PGTABLE_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/processor.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/processor.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/processor.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/processor.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,506 @@
-+/*
-+ * include/asm-x86_64/processor.h
-+ *
-+ * Copyright (C) 1994 Linus Torvalds
-+ */
++/* Allocate/destroy a 'vmalloc' VM area. */
++extern struct vm_struct *alloc_vm_area(unsigned long size);
++extern void free_vm_area(struct vm_struct *area);
 +
-+#ifndef __ASM_X86_64_PROCESSOR_H
-+#define __ASM_X86_64_PROCESSOR_H
++extern struct class *get_xen_class(void);
 +
-+#include <asm/segment.h>
-+#include <asm/page.h>
-+#include <asm/types.h>
-+#include <asm/sigcontext.h>
-+#include <asm/cpufeature.h>
-+#include <linux/threads.h>
-+#include <asm/msr.h>
-+#include <asm/current.h>
-+#include <asm/system.h>
-+#include <asm/mmsegment.h>
-+#include <asm/percpu.h>
-+#include <linux/personality.h>
-+#include <linux/cpumask.h>
++#endif /* __ASM_XEN_DRIVER_UTIL_H__ */
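
alloc_vm_area() hands back populated-but-unbacked kernel virtual address
space, which is exactly what a frontend needs before grant-mapping a
foreign page into the hole. A hedged sketch of the reserve/release pattern:

    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <xen/driver_util.h>

    /* Sketch: reserve one page of VA space, map into it, release. */
    static int with_foreign_mapping(void)
    {
            struct vm_struct *area;

            area = alloc_vm_area(PAGE_SIZE);
            if (area == NULL)
                    return -ENOMEM;

            /* ... issue GNTTABOP_map_grant_ref against area->addr ... */

            free_vm_area(area);
            return 0;
    }
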
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/evtchn.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/evtchn.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,160 @@
++/******************************************************************************
++ * evtchn.h
++ * 
++ * Communication via Xen event channels.
++ * Also definitions for the device that demuxes notifications to userspace.
++ * 
++ * Copyright (c) 2004-2005, K A Fraser
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
 +
-+#define TF_MASK		0x00000100
-+#define IF_MASK		0x00000200
-+#define IOPL_MASK	0x00003000
-+#define NT_MASK		0x00004000
-+#define VM_MASK		0x00020000
-+#define AC_MASK		0x00040000
-+#define VIF_MASK	0x00080000	/* virtual interrupt flag */
-+#define VIP_MASK	0x00100000	/* virtual interrupt pending */
-+#define ID_MASK		0x00200000
++#ifndef __ASM_EVTCHN_H__
++#define __ASM_EVTCHN_H__
 +
-+#define desc_empty(desc) \
-+               (!((desc)->a | (desc)->b))
++#include <linux/interrupt.h>
++#include <asm/hypervisor.h>
++#include <asm/ptrace.h>
++#include <asm/synch_bitops.h>
++#include <xen/interface/event_channel.h>
++#include <linux/smp.h>
 +
-+#define desc_equal(desc1, desc2) \
-+               (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
++/*
++ * LOW-LEVEL DEFINITIONS
++ */
 +
 +/*
-+ * Default implementation of macro that returns current
-+ * instruction pointer ("program counter").
++ * Dynamically bind an event source to an IRQ-like callback handler.
++ * On some platforms this may not be implemented via the Linux IRQ subsystem.
++ * The IRQ argument passed to the callback handler is the same as returned
++ * from the bind call. It may not correspond to a Linux IRQ number.
++ * Returns IRQ or negative errno.
 + */
-+#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
++int bind_caller_port_to_irqhandler(
++	unsigned int caller_port,
++	irqreturn_t (*handler)(int, void *, struct pt_regs *),
++	unsigned long irqflags,
++	const char *devname,
++	void *dev_id);
++int bind_listening_port_to_irqhandler(
++	unsigned int remote_domain,
++	irqreturn_t (*handler)(int, void *, struct pt_regs *),
++	unsigned long irqflags,
++	const char *devname,
++	void *dev_id);
++int bind_interdomain_evtchn_to_irqhandler(
++	unsigned int remote_domain,
++	unsigned int remote_port,
++	irqreturn_t (*handler)(int, void *, struct pt_regs *),
++	unsigned long irqflags,
++	const char *devname,
++	void *dev_id);
++int bind_virq_to_irqhandler(
++	unsigned int virq,
++	unsigned int cpu,
++	irqreturn_t (*handler)(int, void *, struct pt_regs *),
++	unsigned long irqflags,
++	const char *devname,
++	void *dev_id);
++int bind_ipi_to_irqhandler(
++	unsigned int ipi,
++	unsigned int cpu,
++	irqreturn_t (*handler)(int, void *, struct pt_regs *),
++	unsigned long irqflags,
++	const char *devname,
++	void *dev_id);
 +
 +/*
-+ *  CPU type and hardware bug flags. Kept separately for each CPU.
++ * Common unbind function for all event sources. Takes IRQ to unbind from.
++ * Automatically closes the underlying event channel (except for bindings
++ * made with bind_caller_port_to_irqhandler()).
 + */
++void unbind_from_irqhandler(unsigned int irq, void *dev_id);
 +
-+struct cpuinfo_x86 {
-+	__u8	x86;		/* CPU family */
-+	__u8	x86_vendor;	/* CPU vendor */
-+	__u8	x86_model;
-+	__u8	x86_mask;
-+	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
-+	__u32	x86_capability[NCAPINTS];
-+	char	x86_vendor_id[16];
-+	char	x86_model_id[64];
-+	int 	x86_cache_size;  /* in KB */
-+	int	x86_clflush_size;
-+	int	x86_cache_alignment;
-+	int	x86_tlbsize;	/* number of 4K pages in DTLB/ITLB combined(in pages)*/
-+        __u8    x86_virt_bits, x86_phys_bits;
-+	__u8	x86_max_cores;	/* cpuid returned max cores value */
-+        __u32   x86_power; 	
-+	__u32   extended_cpuid_level;	/* Max extended CPUID function supported */
-+	unsigned long loops_per_jiffy;
-+#ifdef CONFIG_SMP
-+	cpumask_t llc_shared_map;	/* cpus sharing the last level cache */
-+#endif
-+	__u8	apicid;
-+#ifdef CONFIG_SMP
-+	__u8	booted_cores;	/* number of cores as seen by OS */
-+	__u8	phys_proc_id;	/* Physical Processor id. */
-+	__u8	cpu_core_id;	/* Core id. */
-+#endif
-+} ____cacheline_aligned;
++void irq_resume(void);
 +
-+#define X86_VENDOR_INTEL 0
-+#define X86_VENDOR_CYRIX 1
-+#define X86_VENDOR_AMD 2
-+#define X86_VENDOR_UMC 3
-+#define X86_VENDOR_NEXGEN 4
-+#define X86_VENDOR_CENTAUR 5
-+#define X86_VENDOR_RISE 6
-+#define X86_VENDOR_TRANSMETA 7
-+#define X86_VENDOR_NUM 8
-+#define X86_VENDOR_UNKNOWN 0xff
++/* Entry point for notifications into Linux subsystems. */
++asmlinkage void evtchn_do_upcall(struct pt_regs *regs);
++
++/* Entry point for notifications into the userland character device. */
++void evtchn_device_upcall(int port);
++
++/* Mark a PIRQ as unavailable for dynamic allocation. */
++void evtchn_register_pirq(int irq);
++/* Map a Xen-supplied PIRQ to a dynamically allocated one. */
++int evtchn_map_pirq(int irq, int xen_pirq);
++/* Look up a Xen-supplied PIRQ for a dynamically allocated one. */
++int evtchn_get_xen_pirq(int irq);
++
++void mask_evtchn(int port);
++void disable_all_local_evtchn(void);
++void unmask_evtchn(int port);
 +
 +#ifdef CONFIG_SMP
-+extern struct cpuinfo_x86 cpu_data[];
-+#define current_cpu_data cpu_data[smp_processor_id()]
++void rebind_evtchn_to_cpu(int port, unsigned int cpu);
 +#else
-+#define cpu_data (&boot_cpu_data)
-+#define current_cpu_data boot_cpu_data
++#define rebind_evtchn_to_cpu(port, cpu)	((void)0)
 +#endif
 +
-+extern char ignore_irq13;
++static inline int test_and_set_evtchn_mask(int port)
++{
++	shared_info_t *s = HYPERVISOR_shared_info;
++	return synch_test_and_set_bit(port, s->evtchn_mask);
++}
 +
-+extern void identify_cpu(struct cpuinfo_x86 *);
-+extern void print_cpu_info(struct cpuinfo_x86 *);
-+extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
-+extern unsigned short num_cache_leaves;
++static inline void clear_evtchn(int port)
++{
++	shared_info_t *s = HYPERVISOR_shared_info;
++	synch_clear_bit(port, s->evtchn_pending);
++}
 +
-+/*
-+ * EFLAGS bits
-+ */
-+#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
-+#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
-+#define X86_EFLAGS_AF	0x00000010 /* Auxillary carry Flag */
-+#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
-+#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
-+#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
-+#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
-+#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
-+#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
-+#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
-+#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
-+#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
-+#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
-+#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
-+#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
-+#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
-+#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
++static inline void notify_remote_via_evtchn(int port)
++{
++	struct evtchn_send send = { .port = port };
++	VOID(HYPERVISOR_event_channel_op(EVTCHNOP_send, &send));
++}
 +
 +/*
-+ * Intel CPU features in CR4
++ * Use these to access the event channel underlying the IRQ handle returned
++ * by bind_*_to_irqhandler().
 + */
-+#define X86_CR4_VME		0x0001	/* enable vm86 extensions */
-+#define X86_CR4_PVI		0x0002	/* virtual interrupts flag enable */
-+#define X86_CR4_TSD		0x0004	/* disable time stamp at ipl 3 */
-+#define X86_CR4_DE		0x0008	/* enable debugging extensions */
-+#define X86_CR4_PSE		0x0010	/* enable page size extensions */
-+#define X86_CR4_PAE		0x0020	/* enable physical address extensions */
-+#define X86_CR4_MCE		0x0040	/* Machine check enable */
-+#define X86_CR4_PGE		0x0080	/* enable global pages */
-+#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
-+#define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
-+#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */
++void notify_remote_via_irq(int irq);
++int irq_to_evtchn_port(int irq);
 +
-+/*
-+ * Save the cr4 feature set we're using (ie
-+ * Pentium 4MB enable and PPro Global page
-+ * enable), so that any CPU's that boot up
-+ * after us can get the correct flags.
++#define PIRQ_SET_MAPPING 0x0
++#define PIRQ_CLEAR_MAPPING 0x1
++#define PIRQ_GET_MAPPING 0x3
++int pirq_mapstatus(int pirq, int action);
++int set_pirq_hw_action(int pirq, int (*action)(int pirq, int action));
++int clear_pirq_hw_action(int pirq);
++
++#define PIRQ_STARTUP 1
++#define PIRQ_SHUTDOWN 2
++#define PIRQ_ENABLE 3
++#define PIRQ_DISABLE 4
++#define PIRQ_END 5
++#define PIRQ_ACK 6
++
++#endif /* __ASM_EVTCHN_H__ */
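
Each bind_*_to_irqhandler() variant returns an IRQ-like handle (or a
negative errno) that is later fed to unbind_from_irqhandler(), which also
closes the underlying event channel. A hedged sketch binding VIRQ_DEBUG on
CPU 0 (VIRQ_DEBUG is one of the standard virtual IRQs from the Xen
interface headers):

    #include <linux/interrupt.h>
    #include <xen/evtchn.h>

    static irqreturn_t debug_intr(int irq, void *dev_id,
                                  struct pt_regs *regs)
    {
            /* ... react to the notification ... */
            return IRQ_HANDLED;
    }

    /* Sketch: bind at init time, unbind on teardown. */
    static int bind_debug_virq(void)
    {
            int irq = bind_virq_to_irqhandler(VIRQ_DEBUG, 0, debug_intr,
                                              0, "xen-debug", NULL);
            if (irq < 0)
                    return irq;     /* negative errno */

            /* ... later: unbind_from_irqhandler(irq, NULL); ... */
            return 0;
    }
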
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/features.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/features.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,20 @@
++/******************************************************************************
++ * features.h
++ *
++ * Query the features reported by Xen.
++ *
++ * Copyright (c) 2006, Ian Campbell
 + */
-+extern unsigned long mmu_cr4_features;
 +
-+static inline void set_in_cr4 (unsigned long mask)
-+{
-+	mmu_cr4_features |= mask;
-+	__asm__("movq %%cr4,%%rax\n\t"
-+		"orq %0,%%rax\n\t"
-+		"movq %%rax,%%cr4\n"
-+		: : "irg" (mask)
-+		:"ax");
-+}
++#ifndef __ASM_XEN_FEATURES_H__
++#define __ASM_XEN_FEATURES_H__
 +
-+static inline void clear_in_cr4 (unsigned long mask)
-+{
-+	mmu_cr4_features &= ~mask;
-+	__asm__("movq %%cr4,%%rax\n\t"
-+		"andq %0,%%rax\n\t"
-+		"movq %%rax,%%cr4\n"
-+		: : "irg" (~mask)
-+		:"ax");
-+}
++#include <xen/interface/version.h>
++
++extern void setup_xen_features(void);
++
++extern u8 xen_features[XENFEAT_NR_SUBMAPS * 32];
++
++#define xen_feature(flag)	(xen_features[flag])
++
++#endif /* __ASM_XEN_FEATURES_H__ */
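
Because xen_feature() is a plain array lookup it is cheap enough for hot
paths; setup_xen_features() fills xen_features[] from the hypervisor during
early boot. A one-function sketch (the XENFEAT_* indices live in the Xen
interface headers):

    #include <xen/features.h>
    #include <xen/interface/features.h>     /* XENFEAT_* indices */

    /* Sketch: skip manual p2m handling when the hypervisor already
     * auto-translates this guest's pseudo-physical addresses. */
    static inline int needs_p2m_handling(void)
    {
            return !xen_feature(XENFEAT_auto_translated_physmap);
    }
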
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/firmware.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/firmware.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,10 @@
++#ifndef __XEN_FIRMWARE_H__
++#define __XEN_FIRMWARE_H__
++
++#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
++void copy_edd(void);
++#endif
++
++void copy_edid(void);
++
++#endif /* __XEN_FIRMWARE_H__ */
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/gnttab.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/gnttab.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,163 @@
++/******************************************************************************
++ * gnttab.h
++ * 
++ * Two sets of functionality:
++ * 1. Granting foreign access to our memory reservation.
++ * 2. Accessing others' memory reservations via grant references.
++ * (i.e., mechanisms for both sender and recipient of grant references)
++ * 
++ * Copyright (c) 2004-2005, K A Fraser
++ * Copyright (c) 2005, Christopher Clark
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __ASM_GNTTAB_H__
++#define __ASM_GNTTAB_H__
 +
++#include <asm/hypervisor.h>
++#include <asm/maddr.h> /* maddr_t */
++#include <linux/mm.h>
++#include <xen/interface/grant_table.h>
++#include <xen/features.h>
++
++struct gnttab_free_callback {
++	struct gnttab_free_callback *next;
++	void (*fn)(void *);
++	void *arg;
++	u16 count;
++};
++
++int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
++				int flags);
 +
 +/*
-+ * Bus types
++ * End access through the given grant reference, iff the grant entry is no
++ * longer in use.  Return 1 if the grant entry was freed, 0 if it is still in
++ * use.
 + */
-+#define MCA_bus 0
-+#define MCA_bus__is_a_macro
++int gnttab_end_foreign_access_ref(grant_ref_t ref);
 +
 +/*
-+ * User space process size. 47bits minus one guard page.
++ * Eventually end access through the given grant reference, and once that
++ * access has been ended, free the given page too.  Access will be ended
++ * immediately iff the grant entry is not in use, otherwise it will happen
++ * some time later.  page may be 0, in which case no freeing will occur.
 + */
-+#define TASK_SIZE64	(0x800000000000UL - 4096)
++void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page);
 +
-+/* This decides where the kernel will search for a free chunk of vm
-+ * space during mmap's.
-+ */
-+#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)
++int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);
 +
-+#define TASK_SIZE 		(test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64)
-+#define TASK_SIZE_OF(child) 	((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64)
++unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
++unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
 +
-+#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE/3)
++int gnttab_query_foreign_access(grant_ref_t ref);
 +
 +/*
-+ * Size of io_bitmap.
++ * operations on reserved batches of grant references
 + */
-+#define IO_BITMAP_BITS  65536
-+#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
-+#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
-+#ifndef CONFIG_X86_NO_TSS
-+#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
-+#endif
-+#define INVALID_IO_BITMAP_OFFSET 0x8000
++int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);
 +
-+struct i387_fxsave_struct {
-+	u16	cwd;
-+	u16	swd;
-+	u16	twd;
-+	u16	fop;
-+	u64	rip;
-+	u64	rdp; 
-+	u32	mxcsr;
-+	u32	mxcsr_mask;
-+	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
-+	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 128 bytes */
-+	u32	padding[24];
-+} __attribute__ ((aligned (16)));
++void gnttab_free_grant_reference(grant_ref_t ref);
 +
-+union i387_union {
-+	struct i387_fxsave_struct	fxsave;
-+};
++void gnttab_free_grant_references(grant_ref_t head);
 +
-+#ifndef CONFIG_X86_NO_TSS
-+struct tss_struct {
-+	u32 reserved1;
-+	u64 rsp0;	
-+	u64 rsp1;
-+	u64 rsp2;
-+	u64 reserved2;
-+	u64 ist[7];
-+	u32 reserved3;
-+	u32 reserved4;
-+	u16 reserved5;
-+	u16 io_bitmap_base;
-+	/*
-+	 * The extra 1 is there because the CPU will access an
-+	 * additional byte beyond the end of the IO permission
-+	 * bitmap. The extra byte must be all 1 bits, and must
-+	 * be within the limit. Thus we have:
-+	 *
-+	 * 128 bytes, the bitmap itself, for ports 0..0x3ff
-+	 * 8 bytes, for an extra "long" of ~0UL
-+	 */
-+	unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
-+} __attribute__((packed)) ____cacheline_aligned;
++int gnttab_empty_grant_references(const grant_ref_t *pprivate_head);
 +
-+DECLARE_PER_CPU(struct tss_struct,init_tss);
-+#endif
++int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);
 +
++void gnttab_release_grant_reference(grant_ref_t *private_head,
++				    grant_ref_t release);
 +
-+extern struct cpuinfo_x86 boot_cpu_data;
-+#ifndef CONFIG_X86_NO_TSS
-+/* Save the original ist values for checking stack pointers during debugging */
-+struct orig_ist {
-+	unsigned long ist[7];
-+};
-+DECLARE_PER_CPU(struct orig_ist, orig_ist);
-+#endif
++void gnttab_request_free_callback(struct gnttab_free_callback *callback,
++				  void (*fn)(void *), void *arg, u16 count);
++void gnttab_cancel_free_callback(struct gnttab_free_callback *callback);
 +
-+#ifdef CONFIG_X86_VSMP
-+#define ARCH_MIN_TASKALIGN	(1 << INTERNODE_CACHE_SHIFT)
-+#define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
-+#else
-+#define ARCH_MIN_TASKALIGN	16
-+#define ARCH_MIN_MMSTRUCT_ALIGN	0
-+#endif
++void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
++				     unsigned long frame, int flags);
 +
-+struct thread_struct {
-+	unsigned long	rsp0;
-+	unsigned long	rsp;
-+	unsigned long 	userrsp;	/* Copy from PDA */ 
-+	unsigned long	fs;
-+	unsigned long	gs;
-+	unsigned short	es, ds, fsindex, gsindex;	
-+/* Hardware debugging registers */
-+	unsigned long	debugreg0;  
-+	unsigned long	debugreg1;  
-+	unsigned long	debugreg2;  
-+	unsigned long	debugreg3;  
-+	unsigned long	debugreg6;  
-+	unsigned long	debugreg7;  
-+/* fault info */
-+	unsigned long	cr2, trap_no, error_code;
-+/* floating point info */
-+	union i387_union	i387  __attribute__((aligned(16)));
-+/* IO permissions. the bitmap could be moved into the GDT, that would make
-+   switch faster for a limited number of ioperm using tasks. -AK */
-+	int		ioperm;
-+	unsigned long	*io_bitmap_ptr;
-+	unsigned io_bitmap_max;
-+/* cached TLS descriptors. */
-+	u64 tls_array[GDT_ENTRY_TLS_ENTRIES];
-+	unsigned int	iopl;
-+} __attribute__((aligned(16)));
++void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
++				       unsigned long pfn);
 +
-+#define INIT_THREAD  { \
-+	.rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
++int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep);
++void __gnttab_dma_map_page(struct page *page);
++static inline void __gnttab_dma_unmap_page(struct page *page)
++{
 +}
 +
-+#ifndef CONFIG_X86_NO_TSS
-+#define INIT_TSS  { \
-+	.rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
++void gnttab_reset_grant_page(struct page *page);
++
++int gnttab_suspend(void);
++int gnttab_resume(void);
++
++void *arch_gnttab_alloc_shared(unsigned long *frames);
++
++static inline void
++gnttab_set_map_op(struct gnttab_map_grant_ref *map, maddr_t addr,
++		  uint32_t flags, grant_ref_t ref, domid_t domid)
++{
++	if (flags & GNTMAP_contains_pte)
++		map->host_addr = addr;
++	else if (xen_feature(XENFEAT_auto_translated_physmap))
++		map->host_addr = __pa(addr);
++	else
++		map->host_addr = addr;
++
++	map->flags = flags;
++	map->ref = ref;
++	map->dom = domid;
 +}
-+#endif
 +
-+#define INIT_MMAP \
-+{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
++static inline void
++gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, maddr_t addr,
++		    uint32_t flags, grant_handle_t handle)
++{
++	if (flags & GNTMAP_contains_pte)
++		unmap->host_addr = addr;
++	else if (xen_feature(XENFEAT_auto_translated_physmap))
++		unmap->host_addr = __pa(addr);
++	else
++		unmap->host_addr = addr;
 +
-+#define start_thread(regs,new_rip,new_rsp) do { \
-+	asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0));	 \
-+	load_gs_index(0);							\
-+	(regs)->rip = (new_rip);						 \
-+	(regs)->rsp = (new_rsp);						 \
-+	write_pda(oldrsp, (new_rsp));						 \
-+	(regs)->cs = __USER_CS;							 \
-+	(regs)->ss = __USER_DS;							 \
-+	(regs)->eflags = 0x200;							 \
-+	set_fs(USER_DS);							 \
-+} while(0) 
++	unmap->handle = handle;
++	unmap->dev_bus_addr = 0;
++}
 +
-+#define get_debugreg(var, register)				\
-+	var = HYPERVISOR_get_debugreg(register)
-+#define set_debugreg(value, register)			\
-+	HYPERVISOR_set_debugreg(register, value)
++static inline void
++gnttab_set_replace_op(struct gnttab_unmap_and_replace *unmap, maddr_t addr,
++		      maddr_t new_addr, grant_handle_t handle)
++{
++	if (xen_feature(XENFEAT_auto_translated_physmap)) {
++		unmap->host_addr = __pa(addr);
++		unmap->new_addr = __pa(new_addr);
++	} else {
++		unmap->host_addr = addr;
++		unmap->new_addr = new_addr;
++	}
 +
-+struct task_struct;
-+struct mm_struct;
++	unmap->handle = handle;
++}
 +
-+/* Free all resources held by a thread. */
-+extern void release_thread(struct task_struct *);
++#endif /* __ASM_GNTTAB_H__ */
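
The sender-side calls pair up: gnttab_grant_foreign_access() hands a frame
to a peer domain and returns a grant reference, and
gnttab_end_foreign_access() revokes it, freeing the page once the peer has
let go. A hedged sketch of a frontend sharing one page (virt_to_mfn() is
assumed from the 2.6.18-xen asm/maddr.h):

    #include <linux/mm.h>
    #include <xen/gnttab.h>

    /* Sketch: share one zeroed page read-only with domain 'otherend'. */
    static int share_one_page(domid_t otherend)
    {
            unsigned long page = get_zeroed_page(GFP_KERNEL);
            int ref;

            if (!page)
                    return -ENOMEM;

            ref = gnttab_grant_foreign_access(otherend, virt_to_mfn(page),
                                              GTF_readonly);
            if (ref < 0) {
                    free_page(page);
                    return ref;
            }

            /* ... advertise 'ref' to the peer, e.g. via xenstore ... */

            gnttab_end_foreign_access(ref, page);   /* frees page once unused */
            return 0;
    }
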
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/hvm.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/hvm.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,23 @@
++/* Simple wrappers around HVM functions */
++#ifndef XEN_HVM_H__
++#define XEN_HVM_H__
 +
-+/* Prepare to copy thread state - unlazy all lazy status */
-+extern void prepare_to_copy(struct task_struct *tsk);
++#include <xen/interface/hvm/params.h>
 +
-+/*
-+ * create a kernel thread without removing it from tasklists
-+ */
-+extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
++static inline unsigned long hvm_get_parameter(int idx)
++{
++	struct xen_hvm_param xhv;
++	int r;
 +
-+/*
-+ * Return saved PC of a blocked thread.
-+ * What is this good for? it will be always the scheduler or ret_from_fork.
-+ */
-+#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))
++	xhv.domid = DOMID_SELF;
++	xhv.index = idx;
++	r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
++	if (r < 0) {
++		printk(KERN_ERR "cannot get hvm parameter %d: %d.\n",
++		       idx, r);
++		return 0;
++	}
++	return xhv.value;
++}
 +
-+extern unsigned long get_wchan(struct task_struct *p);
-+#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1)
-+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->rip)
-+#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
++#endif /* XEN_HVM_H__ */
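
Callers pass one of the HVM_PARAM_* indices from
xen/interface/hvm/params.h; note that the wrapper logs errors and collapses
them to 0, so 0 has to be read as "unset". A hedged example:

    #include <linux/errno.h>
    #include <xen/hvm.h>

    /* Sketch: fetch the event-channel callback IRQ parameter. */
    static int probe_callback_irq(void)
    {
            unsigned long v = hvm_get_parameter(HVM_PARAM_CALLBACK_IRQ);

            return v ? (int)v : -ENODEV;    /* 0 == unset or error */
    }
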
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/hypercall.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/hypercall.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,30 @@
++#ifndef __XEN_HYPERCALL_H__
++#define __XEN_HYPERCALL_H__
 +
++#include <asm/hypercall.h>
 +
-+struct microcode_header {
-+	unsigned int hdrver;
-+	unsigned int rev;
-+	unsigned int date;
-+	unsigned int sig;
-+	unsigned int cksum;
-+	unsigned int ldrver;
-+	unsigned int pf;
-+	unsigned int datasize;
-+	unsigned int totalsize;
-+	unsigned int reserved[3];
-+};
++static inline int __must_check
++HYPERVISOR_multicall_check(
++	multicall_entry_t *call_list, unsigned int nr_calls,
++	const unsigned long *rc_list)
++{
++	int rc = HYPERVISOR_multicall(call_list, nr_calls);
 +
-+struct microcode {
-+	struct microcode_header hdr;
-+	unsigned int bits[0];
-+};
++	if (unlikely(rc < 0))
++		return rc;
++	BUG_ON(rc);
++	BUG_ON((int)nr_calls < 0);
 +
-+typedef struct microcode microcode_t;
-+typedef struct microcode_header microcode_header_t;
++	for ( ; nr_calls > 0; --nr_calls, ++call_list)
++		if (unlikely(call_list->result != (rc_list ? *rc_list++ : 0)))
++			return nr_calls;
 +
-+/* microcode format is extended from prescott processors */
-+struct extended_signature {
-+	unsigned int sig;
-+	unsigned int pf;
-+	unsigned int cksum;
-+};
++	return 0;
++}
 +
-+struct extended_sigtable {
-+	unsigned int count;
-+	unsigned int cksum;
-+	unsigned int reserved[3];
-+	struct extended_signature sigs[0];
-+};
++/* A construct to ignore the return value of hypercall wrappers in a few
++ * exceptional cases (simply casting the function result to void doesn't
++ * avoid the compiler warning): */
++#define VOID(expr) ((void)((expr)?:0))
 +
++#endif /* __XEN_HYPERCALL_H__ */
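
HYPERVISOR_multicall_check() enters Xen once for a whole batch and returns
0 only when every entry produced its expected result (0, or the matching
rc_list value); a positive return is the number of entries left unverified.
A hedged sketch batching two trivial hypercalls (the ops are purely
illustrative):

    #include <xen/hypercall.h>

    /* Sketch: two fpu_taskswitch calls in one batch; with a NULL
     * rc_list every entry must return 0. */
    static int batch_example(void)
    {
            multicall_entry_t mc[2];

            mc[0].op = __HYPERVISOR_fpu_taskswitch;
            mc[0].args[0] = 1;
            mc[1].op = __HYPERVISOR_fpu_taskswitch;
            mc[1].args[0] = 0;

            return HYPERVISOR_multicall_check(mc, 2, NULL);
    }
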
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/hypervisor_sysfs.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/hypervisor_sysfs.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,30 @@
++/*
++ *  copyright (c) 2006 IBM Corporation
++ *  Authored by: Mike D. Day <ncmike at us.ibm.com>
++ *
++ *  This program is free software; you can redistribute it and/or modify
++ *  it under the terms of the GNU General Public License version 2 as
++ *  published by the Free Software Foundation.
++ */
 +
-+#define ASM_NOP1 K8_NOP1
-+#define ASM_NOP2 K8_NOP2
-+#define ASM_NOP3 K8_NOP3
-+#define ASM_NOP4 K8_NOP4
-+#define ASM_NOP5 K8_NOP5
-+#define ASM_NOP6 K8_NOP6
-+#define ASM_NOP7 K8_NOP7
-+#define ASM_NOP8 K8_NOP8
++#ifndef _HYP_SYSFS_H_
++#define _HYP_SYSFS_H_
 +
-+/* Opteron nops */
-+#define K8_NOP1 ".byte 0x90\n"
-+#define K8_NOP2	".byte 0x66,0x90\n" 
-+#define K8_NOP3	".byte 0x66,0x66,0x90\n" 
-+#define K8_NOP4	".byte 0x66,0x66,0x66,0x90\n" 
-+#define K8_NOP5	K8_NOP3 K8_NOP2 
-+#define K8_NOP6	K8_NOP3 K8_NOP3
-+#define K8_NOP7	K8_NOP4 K8_NOP3
-+#define K8_NOP8	K8_NOP4 K8_NOP4
++#include <linux/kobject.h>
++#include <linux/sysfs.h>
 +
-+#define ASM_NOP_MAX 8
++#define HYPERVISOR_ATTR_RO(_name) \
++static struct hyp_sysfs_attr  _name##_attr = __ATTR_RO(_name)
 +
-+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-+static inline void rep_nop(void)
-+{
-+	__asm__ __volatile__("rep;nop": : :"memory");
-+}
++#define HYPERVISOR_ATTR_RW(_name) \
++static struct hyp_sysfs_attr _name##_attr = \
++	__ATTR(_name, 0644, _name##_show, _name##_store)
 +
-+/* Stop speculative execution */
-+static inline void sync_core(void)
-+{ 
-+	int tmp;
-+	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
-+} 
++struct hyp_sysfs_attr {
++	struct attribute attr;
++	ssize_t (*show)(struct hyp_sysfs_attr *, char *);
++	ssize_t (*store)(struct hyp_sysfs_attr *, const char *, size_t);
++	void *hyp_attr_data;
++};
 +
-+#define cpu_has_fpu 1
++#endif /* _HYP_SYSFS_H_ */
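
HYPERVISOR_ATTR_RO(name) expands to a static struct hyp_sysfs_attr called
name_attr, wired to a name_show() you must supply; the hypervisor sysfs
code then surfaces it under /sys/hypervisor. A hedged sketch of a read-only
attribute ('flavour' is invented):

    #include <linux/kernel.h>
    #include <xen/hypervisor_sysfs.h>

    static ssize_t flavour_show(struct hyp_sysfs_attr *attr, char *buffer)
    {
            return sprintf(buffer, "xen\n");
    }

    /* Defines 'flavour_attr' via __ATTR_RO(flavour). */
    HYPERVISOR_ATTR_RO(flavour);
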
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/COPYING
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/COPYING	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,38 @@
++XEN NOTICE
++==========
 +
-+#define ARCH_HAS_PREFETCH
-+static inline void prefetch(void *x) 
-+{ 
-+	asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
-+} 
++This copyright applies to all files within this subdirectory and its
++subdirectories:
++  include/public/*.h
++  include/public/hvm/*.h
++  include/public/io/*.h
 +
-+#define ARCH_HAS_PREFETCHW 1
-+static inline void prefetchw(void *x) 
-+{ 
-+	alternative_input("prefetcht0 (%1)",
-+			  "prefetchw (%1)",
-+			  X86_FEATURE_3DNOW,
-+			  "r" (x));
-+} 
++The intention is that these files can be freely copied into the source
++tree of an operating system when porting that OS to run on Xen. Doing
++so does *not* cause the OS to become subject to the terms of the GPL.
 +
-+#define ARCH_HAS_SPINLOCK_PREFETCH 1
++All other files in the Xen source distribution are covered by version
++2 of the GNU General Public License except where explicitly stated
++otherwise within individual source files.
 +
-+#define spin_lock_prefetch(x)  prefetchw(x)
++ -- Keir Fraser (on behalf of the Xen team)
 +
-+#define cpu_relax()   rep_nop()
++=====================================================================
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to
++deal in the Software without restriction, including without limitation the
++rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++sell copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
 +
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 
++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 
++FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 
++AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 
++LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 
++FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 
++DEALINGS IN THE SOFTWARE.
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/acm.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/acm.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,228 @@
 +/*
-+ *      NSC/Cyrix CPU configuration register indexes
++ * acm.h: Xen access control module interface definitions
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Reiner Sailer <sailer at watson.ibm.com>
++ * Copyright (c) 2005, International Business Machines Corporation.
 + */
-+#define CX86_CCR0 0xc0
-+#define CX86_CCR1 0xc1
-+#define CX86_CCR2 0xc2
-+#define CX86_CCR3 0xc3
-+#define CX86_CCR4 0xe8
-+#define CX86_CCR5 0xe9
-+#define CX86_CCR6 0xea
-+#define CX86_CCR7 0xeb
-+#define CX86_DIR0 0xfe
-+#define CX86_DIR1 0xff
-+#define CX86_ARR_BASE 0xc4
-+#define CX86_RCR_BASE 0xdc
 +
-+/*
-+ *      NSC/Cyrix CPU indexed register access macros
++#ifndef _XEN_PUBLIC_ACM_H
++#define _XEN_PUBLIC_ACM_H
++
++#include "xen.h"
++
++/* If ACM_DEBUG is defined, all hooks should
++ * print a short trace message (comment it out
++ * when not in testing mode).
 + */
++/* #define ACM_DEBUG */
 +
-+#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
++#ifdef ACM_DEBUG
++#  define printkd(fmt, args...) printk(fmt,## args)
++#else
++#  define printkd(fmt, args...)
++#endif
 +
-+#define setCx86(reg, data) do { \
-+	outb((reg), 0x22); \
-+	outb((data), 0x23); \
-+} while (0)
++/* default ssid reference value if not supplied */
++#define ACM_DEFAULT_SSID  0x0
++#define ACM_DEFAULT_LOCAL_SSID  0x0
 +
-+static inline void serialize_cpu(void)
-+{
-+	__asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx");
-+}
++/* Internal ACM ERROR types */
++#define ACM_OK     0
++#define ACM_UNDEF   -1
++#define ACM_INIT_SSID_ERROR  -2
++#define ACM_INIT_SOID_ERROR  -3
++#define ACM_ERROR          -4
 +
-+static inline void __monitor(const void *eax, unsigned long ecx,
-+		unsigned long edx)
-+{
-+	/* "monitor %eax,%ecx,%edx;" */
-+	asm volatile(
-+		".byte 0x0f,0x01,0xc8;"
-+		: :"a" (eax), "c" (ecx), "d"(edx));
-+}
++/* External ACCESS DECISIONS */
++#define ACM_ACCESS_PERMITTED        0
++#define ACM_ACCESS_DENIED           -111
++#define ACM_NULL_POINTER_ERROR      -200
 +
-+static inline void __mwait(unsigned long eax, unsigned long ecx)
-+{
-+	/* "mwait %eax,%ecx;" */
-+	asm volatile(
-+		".byte 0x0f,0x01,0xc9;"
-+		: :"a" (eax), "c" (ecx));
-+}
++/*
++   Error codes reported when trying to test for a new policy.
++   These error codes are reported in an array of tuples where
++   each error code is followed by a parameter describing the error
++   more closely, such as a domain id.
++*/
++#define ACM_EVTCHN_SHARING_VIOLATION       0x100
++#define ACM_GNTTAB_SHARING_VIOLATION       0x101
++#define ACM_DOMAIN_LOOKUP                  0x102
++#define ACM_CHWALL_CONFLICT                0x103
++#define ACM_SSIDREF_IN_USE                 0x104
 +
-+#define stack_current() \
-+({								\
-+	struct thread_info *ti;					\
-+	asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
-+	ti->task;					\
-+})
 +
-+#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
++/* primary policy in lower 4 bits */
++#define ACM_NULL_POLICY 0
++#define ACM_CHINESE_WALL_POLICY 1
++#define ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY 2
++#define ACM_POLICY_UNDEFINED 15
 +
-+extern unsigned long boot_option_idle_override;
-+/* Boot loader type from the setup header */
-+extern int bootloader_type;
++/* combinations have secondary policy component in higher 4bit */
++#define ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY \
++    ((ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY << 4) | ACM_CHINESE_WALL_POLICY)
 +
-+#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
++/* policy: */
++#define ACM_POLICY_NAME(X) \
++ ((X) == (ACM_NULL_POLICY)) ? "NULL" :                        \
++    ((X) == (ACM_CHINESE_WALL_POLICY)) ? "CHINESE WALL" :        \
++    ((X) == (ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "SIMPLE TYPE ENFORCEMENT" : \
++    ((X) == (ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "CHINESE WALL AND SIMPLE TYPE ENFORCEMENT" : \
++     "UNDEFINED"
 +
-+#endif /* __ASM_X86_64_PROCESSOR_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/ptrace.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/ptrace.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/ptrace.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/ptrace.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,127 @@
-+#ifndef _X86_64_PTRACE_H
-+#define _X86_64_PTRACE_H
++/* the following policy versions must be increased
++ * whenever the interpretation of the related
++ * policy's data structure changes
++ */
++#define ACM_POLICY_VERSION 3
++#define ACM_CHWALL_VERSION 1
++#define ACM_STE_VERSION  1
 +
-+#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS) 
-+#define R15 0
-+#define R14 8
-+#define R13 16
-+#define R12 24
-+#define RBP 32
-+#define RBX 40
-+/* arguments: interrupts/non tracing syscalls only save upto here*/
-+#define R11 48
-+#define R10 56	
-+#define R9 64
-+#define R8 72
-+#define RAX 80
-+#define RCX 88
-+#define RDX 96
-+#define RSI 104
-+#define RDI 112
-+#define ORIG_RAX 120       /* = ERROR */ 
-+/* end of arguments */ 	
-+/* cpu exception frame or undefined in case of fast syscall. */
-+#define RIP 128
-+#define CS 136
-+#define EFLAGS 144
-+#define RSP 152
-+#define SS 160
-+#define ARGOFFSET R11
-+#endif /* __ASSEMBLY__ */
++/* defines a ssid reference used by xen */
++typedef uint32_t ssidref_t;
 +
-+/* top of stack page */ 
-+#define FRAME_SIZE 168
++/* hooks that are known to domains */
++#define ACMHOOK_none    0
++#define ACMHOOK_sharing 1
 +
-+#define PTRACE_OLDSETOPTIONS         21
++/* -------security policy relevant type definitions-------- */
 +
-+#ifndef __ASSEMBLY__ 
++/* type identifier; compares to "equal" or "not equal" */
++typedef uint16_t domaintype_t;
 +
-+struct pt_regs {
-+	unsigned long r15;
-+	unsigned long r14;
-+	unsigned long r13;
-+	unsigned long r12;
-+	unsigned long rbp;
-+	unsigned long rbx;
-+/* arguments: non interrupts/non tracing syscalls only save upto here*/
-+ 	unsigned long r11;
-+	unsigned long r10;	
-+	unsigned long r9;
-+	unsigned long r8;
-+	unsigned long rax;
-+	unsigned long rcx;
-+	unsigned long rdx;
-+	unsigned long rsi;
-+	unsigned long rdi;
-+	unsigned long orig_rax;
-+/* end of arguments */ 	
-+/* cpu exception frame or undefined */
-+	unsigned long rip;
-+	unsigned long cs;
-+	unsigned long eflags; 
-+	unsigned long rsp; 
-+	unsigned long ss;
-+/* top of stack page */ 
-+};
++/* CHINESE WALL POLICY DATA STRUCTURES
++ *
++ * current accumulated conflict type set:
++ * When a domain is started and has a type that is in
++ * a conflict set, the conflicting types are incremented in
++ * the aggregate set. When a domain is destroyed, the
++ * types conflicting with its type are decremented.
++ * If a domain has multiple types, this procedure is
++ * applied to all of those types.
++ *
++ * conflict_aggregate_set[i] holds the number of
++ *   running domains that have a conflict with type i.
++ *
++ * running_types[i] holds the number of running domains
++ *        that include type i in their ssidref-referenced type set
++ *
++ * conflict_sets[i][j] is "0" if type j has no conflict
++ *    with type i and is "1" otherwise.
++ */
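++
++/* A minimal sketch (illustration only, not part of this interface) of
++ * the bookkeeping described above. The array names follow the comment;
++ * chwall_max_types and the arrays themselves are assumed to be
++ * provided by the enforcing module:
++ *
++ *     void chwall_domain_started(domaintype_t *types, int ntypes)
++ *     {
++ *         int i, j;
++ *         for (i = 0; i < ntypes; i++) {
++ *             running_types[types[i]]++;
++ *             for (j = 0; j < chwall_max_types; j++)
++ *                 if (conflict_sets[types[i]][j])
++ *                     conflict_aggregate_set[j]++;
++ *         }
++ *     }
++ *
++ * Domain destruction would decrement the same counters symmetrically.
++ */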
++/* high-16 = version, low-16 = check magic */
++#define ACM_MAGIC  0x0001debc
 +
-+#endif
++/* Each offset is given in bytes from the start of the
++ * struct it is part of. */
 +
-+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
-+#define PTRACE_GETREGS            12
-+#define PTRACE_SETREGS            13
-+#define PTRACE_GETFPREGS          14
-+#define PTRACE_SETFPREGS          15
-+#define PTRACE_GETFPXREGS         18
-+#define PTRACE_SETFPXREGS         19
++/* V3 of the policy buffer added a version structure */
++struct acm_policy_version
++{
++    uint32_t major;
++    uint32_t minor;
++};
 +
-+/* only useful for access 32bit programs */
-+#define PTRACE_GET_THREAD_AREA    25
-+#define PTRACE_SET_THREAD_AREA    26
 +
-+#define PTRACE_ARCH_PRCTL	  30	/* arch_prctl for child */
++/* each buffer consists of all policy information for
++ * the respective policy given in the policy code
++ *
++ * acm_policy_buffer, acm_chwall_policy_buffer,
++ * and acm_ste_policy_buffer need to stay 32-bit aligned
++ * because binary policies are also created with external
++ * tools that assume packed representations (e.g. the Java tool)
++ */
++struct acm_policy_buffer {
++    uint32_t policy_version; /* ACM_POLICY_VERSION */
++    uint32_t magic;
++    uint32_t len;
++    uint32_t policy_reference_offset;
++    uint32_t primary_policy_code;
++    uint32_t primary_buffer_offset;
++    uint32_t secondary_policy_code;
++    uint32_t secondary_buffer_offset;
++    struct acm_policy_version xml_pol_version; /* add in V3 */
++};
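++
++/* Illustration only (not in the original header): since the fields
++ * above are offsets from the start of the buffer, a consumer can
++ * locate the primary policy like this, assuming buf points at a
++ * complete, validated acm_policy_buffer with native-endian fields:
++ *
++ *     struct acm_policy_buffer *hdr = (struct acm_policy_buffer *)buf;
++ *     struct acm_chwall_policy_buffer *chwall =
++ *         (struct acm_chwall_policy_buffer *)
++ *             ((uint8_t *)buf + hdr->primary_buffer_offset);
++ */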
 +
-+#if defined(__KERNEL__) && !defined(__ASSEMBLY__) 
-+#define user_mode(regs) (!!((regs)->cs & 3))
-+#define user_mode_vm(regs) user_mode(regs)
-+#define instruction_pointer(regs) ((regs)->rip)
-+#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
-+extern unsigned long profile_pc(struct pt_regs *regs);
-+#else
-+#define profile_pc(regs) instruction_pointer(regs)
-+#endif
 +
-+#include <linux/compiler.h>
++struct acm_policy_reference_buffer {
++    uint32_t len;
++};
 +
-+void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
++struct acm_chwall_policy_buffer {
++    uint32_t policy_version; /* ACM_CHWALL_VERSION */
++    uint32_t policy_code;
++    uint32_t chwall_max_types;
++    uint32_t chwall_max_ssidrefs;
++    uint32_t chwall_max_conflictsets;
++    uint32_t chwall_ssid_offset;
++    uint32_t chwall_conflict_sets_offset;
++    uint32_t chwall_running_types_offset;
++    uint32_t chwall_conflict_aggregate_offset;
++};
 +
-+struct task_struct;
++struct acm_ste_policy_buffer {
++    uint32_t policy_version; /* ACM_STE_VERSION */
++    uint32_t policy_code;
++    uint32_t ste_max_types;
++    uint32_t ste_max_ssidrefs;
++    uint32_t ste_ssid_offset;
++};
 +
-+extern unsigned long
-+convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs);
++struct acm_stats_buffer {
++    uint32_t magic;
++    uint32_t len;
++    uint32_t primary_policy_code;
++    uint32_t primary_stats_offset;
++    uint32_t secondary_policy_code;
++    uint32_t secondary_stats_offset;
++};
 +
-+enum {
-+        EF_CF   = 0x00000001,
-+        EF_PF   = 0x00000004,
-+        EF_AF   = 0x00000010,
-+        EF_ZF   = 0x00000040,
-+        EF_SF   = 0x00000080,
-+        EF_TF   = 0x00000100,
-+        EF_IE   = 0x00000200,
-+        EF_DF   = 0x00000400,
-+        EF_OF   = 0x00000800,
-+        EF_IOPL = 0x00003000,
-+        EF_IOPL_RING0 = 0x00000000,
-+        EF_IOPL_RING1 = 0x00001000,
-+        EF_IOPL_RING2 = 0x00002000,
-+        EF_NT   = 0x00004000,   /* nested task */
-+        EF_RF   = 0x00010000,   /* resume */
-+        EF_VM   = 0x00020000,   /* virtual mode */
-+        EF_AC   = 0x00040000,   /* alignment */
-+        EF_VIF  = 0x00080000,   /* virtual interrupt */
-+        EF_VIP  = 0x00100000,   /* virtual interrupt pending */
-+        EF_ID   = 0x00200000,   /* id */
++struct acm_ste_stats_buffer {
++    uint32_t ec_eval_count;
++    uint32_t gt_eval_count;
++    uint32_t ec_denied_count;
++    uint32_t gt_denied_count;
++    uint32_t ec_cachehit_count;
++    uint32_t gt_cachehit_count;
 +};
 +
-+#endif
++struct acm_ssid_buffer {
++    uint32_t len;
++    ssidref_t ssidref;
++    uint32_t policy_reference_offset;
++    uint32_t primary_policy_code;
++    uint32_t primary_max_types;
++    uint32_t primary_types_offset;
++    uint32_t secondary_policy_code;
++    uint32_t secondary_max_types;
++    uint32_t secondary_types_offset;
++};
 +
 +#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/smp.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/smp.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/smp.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/smp.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,150 @@
-+#ifndef __ASM_SMP_H
-+#define __ASM_SMP_H
 +
 +/*
-+ * We need the APIC definitions automatically as part of 'smp.h'
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/acm_ops.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/acm_ops.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,159 @@
++/*
++ * acm_ops.h: Xen access control module hypervisor commands
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Reiner Sailer <sailer at watson.ibm.com>
++ * Copyright (c) 2005,2006 International Business Machines Corporation.
 + */
-+#ifndef __ASSEMBLY__
-+#include <linux/threads.h>
-+#include <linux/cpumask.h>
-+#include <linux/bitops.h>
-+extern int disable_apic;
-+#endif
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+#ifndef __ASSEMBLY__
-+#include <asm/fixmap.h>
-+#include <asm/mpspec.h>
-+#ifdef CONFIG_X86_IO_APIC
-+#include <asm/io_apic.h>
-+#endif
-+#include <asm/apic.h>
-+#include <asm/thread_info.h>
-+#endif
-+#endif
 +
-+#ifdef CONFIG_SMP
-+#ifndef ASSEMBLY
++#ifndef __XEN_PUBLIC_ACM_OPS_H__
++#define __XEN_PUBLIC_ACM_OPS_H__
 +
-+#include <asm/pda.h>
++#include "xen.h"
++#include "acm.h"
 +
-+struct pt_regs;
++/*
++ * Make sure you increment the interface version whenever you modify this file!
++ * This ensures that old versions of acm tools stop working in a
++ * well-defined way (rather than crashing the machine, for instance).
++ */
++#define ACM_INTERFACE_VERSION   0xAAAA000A
 +
-+extern cpumask_t cpu_present_mask;
-+extern cpumask_t cpu_possible_map;
-+extern cpumask_t cpu_online_map;
-+extern cpumask_t cpu_initialized;
++/************************************************************************/
 +
 +/*
-+ * Private routines/data
++ * Prototype for this hypercall is:
++ *  int acm_op(int cmd, void *args)
++ * @cmd  == ACMOP_??? (access control module operation).
++ * @args == Operation-specific extra arguments (NULL if none).
 + */
-+ 
-+extern void smp_alloc_memory(void);
-+extern volatile unsigned long smp_invalidate_needed;
-+extern int pic_mode;
-+extern void lock_ipi_call_lock(void);
-+extern void unlock_ipi_call_lock(void);
-+extern int smp_num_siblings;
-+extern void smp_send_reschedule(int cpu);
-+void smp_stop_cpu(void);
-+extern int smp_call_function_single(int cpuid, void (*func) (void *info),
-+				void *info, int retry, int wait);
 +
-+extern cpumask_t cpu_sibling_map[NR_CPUS];
-+extern cpumask_t cpu_core_map[NR_CPUS];
-+extern u8 cpu_llc_id[NR_CPUS];
 +
-+#define SMP_TRAMPOLINE_BASE 0x6000
++#define ACMOP_setpolicy         1
++struct acm_setpolicy {
++    /* IN */
++    XEN_GUEST_HANDLE_64(void) pushcache;
++    uint32_t pushcache_size;
++};
 +
-+/*
-+ * On x86 all CPUs are mapped 1:1 to the APIC space.
-+ * This simplifies scheduling and IPI sending and
-+ * compresses data structures.
-+ */
 +
-+static inline int num_booting_cpus(void)
-+{
-+	return cpus_weight(cpu_possible_map);
-+}
++#define ACMOP_getpolicy         2
++struct acm_getpolicy {
++    /* IN */
++    XEN_GUEST_HANDLE_64(void) pullcache;
++    uint32_t pullcache_size;
++};
 +
-+#define raw_smp_processor_id() read_pda(cpunumber)
 +
-+#ifdef CONFIG_X86_LOCAL_APIC
-+static inline int hard_smp_processor_id(void)
-+{
-+	/* we don't want to mark this access volatile - bad code generation */
-+	return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
-+}
-+#endif
++#define ACMOP_dumpstats         3
++struct acm_dumpstats {
++    /* IN */
++    XEN_GUEST_HANDLE_64(void) pullcache;
++    uint32_t pullcache_size;
++};
 +
-+extern int safe_smp_processor_id(void);
-+extern int __cpu_disable(void);
-+extern void __cpu_die(unsigned int cpu);
-+extern void prefill_possible_map(void);
-+extern unsigned num_processors;
-+extern unsigned disabled_cpus;
 +
-+#endif /* !ASSEMBLY */
++#define ACMOP_getssid           4
++#define ACM_GETBY_ssidref  1
++#define ACM_GETBY_domainid 2
++struct acm_getssid {
++    /* IN */
++    uint32_t get_ssid_by; /* ACM_GETBY_* */
++    union {
++        domaintype_t domainid;
++        ssidref_t    ssidref;
++    } id;
++    XEN_GUEST_HANDLE_64(void) ssidbuf;
++    uint32_t ssidbuf_size;
++};
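++
++/* Hypothetical usage sketch (not part of this header), following the
++ * acm_op() prototype documented above; the ssid buffer size is an
++ * arbitrary example value:
++ *
++ *     struct acm_getssid arg;
++ *     uint8_t buf[4096];
++ *
++ *     arg.get_ssid_by = ACM_GETBY_domainid;
++ *     arg.id.domainid = 0;
++ *     set_xen_guest_handle(arg.ssidbuf, buf);
++ *     arg.ssidbuf_size = sizeof(buf);
++ *     if (acm_op(ACMOP_getssid, &arg) == 0)
++ *         ... ;  // buf now holds a struct acm_ssid_buffer
++ */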
 +
-+#define NO_PROC_ID		0xFF		/* No processor magic marker */
++#define ACMOP_getdecision      5
++struct acm_getdecision {
++    /* IN */
++    uint32_t get_decision_by1; /* ACM_GETBY_* */
++    uint32_t get_decision_by2; /* ACM_GETBY_* */
++    union {
++        domaintype_t domainid;
++        ssidref_t    ssidref;
++    } id1;
++    union {
++        domaintype_t domainid;
++        ssidref_t    ssidref;
++    } id2;
++    uint32_t hook;
++    /* OUT */
++    uint32_t acm_decision;
++};
 +
-+#endif
 +
-+#ifndef ASSEMBLY
-+/*
-+ * Some lowlevel functions might want to know about
-+ * the real APIC ID <-> CPU # mapping.
-+ */
-+extern u8 x86_cpu_to_apicid[NR_CPUS];	/* physical ID */
-+extern u8 x86_cpu_to_log_apicid[NR_CPUS];
-+extern u8 bios_cpu_apicid[];
++#define ACMOP_chgpolicy        6
++struct acm_change_policy {
++    /* IN */
++    XEN_GUEST_HANDLE_64(void) policy_pushcache;
++    uint32_t policy_pushcache_size;
++    XEN_GUEST_HANDLE_64(void) del_array;
++    uint32_t delarray_size;
++    XEN_GUEST_HANDLE_64(void) chg_array;
++    uint32_t chgarray_size;
++    /* OUT */
++    /* array with error code */
++    XEN_GUEST_HANDLE_64(void) err_array;
++    uint32_t errarray_size;
++};
 +
-+#ifdef CONFIG_X86_LOCAL_APIC
-+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
-+{
-+	return cpus_addr(cpumask)[0];
-+}
++#define ACMOP_relabeldoms       7
++struct acm_relabel_doms {
++    /* IN */
++    XEN_GUEST_HANDLE_64(void) relabel_map;
++    uint32_t relabel_map_size;
++    /* OUT */
++    XEN_GUEST_HANDLE_64(void) err_array;
++    uint32_t errarray_size;
++};
 +
-+static inline int cpu_present_to_apicid(int mps_cpu)
-+{
-+	if (mps_cpu < NR_CPUS)
-+		return (int)bios_cpu_apicid[mps_cpu];
-+	else
-+		return BAD_APICID;
-+}
-+#endif
++/* future interface to Xen */
++struct xen_acmctl {
++    uint32_t cmd;
++    uint32_t interface_version;
++    union {
++        struct acm_setpolicy     setpolicy;
++        struct acm_getpolicy     getpolicy;
++        struct acm_dumpstats     dumpstats;
++        struct acm_getssid       getssid;
++        struct acm_getdecision   getdecision;
++        struct acm_change_policy change_policy;
++        struct acm_relabel_doms  relabel_doms;
++    } u;
++};
 +
-+#endif /* !ASSEMBLY */
++typedef struct xen_acmctl xen_acmctl_t;
++DEFINE_XEN_GUEST_HANDLE(xen_acmctl_t);
 +
-+#ifndef CONFIG_SMP
-+#define stack_smp_processor_id() 0
-+#define safe_smp_processor_id() 0
-+#define cpu_logical_map(x) (x)
-+#else
-+#include <asm/thread_info.h>
-+#define stack_smp_processor_id() \
-+({ 								\
-+	struct thread_info *ti;					\
-+	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
-+	ti->cpu;						\
-+})
-+#endif
++#endif /* __XEN_PUBLIC_ACM_OPS_H__ */
 +
-+#ifndef __ASSEMBLY__
-+#ifdef CONFIG_X86_LOCAL_APIC
-+static __inline int logical_smp_processor_id(void)
-+{
-+	/* we don't want to mark this access volatile - bad code generation */
-+	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
-+}
-+#endif
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/arch-ia64.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/arch-ia64.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,621 @@
++/******************************************************************************
++ * arch-ia64/hypervisor-if.h
++ * 
++ * Guest OS interface to IA64 Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "xen.h"
++
++#ifndef __HYPERVISOR_IF_IA64_H__
++#define __HYPERVISOR_IF_IA64_H__
++
++#if !defined(__GNUC__) || defined(__STRICT_ANSI__)
++#error "Anonymous structs/unions are a GNU extension."
 +#endif
 +
-+#ifdef CONFIG_SMP
-+#define cpu_physical_id(cpu)		x86_cpu_to_apicid[cpu]
++/* Structural guest handles introduced in 0x00030201. */
++#if __XEN_INTERFACE_VERSION__ >= 0x00030201
++#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
++    typedef struct { type *p; } __guest_handle_ ## name
 +#else
-+#define cpu_physical_id(cpu)		boot_cpu_id
++#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
++    typedef type * __guest_handle_ ## name
 +#endif
 +
++#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
++    ___DEFINE_XEN_GUEST_HANDLE(name, type);   \
++    ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
++
++#define DEFINE_XEN_GUEST_HANDLE(name)   __DEFINE_XEN_GUEST_HANDLE(name, name)
++#define XEN_GUEST_HANDLE(name)          __guest_handle_ ## name
++#define XEN_GUEST_HANDLE_64(name)       XEN_GUEST_HANDLE(name)
++#define uint64_aligned_t                uint64_t
++#define set_xen_guest_handle(hnd, val)  do { (hnd).p = val; } while (0)
++#ifdef __XEN_TOOLS__
++#define get_xen_guest_handle(val, hnd)  do { val = (hnd).p; } while (0)
 +#endif
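++
++/* Illustrative note (not in the original header): with interface
++ * versions >= 0x00030201, DEFINE_XEN_GUEST_HANDLE(foo_t) produces a
++ * typedef'd struct { foo_t *p; } named __guest_handle_foo_t, so
++ * handles are set through the helper rather than by assignment:
++ *
++ *     foo_t obj;                       // hypothetical payload type
++ *     XEN_GUEST_HANDLE(foo_t) hnd;
++ *     set_xen_guest_handle(hnd, &obj);
++ */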
 +
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/synch_bitops.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/synch_bitops.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/synch_bitops.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/synch_bitops.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,2 @@
++#ifndef __ASSEMBLY__
++typedef unsigned long xen_pfn_t;
++#define PRI_xen_pfn "lx"
++#endif
 +
-+#include <asm-i386/mach-xen/asm/synch_bitops.h>
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/system.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/system.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/system.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/system.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,262 @@
-+#ifndef __ASM_SYSTEM_H
-+#define __ASM_SYSTEM_H
++/* Arch specific VIRQs definition */
++#define VIRQ_ITC        VIRQ_ARCH_0 /* V. Virtual itc timer */
++#define VIRQ_MCA_CMC    VIRQ_ARCH_1 /* MCA cmc interrupt */
++#define VIRQ_MCA_CPE    VIRQ_ARCH_2 /* MCA cpe interrupt */
 +
-+#include <linux/kernel.h>
-+#include <asm/segment.h>
-+#include <asm/alternative.h>
++/* Maximum number of virtual CPUs in multi-processor guests. */
++/* WARNING: before changing this, check that shared_info fits on a page */
++#define MAX_VIRT_CPUS 64
 +
-+#include <asm/synch_bitops.h>
-+#include <asm/hypervisor.h>
-+#include <xen/interface/arch-x86_64.h>
++/* IO ports location for PV.  */
++#define IO_PORTS_PADDR          0x00000ffffc000000UL
++#define IO_PORTS_SIZE           0x0000000004000000UL
 +
-+#ifdef __KERNEL__
++#ifndef __ASSEMBLY__
 +
-+#ifdef CONFIG_SMP
-+#define __vcpu_id smp_processor_id()
++typedef unsigned long xen_ulong_t;
++
++#ifdef __XEN_TOOLS__
++#define XEN_PAGE_SIZE XC_PAGE_SIZE
 +#else
-+#define __vcpu_id 0
++#define XEN_PAGE_SIZE PAGE_SIZE
 +#endif
 +
-+#define __STR(x) #x
-+#define STR(x) __STR(x)
++#define INVALID_MFN       (~0UL)
 +
-+#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
-+#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
++struct pt_fpreg {
++    union {
++        unsigned long bits[2];
++        long double __dummy;    /* force 16-byte alignment */
++    } u;
++};
 +
-+/* frame pointer must be last for get_wchan */
-+#define SAVE_CONTEXT    "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\n\t"
++union vac {
++    unsigned long value;
++    struct {
++        int a_int:1;
++        int a_from_int_cr:1;
++        int a_to_int_cr:1;
++        int a_from_psr:1;
++        int a_from_cpuid:1;
++        int a_cover:1;
++        int a_bsw:1;
++        long reserved:57;
++    };
++};
++typedef union vac vac_t;
 +
-+#define __EXTRA_CLOBBER  \
-+	,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
++union vdc {
++    unsigned long value;
++    struct {
++        int d_vmsw:1;
++        int d_extint:1;
++        int d_ibr_dbr:1;
++        int d_pmc:1;
++        int d_to_pmd:1;
++        int d_itm:1;
++        long reserved:58;
++    };
++};
++typedef union vdc vdc_t;
 +
-+#define switch_to(prev,next,last) \
-+	asm volatile(SAVE_CONTEXT						    \
-+		     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
-+		     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
-+		     "call __switch_to\n\t"					  \
-+		     ".globl thread_return\n"					\
-+		     "thread_return:\n\t"					    \
-+		     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"			  \
-+		     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
-+		     LOCK_PREFIX "btr  %[tif_fork],%P[ti_flags](%%r8)\n\t"	  \
-+		     "movq %%rax,%%rdi\n\t" 					  \
-+		     "jc   ret_from_fork\n\t"					  \
-+		     RESTORE_CONTEXT						    \
-+		     : "=a" (last)					  	  \
-+		     : [next] "S" (next), [prev] "D" (prev),			  \
-+		       [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
-+		       [ti_flags] "i" (offsetof(struct thread_info, flags)),\
-+		       [tif_fork] "i" (TIF_FORK),			  \
-+		       [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
-+		       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))   \
-+		     : "memory", "cc" __EXTRA_CLOBBER)
-+    
-+extern void load_gs_index(unsigned); 
++struct mapped_regs {
++    union vac   vac;
++    union vdc   vdc;
++    unsigned long  virt_env_vaddr;
++    unsigned long  reserved1[29];
++    unsigned long  vhpi;
++    unsigned long  reserved2[95];
++    union {
++        unsigned long  vgr[16];
++        unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0 active
++    };
++    union {
++        unsigned long  vbgr[16];
++        unsigned long bank0_regs[16]; // bank0 regs (r16-r31) when bank1 active
++    };
++    unsigned long  vnat;
++    unsigned long  vbnat;
++    unsigned long  vcpuid[5];
++    unsigned long  reserved3[11];
++    unsigned long  vpsr;
++    unsigned long  vpr;
++    unsigned long  reserved4[76];
++    union {
++        unsigned long  vcr[128];
++        struct {
++            unsigned long dcr;  // CR0
++            unsigned long itm;
++            unsigned long iva;
++            unsigned long rsv1[5];
++            unsigned long pta;  // CR8
++            unsigned long rsv2[7];
++            unsigned long ipsr;  // CR16
++            unsigned long isr;
++            unsigned long rsv3;
++            unsigned long iip;
++            unsigned long ifa;
++            unsigned long itir;
++            unsigned long iipa;
++            unsigned long ifs;
++            unsigned long iim;  // CR24
++            unsigned long iha;
++            unsigned long rsv4[38];
++            unsigned long lid;  // CR64
++            unsigned long ivr;
++            unsigned long tpr;
++            unsigned long eoi;
++            unsigned long irr[4];
++            unsigned long itv;  // CR72
++            unsigned long pmv;
++            unsigned long cmcv;
++            unsigned long rsv5[5];
++            unsigned long lrr0;  // CR80
++            unsigned long lrr1;
++            unsigned long rsv6[46];
++        };
++    };
++    union {
++        unsigned long  reserved5[128];
++        struct {
++            unsigned long precover_ifs;
++            unsigned long unat;  // not sure if this is needed until NaT arch is done
++            int interrupt_collection_enabled; // virtual psr.ic
++            /* virtual interrupt deliverable flag is evtchn_upcall_mask in
++             * shared info area now. interrupt_mask_addr is the address
++             * of evtchn_upcall_mask for current vcpu
++             */
++            unsigned char *interrupt_mask_addr;
++            int pending_interruption;
++            unsigned char vpsr_pp;
++            unsigned char vpsr_dfh;
++            unsigned char hpsr_dfh;
++            unsigned char hpsr_mfh;
++            unsigned long reserved5_1[4];
++            int metaphysical_mode; // 1 = use metaphys mapping, 0 = use virtual
++            int banknum; // 0 or 1, which virtual register bank is active
++            unsigned long rrs[8]; // region registers
++            unsigned long krs[8]; // kernel registers
++            unsigned long tmp[16]; // temp registers (e.g. for hyperprivops)
++        };
++    };
++};
++typedef struct mapped_regs mapped_regs_t;
 +
-+/*
-+ * Load a segment. Fall back on loading the zero
-+ * segment if something goes wrong..
-+ */
-+#define loadsegment(seg,value)	\
-+	asm volatile("\n"			\
-+		"1:\t"				\
-+		"movl %k0,%%" #seg "\n"		\
-+		"2:\n"				\
-+		".section .fixup,\"ax\"\n"	\
-+		"3:\t"				\
-+		"movl %1,%%" #seg "\n\t" 	\
-+		"jmp 2b\n"			\
-+		".previous\n"			\
-+		".section __ex_table,\"a\"\n\t"	\
-+		".align 8\n\t"			\
-+		".quad 1b,3b\n"			\
-+		".previous"			\
-+		: :"r" (value), "r" (0))
++struct vpd {
++    struct mapped_regs vpd_low;
++    unsigned long  reserved6[3456];
++    unsigned long  vmm_avail[128];
++    unsigned long  reserved7[4096];
++};
++typedef struct vpd vpd_t;
++
++struct arch_vcpu_info {
++};
++typedef struct arch_vcpu_info arch_vcpu_info_t;
 +
 +/*
-+ * Clear and set 'TS' bit respectively
-+ */
-+#define clts() (HYPERVISOR_fpu_taskswitch(0))
++ * This structure is used for the magic page in the domain pseudo physical
++ * address space and for the result of XENMEM_machine_memory_map.
++ * In the XENMEM_machine_memory_map result,
++ * xen_memory_map::nr_entries indicates the size in bytes,
++ * including struct xen_ia64_memmap_info, not the number of entries.
++ */
++struct xen_ia64_memmap_info {
++    uint64_t efi_memmap_size;       /* size of EFI memory map */
++    uint64_t efi_memdesc_size;      /* size of an EFI memory map descriptor */
++    uint32_t efi_memdesc_version;   /* memory descriptor version */
++    void *memdesc[0];               /* array of efi_memory_desc_t */
++};
++typedef struct xen_ia64_memmap_info xen_ia64_memmap_info_t;
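++
++/* Worked example (illustration only): the number of EFI descriptors in
++ * the trailing array follows from the two size fields above, since the
++ * descriptors are packed back to back:
++ *
++ *     n_desc = info->efi_memmap_size / info->efi_memdesc_size;
++ *
++ * and, per the comment above, the whole object occupies
++ * sizeof(struct xen_ia64_memmap_info) + info->efi_memmap_size bytes,
++ * which is what xen_memory_map::nr_entries reports.
++ */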
 +
-+static inline unsigned long read_cr0(void)
-+{ 
-+	unsigned long cr0;
-+	asm volatile("movq %%cr0,%0" : "=r" (cr0));
-+	return cr0;
-+} 
++struct arch_shared_info {
++    /* PFN of the start_info page.  */
++    unsigned long start_info_pfn;
 +
-+static inline void write_cr0(unsigned long val) 
-+{ 
-+	asm volatile("movq %0,%%cr0" :: "r" (val));
-+} 
++    /* Interrupt vector for event channel.  */
++    int evtchn_vector;
 +
-+#define read_cr3() ({ \
-+	unsigned long __dummy; \
-+	asm("movq %%cr3,%0" : "=r" (__dummy)); \
-+	machine_to_phys(__dummy); \
-+})
++    /* PFN of memmap_info page */
++    unsigned int memmap_info_num_pages;/* currently only the value 1
++                                          is supported. */
++    unsigned long memmap_info_pfn;
 +
-+static inline unsigned long read_cr4(void)
-+{ 
-+	unsigned long cr4;
-+	asm("movq %%cr4,%0" : "=r" (cr4));
-+	return cr4;
-+} 
++    uint64_t pad[31];
++};
++typedef struct arch_shared_info arch_shared_info_t;
 +
-+static inline void write_cr4(unsigned long val)
-+{ 
-+	asm volatile("movq %0,%%cr4" :: "r" (val));
-+} 
++typedef unsigned long xen_callback_t;
 +
-+#define stts() (HYPERVISOR_fpu_taskswitch(1))
++struct ia64_tr_entry {
++    unsigned long pte;
++    unsigned long itir;
++    unsigned long vadr;
++    unsigned long rid;
++};
++typedef struct ia64_tr_entry ia64_tr_entry_t;
++DEFINE_XEN_GUEST_HANDLE(ia64_tr_entry_t);
 +
-+#define wbinvd() \
-+	__asm__ __volatile__ ("wbinvd": : :"memory");
++struct vcpu_tr_regs {
++    struct ia64_tr_entry itrs[12];
++    struct ia64_tr_entry dtrs[12];
++};
 +
-+/*
-+ * On SMP systems, when the scheduler does migration-cost autodetection,
-+ * it needs a way to flush as much of the CPU's caches as possible.
-+ */
-+static inline void sched_cacheflush(void)
-+{
-+	wbinvd();
-+}
++union vcpu_ar_regs {
++    unsigned long ar[128];
++    struct {
++        unsigned long kr[8];
++        unsigned long rsv1[8];
++        unsigned long rsc;
++        unsigned long bsp;
++        unsigned long bspstore;
++        unsigned long rnat;
++        unsigned long rsv2;
++        unsigned long fcr;
++        unsigned long rsv3[2];
++        unsigned long eflag;
++        unsigned long csd;
++        unsigned long ssd;
++        unsigned long cflg;
++        unsigned long fsr;
++        unsigned long fir;
++        unsigned long fdr;
++        unsigned long rsv4;
++        unsigned long ccv; /* 32 */
++        unsigned long rsv5[3];
++        unsigned long unat;
++        unsigned long rsv6[3];
++        unsigned long fpsr;
++        unsigned long rsv7[3];
++        unsigned long itc;
++        unsigned long rsv8[3];
++        unsigned long ign1[16];
++        unsigned long pfs; /* 64 */
++        unsigned long lc;
++        unsigned long ec;
++        unsigned long rsv9[45];
++        unsigned long ign2[16];
++    };
++};
 +
-+#endif	/* __KERNEL__ */
++union vcpu_cr_regs {
++    unsigned long cr[128];
++    struct {
++        unsigned long dcr;  // CR0
++        unsigned long itm;
++        unsigned long iva;
++        unsigned long rsv1[5];
++        unsigned long pta;  // CR8
++        unsigned long rsv2[7];
++        unsigned long ipsr;  // CR16
++        unsigned long isr;
++        unsigned long rsv3;
++        unsigned long iip;
++        unsigned long ifa;
++        unsigned long itir;
++        unsigned long iipa;
++        unsigned long ifs;
++        unsigned long iim;  // CR24
++        unsigned long iha;
++        unsigned long rsv4[38];
++        unsigned long lid;  // CR64
++        unsigned long ivr;
++        unsigned long tpr;
++        unsigned long eoi;
++        unsigned long irr[4];
++        unsigned long itv;  // CR72
++        unsigned long pmv;
++        unsigned long cmcv;
++        unsigned long rsv5[5];
++        unsigned long lrr0;  // CR80
++        unsigned long lrr1;
++        unsigned long rsv6[46];
++    };
++};
 +
-+#define nop() __asm__ __volatile__ ("nop")
++struct vcpu_guest_context_regs {
++        unsigned long r[32];
++        unsigned long b[8];
++        unsigned long bank[16];
++        unsigned long ip;
++        unsigned long psr;
++        unsigned long cfm;
++        unsigned long pr;
++        unsigned int nats; /* NaT bits for r1-r31.  */
++        unsigned int bnats; /* Nat bits for banked registers.  */
++        union vcpu_ar_regs ar;
++        union vcpu_cr_regs cr;
++        struct pt_fpreg f[128];
++        unsigned long dbr[8];
++        unsigned long ibr[8];
++        unsigned long rr[8];
++        unsigned long pkr[16];
++
++        /* FIXME: cpuid,pmd,pmc */
++
++        unsigned long xip;
++        unsigned long xpsr;
++        unsigned long xfs;
++        unsigned long xr[4];
++
++        struct vcpu_tr_regs tr;
++
++        /* Physical registers in case of debug event.  */
++        unsigned long excp_iipa;
++        unsigned long excp_ifa;
++        unsigned long excp_isr;
++        unsigned int excp_vector;
 +
-+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
++        /*
++         * The rbs is intended to be the image of the stacked registers still
++         * in the cpu (not yet stored in memory).  It is laid out as if it
++         * were written in memory at a 512-byte (64*8) aligned address + offset.
++         * rbs_voff is (offset / 8).  rbs_nat contains NaT bits for the
++         * remaining rbs registers.  rbs_rnat contains NaT bits for in memory
++         * rbs registers.
++         * Note: loadrs is 2**14 bytes == 2**11 slots.
++         */
++        unsigned int rbs_voff;
++        unsigned long rbs[2048];
++        unsigned long rbs_rnat;
 +
-+#define tas(ptr) (xchg((ptr),1))
++        /*
++         * RSE.N_STACKED_PHYS via PAL_RSE_INFO
++         * Strictly speaking this isn't cpu context, but the value is
++         * necessary for domain save/restore, so it lives here.
++         */
++        unsigned long num_phys_stacked;
++};
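++
++/* Worked example for the rbs layout comment above (illustration only):
++ * with 8-byte slots, a loadrs of 2**14 bytes covers 2**14 / 8 = 2**11
++ * slots, matching the 2048-entry rbs array, and rbs_voff for an image
++ * placed at 512-byte-aligned address base + off is simply off / 8.
++ */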
 +
-+#define __xg(x) ((volatile long *)(x))
++struct vcpu_guest_context {
++#define VGCF_EXTRA_REGS (1UL << 1)	/* Set extra regs.  */
++#define VGCF_SET_CR_IRR (1UL << 2)	/* Set cr_irr[0:3]. */
++#define VGCF_online     (1UL << 3)  /* make this vcpu online */
++    unsigned long flags;       /* VGCF_* flags */
 +
-+static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
-+{
-+	*ptr = val;
-+}
++    struct vcpu_guest_context_regs regs;
 +
-+#define _set_64bit set_64bit
++    unsigned long event_callback_ip;
 +
-+/*
-+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
-+ * Note 2: xchg has side effect, so that attribute volatile is necessary,
-+ *	  but generally the primitive is invalid, *ptr is output argument. --ANK
-+ */
-+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-+{
-+	switch (size) {
-+		case 1:
-+			__asm__ __volatile__("xchgb %b0,%1"
-+				:"=q" (x)
-+				:"m" (*__xg(ptr)), "0" (x)
-+				:"memory");
-+			break;
-+		case 2:
-+			__asm__ __volatile__("xchgw %w0,%1"
-+				:"=r" (x)
-+				:"m" (*__xg(ptr)), "0" (x)
-+				:"memory");
-+			break;
-+		case 4:
-+			__asm__ __volatile__("xchgl %k0,%1"
-+				:"=r" (x)
-+				:"m" (*__xg(ptr)), "0" (x)
-+				:"memory");
-+			break;
-+		case 8:
-+			__asm__ __volatile__("xchgq %0,%1"
-+				:"=r" (x)
-+				:"m" (*__xg(ptr)), "0" (x)
-+				:"memory");
-+			break;
-+	}
-+	return x;
-+}
++    /* Xen doesn't share privregs pages with an HVM domain, so this member
++     * doesn't make sense for HVM domains.
++     * ~0UL is already used for INVALID_P2M_ENTRY. */
++#define VGC_PRIVREGS_HVM       (~(-2UL))
++    unsigned long privregs_pfn;
++};
++typedef struct vcpu_guest_context vcpu_guest_context_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
 +
-+/*
-+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
-+ * store NEW in MEM.  Return the initial value in MEM.  Success is
-+ * indicated by comparing RETURN with OLD.
-+ */
++/* dom0 vp op */
++#define __HYPERVISOR_ia64_dom0vp_op     __HYPERVISOR_arch_0
++/*  Map io space from machine address into the dom0 physical address space.
++    Currently the assigned physical address equals the machine address.  */
++#define IA64_DOM0VP_ioremap             0
 +
-+#define __HAVE_ARCH_CMPXCHG 1
++/* Convert a pseudo physical page frame number to the corresponding
++   machine page frame number. If no page is assigned, INVALID_MFN or
++   GPFN_INV_MASK is returned, depending on the domain's non-vti/vti mode.  */
++#define IA64_DOM0VP_phystomach          1
 +
-+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-+				      unsigned long new, int size)
-+{
-+	unsigned long prev;
-+	switch (size) {
-+	case 1:
-+		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
-+				     : "=a"(prev)
-+				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-+				     : "memory");
-+		return prev;
-+	case 2:
-+		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
-+				     : "=a"(prev)
-+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-+				     : "memory");
-+		return prev;
-+	case 4:
-+		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
-+				     : "=a"(prev)
-+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-+				     : "memory");
-+		return prev;
-+	case 8:
-+		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
-+				     : "=a"(prev)
-+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-+				     : "memory");
-+		return prev;
-+	}
-+	return old;
-+}
++/* Convert a machine page frame number to the corresponding pseudo physical
++   page frame number of the caller domain.  */
++#define IA64_DOM0VP_machtophys          3
 +
-+#define cmpxchg(ptr,o,n)\
-+	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
-+					(unsigned long)(n),sizeof(*(ptr))))
++/* Reserved for future use.  */
++#define IA64_DOM0VP_iounmap             4
 +
-+#ifdef CONFIG_SMP
-+#define smp_mb()	mb()
-+#define smp_rmb()	rmb()
-+#define smp_wmb()	wmb()
-+#define smp_read_barrier_depends()	do {} while(0)
-+#else
-+#define smp_mb()	barrier()
-+#define smp_rmb()	barrier()
-+#define smp_wmb()	barrier()
-+#define smp_read_barrier_depends()	do {} while(0)
-+#endif
++/* Unmap and free pages contained in the specified pseudo physical region.  */
++#define IA64_DOM0VP_zap_physmap         5
 +
-+    
-+/*
-+ * Force strict CPU ordering.
-+ * And yes, this is required on UP too when we're talking
-+ * to devices.
-+ */
-+#define mb() 	asm volatile("mfence":::"memory")
-+#define rmb()	asm volatile("lfence":::"memory")
++/* Assign machine page frame to dom0's pseudo physical address space.  */
++#define IA64_DOM0VP_add_physmap         6
 +
-+#ifdef CONFIG_UNORDERED_IO
-+#define wmb()	asm volatile("sfence" ::: "memory")
-+#else
-+#define wmb()	asm volatile("" ::: "memory")
-+#endif
-+#define read_barrier_depends()	do {} while(0)
-+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
++/* expose the p2m table into domain */
++#define IA64_DOM0VP_expose_p2m          7
++
++/* xen perfmon */
++#define IA64_DOM0VP_perfmon             8
++
++/* gmfn version of IA64_DOM0VP_add_physmap */
++#define IA64_DOM0VP_add_physmap_with_gmfn       9
++
++/* get fpswa revision */
++#define IA64_DOM0VP_fpswa_revision      10
++
++/* Add an I/O port space range */
++#define IA64_DOM0VP_add_io_space        11
++
++/* expose the foreign domain's p2m table into privileged domain */
++#define IA64_DOM0VP_expose_foreign_p2m  12
++#define         IA64_DOM0VP_EFP_ALLOC_PTE       0x1 /* allocate p2m table */
++
++/* unexpose the foreign domain's p2m table into privileged domain */
++#define IA64_DOM0VP_unexpose_foreign_p2m        13
++
++// flags for page assignment to pseudo physical address space
++#define _ASSIGN_readonly                0
++#define ASSIGN_readonly                 (1UL << _ASSIGN_readonly)
++#define ASSIGN_writable                 (0UL << _ASSIGN_readonly) // dummy flag
++/* Internal only: memory attribute must be WC/UC/UCE.  */
++#define _ASSIGN_nocache                 1
++#define ASSIGN_nocache                  (1UL << _ASSIGN_nocache)
++// tlb tracking
++#define _ASSIGN_tlb_track               2
++#define ASSIGN_tlb_track                (1UL << _ASSIGN_tlb_track)
++/* Internal only: associated with PGC_allocated bit */
++#define _ASSIGN_pgc_allocated           3
++#define ASSIGN_pgc_allocated            (1UL << _ASSIGN_pgc_allocated)
++/* Page is an IO page.  */
++#define _ASSIGN_io                      4
++#define ASSIGN_io                       (1UL << _ASSIGN_io)
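++
++/* Illustration only: the ASSIGN_* values are single-bit flags meant to
++ * be or'ed together, e.g. for a writable page with tlb tracking:
++ *
++ *     unsigned long flags = ASSIGN_writable | ASSIGN_tlb_track;
++ *
++ * ASSIGN_writable is a dummy 0 (see above) and merely documents
++ * intent; ASSIGN_readonly is the bit that actually changes behaviour.
++ */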
++
++/* This structure has the same layout as struct ia64_boot_param, defined in
++   <asm/system.h>.  It is redefined here to ease use.  */
++struct xen_ia64_boot_param {
++	unsigned long command_line;	/* physical address of cmd line args */
++	unsigned long efi_systab;	/* physical address of EFI system table */
++	unsigned long efi_memmap;	/* physical address of EFI memory map */
++	unsigned long efi_memmap_size;	/* size of EFI memory map */
++	unsigned long efi_memdesc_size;	/* size of an EFI memory map descriptor */
++	unsigned int  efi_memdesc_version;	/* memory descriptor version */
++	struct {
++		unsigned short num_cols;	/* number of columns on console.  */
++		unsigned short num_rows;	/* number of rows on console.  */
++		unsigned short orig_x;	/* cursor's x position */
++		unsigned short orig_y;	/* cursor's y position */
++	} console_info;
++	unsigned long fpswa;		/* physical address of the fpswa interface */
++	unsigned long initrd_start;
++	unsigned long initrd_size;
++	unsigned long domain_start;	/* va where the boot time domain begins */
++	unsigned long domain_size;	/* how big is the boot domain */
++};
++
++#endif /* !__ASSEMBLY__ */
++
++/* Size of the shared_info area (this is not related to page size).  */
++#define XSI_SHIFT			14
++#define XSI_SIZE			(1 << XSI_SHIFT)
++/* Log size of mapped_regs area (64 KB - only 4KB is used).  */
++#define XMAPPEDREGS_SHIFT		12
++#define XMAPPEDREGS_SIZE		(1 << XMAPPEDREGS_SHIFT)
++/* Offset of XASI (Xen arch shared info) wrt XSI_BASE.  */
++#define XMAPPEDREGS_OFS			XSI_SIZE
++
++/* Hyperprivops.  */
++#define HYPERPRIVOP_START		0x1
++#define HYPERPRIVOP_RFI			(HYPERPRIVOP_START + 0x0)
++#define HYPERPRIVOP_RSM_DT		(HYPERPRIVOP_START + 0x1)
++#define HYPERPRIVOP_SSM_DT		(HYPERPRIVOP_START + 0x2)
++#define HYPERPRIVOP_COVER		(HYPERPRIVOP_START + 0x3)
++#define HYPERPRIVOP_ITC_D		(HYPERPRIVOP_START + 0x4)
++#define HYPERPRIVOP_ITC_I		(HYPERPRIVOP_START + 0x5)
++#define HYPERPRIVOP_SSM_I		(HYPERPRIVOP_START + 0x6)
++#define HYPERPRIVOP_GET_IVR		(HYPERPRIVOP_START + 0x7)
++#define HYPERPRIVOP_GET_TPR		(HYPERPRIVOP_START + 0x8)
++#define HYPERPRIVOP_SET_TPR		(HYPERPRIVOP_START + 0x9)
++#define HYPERPRIVOP_EOI			(HYPERPRIVOP_START + 0xa)
++#define HYPERPRIVOP_SET_ITM		(HYPERPRIVOP_START + 0xb)
++#define HYPERPRIVOP_THASH		(HYPERPRIVOP_START + 0xc)
++#define HYPERPRIVOP_PTC_GA		(HYPERPRIVOP_START + 0xd)
++#define HYPERPRIVOP_ITR_D		(HYPERPRIVOP_START + 0xe)
++#define HYPERPRIVOP_GET_RR		(HYPERPRIVOP_START + 0xf)
++#define HYPERPRIVOP_SET_RR		(HYPERPRIVOP_START + 0x10)
++#define HYPERPRIVOP_SET_KR		(HYPERPRIVOP_START + 0x11)
++#define HYPERPRIVOP_FC			(HYPERPRIVOP_START + 0x12)
++#define HYPERPRIVOP_GET_CPUID		(HYPERPRIVOP_START + 0x13)
++#define HYPERPRIVOP_GET_PMD		(HYPERPRIVOP_START + 0x14)
++#define HYPERPRIVOP_GET_EFLAG		(HYPERPRIVOP_START + 0x15)
++#define HYPERPRIVOP_SET_EFLAG		(HYPERPRIVOP_START + 0x16)
++#define HYPERPRIVOP_RSM_BE		(HYPERPRIVOP_START + 0x17)
++#define HYPERPRIVOP_GET_PSR		(HYPERPRIVOP_START + 0x18)
++#define HYPERPRIVOP_SET_RR0_TO_RR4	(HYPERPRIVOP_START + 0x19)
++#define HYPERPRIVOP_MAX			(0x1a)
 +
-+#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
++/* Fast and light hypercalls.  */
++#define __HYPERVISOR_ia64_fast_eoi	__HYPERVISOR_arch_1
 +
-+#include <linux/irqflags.h>
++/* Extra debug features.  */
++#define __HYPERVISOR_ia64_debug_op  __HYPERVISOR_arch_2
 +
-+void cpu_idle_wait(void);
++/* Xencomm macros.  */
++#define XENCOMM_INLINE_MASK 0xf800000000000000UL
++#define XENCOMM_INLINE_FLAG 0x8000000000000000UL
 +
-+extern unsigned long arch_align_stack(unsigned long sp);
-+extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
++#ifndef __ASSEMBLY__
 +
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/timer.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/timer.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/timer.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/timer.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,67 @@
-+#ifndef _ASMi386_TIMER_H
-+#define _ASMi386_TIMER_H
-+#include <linux/init.h>
++/*
++ * Optimization features.
++ * The hypervisor may do some special optimizations for guests. This hypercall
++ * can be used to switch these special optimizations on or off.
++ */
++#define __HYPERVISOR_opt_feature	0x700UL
 +
-+/**
-+ * struct timer_ops - used to define a timer source
-+ *
-+ * @name: name of the timer.
-+ * @init: Probes and initializes the timer. Takes clock= override 
-+ *        string as an argument. Returns 0 on success, anything else
-+ *        on failure.
-+ * @mark_offset: called by the timer interrupt.
-+ * @get_offset:  called by gettimeofday(). Returns the number of microseconds
-+ *               since the last timer interupt.
-+ * @monotonic_clock: returns the number of nanoseconds since the init of the
-+ *                   timer.
-+ * @delay: delays this many clock cycles.
++#define XEN_IA64_OPTF_OFF	0x0
++#define XEN_IA64_OPTF_ON	0x1
++
++/*
++ * If this feature is switched on, the hypervisor inserts the
++ * tlb entries without calling the guest's trap handler.
++ * This is useful in guests that use region 7 for identity mapping,
++ * as the Linux kernel does.
 + */
-+struct timer_opts {
-+	char* name;
-+	void (*mark_offset)(void);
-+	unsigned long (*get_offset)(void);
-+	unsigned long long (*monotonic_clock)(void);
-+	void (*delay)(unsigned long);
-+	unsigned long (*read_timer)(void);
-+	int (*suspend)(pm_message_t state);
-+	int (*resume)(void);
-+};
++#define XEN_IA64_OPTF_IDENT_MAP_REG7    1
 +
-+struct init_timer_opts {
-+	int (*init)(char *override);
-+	struct timer_opts *opts;
-+};
++/* Identity mapping of region 4 addresses in HVM. */
++#define XEN_IA64_OPTF_IDENT_MAP_REG4    2
 +
-+#define TICK_SIZE (tick_nsec / 1000)
++/* Identity mapping of region 5 addresses in HVM. */
++#define XEN_IA64_OPTF_IDENT_MAP_REG5    3
 +
-+extern struct timer_opts* __init select_timer(void);
-+extern void clock_fallback(void);
-+void setup_pit_timer(void);
++#define XEN_IA64_OPTF_IDENT_MAP_NOT_SET  (0)
 +
-+/* Modifiers for buggy PIT handling */
++struct xen_ia64_opt_feature {
++	unsigned long cmd;		/* Which feature */
++	unsigned char on;		/* Switch feature on/off */
++	union {
++		struct {
++			/* The page protection bit mask of the pte.
++			 * This will be or'ed with the pte. */
++			unsigned long pgprot;
++			unsigned long key;	/* A protection key for itir. */
++		};
++	};
++};
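++
++/* Hypothetical usage sketch (not part of this header): enabling the
++ * region-7 identity mapping optimization could look like this; the
++ * HYPERVISOR_opt_feature wrapper name and the _PAGE_* protection bits
++ * are assumptions for illustration only:
++ *
++ *     struct xen_ia64_opt_feature feature = {
++ *         .cmd    = XEN_IA64_OPTF_IDENT_MAP_REG7,
++ *         .on     = XEN_IA64_OPTF_ON,
++ *         .pgprot = _PAGE_P | _PAGE_A | _PAGE_D,  // assumed bits
++ *         .key    = 0,
++ *     };
++ *     HYPERVISOR_opt_feature(&feature);  // assumed wrapper
++ */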
 +
-+extern int pit_latch_buggy;
++#endif /* __ASSEMBLY__ */
 +
-+extern struct timer_opts *cur_timer;
-+extern int timer_ack;
++/* xen perfmon */
++#ifdef XEN
++#ifndef __ASSEMBLY__
++#ifndef _ASM_IA64_PERFMON_H
 +
-+/* list of externed timers */
-+extern struct timer_opts timer_none;
-+extern struct timer_opts timer_pit;
-+extern struct init_timer_opts timer_pit_init;
-+extern struct init_timer_opts timer_tsc_init;
-+#ifdef CONFIG_X86_CYCLONE_TIMER
-+extern struct init_timer_opts timer_cyclone_init;
-+#endif
++#include <xen/list.h>   // asm/perfmon.h requires struct list_head
++#include <asm/perfmon.h>
++// for PFM_xxx and pfarg_features_t, pfarg_context_t, pfarg_reg_t, pfarg_load_t
 +
-+extern unsigned long calibrate_tsc(void);
-+extern void init_cpu_khz(void);
-+#ifdef CONFIG_HPET_TIMER
-+extern struct init_timer_opts timer_hpet_init;
-+extern unsigned long calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr);
-+#endif
++#endif /* _ASM_IA64_PERFMON_H */
 +
-+#ifdef CONFIG_X86_PM_TIMER
-+extern struct init_timer_opts timer_pmtmr_init;
-+#endif
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/tlbflush.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/tlbflush.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/tlbflush.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/tlbflush.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,103 @@
-+#ifndef _X8664_TLBFLUSH_H
-+#define _X8664_TLBFLUSH_H
++DEFINE_XEN_GUEST_HANDLE(pfarg_features_t);
++DEFINE_XEN_GUEST_HANDLE(pfarg_context_t);
++DEFINE_XEN_GUEST_HANDLE(pfarg_reg_t);
++DEFINE_XEN_GUEST_HANDLE(pfarg_load_t);
++#endif /* __ASSEMBLY__ */
++#endif /* XEN */
 +
-+#include <linux/mm.h>
-+#include <asm/processor.h>
++#ifndef __ASSEMBLY__
++#include "arch-ia64/hvm/memmap.h"
++#endif
 +
-+#define __flush_tlb()	xen_tlb_flush()
++#endif /* __HYPERVISOR_IF_IA64_H__ */
 +
 +/*
-+ * Global pages have to be flushed a bit differently. Not a real
-+ * performance problem because this does not happen often.
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/arch-ia64/debug_op.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/arch-ia64/debug_op.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,96 @@
++/******************************************************************************
++ * debug_op.h
++ *
++ * Copyright (c) 2007 Tristan Gingold <tgingold at free.fr>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ *
 + */
-+#define __flush_tlb_global()	xen_tlb_flush()
 +
++#ifndef __XEN_PUBLIC_IA64_DEBUG_OP_H__
++#define __XEN_PUBLIC_IA64_DEBUG_OP_H__
 +
-+extern unsigned long pgkern_mask;
++/* Set/Get extra conditions to break.  */
++#define XEN_IA64_DEBUG_OP_SET_FLAGS 1
++#define XEN_IA64_DEBUG_OP_GET_FLAGS 2
 +
-+#define __flush_tlb_all() __flush_tlb_global()
++/* Break on kernel single step.  */
++#define XEN_IA64_DEBUG_ON_KERN_SSTEP   (1 << 0)
 +
-+#define __flush_tlb_one(addr)	xen_invlpg((unsigned long)addr)
++/* Break on kernel debug (breakpoint or watch point).  */
++#define XEN_IA64_DEBUG_ON_KERN_DEBUG   (1 << 1)
 +
++/* Break on kernel taken branch.  */
++#define XEN_IA64_DEBUG_ON_KERN_TBRANCH (1 << 2)
 +
-+/*
-+ * TLB flushing:
-+ *
-+ *  - flush_tlb() flushes the current mm struct TLBs
-+ *  - flush_tlb_all() flushes all processes TLBs
-+ *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
-+ *  - flush_tlb_page(vma, vmaddr) flushes one page
-+ *  - flush_tlb_range(vma, start, end) flushes a range of pages
-+ *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
-+ *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
-+ *
-+ * x86-64 can only flush individual pages or full VMs. For a range flush
-+ * we always do the full VM. Might be worth trying if for a small
-+ * range a few INVLPGs in a row are a win.
-+ */
++/* Break on interrupt injection.  */
++#define XEN_IA64_DEBUG_ON_EXTINT       (1 << 3)
 +
-+#ifndef CONFIG_SMP
++/* Break on exception injection.  */
++#define XEN_IA64_DEBUG_ON_EXCEPT       (1 << 4)
 +
-+#define flush_tlb() __flush_tlb()
-+#define flush_tlb_all() __flush_tlb_all()
-+#define local_flush_tlb() __flush_tlb()
++/* Break on event injection.  */
++#define XEN_IA64_DEBUG_ON_EVENT        (1 << 5)
 +
-+static inline void flush_tlb_mm(struct mm_struct *mm)
-+{
-+	if (mm == current->active_mm)
-+		__flush_tlb();
-+}
++/* Break on privop/virtualized instruction (slow path only).  */
++#define XEN_IA64_DEBUG_ON_PRIVOP       (1 << 6)
 +
-+static inline void flush_tlb_page(struct vm_area_struct *vma,
-+	unsigned long addr)
-+{
-+	if (vma->vm_mm == current->active_mm)
-+		__flush_tlb_one(addr);
-+}
++/* Break on emulated PAL call (at entry).  */
++#define XEN_IA64_DEBUG_ON_PAL          (1 << 7)
 +
-+static inline void flush_tlb_range(struct vm_area_struct *vma,
-+	unsigned long start, unsigned long end)
-+{
-+	if (vma->vm_mm == current->active_mm)
-+		__flush_tlb();
-+}
++/* Break on emulated SAL call (at entry).  */
++#define XEN_IA64_DEBUG_ON_SAL          (1 << 8)
 +
-+#else
++/* Break on emulated EFI call (at entry).  */
++#define XEN_IA64_DEBUG_ON_EFI          (1 << 9)
 +
-+#include <asm/smp.h>
++/* Break on rfi emulation (slow path only, before exec).  */
++#define XEN_IA64_DEBUG_ON_RFI          (1 << 10)
 +
-+#define local_flush_tlb() \
-+	__flush_tlb()
++/* Break on address translation switch.  */
++#define XEN_IA64_DEBUG_ON_MMU          (1 << 11)
 +
-+extern void flush_tlb_all(void);
-+extern void flush_tlb_current_task(void);
-+extern void flush_tlb_mm(struct mm_struct *);
-+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
++/* Break on bad guest physical address.  */
++#define XEN_IA64_DEBUG_ON_BAD_MPA      (1 << 12)
 +
-+#define flush_tlb()	flush_tlb_current_task()
++/* Force psr.ss bit.  */
++#define XEN_IA64_DEBUG_FORCE_SS        (1 << 13)
 +
-+static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
-+{
-+	flush_tlb_mm(vma->vm_mm);
-+}
++/* Force psr.db bit.  */
++#define XEN_IA64_DEBUG_FORCE_DB        (1 << 14)
 +
-+#define TLBSTATE_OK	1
-+#define TLBSTATE_LAZY	2
++/* Break on ITR/PTR.  */
++#define XEN_IA64_DEBUG_ON_TR           (1 << 15)
 +
-+/* Roughly an IPI every 20MB with 4k pages for freeing page table
-+   ranges. Cost is about 42k of memory for each CPU. */
-+#define ARCH_FREE_PTE_NR 5350	
++/* Break on ITC/PTC.L/PTC.G/PTC.GA.  */
++#define XEN_IA64_DEBUG_ON_TC           (1 << 16)
 +
-+#endif
++/* Get translation cache.  */
++#define XEN_IA64_DEBUG_OP_GET_TC   3
 +
-+#define flush_tlb_kernel_range(start, end) flush_tlb_all()
++/* Translate virtual address to guest physical address.  */
++#define XEN_IA64_DEBUG_OP_TRANSLATE 4
 +
-+static inline void flush_tlb_pgtables(struct mm_struct *mm,
-+				      unsigned long start, unsigned long end)
-+{
-+	/* x86_64 does not keep any page table caches in a software TLB.
-+	   The CPUs do in their hardware TLBs, but they are handled
-+	   by the normal TLB flushing algorithms. */
-+}
++union xen_ia64_debug_op {
++    uint64_t flags;
++    struct xen_ia64_debug_vtlb {
++        uint64_t nbr;                             /* IN/OUT */
++        XEN_GUEST_HANDLE_64(ia64_tr_entry_t) tr;  /* IN/OUT */
++    } vtlb;
++};
++typedef union xen_ia64_debug_op xen_ia64_debug_op_t;
++DEFINE_XEN_GUEST_HANDLE(xen_ia64_debug_op_t);
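++
++/* Hypothetical usage sketch (not part of this header): turning on
++ * single-step and debug breaks via the flags op; the ia64_debug_op()
++ * wrapper name is an assumption for illustration:
++ *
++ *     union xen_ia64_debug_op op;
++ *     op.flags = XEN_IA64_DEBUG_ON_KERN_SSTEP
++ *              | XEN_IA64_DEBUG_ON_KERN_DEBUG;
++ *     ia64_debug_op(XEN_IA64_DEBUG_OP_SET_FLAGS, &op);
++ */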
 +
-+#endif /* _X8664_TLBFLUSH_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/vga.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/vga.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/vga.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/vga.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,20 @@
-+/*
-+ *	Access to VGA videoram
++#endif /* __XEN_PUBLIC_IA64_DEBUG_OP_H__ */
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/arch-ia64/hvm/memmap.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/arch-ia64/hvm/memmap.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,88 @@
++/******************************************************************************
++ * memmap.h
++ *
++ * Copyright (c) 2008 Tristan Gingold <tgingold AT free fr>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 + *
-+ *	(c) 1998 Martin Mares <mj at ucw.cz>
 + */
 +
-+#ifndef _LINUX_ASM_VGA_H_
-+#define _LINUX_ASM_VGA_H_
++#ifndef __XEN_PUBLIC_HVM_MEMMAP_IA64_H__
++#define __XEN_PUBLIC_HVM_MEMMAP_IA64_H__
 +
-+/*
-+ *	On the PC, we can just recalculate addresses and then
-+ *	access the videoram directly without any black magic.
-+ */
++#define MEM_G  (1UL << 30)
++#define MEM_M  (1UL << 20)
++#define MEM_K  (1UL << 10)
++
++/* Guest physical address of IO ports space.  */
++#define MMIO_START  (3 * MEM_G)
++#define MMIO_SIZE   (512 * MEM_M)
 +
-+#define VGA_MAP_MEM(x,s) (unsigned long)isa_bus_to_virt(x)
++#define VGA_IO_START  0xA0000UL
++#define VGA_IO_SIZE   0x20000
 +
-+#define vga_readb(x) (*(x))
-+#define vga_writeb(x,y) (*(y) = (x))
++#define LEGACY_IO_START  (MMIO_START + MMIO_SIZE)
++#define LEGACY_IO_SIZE   (64 * MEM_M)
 +
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/xenoprof.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/xenoprof.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/xenoprof.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/xenoprof.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1 @@
-+#include <asm-i386/mach-xen/asm/xenoprof.h>
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/xor.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/xor.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/asm/xor.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/asm/xor.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,328 @@
-+/*
-+ * x86-64 changes / gcc fixes from Andi Kleen. 
-+ * Copyright 2002 Andi Kleen, SuSE Labs.
-+ *
-+ * This hasn't been optimized for the hammer yet, but there are likely
-+ * no advantages to be gotten from x86-64 here anyways.
-+ */
++#define IO_PAGE_START  (LEGACY_IO_START + LEGACY_IO_SIZE)
++#define IO_PAGE_SIZE   XEN_PAGE_SIZE
 +
-+typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t;
++#define STORE_PAGE_START  (IO_PAGE_START + IO_PAGE_SIZE)
++#define STORE_PAGE_SIZE   XEN_PAGE_SIZE
 +
-+/* Doesn't use gcc to save the XMM registers, because there is no easy way to 
-+   tell it to do a clts before the register saving. */
-+#define XMMS_SAVE do {				\
-+	preempt_disable();			\
-+	if (!(current_thread_info()->status & TS_USEDFPU))	\
-+		clts();				\
-+	__asm__ __volatile__ ( 			\
-+		"movups %%xmm0,(%1)	;\n\t"	\
-+		"movups %%xmm1,0x10(%1)	;\n\t"	\
-+		"movups %%xmm2,0x20(%1)	;\n\t"	\
-+		"movups %%xmm3,0x30(%1)	;\n\t"	\
-+		: "=&r" (cr0)			\
-+		: "r" (xmm_save) 		\
-+		: "memory");			\
-+} while(0)
++#define BUFFER_IO_PAGE_START  (STORE_PAGE_START + STORE_PAGE_SIZE)
++#define BUFFER_IO_PAGE_SIZE   XEN_PAGE_SIZE
 +
-+#define XMMS_RESTORE do {			\
-+	asm volatile (				\
-+		"sfence			;\n\t"	\
-+		"movups (%1),%%xmm0	;\n\t"	\
-+		"movups 0x10(%1),%%xmm1	;\n\t"	\
-+		"movups 0x20(%1),%%xmm2	;\n\t"	\
-+		"movups 0x30(%1),%%xmm3	;\n\t"	\
-+		:				\
-+		: "r" (cr0), "r" (xmm_save)	\
-+		: "memory");			\
-+	if (!(current_thread_info()->status & TS_USEDFPU))	\
-+		stts();				\
-+	preempt_enable();			\
-+} while(0)
++#define BUFFER_PIO_PAGE_START  (BUFFER_IO_PAGE_START + BUFFER_IO_PAGE_SIZE)
++#define BUFFER_PIO_PAGE_SIZE   XEN_PAGE_SIZE
 +
-+#define OFFS(x)		"16*("#x")"
-+#define PF_OFFS(x)	"256+16*("#x")"
-+#define	PF0(x)		"	prefetchnta "PF_OFFS(x)"(%[p1])		;\n"
-+#define LD(x,y)		"       movaps   "OFFS(x)"(%[p1]), %%xmm"#y"	;\n"
-+#define ST(x,y)		"       movaps %%xmm"#y",   "OFFS(x)"(%[p1])	;\n"
-+#define PF1(x)		"	prefetchnta "PF_OFFS(x)"(%[p2])		;\n"
-+#define PF2(x)		"	prefetchnta "PF_OFFS(x)"(%[p3])		;\n"
-+#define PF3(x)		"	prefetchnta "PF_OFFS(x)"(%[p4])		;\n"
-+#define PF4(x)		"	prefetchnta "PF_OFFS(x)"(%[p5])		;\n"
-+#define PF5(x)		"	prefetchnta "PF_OFFS(x)"(%[p6])		;\n"
-+#define XO1(x,y)	"       xorps   "OFFS(x)"(%[p2]), %%xmm"#y"	;\n"
-+#define XO2(x,y)	"       xorps   "OFFS(x)"(%[p3]), %%xmm"#y"	;\n"
-+#define XO3(x,y)	"       xorps   "OFFS(x)"(%[p4]), %%xmm"#y"	;\n"
-+#define XO4(x,y)	"       xorps   "OFFS(x)"(%[p5]), %%xmm"#y"	;\n"
-+#define XO5(x,y)	"       xorps   "OFFS(x)"(%[p6]), %%xmm"#y"	;\n"
++#define IO_SAPIC_START  0xfec00000UL
++#define IO_SAPIC_SIZE   0x100000
 +
++#define PIB_START  0xfee00000UL
++#define PIB_SIZE   0x200000
 +
-+static void
-+xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
-+{
-+        unsigned int lines = bytes >> 8;
-+	unsigned long cr0;
-+	xmm_store_t xmm_save[4];
++#define GFW_START  (4 * MEM_G - 16 * MEM_M)
++#define GFW_SIZE   (16 * MEM_M)
 +
-+	XMMS_SAVE;
++/* domVTI */
++#define GPFN_FRAME_BUFFER  0x1 /* VGA framebuffer */
++#define GPFN_LOW_MMIO      0x2 /* Low MMIO range */
++#define GPFN_PIB           0x3 /* PIB base */
++#define GPFN_IOSAPIC       0x4 /* IOSAPIC base */
++#define GPFN_LEGACY_IO     0x5 /* Legacy I/O base */
++#define GPFN_HIGH_MMIO     0x6 /* High MMIO range */
 +
-+        asm volatile (
-+#undef BLOCK
-+#define BLOCK(i) \
-+		LD(i,0)					\
-+			LD(i+1,1)			\
-+		PF1(i)					\
-+				PF1(i+2)		\
-+				LD(i+2,2)		\
-+					LD(i+3,3)	\
-+		PF0(i+4)				\
-+				PF0(i+6)		\
-+		XO1(i,0)				\
-+			XO1(i+1,1)			\
-+				XO1(i+2,2)		\
-+					XO1(i+3,3)	\
-+		ST(i,0)					\
-+			ST(i+1,1)			\
-+				ST(i+2,2)		\
-+					ST(i+3,3)	\
++/* NVRAM belongs to the GFW memory space.  */
++#define NVRAM_SIZE   (MEM_K * 64)
++#define NVRAM_START  (GFW_START + 10 * MEM_M)
 +
++#define NVRAM_VALID_SIG  0x4650494e45584948 /* "HIXENIPF" */
++struct nvram_save_addr {
++    unsigned long addr;
++    unsigned long signature;
++};
 +
-+		PF0(0)
-+				PF0(2)
++#endif /* __XEN_PUBLIC_HVM_MEMMAP_IA64_H__ */
 +
-+	" .align 32			;\n"
-+        " 1:                            ;\n"
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
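
[Editor's note: the memmap.h constants above chain each region's START off
the previous region's START + SIZE. A small standalone program (LP64 host
assumed; values mirrored from the header) makes the resulting guest physical
layout visible.]

#include <stdio.h>

/* Mirrored from memmap.h above. */
#define MEM_G (1UL << 30)
#define MEM_M (1UL << 20)

#define MMIO_START       (3 * MEM_G)
#define MMIO_SIZE        (512 * MEM_M)
#define LEGACY_IO_START  (MMIO_START + MMIO_SIZE)
#define LEGACY_IO_SIZE   (64 * MEM_M)
#define GFW_START        (4 * MEM_G - 16 * MEM_M)
#define NVRAM_START      (GFW_START + 10 * MEM_M)

int main(void)
{
    /* Each region starts where the previous one ends. */
    printf("MMIO:      [%#lx, %#lx)\n", MMIO_START, MMIO_START + MMIO_SIZE);
    printf("Legacy IO: [%#lx, %#lx)\n", LEGACY_IO_START,
           LEGACY_IO_START + LEGACY_IO_SIZE);
    printf("GFW:       %#lx\n", GFW_START);
    printf("NVRAM:     %#lx\n", NVRAM_START);
    return 0;
}
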
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/arch-ia64/hvm/save.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/arch-ia64/hvm/save.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,201 @@
++/******************************************************************************
++ * save_types.h
++ *
++ * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
++ *                    VA Linux Systems Japan K.K.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ *
++ */
 +
-+		BLOCK(0)
-+		BLOCK(4)
-+		BLOCK(8)
-+		BLOCK(12)
++#ifndef __XEN_PUBLIC_HVM_SAVE_IA64_H__
++#define __XEN_PUBLIC_HVM_SAVE_IA64_H__
 +
-+        "       addq %[inc], %[p1]           ;\n"
-+        "       addq %[inc], %[p2]           ;\n"
-+		"		decl %[cnt] ; jnz 1b"
-+	: [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines)
-+	: [inc] "r" (256UL) 
-+        : "memory");
++#include <public/hvm/save.h>
++#include <public/arch-ia64.h>
 +
-+	XMMS_RESTORE;
-+}
++/* 
++ * Save/restore header: general info about the save file. 
++ */
 +
-+static void
-+xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
-+	  unsigned long *p3)
-+{
-+	unsigned int lines = bytes >> 8;
-+	xmm_store_t xmm_save[4];
-+	unsigned long cr0;
++/* x86 uses 0x54381286 */
++#define HVM_FILE_MAGIC   0x343641492f6e6558UL   /* "Xen/IA64" */
++#define HVM_FILE_VERSION 0x0000000000000001UL
 +
-+	XMMS_SAVE;
++struct hvm_save_header {
++    uint64_t magic;             /* Must be HVM_FILE_MAGIC */
++    uint64_t version;           /* File format version */
++    uint64_t changeset;         /* Version of Xen that saved this file */
++    uint64_t cpuid[5];          /* ia64 CPUID registers 0-4 on the saving machine */
++};
 +
-+        __asm__ __volatile__ (
-+#undef BLOCK
-+#define BLOCK(i) \
-+		PF1(i)					\
-+				PF1(i+2)		\
-+		LD(i,0)					\
-+			LD(i+1,1)			\
-+				LD(i+2,2)		\
-+					LD(i+3,3)	\
-+		PF2(i)					\
-+				PF2(i+2)		\
-+		PF0(i+4)				\
-+				PF0(i+6)		\
-+		XO1(i,0)				\
-+			XO1(i+1,1)			\
-+				XO1(i+2,2)		\
-+					XO1(i+3,3)	\
-+		XO2(i,0)				\
-+			XO2(i+1,1)			\
-+				XO2(i+2,2)		\
-+					XO2(i+3,3)	\
-+		ST(i,0)					\
-+			ST(i+1,1)			\
-+				ST(i+2,2)		\
-+					ST(i+3,3)	\
++DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header);
 +
++/*
++ * CPU
++ */
++struct hvm_hw_ia64_cpu {
++    uint64_t    ipsr;
++};
++DECLARE_HVM_SAVE_TYPE(CPU, 2, struct hvm_hw_ia64_cpu);
 +
-+		PF0(0)
-+				PF0(2)
++/*
++ * VPD
++ */
++struct hvm_hw_ia64_vpd {
++    struct vpd      vpd;
++};
++DECLARE_HVM_SAVE_TYPE(VPD, 3, struct hvm_hw_ia64_vpd);
 +
-+	" .align 32			;\n"
-+        " 1:                            ;\n"
++/*
++ * device dependency
++ * vacpi => viosapic => vlsapic
++ */
++/*
++ * vlsapic
++ */
++struct hvm_hw_ia64_vlsapic {
++    uint64_t insvc[4];
++    uint64_t vhpi; // ??? should this be saved in vpd
++    uint8_t xtp;
++    uint8_t pal_init_pending;
++    uint8_t pad[2];
++};
++DECLARE_HVM_SAVE_TYPE(VLSAPIC, 4, struct hvm_hw_ia64_vlsapic);
++/* set
++ * unconditionally set v->arch.irq_new_pending = 1
++ * unconditionally set v->arch.irq_new_condition = 0
++ */
 +
-+		BLOCK(0)
-+		BLOCK(4)
-+		BLOCK(8)
-+		BLOCK(12)
++/*
++ * vtime
++ */
++/* itc, itm, itv are saved by arch vcpu context */
++struct hvm_hw_ia64_vtime {
++    uint64_t itc;
++    uint64_t itm;
 +
-+        "       addq %[inc], %[p1]           ;\n"
-+        "       addq %[inc], %[p2]          ;\n"
-+        "       addq %[inc], %[p3]           ;\n"
-+		"		decl %[cnt] ; jnz 1b"
-+	: [cnt] "+r" (lines),
-+	  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
-+	: [inc] "r" (256UL)
-+	: "memory"); 
-+	XMMS_RESTORE;
-+}
++    uint64_t last_itc;
++    uint64_t pending;
++};
++DECLARE_HVM_SAVE_TYPE(VTIME, 5, struct hvm_hw_ia64_vtime);
++/*
++ * calculate v->vtm.vtm_offset
++ * ??? Or should vtm_offset be set by leave_hypervisor_tail()?
++ * start vtm_timer if necessary by vtm_set_itm().
++ * ??? Or should vtm_timer be set by leave_hypervisor_tail()?
++ *
++ * ??? or should this be done by schedule_tail()?
++ *        => schedule_tail() should do it.
++ */
 +
-+static void
-+xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
-+	  unsigned long *p3, unsigned long *p4)
++/*
++ * viosapic
++ */
++#define VIOSAPIC_NUM_PINS     48
++
++union viosapic_rte
 +{
-+	unsigned int lines = bytes >> 8;
-+	xmm_store_t xmm_save[4]; 
-+	unsigned long cr0;
++    uint64_t bits;
++    struct {
++        uint8_t vector;
 +
-+	XMMS_SAVE;
++        uint8_t delivery_mode  : 3;
++        uint8_t reserve1       : 1;
++        uint8_t delivery_status: 1;
++        uint8_t polarity       : 1;
++        uint8_t reserve2       : 1;
++        uint8_t trig_mode      : 1;
++
++        uint8_t mask           : 1;
++        uint8_t reserve3       : 7;
++
++        uint8_t reserved[3];
++        uint16_t dest_id;
++    }; 
++};
++
++struct hvm_hw_ia64_viosapic {
++    uint64_t    irr;
++    uint64_t    isr;
++    uint32_t    ioregsel;
++    uint32_t    pad;
++    uint64_t    lowest_vcpu_id;
++    uint64_t    base_address;
++    union viosapic_rte  redirtbl[VIOSAPIC_NUM_PINS];
++};
++DECLARE_HVM_SAVE_TYPE(VIOSAPIC, 6, struct hvm_hw_ia64_viosapic);
++  
++/*
++ * vacpi
++ * PM timer
++ */
++struct vacpi_regs {
++    union {
++        struct {
++            uint32_t pm1a_sts:16;/* PM1a_EVT_BLK.PM1a_STS: status register */
++            uint32_t pm1a_en:16; /* PM1a_EVT_BLK.PM1a_EN: enable register */
++        };
++        uint32_t evt_blk;
++    };
++    uint32_t tmr_val;   /* PM_TMR_BLK.TMR_VAL: 32bit free-running counter */
++};
 +
-+        __asm__ __volatile__ (
-+#undef BLOCK
-+#define BLOCK(i) \
-+		PF1(i)					\
-+				PF1(i+2)		\
-+		LD(i,0)					\
-+			LD(i+1,1)			\
-+				LD(i+2,2)		\
-+					LD(i+3,3)	\
-+		PF2(i)					\
-+				PF2(i+2)		\
-+		XO1(i,0)				\
-+			XO1(i+1,1)			\
-+				XO1(i+2,2)		\
-+					XO1(i+3,3)	\
-+		PF3(i)					\
-+				PF3(i+2)		\
-+		PF0(i+4)				\
-+				PF0(i+6)		\
-+		XO2(i,0)				\
-+			XO2(i+1,1)			\
-+				XO2(i+2,2)		\
-+					XO2(i+3,3)	\
-+		XO3(i,0)				\
-+			XO3(i+1,1)			\
-+				XO3(i+2,2)		\
-+					XO3(i+3,3)	\
-+		ST(i,0)					\
-+			ST(i+1,1)			\
-+				ST(i+2,2)		\
-+					ST(i+3,3)	\
++struct hvm_hw_ia64_vacpi {
++    struct vacpi_regs   regs;
++};
++DECLARE_HVM_SAVE_TYPE(VACPI, 7, struct hvm_hw_ia64_vacpi);
++/* update last_gtime and setup timer of struct vacpi */
++
++/*
++ * opt_feature: identity mappings of regions 4, 5 and 7.
++ * As of c/s 16396:d2935f9c217f of xen-ia64-devel.hg, the opt_feature
++ * hypercall supports only the region 4, 5 and 7 identity mappings, so
++ * struct hvm_hw_ia64_identity_mappings likewise covers only those.
++ * This separate structure exists so that the save format does not have
++ * to track changes to the xen/ia64 internal structure, struct opt_feature.
++ *
++ * If opt_feature is extended in the future, a new structure will be added.
++ */
++struct hvm_hw_ia64_identity_mapping {
++    uint64_t on;        /* on/off */
++    uint64_t pgprot;    /* The page protection bit mask of the pte. */
++    uint64_t key;       /* A protection key. */
++};
++
++struct hvm_hw_ia64_identity_mappings {
++    struct hvm_hw_ia64_identity_mapping im_reg4;/* Region 4 identity mapping */
++    struct hvm_hw_ia64_identity_mapping im_reg5;/* Region 5 identity mapping */
++    struct hvm_hw_ia64_identity_mapping im_reg7;/* Region 7 identity mapping */
++};
++DECLARE_HVM_SAVE_TYPE(OPT_FEATURE_IDENTITY_MAPPINGS, 8, struct hvm_hw_ia64_identity_mappings);
++
++/* 
++ * Largest type-code in use
++ */
++#define HVM_SAVE_CODE_MAX       8
++
++#endif /* __XEN_PUBLIC_HVM_SAVE_IA64_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
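
[Editor's note: a restore path would start by validating the hvm_save_header
defined above. The check below is an illustration only; the struct and
constants are mirrored locally, with an _ia64 suffix added here so they do
not clash with the x86 names that appear later in this patch.]

#include <stdint.h>
#include <stdio.h>

#define HVM_FILE_MAGIC_IA64   0x343641492f6e6558UL /* "Xen/IA64" */
#define HVM_FILE_VERSION_IA64 0x0000000000000001UL

struct hvm_save_header_ia64 {
    uint64_t magic;
    uint64_t version;
    uint64_t changeset;
    uint64_t cpuid[5];
};

/* Reject an image whose header does not match this save format. */
static int check_header(const struct hvm_save_header_ia64 *h)
{
    if (h->magic != HVM_FILE_MAGIC_IA64 ||
        h->version != HVM_FILE_VERSION_IA64)
        return -1;
    return 0;
}

int main(void)
{
    struct hvm_save_header_ia64 h = {
        .magic = HVM_FILE_MAGIC_IA64, .version = HVM_FILE_VERSION_IA64,
    };
    printf("header %s\n", check_header(&h) ? "rejected" : "accepted");
    return 0;
}
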
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/arch-ia64/sioemu.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/arch-ia64/sioemu.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,89 @@
++/******************************************************************************
++ * sioemu.h
++ *
++ * Copyright (c) 2008 Tristan Gingold <tgingold at free.fr>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ *
++ */
 +
++#ifndef __XEN_PUBLIC_IA64_SIOEMU_H__
++#define __XEN_PUBLIC_IA64_SIOEMU_H__
 +
-+		PF0(0)
-+				PF0(2)
++/* SIOEMU specific hypercalls.
++   The numbers are the minor part of FW_HYPERCALL_SIOEMU.  */
 +
-+	" .align 32			;\n"
-+        " 1:                            ;\n"
++/* Defines the callback entry point.  r8=ip, r9=data.
++   Must be called per-vcpu.  */
++#define SIOEMU_HYPERCALL_SET_CALLBACK 0x01
 +
-+		BLOCK(0)
-+		BLOCK(4)
-+		BLOCK(8)
-+		BLOCK(12)
++/* Finish sioemu fw initialization and start firmware.  r8=ip.  */
++#define SIOEMU_HYPERCALL_START_FW 0x02
 +
-+        "       addq %[inc], %[p1]           ;\n"
-+        "       addq %[inc], %[p2]           ;\n"
-+        "       addq %[inc], %[p3]           ;\n"
-+        "       addq %[inc], %[p4]           ;\n"
-+	"	decl %[cnt] ; jnz 1b"
-+	: [cnt] "+c" (lines),
-+	  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
-+	: [inc] "r" (256UL)
-+        : "memory" );
++/* Add IO pages in physmap.  */
++#define SIOEMU_HYPERCALL_ADD_IO_PHYSMAP 0x03
 +
-+	XMMS_RESTORE;
-+}
++/* Get wallclock time.  */
++#define SIOEMU_HYPERCALL_GET_TIME 0x04
 +
-+static void
-+xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
-+	  unsigned long *p3, unsigned long *p4, unsigned long *p5)
-+{
-+        unsigned int lines = bytes >> 8;
-+	xmm_store_t xmm_save[4];
-+	unsigned long cr0;
++/* Flush cache.  */
++#define SIOEMU_HYPERCALL_FLUSH_CACHE 0x07
 +
-+	XMMS_SAVE;
++/* Get freq base.  */
++#define SIOEMU_HYPERCALL_FREQ_BASE 0x08
 +
-+        __asm__ __volatile__ (
-+#undef BLOCK
-+#define BLOCK(i) \
-+		PF1(i)					\
-+				PF1(i+2)		\
-+		LD(i,0)					\
-+			LD(i+1,1)			\
-+				LD(i+2,2)		\
-+					LD(i+3,3)	\
-+		PF2(i)					\
-+				PF2(i+2)		\
-+		XO1(i,0)				\
-+			XO1(i+1,1)			\
-+				XO1(i+2,2)		\
-+					XO1(i+3,3)	\
-+		PF3(i)					\
-+				PF3(i+2)		\
-+		XO2(i,0)				\
-+			XO2(i+1,1)			\
-+				XO2(i+2,2)		\
-+					XO2(i+3,3)	\
-+		PF4(i)					\
-+				PF4(i+2)		\
-+		PF0(i+4)				\
-+				PF0(i+6)		\
-+		XO3(i,0)				\
-+			XO3(i+1,1)			\
-+				XO3(i+2,2)		\
-+					XO3(i+3,3)	\
-+		XO4(i,0)				\
-+			XO4(i+1,1)			\
-+				XO4(i+2,2)		\
-+					XO4(i+3,3)	\
-+		ST(i,0)					\
-+			ST(i+1,1)			\
-+				ST(i+2,2)		\
-+					ST(i+3,3)	\
++/* Return from callback.  */
++#define SIOEMU_HYPERCALL_CALLBACK_RETURN 0x09
 +
++/* Deliver an interrupt.  */
++#define SIOEMU_HYPERCALL_DELIVER_INT 0x0a
 +
-+		PF0(0)
-+				PF0(2)
++/* SIOEMU callback reason.  */
 +
-+	" .align 32			;\n"
-+        " 1:                            ;\n"
++/* An event (from event channel) has to be delivered.  */
++#define SIOEMU_CB_EVENT       0x00
 +
-+		BLOCK(0)
-+		BLOCK(4)
-+		BLOCK(8)
-+		BLOCK(12)
++/* Emulate an IO access.  */
++#define SIOEMU_CB_IO_EMULATE  0x01
 +
-+        "       addq %[inc], %[p1]           ;\n"
-+        "       addq %[inc], %[p2]           ;\n"
-+        "       addq %[inc], %[p3]           ;\n"
-+        "       addq %[inc], %[p4]           ;\n"
-+        "       addq %[inc], %[p5]           ;\n"
-+	"	decl %[cnt] ; jnz 1b"
-+	: [cnt] "+c" (lines),
-+  	  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4), 
-+	  [p5] "+r" (p5)
-+	: [inc] "r" (256UL)
-+	: "memory");
++/* An IPI is sent to a dead vcpu.  */
++#define SIOEMU_CB_WAKEUP_VCPU 0x02
 +
-+	XMMS_RESTORE;
-+}
++/* A SAL hypercall is executed.  */
++#define SIOEMU_CB_SAL_ASSIST  0x03
 +
-+static struct xor_block_template xor_block_sse = {
-+        .name = "generic_sse",
-+        .do_2 = xor_sse_2,
-+        .do_3 = xor_sse_3,
-+        .do_4 = xor_sse_4,
-+        .do_5 = xor_sse_5,
++#ifndef __ASSEMBLY__
++struct sioemu_callback_info {
++    /* Saved registers.  */
++    unsigned long ip;
++    unsigned long psr;
++    unsigned long ifs;
++    unsigned long nats;
++    unsigned long r8;
++    unsigned long r9;
++    unsigned long r10;
++    unsigned long r11;
++
++    /* Callback parameters.  */
++    unsigned long cause;
++    unsigned long arg0;
++    unsigned long arg1;
++    unsigned long arg2;
++    unsigned long arg3;
++    unsigned long _pad2[2];
++    unsigned long r2;
 +};
-+
-+#undef XOR_TRY_TEMPLATES
-+#define XOR_TRY_TEMPLATES				\
-+	do {						\
-+		xor_speed(&xor_block_sse);	\
-+	} while (0)
-+
-+/* We force the use of the SSE xor block because it can write around L2.
-+   We may also be able to load into the L1 only depending on how the cpu
-+   deals with a load to a line that is being prefetched.  */
-+#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/irq_vectors.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/irq_vectors.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/irq_vectors.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/irq_vectors.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,123 @@
++#endif /* __ASSEMBLY__ */
++#endif /* __XEN_PUBLIC_IA64_SIOEMU_H__ */
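
[Editor's note: a firmware-side callback handler would dispatch on the
`cause` field of struct sioemu_callback_info using the reason codes above.
Sketch only: the handler bodies are placeholders; just the codes come from
the header.]

#include <stdio.h>

/* Mirrored from sioemu.h above. */
#define SIOEMU_CB_EVENT       0x00
#define SIOEMU_CB_IO_EMULATE  0x01
#define SIOEMU_CB_WAKEUP_VCPU 0x02
#define SIOEMU_CB_SAL_ASSIST  0x03

static void dispatch(unsigned long cause)
{
    switch (cause) {
    case SIOEMU_CB_EVENT:       puts("deliver event-channel event"); break;
    case SIOEMU_CB_IO_EMULATE:  puts("emulate IO access");           break;
    case SIOEMU_CB_WAKEUP_VCPU: puts("wake up vcpu");                break;
    case SIOEMU_CB_SAL_ASSIST:  puts("assist SAL hypercall");        break;
    default:                    puts("unknown cause");               break;
    }
}

int main(void) { dispatch(SIOEMU_CB_IO_EMULATE); return 0; }
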
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/arch-powerpc.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/arch-powerpc.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,120 @@
 +/*
-+ * This file should contain #defines for all of the interrupt vector
-+ * numbers used by this architecture.
-+ *
-+ * In addition, there are some standard defines:
-+ *
-+ *	FIRST_EXTERNAL_VECTOR:
-+ *		The first free place for external interrupts
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
 + *
-+ *	SYSCALL_VECTOR:
-+ *		The IRQ vector a syscall makes the user to kernel transition
-+ *		under.
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
 + *
-+ *	TIMER_IRQ:
-+ *		The IRQ number the timer interrupt comes in at.
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
 + *
-+ *	NR_IRQS:
-+ *		The total number of interrupt vectors (including all the
-+ *		architecture specific interrupts) needed.
++ * Copyright (C) IBM Corp. 2005, 2006
 + *
-+ */			
-+#ifndef _ASM_IRQ_VECTORS_H
-+#define _ASM_IRQ_VECTORS_H
-+
-+/*
-+ * IDT vectors usable for external interrupt sources start
-+ * at 0x20:
++ * Authors: Hollis Blanchard <hollisb at us.ibm.com>
 + */
-+#define FIRST_EXTERNAL_VECTOR	0x20
 +
-+#define SYSCALL_VECTOR		0x80
++#include "xen.h"
++
++#ifndef __XEN_PUBLIC_ARCH_PPC_64_H__
++#define __XEN_PUBLIC_ARCH_PPC_64_H__
++
++#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
++    typedef struct { \
++        int __pad[(sizeof (long long) - sizeof (void *)) / sizeof (int)]; \
++        type *p; \
++    } __attribute__((__aligned__(8))) __guest_handle_ ## name
++
++#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
++    ___DEFINE_XEN_GUEST_HANDLE(name, type);   \
++    ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
++#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
++#define XEN_GUEST_HANDLE(name)        __guest_handle_ ## name
++#define set_xen_guest_handle(hnd, val) \
++    do { \
++        if (sizeof ((hnd).__pad)) \
++            (hnd).__pad[0] = 0; \
++        (hnd).p = val; \
++    } while (0)
++
++#ifdef __XEN_TOOLS__
++#define get_xen_guest_handle(val, hnd)  do { val = (hnd).p; } while (0)
++#endif
++
++#ifndef __ASSEMBLY__
++typedef unsigned long long xen_pfn_t;
++#define PRI_xen_pfn "llx"
++#endif
 +
 +/*
-+ * Vectors 0x20-0x2f are used for ISA interrupts.
++ * Pointers and other address fields inside interface structures are padded to
++ * 64 bits. This means that field alignments aren't different between 32- and
++ * 64-bit architectures. 
 + */
++/* NB. Multi-level macro ensures __LINE__ is expanded before concatenation. */
++#define __MEMORY_PADDING(_X)
++#define _MEMORY_PADDING(_X)  __MEMORY_PADDING(_X)
++#define MEMORY_PADDING       _MEMORY_PADDING(__LINE__)
 +
-+#if 0
-+/*
-+ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
++/* And the trap vector is... */
++#define TRAP_INSTR "li 0,-1; sc" /* XXX just "sc"? */
++
++#ifndef __ASSEMBLY__
++
++#define XENCOMM_INLINE_FLAG (1UL << 63)
++
++typedef uint64_t xen_ulong_t;
++
++/* User-accessible registers: most of these need to be saved/restored
++ * for every nested Xen invocation. */
++struct cpu_user_regs
++{
++    uint64_t gprs[32];
++    uint64_t lr;
++    uint64_t ctr;
++    uint64_t srr0;
++    uint64_t srr1;
++    uint64_t pc;
++    uint64_t msr;
++    uint64_t fpscr;             /* XXX Is this necessary? */
++    uint64_t xer;
++    uint64_t hid4;              /* debug only */
++    uint64_t dar;               /* debug only */
++    uint32_t dsisr;             /* debug only */
++    uint32_t cr;
++    uint32_t __pad;             /* good spot for another 32bit reg */
++    uint32_t entry_vector;
++};
++typedef struct cpu_user_regs cpu_user_regs_t;
++
++typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ /* XXX timebase */
++
++/* ONLY used to communicate with dom0! See also struct exec_domain. */
++struct vcpu_guest_context {
++    cpu_user_regs_t user_regs;         /* User-level CPU registers     */
++    uint64_t sdr1;                     /* Pagetable base               */
++    /* XXX etc */
++};
++typedef struct vcpu_guest_context vcpu_guest_context_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
++
++struct arch_shared_info {
++    uint64_t boot_timebase;
++};
++
++struct arch_vcpu_info {
++};
++
++/* Support for multi-processor guests. */
++#define MAX_VIRT_CPUS 32
++#endif
++
++#endif
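
[Editor's note: the guest-handle macros above pad every handle to 64 bits so
the tool-side and hypervisor-side layouts agree on 32- and 64-bit builds.
The standalone reduction below shows the idea for one element type; GCC is
assumed, since a zero-length __pad array on 64-bit is a GNU extension,
exactly as in the header itself.]

#include <stdio.h>
#include <string.h>

typedef struct {
    int __pad[(sizeof(long long) - sizeof(void *)) / sizeof(int)];
    int *p;
} __attribute__((__aligned__(8))) guest_handle_int;

/* Mirrors set_xen_guest_handle(): zero the padding (if any) before
 * storing the pointer so the unused high bits are deterministic. */
#define set_handle(hnd, val) \
    do { \
        if (sizeof((hnd).__pad)) \
            (hnd).__pad[0] = 0; \
        (hnd).p = (val); \
    } while (0)

int main(void)
{
    int x = 42;
    guest_handle_int h;

    memset(&h, 0xff, sizeof(h));   /* garbage in the pad bytes... */
    set_handle(h, &x);             /* ...cleared when the handle is set */
    printf("sizeof(handle) = %zu, *p = %d\n", sizeof(h), *h.p);
    return 0;
}
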
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/arch-x86/cpuid.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/arch-x86/cpuid.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,68 @@
++/******************************************************************************
++ * arch-x86/cpuid.h
++ * 
++ * CPUID interface to Xen.
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
 + *
-+ *  some of the following vectors are 'rare', they are merged
-+ *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
-+ *  TLB, reschedule and local APIC vectors are performance-critical.
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
 + *
-+ *  Vectors 0xf0-0xfa are free (reserved for future Linux use).
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ * 
++ * Copyright (c) 2007 Citrix Systems, Inc.
++ * 
++ * Authors:
++ *    Keir Fraser <keir.fraser at citrix.com>
 + */
-+#define INVALIDATE_TLB_VECTOR	0xfd
-+#define RESCHEDULE_VECTOR	0xfc
-+#define CALL_FUNCTION_VECTOR	0xfb
 +
-+#define THERMAL_APIC_VECTOR	0xf0
++#ifndef __XEN_PUBLIC_ARCH_X86_CPUID_H__
++#define __XEN_PUBLIC_ARCH_X86_CPUID_H__
++
++/* Xen identification leaves start at 0x40000000. */
++#define XEN_CPUID_FIRST_LEAF 0x40000000
++#define XEN_CPUID_LEAF(i)    (XEN_CPUID_FIRST_LEAF + (i))
++
 +/*
-+ * Local APIC timer IRQ vector is on a different priority level,
-+ * to work around the 'lost local interrupt if more than 2 IRQ
-+ * sources per level' errata.
++ * Leaf 1 (0x40000000)
++ * EAX: Largest Xen-information leaf. All leaves up to and including @EAX
++ *      are supported by the Xen host.
++ * EBX-EDX: "XenVMMXenVMM" signature, allowing positive identification
++ *      of a Xen host.
 + */
-+#define LOCAL_TIMER_VECTOR	0xef
-+#endif
-+
-+#define SPURIOUS_APIC_VECTOR	0xff
-+#define ERROR_APIC_VECTOR	0xfe
++#define XEN_CPUID_SIGNATURE_EBX 0x566e6558 /* "XenV" */
++#define XEN_CPUID_SIGNATURE_ECX 0x65584d4d /* "MMXe" */
++#define XEN_CPUID_SIGNATURE_EDX 0x4d4d566e /* "nVMM" */
 +
 +/*
-+ * First APIC vector available to drivers: (vectors 0x30-0xee)
-+ * we start at 0x31 to spread out vectors evenly between priority
-+ * levels. (0x80 is the syscall vector)
++ * Leaf 2 (0x40000001)
++ * EAX[31:16]: Xen major version.
++ * EAX[15: 0]: Xen minor version.
++ * EBX-EDX: Reserved (currently all zeroes).
 + */
-+#define FIRST_DEVICE_VECTOR	0x31
-+#define FIRST_SYSTEM_VECTOR	0xef
 +
 +/*
-+ * 16 8259A IRQ's, 208 potential APIC interrupt sources.
-+ * Right now the APIC is mostly only used for SMP.
-+ * 256 vectors is an architectural limit. (we can have
-+ * more than 256 devices theoretically, but they will
-+ * have to use shared interrupts)
-+ * Since vectors 0x00-0x1f are used/reserved for the CPU,
-+ * the usable vector space is 0x20-0xff (224 vectors)
++ * Leaf 3 (0x40000002)
++ * EAX: Number of hypercall transfer pages. This register is always guaranteed
++ *      to specify one hypercall page.
++ * EBX: Base address of Xen-specific MSRs.
++ * ECX: Features 1. Unused bits are set to zero.
++ * EDX: Features 2. Unused bits are set to zero.
 + */
 +
-+#define RESCHEDULE_VECTOR	0
-+#define CALL_FUNCTION_VECTOR	1
-+#define NR_IPIS			2
++/* Does the host support MMU_PT_UPDATE_PRESERVE_AD for this guest? */
++#define _XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD 0
++#define XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD  (1u<<0)
++
++#endif /* __XEN_PUBLIC_ARCH_X86_CPUID_H__ */
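
[Editor's note: a minimal sketch of probing the leaves documented above.
Assumptions: x86-64 build with GCC-style inline asm; outside a Xen guest the
leaf may simply not exist, which the signature check catches. Only the leaf
number and signature values come from this header.]

#include <stdint.h>
#include <stdio.h>

#define XEN_CPUID_FIRST_LEAF    0x40000000
#define XEN_CPUID_SIGNATURE_EBX 0x566e6558 /* "XenV" */
#define XEN_CPUID_SIGNATURE_ECX 0x65584d4d /* "MMXe" */
#define XEN_CPUID_SIGNATURE_EDX 0x4d4d566e /* "nVMM" */

static void cpuid(uint32_t leaf, uint32_t *a, uint32_t *b,
                  uint32_t *c, uint32_t *d)
{
    __asm__ __volatile__("cpuid"
                         : "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
                         : "0" (leaf), "2" (0));
}

int main(void)
{
    uint32_t eax, ebx, ecx, edx;

    cpuid(XEN_CPUID_FIRST_LEAF, &eax, &ebx, &ecx, &edx);
    if (ebx == XEN_CPUID_SIGNATURE_EBX &&
        ecx == XEN_CPUID_SIGNATURE_ECX &&
        edx == XEN_CPUID_SIGNATURE_EDX) {
        /* Leaf 2 reports the Xen version in EAX (major.minor). */
        cpuid(XEN_CPUID_FIRST_LEAF + 1, &eax, &ebx, &ecx, &edx);
        printf("Xen %u.%u detected\n", eax >> 16, eax & 0xffff);
    } else {
        puts("Xen identification leaves not present");
    }
    return 0;
}
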
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/arch-x86/hvm/save.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/arch-x86/hvm/save.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,429 @@
++/* 
++ * Structure definitions for HVM state that is held by Xen and must
++ * be saved along with the domain's memory and device-model state.
++ * 
++ * Copyright (c) 2007 XenSource Ltd.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_PUBLIC_HVM_SAVE_X86_H__
++#define __XEN_PUBLIC_HVM_SAVE_X86_H__
++
++/* 
++ * Save/restore header: general info about the save file. 
++ */
++
++#define HVM_FILE_MAGIC   0x54381286
++#define HVM_FILE_VERSION 0x00000001
++
++struct hvm_save_header {
++    uint32_t magic;             /* Must be HVM_FILE_MAGIC */
++    uint32_t version;           /* File format version */
++    uint64_t changeset;         /* Version of Xen that saved this file */
++    uint32_t cpuid;             /* CPUID[0x01][%eax] on the saving machine */
++    uint32_t pad0;
++};
++
++DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header);
++
 +
 +/*
-+ * The maximum number of vectors supported by i386 processors
-+ * is limited to 256. For processors other than i386, NR_VECTORS
-+ * should be changed accordingly.
++ * Processor
 + */
-+#define NR_VECTORS 256
 +
-+#define FPU_IRQ			13
++struct hvm_hw_cpu {
++    uint8_t  fpu_regs[512];
++
++    uint64_t rax;
++    uint64_t rbx;
++    uint64_t rcx;
++    uint64_t rdx;
++    uint64_t rbp;
++    uint64_t rsi;
++    uint64_t rdi;
++    uint64_t rsp;
++    uint64_t r8;
++    uint64_t r9;
++    uint64_t r10;
++    uint64_t r11;
++    uint64_t r12;
++    uint64_t r13;
++    uint64_t r14;
++    uint64_t r15;
++
++    uint64_t rip;
++    uint64_t rflags;
++
++    uint64_t cr0;
++    uint64_t cr2;
++    uint64_t cr3;
++    uint64_t cr4;
++
++    uint64_t dr0;
++    uint64_t dr1;
++    uint64_t dr2;
++    uint64_t dr3;
++    uint64_t dr6;
++    uint64_t dr7;    
++
++    uint32_t cs_sel;
++    uint32_t ds_sel;
++    uint32_t es_sel;
++    uint32_t fs_sel;
++    uint32_t gs_sel;
++    uint32_t ss_sel;
++    uint32_t tr_sel;
++    uint32_t ldtr_sel;
++
++    uint32_t cs_limit;
++    uint32_t ds_limit;
++    uint32_t es_limit;
++    uint32_t fs_limit;
++    uint32_t gs_limit;
++    uint32_t ss_limit;
++    uint32_t tr_limit;
++    uint32_t ldtr_limit;
++    uint32_t idtr_limit;
++    uint32_t gdtr_limit;
++
++    uint64_t cs_base;
++    uint64_t ds_base;
++    uint64_t es_base;
++    uint64_t fs_base;
++    uint64_t gs_base;
++    uint64_t ss_base;
++    uint64_t tr_base;
++    uint64_t ldtr_base;
++    uint64_t idtr_base;
++    uint64_t gdtr_base;
++
++    uint32_t cs_arbytes;
++    uint32_t ds_arbytes;
++    uint32_t es_arbytes;
++    uint32_t fs_arbytes;
++    uint32_t gs_arbytes;
++    uint32_t ss_arbytes;
++    uint32_t tr_arbytes;
++    uint32_t ldtr_arbytes;
++
++    uint32_t sysenter_cs;
++    uint32_t padding0;
++
++    uint64_t sysenter_esp;
++    uint64_t sysenter_eip;
++
++    /* msr for em64t */
++    uint64_t shadow_gs;
++
++    /* msr content saved/restored. */
++    uint64_t msr_flags;
++    uint64_t msr_lstar;
++    uint64_t msr_star;
++    uint64_t msr_cstar;
++    uint64_t msr_syscall_mask;
++    uint64_t msr_efer;
++
++    /* guest's idea of what rdtsc() would return */
++    uint64_t tsc;
++
++    /* pending event, if any */
++    union {
++        uint32_t pending_event;
++        struct {
++            uint8_t  pending_vector:8;
++            uint8_t  pending_type:3;
++            uint8_t  pending_error_valid:1;
++            uint32_t pending_reserved:19;
++            uint8_t  pending_valid:1;
++        };
++    };
++    /* error code for pending event */
++    uint32_t error_code;
++};
++
++DECLARE_HVM_SAVE_TYPE(CPU, 2, struct hvm_hw_cpu);
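
[Editor's note: the pending_event union at the end of struct hvm_hw_cpu
packs an injectable event into 32 bits. A reduced copy (GCC little-endian
bitfield layout assumed, as in the header) shows how the fields line up; the
values written below are purely illustrative.]

#include <stdint.h>
#include <stdio.h>

union pending_event {
    uint32_t raw;
    struct {
        uint32_t vector:8;
        uint32_t type:3;
        uint32_t error_valid:1;
        uint32_t reserved:19;
        uint32_t valid:1;
    };
};

int main(void)
{
    union pending_event ev = { .raw = 0 };

    ev.vector      = 14;  /* #PF vector, for illustration */
    ev.type        = 3;   /* illustrative type code */
    ev.error_valid = 1;
    ev.valid       = 1;
    printf("raw = %#x (vector %u)\n", ev.raw, ev.vector);
    return 0;
}
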
 +
-+#define	FIRST_VM86_IRQ		3
-+#define LAST_VM86_IRQ		15
-+#define invalid_vm86_irq(irq)	((irq) < 3 || (irq) > 15)
 +
 +/*
-+ * The flat IRQ space is divided into two regions:
-+ *  1. A one-to-one mapping of real physical IRQs. This space is only used
-+ *     if we have physical device-access privilege. This region is at the 
-+ *     start of the IRQ space so that existing device drivers do not need
-+ *     to be modified to translate physical IRQ numbers into our IRQ space.
-+ *  3. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
-+ *     are bound using the provided bind/unbind functions.
++ * PIC
 + */
 +
-+#define PIRQ_BASE		0
-+#define NR_PIRQS		256
++struct hvm_hw_vpic {
++    /* IR line bitmasks. */
++    uint8_t irr;
++    uint8_t imr;
++    uint8_t isr;
++
++    /* Line IRx maps to IRQ irq_base+x */
++    uint8_t irq_base;
++
++    /*
++     * Where are we in ICW2-4 initialisation (0 means no init in progress)?
++     * Bits 0-1 (=x): Next write at A=1 sets ICW(x+1).
++     * Bit 2: ICW1.IC4  (1 == ICW4 included in init sequence)
++     * Bit 3: ICW1.SNGL (0 == ICW3 included in init sequence)
++     */
++    uint8_t init_state:4;
++
++    /* IR line with highest priority. */
++    uint8_t priority_add:4;
++
++    /* Reads from A=0 obtain ISR or IRR? */
++    uint8_t readsel_isr:1;
++
++    /* Reads perform a polling read? */
++    uint8_t poll:1;
++
++    /* Automatically clear IRQs from the ISR during INTA? */
++    uint8_t auto_eoi:1;
 +
-+#define DYNIRQ_BASE		(PIRQ_BASE + NR_PIRQS)
-+#define NR_DYNIRQS		256
++    /* Automatically rotate IRQ priorities during AEOI? */
++    uint8_t rotate_on_auto_eoi:1;
 +
-+#define NR_IRQS			(NR_PIRQS + NR_DYNIRQS)
-+#define NR_IRQ_VECTORS		NR_IRQS
++    /* Exclude slave inputs when considering in-service IRQs? */
++    uint8_t special_fully_nested_mode:1;
 +
-+#define pirq_to_irq(_x)		((_x) + PIRQ_BASE)
-+#define irq_to_pirq(_x)		((_x) - PIRQ_BASE)
++    /* Special mask mode excludes masked IRs from AEOI and priority checks. */
++    uint8_t special_mask_mode:1;
 +
-+#define dynirq_to_irq(_x)	((_x) + DYNIRQ_BASE)
-+#define irq_to_dynirq(_x)	((_x) - DYNIRQ_BASE)
++    /* Is this a master PIC or slave PIC? (NB. This is not programmable.) */
++    uint8_t is_master:1;
 +
-+#endif /* _ASM_IRQ_VECTORS_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/mach_time.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/mach_time.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/mach_time.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/mach_time.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,111 @@
-+/*
-+ *  include/asm-i386/mach-default/mach_time.h
-+ *
-+ *  Machine specific set RTC function for generic.
-+ *  Split out from time.c by Osamu Tomita <tomita at cinet.co.jp>
-+ */
-+#ifndef _MACH_TIME_H
-+#define _MACH_TIME_H
++    /* Edge/trigger selection. */
++    uint8_t elcr;
 +
-+#include <asm-i386/mc146818rtc.h>
++    /* Virtual INT output. */
++    uint8_t int_output;
++};
++
++DECLARE_HVM_SAVE_TYPE(PIC, 3, struct hvm_hw_vpic);
 +
-+/* for check timing call set_rtc_mmss() 500ms     */
-+/* used in arch/i386/time.c::do_timer_interrupt() */
-+#define USEC_AFTER	500000
-+#define USEC_BEFORE	500000
 +
 +/*
-+ * In order to set the CMOS clock precisely, set_rtc_mmss has to be
-+ * called 500 ms after the second nowtime has started, because when
-+ * nowtime is written into the registers of the CMOS clock, it will
-+ * jump to the next second precisely 500 ms later. Check the Motorola
-+ * MC146818A or Dallas DS12887 data sheet for details.
-+ *
-+ * BUG: This routine does not handle hour overflow properly; it just
-+ *      sets the minutes. Usually you'll only notice that after reboot!
++ * IO-APIC
 + */
-+static inline int mach_set_rtc_mmss(unsigned long nowtime)
-+{
-+	int retval = 0;
-+	int real_seconds, real_minutes, cmos_minutes;
-+	unsigned char save_control, save_freq_select;
-+
-+	save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
-+	CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
-+
-+	save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
-+	CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
 +
-+	cmos_minutes = CMOS_READ(RTC_MINUTES);
-+	if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-+		BCD_TO_BIN(cmos_minutes);
++#ifdef __ia64__
++#define VIOAPIC_IS_IOSAPIC 1
++#define VIOAPIC_NUM_PINS  24
++#else
++#define VIOAPIC_NUM_PINS  48 /* 16 ISA IRQs, 32 non-legacy PCI IRQS. */
++#endif
 +
-+	/*
-+	 * since we're only adjusting minutes and seconds,
-+	 * don't interfere with hour overflow. This avoids
-+	 * messing with unknown time zones but requires your
-+	 * RTC not to be off by more than 15 minutes
-+	 */
-+	real_seconds = nowtime % 60;
-+	real_minutes = nowtime / 60;
-+	if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
-+		real_minutes += 30;		/* correct for half hour time zone */
-+	real_minutes %= 60;
++struct hvm_hw_vioapic {
++    uint64_t base_address;
++    uint32_t ioregsel;
++    uint32_t id;
++    union vioapic_redir_entry
++    {
++        uint64_t bits;
++        struct {
++            uint8_t vector;
++            uint8_t delivery_mode:3;
++            uint8_t dest_mode:1;
++            uint8_t delivery_status:1;
++            uint8_t polarity:1;
++            uint8_t remote_irr:1;
++            uint8_t trig_mode:1;
++            uint8_t mask:1;
++            uint8_t reserve:7;
++#if !VIOAPIC_IS_IOSAPIC
++            uint8_t reserved[4];
++            uint8_t dest_id;
++#else
++            uint8_t reserved[3];
++            uint16_t dest_id;
++#endif
++        } fields;
++    } redirtbl[VIOAPIC_NUM_PINS];
++};
 +
-+	if (abs(real_minutes - cmos_minutes) < 30) {
-+		if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
-+			BIN_TO_BCD(real_seconds);
-+			BIN_TO_BCD(real_minutes);
-+		}
-+		CMOS_WRITE(real_seconds,RTC_SECONDS);
-+		CMOS_WRITE(real_minutes,RTC_MINUTES);
-+	} else {
-+		printk(KERN_WARNING
-+		       "set_rtc_mmss: can't update from %d to %d\n",
-+		       cmos_minutes, real_minutes);
-+		retval = -1;
-+	}
++DECLARE_HVM_SAVE_TYPE(IOAPIC, 4, struct hvm_hw_vioapic);
 +
-+	/* The following flags have to be released exactly in this order,
-+	 * otherwise the DS12887 (popular MC146818A clone with integrated
-+	 * battery and quartz) will not reset the oscillator and will not
-+	 * update precisely 500 ms later. You won't find this mentioned in
-+	 * the Dallas Semiconductor data sheets, but who believes data
-+	 * sheets anyway ...                           -- Markus Kuhn
-+	 */
-+	CMOS_WRITE(save_control, RTC_CONTROL);
-+	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
 +
-+	return retval;
-+}
++/*
++ * LAPIC
++ */
 +
-+static inline unsigned long mach_get_cmos_time(void)
-+{
-+	unsigned int year, mon, day, hour, min, sec;
++struct hvm_hw_lapic {
++    uint64_t             apic_base_msr;
++    uint32_t             disabled; /* VLAPIC_xx_DISABLED */
++    uint32_t             timer_divisor;
++};
 +
-+	do {
-+		sec = CMOS_READ(RTC_SECONDS);
-+		min = CMOS_READ(RTC_MINUTES);
-+		hour = CMOS_READ(RTC_HOURS);
-+		day = CMOS_READ(RTC_DAY_OF_MONTH);
-+		mon = CMOS_READ(RTC_MONTH);
-+		year = CMOS_READ(RTC_YEAR);
-+	} while (sec != CMOS_READ(RTC_SECONDS));
++DECLARE_HVM_SAVE_TYPE(LAPIC, 5, struct hvm_hw_lapic);
 +
-+	if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
-+		BCD_TO_BIN(sec);
-+		BCD_TO_BIN(min);
-+		BCD_TO_BIN(hour);
-+		BCD_TO_BIN(day);
-+		BCD_TO_BIN(mon);
-+		BCD_TO_BIN(year);
-+	}
++struct hvm_hw_lapic_regs {
++    uint8_t data[1024];
++};
 +
-+	year += 1900;
-+	if (year < 1970)
-+		year += 100;
++DECLARE_HVM_SAVE_TYPE(LAPIC_REGS, 6, struct hvm_hw_lapic_regs);
 +
-+	return mktime(year, mon, day, hour, min, sec);
-+}
 +
-+#endif /* !_MACH_TIME_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/mach_timer.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/mach_timer.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/mach_timer.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/mach_timer.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,50 @@
 +/*
-+ *  include/asm-i386/mach-default/mach_timer.h
-+ *
-+ *  Machine specific calibrate_tsc() for generic.
-+ *  Split out from timer_tsc.c by Osamu Tomita <tomita at cinet.co.jp>
-+ */
-+/* ------ Calibrate the TSC ------- 
-+ * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset().
-+ * Too much 64-bit arithmetic here to do this cleanly in C, and for
-+ * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2)
-+ * output busy loop as low as possible. We avoid reading the CTC registers
-+ * directly because of the awkward 8-bit access mechanism of the 82C54
-+ * device.
++ * IRQs
 + */
-+#ifndef _MACH_TIMER_H
-+#define _MACH_TIMER_H
-+
-+#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */
-+#define CALIBRATE_LATCH	\
-+	((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000)
 +
-+static inline void mach_prepare_counter(void)
-+{
-+       /* Set the Gate high, disable speaker */
-+	outb((inb(0x61) & ~0x02) | 0x01, 0x61);
++struct hvm_hw_pci_irqs {
++    /*
++     * Virtual interrupt wires for a single PCI bus.
++     * Indexed by: device*4 + INTx#.
++     */
++    union {
++        DECLARE_BITMAP(i, 32*4);
++        uint64_t pad[2];
++    };
++};
 +
-+	/*
-+	 * Now let's take care of CTC channel 2
-+	 *
-+	 * Set the Gate high, program CTC channel 2 for mode 0,
-+	 * (interrupt on terminal count mode), binary count,
-+	 * load 5 * LATCH count, (LSB and MSB) to begin countdown.
-+	 *
-+	 * Some devices need a delay here.
-+	 */
-+	outb(0xb0, 0x43);			/* binary, mode 0, LSB/MSB, Ch 2 */
-+	outb_p(CALIBRATE_LATCH & 0xff, 0x42);	/* LSB of count */
-+	outb_p(CALIBRATE_LATCH >> 8, 0x42);       /* MSB of count */
-+}
++DECLARE_HVM_SAVE_TYPE(PCI_IRQ, 7, struct hvm_hw_pci_irqs);
 +
-+static inline void mach_countup(unsigned long *count_p)
-+{
-+	unsigned long count = 0;
-+	do {
-+		count++;
-+	} while ((inb_p(0x61) & 0x20) == 0);
-+	*count_p = count;
-+}
++struct hvm_hw_isa_irqs {
++    /*
++     * Virtual interrupt wires for ISA devices.
++     * Indexed by ISA IRQ (assumes no ISA-device IRQ sharing).
++     */
++    union {
++        DECLARE_BITMAP(i, 16);
++        uint64_t pad[1];
++    };
++};
 +
-+#endif /* !_MACH_TIMER_H */
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/setup_arch_post.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/setup_arch_post.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/setup_arch_post.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/setup_arch_post.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,63 @@
-+/**
-+ * machine_specific_* - Hooks for machine specific setup.
-+ *
-+ * Description:
-+ *	This is included late in kernel/setup.c so that it can make
-+ *	use of all of the static functions.
-+ **/
++DECLARE_HVM_SAVE_TYPE(ISA_IRQ, 8, struct hvm_hw_isa_irqs);
 +
-+#include <xen/interface/callback.h>
++struct hvm_hw_pci_link {
++    /*
++     * PCI-ISA interrupt router.
++     * Each PCI <device:INTx#> is 'wire-ORed' into one of four links using
++     * the traditional 'barber's pole' mapping ((device + INTx#) & 3).
++     * The router provides a programmable mapping from each link to a GSI.
++     */
++    uint8_t route[4];
++    uint8_t pad0[4];
++};
 +
-+extern void hypervisor_callback(void);
-+extern void failsafe_callback(void);
-+extern void nmi(void);
++DECLARE_HVM_SAVE_TYPE(PCI_LINK, 9, struct hvm_hw_pci_link);
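
[Editor's note: the 'barber's pole' swizzle described in the comment above
is a one-liner; this sketch just makes it concrete. The helper name is
editorial.]

#include <stdio.h>

/* PCI <device:INTx#> pairs are wire-ORed onto one of four links. */
static unsigned int pci_link(unsigned int device, unsigned int intx)
{
    return (device + intx) & 3;
}

int main(void)
{
    /* Device 3: INTA..INTD land on links 3, 0, 1, 2. */
    for (unsigned int intx = 0; intx < 4; intx++)
        printf("dev 3 INT%c -> link %u\n", 'A' + intx, pci_link(3, intx));
    return 0;
}
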
 +
-+static void __init machine_specific_arch_setup(void)
-+{
-+	int ret;
-+	static struct callback_register __initdata event = {
-+		.type = CALLBACKTYPE_event,
-+		.address = (unsigned long) hypervisor_callback,
-+	};
-+	static struct callback_register __initdata failsafe = {
-+		.type = CALLBACKTYPE_failsafe,
-+		.address = (unsigned long)failsafe_callback,
-+	};
-+	static struct callback_register __initdata syscall = {
-+		.type = CALLBACKTYPE_syscall,
-+		.address = (unsigned long)system_call,
-+	};
-+#ifdef CONFIG_X86_LOCAL_APIC
-+	static struct callback_register __initdata nmi_cb = {
-+		.type = CALLBACKTYPE_nmi,
-+		.address = (unsigned long)nmi,
-+	};
-+#endif
++/* 
++ *  PIT
++ */
 +
-+	ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
-+	if (ret == 0)
-+		ret = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
-+	if (ret == 0)
-+		ret = HYPERVISOR_callback_op(CALLBACKOP_register, &syscall);
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+	if (ret == -ENOSYS)
-+		ret = HYPERVISOR_set_callbacks(
-+			event.address,
-+			failsafe.address,
-+			syscall.address);
-+#endif
-+	BUG_ON(ret);
++struct hvm_hw_pit {
++    struct hvm_hw_pit_channel {
++        uint32_t count; /* can be 65536 */
++        uint16_t latched_count;
++        uint8_t count_latched;
++        uint8_t status_latched;
++        uint8_t status;
++        uint8_t read_state;
++        uint8_t write_state;
++        uint8_t write_latch;
++        uint8_t rw_mode;
++        uint8_t mode;
++        uint8_t bcd; /* not supported */
++        uint8_t gate; /* timer start */
++    } channels[3];  /* 3 x 16 bytes */
++    uint32_t speaker_data_on;
++    uint32_t pad0;
++};
 +
-+#ifdef CONFIG_X86_LOCAL_APIC
-+	ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb);
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+	if (ret == -ENOSYS) {
-+		static struct xennmi_callback __initdata cb = {
-+			.handler_address = (unsigned long)nmi
-+		};
++DECLARE_HVM_SAVE_TYPE(PIT, 10, struct hvm_hw_pit);
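
[Editor's note: the PIT channel's `count` field is 32-bit because, as its
comment says, it "can be 65536": on the 8254, a programmed divisor of 0
selects the maximum period. A one-line normalisation captures that rule;
the helper name is editorial.]

#include <stdint.h>
#include <stdio.h>

/* Map the 16-bit programmed divisor to the effective count. */
static uint32_t pit_effective_count(uint16_t programmed)
{
    return programmed ? programmed : 65536u;
}

int main(void)
{
    printf("0 -> %u, 100 -> %u\n",
           pit_effective_count(0), pit_effective_count(100));
    return 0;
}
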
 +
-+		HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
-+	}
-+#endif
-+#endif
-+}
-diff -Nurp pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/setup_arch_pre.h tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/setup_arch_pre.h
---- pristine-linux-2.6.18.2/include/asm-x86_64/mach-xen/setup_arch_pre.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/asm-x86_64/mach-xen/setup_arch_pre.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,5 @@
-+/* Hook to call BIOS initialisation function */
 +
-+#define ARCH_SETUP machine_specific_arch_setup();
++/* 
++ * RTC
++ */ 
 +
-+static void __init machine_specific_arch_setup(void);
-diff -Nurp pristine-linux-2.6.18.2/include/linux/aio.h tmp-linux-2.6-xen.patch/include/linux/aio.h
---- pristine-linux-2.6.18.2/include/linux/aio.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/linux/aio.h	2007-10-14 01:51:15.000000000 +0200
-@@ -191,6 +191,11 @@ struct kioctx {
- 	struct aio_ring_info	ring_info;
- 
- 	struct work_struct	wq;
-+#ifdef CONFIG_EPOLL
-+	// poll integration
-+	wait_queue_head_t       poll_wait;
-+	struct file		*file;
-+#endif
- };
- 
- /* prototypes */
-diff -Nurp pristine-linux-2.6.18.2/include/linux/crash_dump.h tmp-linux-2.6-xen.patch/include/linux/crash_dump.h
---- pristine-linux-2.6.18.2/include/linux/crash_dump.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/linux/crash_dump.h	2007-10-14 01:51:15.000000000 +0200
-@@ -14,5 +14,13 @@ extern ssize_t copy_oldmem_page(unsigned
- extern const struct file_operations proc_vmcore_operations;
- extern struct proc_dir_entry *proc_vmcore;
- 
-+/* Architecture code defines this if there are other possible ELF
-+ * machine types, e.g. on bi-arch capable hardware. */
-+#ifndef vmcore_elf_check_arch_cross
-+#define vmcore_elf_check_arch_cross(x) 0
-+#endif
++#define RTC_CMOS_SIZE 14
++struct hvm_hw_rtc {
++    /* CMOS bytes */
++    uint8_t cmos_data[RTC_CMOS_SIZE];
++    /* Index register for 2-part operations */
++    uint8_t cmos_index;
++    uint8_t pad0;
++};
 +
-+#define vmcore_elf_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x))
++DECLARE_HVM_SAVE_TYPE(RTC, 11, struct hvm_hw_rtc);
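
[Editor's note: a sketch of the "2-part operation" the cmos_index field
supports: the guest first writes an index, then reads or writes the data
byte it selects. The accessors and the sample value are editorial; only the
struct layout mirrors the header.]

#include <stdint.h>
#include <stdio.h>

#define RTC_CMOS_SIZE 14

struct rtc_state {
    uint8_t cmos_data[RTC_CMOS_SIZE];
    uint8_t cmos_index;
};

static void rtc_write_index(struct rtc_state *s, uint8_t idx)
{
    s->cmos_index = idx % RTC_CMOS_SIZE;   /* part 1: select a register */
}

static uint8_t rtc_read_data(const struct rtc_state *s)
{
    return s->cmos_data[s->cmos_index];    /* part 2: access it */
}

int main(void)
{
    struct rtc_state s = { .cmos_data = { [4] = 0x12 } }; /* illustrative */

    rtc_write_index(&s, 4);
    printf("CMOS[4] = %#x\n", rtc_read_data(&s));
    return 0;
}
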
 +
- #endif /* CONFIG_CRASH_DUMP */
- #endif /* LINUX_CRASHDUMP_H */
-diff -Nurp pristine-linux-2.6.18.2/include/linux/elfnote.h tmp-linux-2.6-xen.patch/include/linux/elfnote.h
---- pristine-linux-2.6.18.2/include/linux/elfnote.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/linux/elfnote.h	2007-10-14 01:51:15.000000000 +0200
-@@ -0,0 +1,104 @@
-+#ifndef _LINUX_ELFNOTE_H
-+#define _LINUX_ELFNOTE_H
-+/*
-+ * Helper macros to generate ELF Note structures, which are put into a
-+ * PT_NOTE segment of the final vmlinux image.  These are useful for
-+ * including name-value pairs of metadata into the kernel binary (or
-+ * modules?) for use by external programs.
-+ *
-+ * Each note has three parts: a name, a type and a desc.  The name is
-+ * intended to distinguish the note's originator, so it would be a
-+ * company, project, subsystem, etc; it must be in a suitable form for
-+ * use in a section name.  The type is an integer which is used to tag
-+ * the data, and is considered to be within the "name" namespace (so
-+ * "FooCo"'s type 42 is distinct from "BarProj"'s type 42).  The
-+ * "desc" field is the actual data.  There are no constraints on the
-+ * desc field's contents, though typically they're fairly small.
-+ *
-+ * All notes from a given NAME are put into a section named
-+ * .note.NAME.  When the kernel image is finally linked, all the notes
-+ * are packed into a single .notes section, which is mapped into the
-+ * PT_NOTE segment.  Because notes for a given name are grouped into
-+ * the same section, they'll all be adjacent the output file.
-+ *
-+ * This file defines macros for both C and assembler use.  Their
-+ * syntax is slightly different, but they're semantically similar.
-+ *
-+ * See the ELF specification for more detail about ELF notes.
-+ */
 +
-+#ifdef __ASSEMBLER__
-+/*
-+ * Generate a structure with the same shape as Elf{32,64}_Nhdr (which
-+ * turn out to be the same size and shape), followed by the name and
-+ * desc data with appropriate padding.  The 'desctype' argument is the
-+ * assembler pseudo op defining the type of the data e.g. .asciz while
-+ * 'descdata' is the data itself e.g.  "hello, world".
-+ *
-+ * e.g. ELFNOTE(XYZCo, 42, .asciz, "forty-two")
-+ *      ELFNOTE(XYZCo, 12, .long, 0xdeadbeef)
-+ */
-+#ifdef __STDC__
-+#define ELFNOTE(name, type, desctype, descdata...) \
-+.pushsection .note.name			;	\
-+  .align 4				;	\
-+  .long 2f - 1f		/* namesz */	;	\
-+  .long 4f - 3f		/* descsz */	;	\
-+  .long type				;	\
-+1:.asciz #name				;	\
-+2:.align 4				;	\
-+3:desctype descdata			;	\
-+4:.align 4				;	\
-+.popsection
-+#else /* !__STDC__, i.e. -traditional */
-+#define ELFNOTE(name, type, desctype, descdata) \
-+.pushsection .note.name			;	\
-+  .align 4				;	\
-+  .long 2f - 1f		/* namesz */	;	\
-+  .long 4f - 3f		/* descsz */	;	\
-+  .long type				;	\
-+1:.asciz "name"				;	\
-+2:.align 4				;	\
-+3:desctype descdata			;	\
-+4:.align 4				;	\
-+.popsection
-+#endif /* __STDC__ */
-+#else	/* !__ASSEMBLER__ */
-+#include <linux/elf.h>
 +/*
-+ * Use an anonymous structure which matches the shape of
-+ * Elf{32,64}_Nhdr, but includes the name and desc data.  The size and
-+ * type of name and desc depend on the macro arguments.  "name" must
-+ * be a literal string, and "desc" must be passed by value.  You may
-+ * only define one note per line, since __LINE__ is used to generate
-+ * unique symbols.
++ * HPET
 + */
-+#define _ELFNOTE_PASTE(a,b)	a##b
-+#define _ELFNOTE(size, name, unique, type, desc)			\
-+	static const struct {						\
-+		struct elf##size##_note _nhdr;				\
-+		unsigned char _name[sizeof(name)]			\
-+		__attribute__((aligned(sizeof(Elf##size##_Word))));	\
-+		typeof(desc) _desc					\
-+			     __attribute__((aligned(sizeof(Elf##size##_Word)))); \
-+	} _ELFNOTE_PASTE(_note_, unique)				\
-+		__attribute_used__					\
-+		__attribute__((section(".note." name),			\
-+			       aligned(sizeof(Elf##size##_Word)),	\
-+			       unused)) = {				\
-+		{							\
-+			sizeof(name),					\
-+			sizeof(desc),					\
-+			type,						\
-+		},							\
-+		name,							\
-+		desc							\
-+	}
-+#define ELFNOTE(size, name, type, desc)		\
-+	_ELFNOTE(size, name, __LINE__, type, desc)
-+
-+#define ELFNOTE32(name, type, desc) ELFNOTE(32, name, type, desc)
-+#define ELFNOTE64(name, type, desc) ELFNOTE(64, name, type, desc)
-+#endif	/* __ASSEMBLER__ */
 +
-+#endif /* _LINUX_ELFNOTE_H */
-diff -Nurp pristine-linux-2.6.18.2/include/linux/eventpoll.h tmp-linux-2.6-xen.patch/include/linux/eventpoll.h
---- pristine-linux-2.6.18.2/include/linux/eventpoll.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/linux/eventpoll.h	2007-10-14 01:51:15.000000000 +0200
-@@ -90,6 +90,12 @@ static inline void eventpoll_release(str
- 	eventpoll_release_file(file);
- }
- 
-+/*
-+ * called by aio code to create fd that can poll the  aio event queueQ
-+ */
-+struct eventpoll;
-+int ep_getfd(int *efd, struct inode **einode, struct file **efile,
-+             struct eventpoll *ep, const struct file_operations *fops);
- #else
- 
- static inline void eventpoll_init_file(struct file *file) {}
-diff -Nurp pristine-linux-2.6.18.2/include/linux/gfp.h tmp-linux-2.6-xen.patch/include/linux/gfp.h
---- pristine-linux-2.6.18.2/include/linux/gfp.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/linux/gfp.h	2007-07-30 16:35:13.000000000 +0200
-@@ -99,7 +99,11 @@ static inline int gfp_zone(gfp_t gfp)
-  */
- 
- #ifndef HAVE_ARCH_FREE_PAGE
--static inline void arch_free_page(struct page *page, int order) { }
-+/*
-+ * If arch_free_page returns non-zero then the generic free_page code can
-+ * immediately bail: the arch-specific function has done all the work.
-+ */
-+static inline int arch_free_page(struct page *page, int order) { return 0; }
- #endif
- 
- extern struct page *
-diff -Nurp pristine-linux-2.6.18.2/include/linux/highmem.h tmp-linux-2.6-xen.patch/include/linux/highmem.h
---- pristine-linux-2.6.18.2/include/linux/highmem.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/linux/highmem.h	2007-07-30 16:35:13.000000000 +0200
-@@ -24,10 +24,16 @@ static inline void flush_kernel_dcache_p
- 
- /* declarations for linux/mm/highmem.c */
- unsigned int nr_free_highpages(void);
-+#ifdef CONFIG_XEN
-+void kmap_flush_unused(void);
-+#endif
- 
- #else /* CONFIG_HIGHMEM */
- 
- static inline unsigned int nr_free_highpages(void) { return 0; }
-+#ifdef CONFIG_XEN
-+static inline void kmap_flush_unused(void) { }
-+#endif
- 
- static inline void *kmap(struct page *page)
- {
-diff -Nurp pristine-linux-2.6.18.2/include/linux/interrupt.h tmp-linux-2.6-xen.patch/include/linux/interrupt.h
---- pristine-linux-2.6.18.2/include/linux/interrupt.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/linux/interrupt.h	2007-07-30 16:35:13.000000000 +0200
-@@ -166,6 +166,12 @@ static inline int disable_irq_wake(unsig
- 
- #endif /* CONFIG_GENERIC_HARDIRQS */
- 
-+#ifdef CONFIG_HAVE_IRQ_IGNORE_UNHANDLED
-+int irq_ignore_unhandled(unsigned int irq);
-+#else
-+#define irq_ignore_unhandled(irq) 0
-+#endif
++#define HPET_TIMER_NUM     3    /* 3 timers supported now */
++struct hvm_hw_hpet {
++    /* Memory-mapped, software visible registers */
++    uint64_t capability;        /* capabilities */
++    uint64_t res0;              /* reserved */
++    uint64_t config;            /* configuration */
++    uint64_t res1;              /* reserved */
++    uint64_t isr;               /* interrupt status reg */
++    uint64_t res2[25];          /* reserved */
++    uint64_t mc64;              /* main counter */
++    uint64_t res3;              /* reserved */
++    struct {                    /* timers */
++        uint64_t config;        /* configuration/cap */
++        uint64_t cmp;           /* comparator */
++        uint64_t fsb;           /* FSB route, not supported now */
++        uint64_t res4;          /* reserved */
++    } timers[HPET_TIMER_NUM];
++    uint64_t res5[4*(24-HPET_TIMER_NUM)];  /* reserved, up to 0x3ff */
 +
- #ifndef __ARCH_SET_SOFTIRQ_PENDING
- #define set_softirq_pending(x) (local_softirq_pending() = (x))
- #define or_softirq_pending(x)  (local_softirq_pending() |= (x))
-diff -Nurp pristine-linux-2.6.18.2/include/linux/kexec.h tmp-linux-2.6-xen.patch/include/linux/kexec.h
---- pristine-linux-2.6.18.2/include/linux/kexec.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/linux/kexec.h	2007-07-30 16:35:13.000000000 +0200
-@@ -31,6 +31,13 @@
- #error KEXEC_ARCH not defined
- #endif
- 
-+#ifndef KEXEC_ARCH_HAS_PAGE_MACROS
-+#define kexec_page_to_pfn(page)  page_to_pfn(page)
-+#define kexec_pfn_to_page(pfn)   pfn_to_page(pfn)
-+#define kexec_virt_to_phys(addr) virt_to_phys(addr)
-+#define kexec_phys_to_virt(addr) phys_to_virt(addr)
-+#endif
++    /* Hidden register state */
++    uint64_t period[HPET_TIMER_NUM]; /* Last value written to comparator */
++};
 +
- /*
-  * This structure is used to hold the arguments that are used when loading
-  * kernel binaries.
-@@ -91,6 +98,12 @@ struct kimage {
- extern NORET_TYPE void machine_kexec(struct kimage *image) ATTRIB_NORET;
- extern int machine_kexec_prepare(struct kimage *image);
- extern void machine_kexec_cleanup(struct kimage *image);
-+#ifdef CONFIG_XEN
-+extern int xen_machine_kexec_load(struct kimage *image);
-+extern void xen_machine_kexec_unload(struct kimage *image);
-+extern void xen_machine_kexec_setup_resources(void);
-+extern void xen_machine_kexec_register_resources(struct resource *res);
-+#endif
- extern asmlinkage long sys_kexec_load(unsigned long entry,
- 					unsigned long nr_segments,
- 					struct kexec_segment __user *segments,
-diff -Nurp pristine-linux-2.6.18.2/include/linux/mm.h tmp-linux-2.6-xen.patch/include/linux/mm.h
---- pristine-linux-2.6.18.2/include/linux/mm.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/linux/mm.h	2007-07-30 16:35:13.000000000 +0200
-@@ -164,6 +164,9 @@ extern unsigned int kobjsize(const void 
- #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
- #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
- #define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */
-+#ifdef CONFIG_XEN
-+#define VM_FOREIGN	0x04000000	/* Has pages belonging to another VM */
-+#endif
- 
- #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
- #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
-@@ -202,6 +205,10 @@ struct vm_operations_struct {
- 	/* notification that a previously read-only page is about to become
- 	 * writable, if an error is returned it will cause a SIGBUS */
- 	int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
-+	/* Area-specific function for clearing the PTE at @ptep. Returns the
-+	 * original value of @ptep. */
-+	pte_t (*zap_pte)(struct vm_area_struct *vma, 
-+			 unsigned long addr, pte_t *ptep, int is_fullmm);
- #ifdef CONFIG_NUMA
- 	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
- 	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
-@@ -1027,6 +1034,13 @@ struct page *follow_page(struct vm_area_
- #define FOLL_GET	0x04	/* do get_page on page */
- #define FOLL_ANON	0x08	/* give ZERO_PAGE if no pgtable */
- 
-+#ifdef CONFIG_XEN
-+typedef int (*pte_fn_t)(pte_t *pte, struct page *pmd_page, unsigned long addr,
-+			void *data);
-+extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
-+			       unsigned long size, pte_fn_t fn, void *data);
-+#endif
++DECLARE_HVM_SAVE_TYPE(HPET, 12, struct hvm_hw_hpet);
 +
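The hvm_hw_hpet block above mirrors the HPET register map, so its field
offsets are meaningful: the main counter sits at 0xF0, the timer block at
0x100, and the visible registers end at 0x3ff just before the hidden
comparator state. A minimal sketch of a layout check, assuming a C11
compiler (illustrative only, not part of the patch):

    #include <stddef.h>

    /* Illustrative layout checks against the HPET register map. */
    _Static_assert(offsetof(struct hvm_hw_hpet, mc64)   == 0x0F0, "main counter");
    _Static_assert(offsetof(struct hvm_hw_hpet, timers) == 0x100, "timer block");
    _Static_assert(offsetof(struct hvm_hw_hpet, period) == 0x400, "hidden state");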
- #ifdef CONFIG_PROC_FS
- void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
- #else
-diff -Nurp pristine-linux-2.6.18.2/include/linux/oprofile.h tmp-linux-2.6-xen.patch/include/linux/oprofile.h
---- pristine-linux-2.6.18.2/include/linux/oprofile.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/linux/oprofile.h	2007-10-14 01:51:15.000000000 +0200
-@@ -16,6 +16,8 @@
- #include <linux/types.h>
- #include <linux/spinlock.h>
- #include <asm/atomic.h>
 +
-+#include <xen/interface/xenoprof.h>
-  
- struct super_block;
- struct dentry;
-@@ -27,6 +29,11 @@ struct oprofile_operations {
- 	/* create any necessary configuration files in the oprofile fs.
- 	 * Optional. */
- 	int (*create_files)(struct super_block * sb, struct dentry * root);
-+	/* setup active domains with Xen */
-+	int (*set_active)(int *active_domains, unsigned int adomains);
-+        /* setup passive domains with Xen */
-+        int (*set_passive)(int *passive_domains, unsigned int pdomains);
-+	
- 	/* Do any necessary interrupt setup. Optional. */
- 	int (*setup)(void);
- 	/* Do any necessary interrupt shutdown. Optional. */
-@@ -78,6 +85,8 @@ void oprofile_add_pc(unsigned long pc, i
- /* add a backtrace entry, to be called from the ->backtrace callback */
- void oprofile_add_trace(unsigned long eip);
- 
-+/* add a domain switch entry */
-+int oprofile_add_domain_switch(int32_t domain_id);
- 
- /**
-  * Create a file of the given name as a child of the given root, with
-diff -Nurp pristine-linux-2.6.18.2/include/linux/page-flags.h tmp-linux-2.6-xen.patch/include/linux/page-flags.h
---- pristine-linux-2.6.18.2/include/linux/page-flags.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/linux/page-flags.h	2007-07-30 16:35:13.000000000 +0200
-@@ -98,6 +98,8 @@
- #define PG_uncached		31	/* Page has been mapped as uncached */
- #endif
- 
-+#define PG_foreign		20	/* Page is owned by foreign allocator. */
++/*
++ * PM timer
++ */
 +
- /*
-  * Manipulation of page state flags
-  */
-@@ -247,6 +249,18 @@
- #define SetPageUncached(page)	set_bit(PG_uncached, &(page)->flags)
- #define ClearPageUncached(page)	clear_bit(PG_uncached, &(page)->flags)
- 
-+#define PageForeign(page)	test_bit(PG_foreign, &(page)->flags)
-+#define SetPageForeign(page, dtor) do {		\
-+	set_bit(PG_foreign, &(page)->flags);	\
-+	(page)->index = (long)(dtor);		\
-+} while (0)
-+#define ClearPageForeign(page) do {		\
-+	clear_bit(PG_foreign, &(page)->flags);	\
-+	(page)->index = 0;			\
-+} while (0)
-+#define PageForeignDestructor(page)		\
-+	( (void (*) (struct page *)) (page)->index )(page)
++struct hvm_hw_pmtimer {
++    uint32_t tmr_val;   /* PM_TMR_BLK.TMR_VAL: 32bit free-running counter */
++    uint16_t pm1a_sts;  /* PM1a_EVT_BLK.PM1a_STS: status register */
++    uint16_t pm1a_en;   /* PM1a_EVT_BLK.PM1a_EN: enable register */
++};
 +
- struct page;	/* forward declaration */
- 
- int test_clear_page_dirty(struct page *page);
-diff -Nurp pristine-linux-2.6.18.2/include/linux/sched.h tmp-linux-2.6-xen.patch/include/linux/sched.h
---- pristine-linux-2.6.18.2/include/linux/sched.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/linux/sched.h	2007-10-14 01:51:15.000000000 +0200
-@@ -211,10 +211,15 @@ extern void update_process_times(int use
- extern void scheduler_tick(void);
- 
- #ifdef CONFIG_DETECT_SOFTLOCKUP
-+extern unsigned long softlockup_get_next_event(void);
- extern void softlockup_tick(void);
- extern void spawn_softlockup_task(void);
- extern void touch_softlockup_watchdog(void);
- #else
-+static inline unsigned long softlockup_get_next_event(void)
-+{
-+	return MAX_JIFFY_OFFSET;
-+}
- static inline void softlockup_tick(void)
- {
- }
-diff -Nurp pristine-linux-2.6.18.2/include/linux/skbuff.h tmp-linux-2.6-xen.patch/include/linux/skbuff.h
---- pristine-linux-2.6.18.2/include/linux/skbuff.h	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/linux/skbuff.h	2007-07-30 16:35:13.000000000 +0200
-@@ -203,6 +203,8 @@ enum {
-  *	@local_df: allow local fragmentation
-  *	@cloned: Head may be cloned (check refcnt to be sure)
-  *	@nohdr: Payload reference only, must not modify header
-+ *	@proto_data_valid: Protocol data validated since arriving at localhost
-+ *	@proto_csum_blank: Protocol csum must be added before leaving localhost
-  *	@pkt_type: Packet class
-  *	@fclone: skbuff clone status
-  *	@ip_summed: Driver fed us an IP checksum
-@@ -282,7 +284,13 @@ struct sk_buff {
- 				nfctinfo:3;
- 	__u8			pkt_type:3,
- 				fclone:2,
-+#ifndef CONFIG_XEN
- 				ipvs_property:1;
-+#else
-+				ipvs_property:1,
-+				proto_data_valid:1,
-+				proto_csum_blank:1;
-+#endif
- 	__be16			protocol;
- 
- 	void			(*destructor)(struct sk_buff *skb);
-diff -Nurp pristine-linux-2.6.18.2/include/xen/balloon.h tmp-linux-2.6-xen.patch/include/xen/balloon.h
---- pristine-linux-2.6.18.2/include/xen/balloon.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/balloon.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,57 @@
++DECLARE_HVM_SAVE_TYPE(PMTIMER, 13, struct hvm_hw_pmtimer);
++
++/*
++ * MTRR MSRs
++ */
++
++struct hvm_hw_mtrr {
++#define MTRR_VCNT 8
++#define NUM_FIXED_MSR 11
++    uint64_t msr_pat_cr;
++    /* MTRR PHYSBASE & PHYSMASK MSR pairs */
++    uint64_t msr_mtrr_var[MTRR_VCNT*2];
++    uint64_t msr_mtrr_fixed[NUM_FIXED_MSR];
++    uint64_t msr_mtrr_cap;
++    uint64_t msr_mtrr_def_type;
++};
++
++DECLARE_HVM_SAVE_TYPE(MTRR, 14, struct hvm_hw_mtrr);
++
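Since msr_mtrr_var holds each variable-range MTRR as an adjacent base/mask
pair, entry i lives in slots 2*i and 2*i+1. A pair of hypothetical
accessors, assuming the struct above is in scope:

    #include <stdint.h>

    /* Hypothetical helpers: variable-range MTRR i is a base/mask pair. */
    static uint64_t var_mtrr_base(const struct hvm_hw_mtrr *m, unsigned int i)
    {
        return m->msr_mtrr_var[2 * i];        /* MTRRphysBase(i) */
    }

    static uint64_t var_mtrr_mask(const struct hvm_hw_mtrr *m, unsigned int i)
    {
        return m->msr_mtrr_var[2 * i + 1];    /* MTRRphysMask(i) */
    }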
++/* 
++ * Largest type-code in use
++ */
++#define HVM_SAVE_CODE_MAX 14
++
++#endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/arch-x86/xen-mca.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/arch-x86/xen-mca.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,279 @@
 +/******************************************************************************
-+ * balloon.h
-+ *
-+ * Xen balloon driver - enables returning/claiming memory to/from Xen.
-+ *
-+ * Copyright (c) 2003, B Dragovic
-+ * Copyright (c) 2003-2004, M Williamson, K Fraser
++ * arch-x86/mca.h
 + * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
++ * Contributed by Advanced Micro Devices, Inc.
++ * Author: Christoph Egger <Christoph.Egger at amd.com>
++ *
++ * Guest OS machine check interface to x86 Xen.
 + * 
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
 + * The above copyright notice and this permission notice shall be included in
 + * all copies or substantial portions of the Software.
-+ * 
++ *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
 + */
 +
-+#ifndef __ASM_BALLOON_H__
-+#define __ASM_BALLOON_H__
++/* Full MCA functionality has the following Usecases from the guest side:
++ *
++ * Must have's:
++ * 1. Dom0 and DomU register machine check trap callback handlers
++ *    (already done via "set_trap_table" hypercall)
++ * 2. Dom0 registers machine check event callback handler
++ *    (doable via EVTCHNOP_bind_virq)
++ * 3. Dom0 and DomU fetches machine check data
++ * 4. Dom0 wants Xen to notify a DomU
++ * 5. Dom0 gets DomU ID from physical address
++ * 6. Dom0 wants Xen to kill DomU (already done for "xm destroy")
++ *
++ * Nice to have's:
++ * 7. Dom0 wants Xen to deactivate a physical CPU
++ *    This is better done as a separate task, physical CPU hotplugging,
++ *    and the hypercall(s) should be sysctls
++ * 8. Page migration proposed from Xen NUMA work, where Dom0 can tell Xen to
++ *    move a DomU (or Dom0 itself) away from a malicious page
++ *    producing correctable errors.
++ * 9. Offlining physical page:
++ *    Xen frees and never re-uses a certain physical page.
++ * 10. Test facility: Allow Dom0 to write values into machine check MSRs
++ *     and tell Xen to trigger a machine check
++ */
++
++#ifndef __XEN_PUBLIC_ARCH_X86_MCA_H__
++#define __XEN_PUBLIC_ARCH_X86_MCA_H__
++
++/* Hypercall */
++#define __HYPERVISOR_mca __HYPERVISOR_arch_0
++
++#define XEN_MCA_INTERFACE_VERSION 0x03000001
++
++/* IN: Dom0 calls hypercall from MC event handler. */
++#define XEN_MC_CORRECTABLE  0x0
++/* IN: Dom0/DomU calls hypercall from MC trap handler. */
++#define XEN_MC_TRAP         0x1
++/* XEN_MC_CORRECTABLE and XEN_MC_TRAP are mutually exclusive. */
++
++/* OUT: All is ok */
++#define XEN_MC_OK           0x0
++/* OUT: Domain could not fetch data. */
++#define XEN_MC_FETCHFAILED  0x1
++/* OUT: There was no machine check data to fetch. */
++#define XEN_MC_NODATA       0x2
++/* OUT: Between the notification time and this hypercall another
++ *  (most likely correctable) error happened. The fetched data
++ *  does not match the original machine check data. */
++#define XEN_MC_NOMATCH      0x4
++
++/* OUT: DomU did not register MC NMI handler. Try something else. */
++#define XEN_MC_CANNOTHANDLE 0x8
++/* OUT: Notifying DomU failed. Retry later or try something else. */
++#define XEN_MC_NOTDELIVERED 0x10
++/* Note, XEN_MC_CANNOTHANDLE and XEN_MC_NOTDELIVERED are mutually exclusive. */
 +
-+/*
-+ * Inform the balloon driver that it should allow some slop for device-driver
-+ * memory activities.
-+ */
-+void balloon_update_driver_allowance(long delta);
 +
-+/* Allocate/free a set of empty pages in low memory (i.e., no RAM mapped). */
-+struct page **alloc_empty_pages_and_pagevec(int nr_pages);
-+void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages);
++#ifndef __ASSEMBLY__
 +
-+void balloon_release_driver_page(struct page *page);
++#define VIRQ_MCA VIRQ_ARCH_0 /* G. (DOM0) Machine Check Architecture */
 +
 +/*
-+ * Prevent the balloon driver from changing the memory reservation during
-+ * a driver critical region.
++ * Machine Check Architecture:
++ * structs are read-only and used to report all kinds of
++ * correctable and uncorrectable errors detected by the HW.
++ * Dom0 and DomU: register a handler to get notified.
++ * Dom0 only: Correctable errors are reported via VIRQ_MCA
++ * Dom0 and DomU: Uncorrectable errors are reported via NMI handlers
 + */
-+extern spinlock_t balloon_lock;
-+#define balloon_lock(__flags)   spin_lock_irqsave(&balloon_lock, __flags)
-+#define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags)
++#define MC_TYPE_GLOBAL          0
++#define MC_TYPE_BANK            1
++#define MC_TYPE_EXTENDED        2
 +
-+#endif /* __ASM_BALLOON_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/blkif.h tmp-linux-2.6-xen.patch/include/xen/blkif.h
---- pristine-linux-2.6.18.2/include/xen/blkif.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/blkif.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,97 @@
-+#ifndef __XEN_BLKIF_H__
-+#define __XEN_BLKIF_H__
++struct mcinfo_common {
++    uint16_t type;      /* structure type */
++    uint16_t size;      /* size of this struct in bytes */
++};
 +
-+#include <xen/interface/io/ring.h>
-+#include <xen/interface/io/blkif.h>
-+#include <xen/interface/io/protocols.h>
 +
-+/* Not a real protocol.  Used to generate ring structs which contain
-+ * the elements common to all protocols only.  This way we get a
-+ * compiler-checkable way to use common struct elements, so we can
-+ * avoid using switch(protocol) in a number of places.  */
-+struct blkif_common_request {
-+	char dummy;
++#define MC_FLAG_CORRECTABLE     (1 << 0)
++#define MC_FLAG_UNCORRECTABLE   (1 << 1)
++
++/* contains global x86 mc information */
++struct mcinfo_global {
++    struct mcinfo_common common;
++
++    /* domain running at the time of the error (most likely the impacted one) */
++    uint16_t mc_domid;
++    uint32_t mc_socketid; /* physical socket of the physical core */
++    uint16_t mc_coreid; /* physical impacted core */
++    uint16_t mc_core_threadid; /* core thread of physical core */
++    uint16_t mc_vcpuid; /* virtual cpu scheduled for mc_domid */
++    uint64_t mc_gstatus; /* global status */
++    uint32_t mc_flags;
 +};
-+struct blkif_common_response {
-+	char dummy;
++
++/* contains bank local x86 mc information */
++struct mcinfo_bank {
++    struct mcinfo_common common;
++
++    uint16_t mc_bank; /* bank nr */
++    uint16_t mc_domid; /* Usecase 5: domain referenced by mc_addr, valid
++                        * on Dom0 only and only if mc_addr is valid.
++                        * Never valid on DomU. */
++    uint64_t mc_status; /* bank status */
++    uint64_t mc_addr;   /* bank address, only valid
++                         * if addr bit is set in mc_status */
++    uint64_t mc_misc;
 +};
 +
-+/* i386 protocol version */
-+#pragma pack(push, 4)
-+struct blkif_x86_32_request {
-+	uint8_t        operation;    /* BLKIF_OP_???                         */
-+	uint8_t        nr_segments;  /* number of segments                   */
-+	blkif_vdev_t   handle;       /* only for read/write requests         */
-+	uint64_t       id;           /* private guest value, echoed in resp  */
-+	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
-+	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++
++struct mcinfo_msr {
++    uint64_t reg;   /* MSR */
++    uint64_t value; /* MSR value */
 +};
-+struct blkif_x86_32_response {
-+	uint64_t        id;              /* copied from request */
-+	uint8_t         operation;       /* copied from request */
-+	int16_t         status;          /* BLKIF_RSP_???       */
++
++/* contains MC information from other
++ * or additional MC MSRs */
++struct mcinfo_extended {
++    struct mcinfo_common common;
++
++    /* You can fill up to five registers.
++     * If you need more, then use this structure
++     * multiple times. */
++
++    uint32_t mc_msrs; /* Number of MSRs with valid values. */
++    struct mcinfo_msr mc_msr[5];
 +};
-+typedef struct blkif_x86_32_request blkif_x86_32_request_t;
-+typedef struct blkif_x86_32_response blkif_x86_32_response_t;
-+#pragma pack(pop)
 +
-+/* x86_64 protocol version */
-+struct blkif_x86_64_request {
-+	uint8_t        operation;    /* BLKIF_OP_???                         */
-+	uint8_t        nr_segments;  /* number of segments                   */
-+	blkif_vdev_t   handle;       /* only for read/write requests         */
-+	uint64_t       __attribute__((__aligned__(8))) id;
-+	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
-+	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++#define MCINFO_HYPERCALLSIZE	1024
++#define MCINFO_MAXSIZE		768
++
++struct mc_info {
++    /* Number of mcinfo_* entries in mi_data */
++    uint32_t mi_nentries;
++
++    uint8_t mi_data[MCINFO_MAXSIZE - sizeof(uint32_t)];
 +};
-+struct blkif_x86_64_response {
-+	uint64_t       __attribute__((__aligned__(8))) id;
-+	uint8_t         operation;       /* copied from request */
-+	int16_t         status;          /* BLKIF_RSP_???       */
++typedef struct mc_info mc_info_t;
++
++
++
++/* 
++ * OSes should use these instead of writing their own lookup function,
++ * each with its own bugs and drawbacks.
++ * We use macros instead of static inline functions to allow guests
++ * to include this header in assembly files (*.S).
++ */
++/* Prototype:
++ *    uint32_t x86_mcinfo_nentries(struct mc_info *mi);
++ */
++#define x86_mcinfo_nentries(_mi)    \
++    (_mi)->mi_nentries
++/* Prototype:
++ *    struct mcinfo_common *x86_mcinfo_first(struct mc_info *mi);
++ */
++#define x86_mcinfo_first(_mi)       \
++    (struct mcinfo_common *)((_mi)->mi_data)
++/* Prototype:
++ *    struct mcinfo_common *x86_mcinfo_next(struct mcinfo_common *mic);
++ */
++#define x86_mcinfo_next(_mic)       \
++    (struct mcinfo_common *)((uint8_t *)(_mic) + (_mic)->size)
++
++/* Prototype:
++ *    void x86_mcinfo_lookup(void *ret, struct mc_info *mi, uint16_t type);
++ */
++#define x86_mcinfo_lookup(_ret, _mi, _type)    \
++    do {                                                        \
++        uint32_t found, i;                                      \
++        struct mcinfo_common *_mic;                             \
++                                                                \
++        found = 0;                                              \
++        (_ret) = NULL;                                          \
++        if (_mi == NULL) break;                                 \
++        _mic = x86_mcinfo_first(_mi);                           \
++        for (i = 0; i < x86_mcinfo_nentries(_mi); i++) {        \
++            if (_mic->type == (_type)) {                        \
++                found = 1;                                      \
++                break;                                          \
++            }                                                   \
++            _mic = x86_mcinfo_next(_mic);                       \
++        }                                                       \
++        (_ret) = found ? _mic : NULL;                           \
++    } while (0)
++
++
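As a usage sketch, a guest can locate the bank record in a fetched mc_info
with the accessors above; find_bank() here is a hypothetical caller, and
the cast follows the embedded mcinfo_common convention:

    /* Hypothetical caller: walk a fetched mc_info for the bank entry. */
    static struct mcinfo_bank *find_bank(struct mc_info *mi)
    {
        struct mcinfo_common *mic = NULL;

        x86_mcinfo_lookup(mic, mi, MC_TYPE_BANK);   /* NULL when absent */
        return (struct mcinfo_bank *)mic;
    }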
++/* Usecase 1
++ * Register machine check trap callback handler
++ *    (already done via "set_trap_table" hypercall)
++ */
++
++/* Usecase 2
++ * Dom0 registers machine check event callback handler
++ * done by EVTCHNOP_bind_virq
++ */
++
++/* Usecase 3
++ * Fetch machine check data from hypervisor.
++ * Note, this hypercall is special, because both Dom0 and DomU must use this.
++ */
++#define XEN_MC_fetch            1
++struct xen_mc_fetch {
++    /* IN/OUT variables. */
++    uint32_t flags;
++
++/* IN: XEN_MC_CORRECTABLE, XEN_MC_TRAP */
++/* OUT: XEN_MC_OK, XEN_MC_FETCHFAILED, XEN_MC_NODATA, XEN_MC_NOMATCH */
++
++    /* OUT variables. */
++    uint32_t fetch_idx;  /* only useful for Dom0 for the notify hypercall */
++    struct mc_info mc_info;
 +};
-+typedef struct blkif_x86_64_request blkif_x86_64_request_t;
-+typedef struct blkif_x86_64_response blkif_x86_64_response_t;
++typedef struct xen_mc_fetch xen_mc_fetch_t;
++DEFINE_XEN_GUEST_HANDLE(xen_mc_fetch_t);
 +
-+DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, struct blkif_common_response);
-+DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, struct blkif_x86_32_response);
-+DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, struct blkif_x86_64_response);
 +
-+union blkif_back_rings {
-+	blkif_back_ring_t        native;
-+	blkif_common_back_ring_t common;
-+	blkif_x86_32_back_ring_t x86_32;
-+	blkif_x86_64_back_ring_t x86_64;
++/* Usecase 4
++ * This tells the hypervisor to notify a DomU about the machine check error
++ */
++#define XEN_MC_notifydomain     2
++struct xen_mc_notifydomain {
++    /* IN variables. */
++    uint16_t mc_domid;    /* The unprivileged domain to notify. */
++    uint16_t mc_vcpuid;   /* The vcpu in mc_domid to notify.
++                           * Usually the echoed value from the fetch hypercall. */
++    uint32_t fetch_idx;   /* Echoed value from the fetch hypercall. */
++
++    /* IN/OUT variables. */
++    uint32_t flags;
++
++/* IN: XEN_MC_CORRECTABLE, XEN_MC_TRAP */
++/* OUT: XEN_MC_OK, XEN_MC_CANNOTHANDLE, XEN_MC_NOTDELIVERED, XEN_MC_NOMATCH */
 +};
-+typedef union blkif_back_rings blkif_back_rings_t;
++typedef struct xen_mc_notifydomain xen_mc_notifydomain_t;
++DEFINE_XEN_GUEST_HANDLE(xen_mc_notifydomain_t);
 +
-+enum blkif_protocol {
-+	BLKIF_PROTOCOL_NATIVE = 1,
-+	BLKIF_PROTOCOL_X86_32 = 2,
-+	BLKIF_PROTOCOL_X86_64 = 3,
++
++struct xen_mc {
++    uint32_t cmd;
++    uint32_t interface_version; /* XEN_MCA_INTERFACE_VERSION */
++    union {
++        struct xen_mc_fetch        mc_fetch;
++        struct xen_mc_notifydomain mc_notifydomain;
++        uint8_t pad[MCINFO_HYPERCALLSIZE];
++    } u;
 +};
++typedef struct xen_mc xen_mc_t;
++DEFINE_XEN_GUEST_HANDLE(xen_mc_t);
 +
-+static void inline blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_request_t *src)
-+{
-+	int i;
-+	dst->operation = src->operation;
-+	dst->nr_segments = src->nr_segments;
-+	dst->handle = src->handle;
-+	dst->id = src->id;
-+	dst->sector_number = src->sector_number;
-+	for (i = 0; i < src->nr_segments; i++)
-+		dst->seg[i] = src->seg[i];
-+}
++#endif /* __ASSEMBLY__ */
 +
-+static void inline blkif_get_x86_64_req(blkif_request_t *dst, blkif_x86_64_request_t *src)
-+{
-+	int i;
-+	dst->operation = src->operation;
-+	dst->nr_segments = src->nr_segments;
-+	dst->handle = src->handle;
-+	dst->id = src->id;
-+	dst->sector_number = src->sector_number;
-+	for (i = 0; i < src->nr_segments; i++)
-+		dst->seg[i] = src->seg[i];
-+}
++#endif /* __XEN_PUBLIC_ARCH_X86_MCA_H__ */
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/arch-x86/xen-x86_32.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/arch-x86/xen-x86_32.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,180 @@
++/******************************************************************************
++ * xen-x86_32.h
++ * 
++ * Guest OS interface to x86 32-bit Xen.
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2004-2007, K A Fraser
++ */
 +
-+#endif /* __XEN_BLKIF_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/cpu_hotplug.h tmp-linux-2.6-xen.patch/include/xen/cpu_hotplug.h
---- pristine-linux-2.6.18.2/include/xen/cpu_hotplug.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/cpu_hotplug.h	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,41 @@
-+#ifndef __XEN_CPU_HOTPLUG_H__
-+#define __XEN_CPU_HOTPLUG_H__
++#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__
++#define __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__
 +
-+#include <linux/kernel.h>
-+#include <linux/cpumask.h>
++/*
++ * Hypercall interface:
++ *  Input:  %ebx, %ecx, %edx, %esi, %edi (arguments 1-5)
++ *  Output: %eax
++ * Access is via hypercall page (set up by guest loader or via a Xen MSR):
++ *  call hypercall_page + hypercall-number * 32
++ * Clobbered: Argument registers (e.g., 2-arg hypercall clobbers %ebx,%ecx)
++ */
 +
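Each stub in the hypercall page is 32 bytes, so the entry point for
hypercall NR is the page base plus NR * 32. A hedged illustration;
hcall_page and the helper name are assumptions, not part of the interface:

    /* Illustrative only: resolve the stub address for hypercall nr. */
    static void *hypercall_entry(void *hcall_page, unsigned int nr)
    {
        return (char *)hcall_page + nr * 32;    /* 32-byte stubs */
    }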
-+#if defined(CONFIG_X86) && defined(CONFIG_SMP)
-+extern cpumask_t cpu_initialized_map;
++#if __XEN_INTERFACE_VERSION__ < 0x00030203
++/*
++ * Legacy hypercall interface:
++ * As above, except the entry sequence to the hypervisor is:
++ *  mov $hypercall-number*32,%eax ; int $0x82
++ */
++#define TRAP_INSTR "int $0x82"
 +#endif
 +
-+#if defined(CONFIG_HOTPLUG_CPU)
++/*
++ * These flat segments are in the Xen-private section of every GDT. Since these
++ * are also present in the initial GDT, many OSes will be able to avoid
++ * installing their own GDT.
++ */
++#define FLAT_RING1_CS 0xe019    /* GDT index 259 */
++#define FLAT_RING1_DS 0xe021    /* GDT index 260 */
++#define FLAT_RING1_SS 0xe021    /* GDT index 260 */
++#define FLAT_RING3_CS 0xe02b    /* GDT index 261 */
++#define FLAT_RING3_DS 0xe033    /* GDT index 262 */
++#define FLAT_RING3_SS 0xe033    /* GDT index 262 */
 +
-+int cpu_up_check(unsigned int cpu);
-+void init_xenbus_allowed_cpumask(void);
-+int smp_suspend(void);
-+void smp_resume(void);
++#define FLAT_KERNEL_CS FLAT_RING1_CS
++#define FLAT_KERNEL_DS FLAT_RING1_DS
++#define FLAT_KERNEL_SS FLAT_RING1_SS
++#define FLAT_USER_CS    FLAT_RING3_CS
++#define FLAT_USER_DS    FLAT_RING3_DS
++#define FLAT_USER_SS    FLAT_RING3_SS
 +
-+void cpu_bringup(void);
++#define __HYPERVISOR_VIRT_START_PAE    0xF5800000
++#define __MACH2PHYS_VIRT_START_PAE     0xF5800000
++#define __MACH2PHYS_VIRT_END_PAE       0xF6800000
++#define HYPERVISOR_VIRT_START_PAE      \
++    mk_unsigned_long(__HYPERVISOR_VIRT_START_PAE)
++#define MACH2PHYS_VIRT_START_PAE       \
++    mk_unsigned_long(__MACH2PHYS_VIRT_START_PAE)
++#define MACH2PHYS_VIRT_END_PAE         \
++    mk_unsigned_long(__MACH2PHYS_VIRT_END_PAE)
 +
-+#else /* !defined(CONFIG_HOTPLUG_CPU) */
++/* Non-PAE bounds are obsolete. */
++#define __HYPERVISOR_VIRT_START_NONPAE 0xFC000000
++#define __MACH2PHYS_VIRT_START_NONPAE  0xFC000000
++#define __MACH2PHYS_VIRT_END_NONPAE    0xFC400000
++#define HYPERVISOR_VIRT_START_NONPAE   \
++    mk_unsigned_long(__HYPERVISOR_VIRT_START_NONPAE)
++#define MACH2PHYS_VIRT_START_NONPAE    \
++    mk_unsigned_long(__MACH2PHYS_VIRT_START_NONPAE)
++#define MACH2PHYS_VIRT_END_NONPAE      \
++    mk_unsigned_long(__MACH2PHYS_VIRT_END_NONPAE)
 +
-+#define cpu_up_check(cpu)		(0)
-+#define init_xenbus_allowed_cpumask()	((void)0)
++#define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_PAE
++#define __MACH2PHYS_VIRT_START  __MACH2PHYS_VIRT_START_PAE
++#define __MACH2PHYS_VIRT_END    __MACH2PHYS_VIRT_END_PAE
 +
-+static inline int smp_suspend(void)
-+{
-+	if (num_online_cpus() > 1) {
-+		printk(KERN_WARNING "Can't suspend SMP guests "
-+		       "without CONFIG_HOTPLUG_CPU\n");
-+		return -EOPNOTSUPP;
-+	}
-+	return 0;
-+}
++#ifndef HYPERVISOR_VIRT_START
++#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
++#endif
 +
-+static inline void smp_resume(void)
-+{
-+}
++#define MACH2PHYS_VIRT_START  mk_unsigned_long(__MACH2PHYS_VIRT_START)
++#define MACH2PHYS_VIRT_END    mk_unsigned_long(__MACH2PHYS_VIRT_END)
++#define MACH2PHYS_NR_ENTRIES  ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2)
++#ifndef machine_to_phys_mapping
++#define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START)
++#endif
 +
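With the table mapped read-only at MACH2PHYS_VIRT_START, machine-to-physical
translation is a bounds-checked array read. A minimal sketch; the
out-of-range return convention is an assumption:

    /* Hypothetical sketch: MFN -> guest PFN via the read-only M2P table. */
    static unsigned long mfn_to_pfn(unsigned long mfn)
    {
        if (mfn >= MACH2PHYS_NR_ENTRIES)
            return ~0UL;                        /* outside the mapped table */
        return machine_to_phys_mapping[mfn];
    }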
-+#endif /* !defined(CONFIG_HOTPLUG_CPU) */
++/* 32-/64-bit invariability for control interfaces (domctl/sysctl). */
++#if defined(__XEN__) || defined(__XEN_TOOLS__)
++#undef ___DEFINE_XEN_GUEST_HANDLE
++#define ___DEFINE_XEN_GUEST_HANDLE(name, type)                  \
++    typedef struct { type *p; }                                 \
++        __guest_handle_ ## name;                                \
++    typedef struct { union { type *p; uint64_aligned_t q; }; }  \
++        __guest_handle_64_ ## name
++#undef set_xen_guest_handle
++#define set_xen_guest_handle(hnd, val)                      \
++    do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0;   \
++         (hnd).p = val;                                     \
++    } while ( 0 )
++#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
++#define __XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name
++#define XEN_GUEST_HANDLE_64(name) __XEN_GUEST_HANDLE_64(name)
++#endif
 +
-+#endif /* __XEN_CPU_HOTPLUG_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/driver_util.h tmp-linux-2.6-xen.patch/include/xen/driver_util.h
---- pristine-linux-2.6.18.2/include/xen/driver_util.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/driver_util.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,14 @@
++#ifndef __ASSEMBLY__
 +
-+#ifndef __ASM_XEN_DRIVER_UTIL_H__
-+#define __ASM_XEN_DRIVER_UTIL_H__
++struct cpu_user_regs {
++    uint32_t ebx;
++    uint32_t ecx;
++    uint32_t edx;
++    uint32_t esi;
++    uint32_t edi;
++    uint32_t ebp;
++    uint32_t eax;
++    uint16_t error_code;    /* private */
++    uint16_t entry_vector;  /* private */
++    uint32_t eip;
++    uint16_t cs;
++    uint8_t  saved_upcall_mask;
++    uint8_t  _pad0;
++    uint32_t eflags;        /* eflags.IF == !saved_upcall_mask */
++    uint32_t esp;
++    uint16_t ss, _pad1;
++    uint16_t es, _pad2;
++    uint16_t ds, _pad3;
++    uint16_t fs, _pad4;
++    uint16_t gs, _pad5;
++};
++typedef struct cpu_user_regs cpu_user_regs_t;
++DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
++
++/*
++ * Page-directory addresses above 4GB do not fit into architectural %cr3.
++ * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
++ * must use the following accessor macros to pack/unpack valid MFNs.
++ */
++#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
++#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
++
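The two macros are inverse 32-bit rotations by 12 bits, so packing
round-trips for any 32-bit MFN. A quick illustrative check, assuming this
header is included:

    #include <assert.h>

    /* Illustrative round trip: rotate left 12, then rotate right 12. */
    static void check_cr3_packing(void)
    {
        unsigned pfn = 0x123456;                /* frame above the 4GB mark */
        assert(xen_cr3_to_pfn(xen_pfn_to_cr3(pfn)) == pfn);
    }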
++struct arch_vcpu_info {
++    unsigned long cr2;
++    unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */
++};
++typedef struct arch_vcpu_info arch_vcpu_info_t;
 +
-+#include <linux/vmalloc.h>
-+#include <linux/device.h>
++struct xen_callback {
++    unsigned long cs;
++    unsigned long eip;
++};
++typedef struct xen_callback xen_callback_t;
 +
-+/* Allocate/destroy a 'vmalloc' VM area. */
-+extern struct vm_struct *alloc_vm_area(unsigned long size);
-+extern void free_vm_area(struct vm_struct *area);
++#endif /* !__ASSEMBLY__ */
 +
-+extern struct class *get_xen_class(void);
++#endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ */
 +
-+#endif /* __ASM_XEN_DRIVER_UTIL_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/evtchn.h tmp-linux-2.6-xen.patch/include/xen/evtchn.h
---- pristine-linux-2.6.18.2/include/xen/evtchn.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/evtchn.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,126 @@
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/arch-x86/xen-x86_64.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/arch-x86/xen-x86_64.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,212 @@
 +/******************************************************************************
-+ * evtchn.h
-+ * 
-+ * Communication via Xen event channels.
-+ * Also definitions for the device that demuxes notifications to userspace.
-+ * 
-+ * Copyright (c) 2004-2005, K A Fraser
++ * xen-x86_64.h
 + * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
++ * Guest OS interface to x86 64-bit Xen.
 + * 
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
 + * The above copyright notice and this permission notice shall be included in
 + * all copies or substantial portions of the Software.
-+ * 
++ *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2004-2006, K A Fraser
 + */
 +
-+#ifndef __ASM_EVTCHN_H__
-+#define __ASM_EVTCHN_H__
-+
-+#include <linux/interrupt.h>
-+#include <asm/hypervisor.h>
-+#include <asm/ptrace.h>
-+#include <asm/synch_bitops.h>
-+#include <xen/interface/event_channel.h>
-+#include <linux/smp.h>
++#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__
++#define __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__
 +
 +/*
-+ * LOW-LEVEL DEFINITIONS
++ * Hypercall interface:
++ *  Input:  %rdi, %rsi, %rdx, %r10, %r8 (arguments 1-5)
++ *  Output: %rax
++ * Access is via hypercall page (set up by guest loader or via a Xen MSR):
++ *  call hypercall_page + hypercall-number * 32
++ * Clobbered: argument registers (e.g., 2-arg hypercall clobbers %rdi,%rsi)
 + */
 +
++#if __XEN_INTERFACE_VERSION__ < 0x00030203
 +/*
-+ * Dynamically bind an event source to an IRQ-like callback handler.
-+ * On some platforms this may not be implemented via the Linux IRQ subsystem.
-+ * The IRQ argument passed to the callback handler is the same as returned
-+ * from the bind call. It may not correspond to a Linux IRQ number.
-+ * Returns IRQ or negative errno.
++ * Legacy hypercall interface:
++ * As above, except the entry sequence to the hypervisor is:
++ *  mov $hypercall-number*32,%eax ; syscall
++ * Clobbered: %rcx, %r11, argument registers (as above)
 + */
-+int bind_caller_port_to_irqhandler(
-+	unsigned int caller_port,
-+	irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+	unsigned long irqflags,
-+	const char *devname,
-+	void *dev_id);
-+int bind_listening_port_to_irqhandler(
-+	unsigned int remote_domain,
-+	irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+	unsigned long irqflags,
-+	const char *devname,
-+	void *dev_id);
-+int bind_interdomain_evtchn_to_irqhandler(
-+	unsigned int remote_domain,
-+	unsigned int remote_port,
-+	irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+	unsigned long irqflags,
-+	const char *devname,
-+	void *dev_id);
-+int bind_virq_to_irqhandler(
-+	unsigned int virq,
-+	unsigned int cpu,
-+	irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+	unsigned long irqflags,
-+	const char *devname,
-+	void *dev_id);
-+int bind_ipi_to_irqhandler(
-+	unsigned int ipi,
-+	unsigned int cpu,
-+	irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+	unsigned long irqflags,
-+	const char *devname,
-+	void *dev_id);
++#define TRAP_INSTR "syscall"
++#endif
 +
 +/*
-+ * Common unbind function for all event sources. Takes IRQ to unbind from.
-+ * Automatically closes the underlying event channel (except for bindings
-+ * made with bind_caller_port_to_irqhandler()).
++ * 64-bit segment selectors
++ * These flat segments are in the Xen-private section of every GDT. Since these
++ * are also present in the initial GDT, many OSes will be able to avoid
++ * installing their own GDT.
 + */
-+void unbind_from_irqhandler(unsigned int irq, void *dev_id);
 +
-+void irq_resume(void);
++#define FLAT_RING3_CS32 0xe023  /* GDT index 260 */
++#define FLAT_RING3_CS64 0xe033  /* GDT index 261 */
++#define FLAT_RING3_DS32 0xe02b  /* GDT index 262 */
++#define FLAT_RING3_DS64 0x0000  /* NULL selector */
++#define FLAT_RING3_SS32 0xe02b  /* GDT index 262 */
++#define FLAT_RING3_SS64 0xe02b  /* GDT index 262 */
 +
-+/* Entry point for notifications into Linux subsystems. */
-+asmlinkage void evtchn_do_upcall(struct pt_regs *regs);
++#define FLAT_KERNEL_DS64 FLAT_RING3_DS64
++#define FLAT_KERNEL_DS32 FLAT_RING3_DS32
++#define FLAT_KERNEL_DS   FLAT_KERNEL_DS64
++#define FLAT_KERNEL_CS64 FLAT_RING3_CS64
++#define FLAT_KERNEL_CS32 FLAT_RING3_CS32
++#define FLAT_KERNEL_CS   FLAT_KERNEL_CS64
++#define FLAT_KERNEL_SS64 FLAT_RING3_SS64
++#define FLAT_KERNEL_SS32 FLAT_RING3_SS32
++#define FLAT_KERNEL_SS   FLAT_KERNEL_SS64
 +
-+/* Entry point for notifications into the userland character device. */
-+void evtchn_device_upcall(int port);
++#define FLAT_USER_DS64 FLAT_RING3_DS64
++#define FLAT_USER_DS32 FLAT_RING3_DS32
++#define FLAT_USER_DS   FLAT_USER_DS64
++#define FLAT_USER_CS64 FLAT_RING3_CS64
++#define FLAT_USER_CS32 FLAT_RING3_CS32
++#define FLAT_USER_CS   FLAT_USER_CS64
++#define FLAT_USER_SS64 FLAT_RING3_SS64
++#define FLAT_USER_SS32 FLAT_RING3_SS32
++#define FLAT_USER_SS   FLAT_USER_SS64
 +
-+void mask_evtchn(int port);
-+void unmask_evtchn(int port);
++#define __HYPERVISOR_VIRT_START 0xFFFF800000000000
++#define __HYPERVISOR_VIRT_END   0xFFFF880000000000
++#define __MACH2PHYS_VIRT_START  0xFFFF800000000000
++#define __MACH2PHYS_VIRT_END    0xFFFF804000000000
 +
-+static inline void clear_evtchn(int port)
-+{
-+	shared_info_t *s = HYPERVISOR_shared_info;
-+	synch_clear_bit(port, s->evtchn_pending);
-+}
++#ifndef HYPERVISOR_VIRT_START
++#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
++#define HYPERVISOR_VIRT_END   mk_unsigned_long(__HYPERVISOR_VIRT_END)
++#endif
 +
-+static inline void notify_remote_via_evtchn(int port)
-+{
-+	struct evtchn_send send = { .port = port };
-+	(void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
-+}
++#define MACH2PHYS_VIRT_START  mk_unsigned_long(__MACH2PHYS_VIRT_START)
++#define MACH2PHYS_VIRT_END    mk_unsigned_long(__MACH2PHYS_VIRT_END)
++#define MACH2PHYS_NR_ENTRIES  ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3)
++#ifndef machine_to_phys_mapping
++#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
++#endif
 +
 +/*
-+ * Use these to access the event channel underlying the IRQ handle returned
-+ * by bind_*_to_irqhandler().
++ * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
++ *  @which == SEGBASE_*  ;  @base == 64-bit base address
++ * Returns 0 on success.
 + */
-+void notify_remote_via_irq(int irq);
-+int irq_to_evtchn_port(int irq);
++#define SEGBASE_FS          0
++#define SEGBASE_GS_USER     1
++#define SEGBASE_GS_KERNEL   2
++#define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */
 +
-+#endif /* __ASM_EVTCHN_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/features.h tmp-linux-2.6-xen.patch/include/xen/features.h
---- pristine-linux-2.6.18.2/include/xen/features.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/features.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,20 @@
-+/******************************************************************************
-+ * features.h
-+ *
-+ * Query the features reported by Xen.
-+ *
-+ * Copyright (c) 2006, Ian Campbell
++/*
++ * int HYPERVISOR_iret(void)
++ * All arguments are on the kernel stack, in the following format.
++ * Never returns if successful. Current kernel context is lost.
++ * The saved CS is mapped as follows:
++ *   RING0 -> RING3 kernel mode.
++ *   RING1 -> RING3 kernel mode.
++ *   RING2 -> RING3 kernel mode.
++ *   RING3 -> RING3 user mode.
++ * However, RING0 indicates that the guest kernel should return to itself
++ * directly with
++ *      orb   $3,1*8(%rsp)
++ *      iretq
++ * If flags contains VGCF_in_syscall:
++ *   Restore RAX, RIP, RFLAGS, RSP.
++ *   Discard R11, RCX, CS, SS.
++ * Otherwise:
++ *   Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP.
++ * All other registers are saved on hypercall entry and restored to user.
 + */
++/* Guest exited in SYSCALL context? Return to guest with SYSRET? */
++#define _VGCF_in_syscall 8
++#define VGCF_in_syscall  (1<<_VGCF_in_syscall)
++#define VGCF_IN_SYSCALL  VGCF_in_syscall
 +
-+#ifndef __ASM_XEN_FEATURES_H__
-+#define __ASM_XEN_FEATURES_H__
++#ifndef __ASSEMBLY__
 +
-+#include <xen/interface/version.h>
++struct iret_context {
++    /* Top of stack (%rsp at point of hypercall). */
++    uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
++    /* Bottom of iret stack frame. */
++};
 +
-+extern void setup_xen_features(void);
++#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
++/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */
++#define __DECL_REG(name) union { \
++    uint64_t r ## name, e ## name; \
++    uint32_t _e ## name; \
++}
++#else
++/* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */
++#define __DECL_REG(name) uint64_t r ## name
++#endif
 +
-+extern u8 xen_features[XENFEAT_NR_SUBMAPS * 32];
++struct cpu_user_regs {
++    uint64_t r15;
++    uint64_t r14;
++    uint64_t r13;
++    uint64_t r12;
++    __DECL_REG(bp);
++    __DECL_REG(bx);
++    uint64_t r11;
++    uint64_t r10;
++    uint64_t r9;
++    uint64_t r8;
++    __DECL_REG(ax);
++    __DECL_REG(cx);
++    __DECL_REG(dx);
++    __DECL_REG(si);
++    __DECL_REG(di);
++    uint32_t error_code;    /* private */
++    uint32_t entry_vector;  /* private */
++    __DECL_REG(ip);
++    uint16_t cs, _pad0[1];
++    uint8_t  saved_upcall_mask;
++    uint8_t  _pad1[3];
++    __DECL_REG(flags);      /* rflags.IF == !saved_upcall_mask */
++    __DECL_REG(sp);
++    uint16_t ss, _pad2[3];
++    uint16_t es, _pad3[3];
++    uint16_t ds, _pad4[3];
++    uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base.     */
++    uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_usr. */
++};
++typedef struct cpu_user_regs cpu_user_regs_t;
++DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
 +
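With the gcc anonymous-union form of __DECL_REG, every slot is reachable
under its 64-bit name, its legacy 32-bit name, and a truncated 32-bit view.
A hypothetical demonstration of the aliasing on a little-endian x86-64 build:

    #include <assert.h>
    #include <string.h>

    /* Illustrative only: rax and eax name the same 64-bit slot;
     * _eax reads its low 32 bits on little-endian. */
    static void check_reg_aliasing(void)
    {
        struct cpu_user_regs r;

        memset(&r, 0, sizeof(r));
        r.rax = 0x1122334455667788ULL;
        assert(r.eax == r.rax);
        assert(r._eax == 0x55667788u);
    }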
-+#define xen_feature(flag)	(xen_features[flag])
++#undef __DECL_REG
 +
-+#endif /* __ASM_XEN_FEATURES_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/gnttab.h tmp-linux-2.6-xen.patch/include/xen/gnttab.h
---- pristine-linux-2.6.18.2/include/xen/gnttab.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/gnttab.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,138 @@
++#define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12)
++#define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12)
++
++struct arch_vcpu_info {
++    unsigned long cr2;
++    unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
++};
++typedef struct arch_vcpu_info arch_vcpu_info_t;
++
++typedef unsigned long xen_callback_t;
++
++#endif /* !__ASSEMBLY__ */
++
++#endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/arch-x86/xen.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/arch-x86/xen.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,204 @@
 +/******************************************************************************
-+ * gnttab.h
-+ * 
-+ * Two sets of functionality:
-+ * 1. Granting foreign access to our memory reservation.
-+ * 2. Accessing others' memory reservations via grant references.
-+ * (i.e., mechanisms for both sender and recipient of grant references)
-+ * 
-+ * Copyright (c) 2004-2005, K A Fraser
-+ * Copyright (c) 2005, Christopher Clark
++ * arch-x86/xen.h
 + * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
++ * Guest OS interface to x86 Xen.
 + * 
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
 + * The above copyright notice and this permission notice shall be included in
 + * all copies or substantial portions of the Software.
-+ * 
++ *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2004-2006, K A Fraser
 + */
 +
-+#ifndef __ASM_GNTTAB_H__
-+#define __ASM_GNTTAB_H__
++#include "../xen.h"
 +
-+#include <asm/hypervisor.h>
-+#include <asm/maddr.h> /* maddr_t */
-+#include <xen/interface/grant_table.h>
-+#include <xen/features.h>
++#ifndef __XEN_PUBLIC_ARCH_X86_XEN_H__
++#define __XEN_PUBLIC_ARCH_X86_XEN_H__
 +
-+struct gnttab_free_callback {
-+	struct gnttab_free_callback *next;
-+	void (*fn)(void *);
-+	void *arg;
-+	u16 count;
-+};
++/* Structural guest handles introduced in 0x00030201. */
++#if __XEN_INTERFACE_VERSION__ >= 0x00030201
++#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
++    typedef struct { type *p; } __guest_handle_ ## name
++#else
++#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
++    typedef type * __guest_handle_ ## name
++#endif
 +
-+int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
-+				int readonly);
++#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
++    ___DEFINE_XEN_GUEST_HANDLE(name, type);   \
++    ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
++#define DEFINE_XEN_GUEST_HANDLE(name)   __DEFINE_XEN_GUEST_HANDLE(name, name)
++#define __XEN_GUEST_HANDLE(name)        __guest_handle_ ## name
++#define XEN_GUEST_HANDLE(name)          __XEN_GUEST_HANDLE(name)
++#define set_xen_guest_handle(hnd, val)  do { (hnd).p = val; } while (0)
++#ifdef __XEN_TOOLS__
++#define get_xen_guest_handle(val, hnd)  do { val = (hnd).p; } while (0)
++#endif
 +
-+/*
-+ * End access through the given grant reference, iff the grant entry is no
-+ * longer in use.  Return 1 if the grant entry was freed, 0 if it is still in
-+ * use.
-+ */
-+int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);
++#if defined(__i386__)
++#include "xen-x86_32.h"
++#elif defined(__x86_64__)
++#include "xen-x86_64.h"
++#endif
++
++#ifndef __ASSEMBLY__
++typedef unsigned long xen_pfn_t;
++#define PRI_xen_pfn "lx"
++#endif
 +
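A caller-side sketch of the handle macros: declare the handle type once,
then store a pointer with set_xen_guest_handle(). The fill() helper is
hypothetical, and the declaration assumes the handle type is not already
provided elsewhere:

    /* Hypothetical caller: hand an array of frame numbers to Xen.
     * Assumes xen_pfn_t has no handle type declared elsewhere. */
    DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);

    static void fill(XEN_GUEST_HANDLE(xen_pfn_t) *hnd, xen_pfn_t *frames)
    {
        set_xen_guest_handle(*hnd, frames);     /* stores frames in hnd->p */
    }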
 +/*
-+ * Eventually end access through the given grant reference, and once that
-+ * access has been ended, free the given page too.  Access will be ended
-+ * immediately iff the grant entry is not in use, otherwise it will happen
-+ * some time later.  page may be 0, in which case no freeing will occur.
++ * SEGMENT DESCRIPTOR TABLES
 + */
-+void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
-+			       unsigned long page);
-+
-+int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);
-+
-+unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
-+unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
-+
-+int gnttab_query_foreign_access(grant_ref_t ref);
-+
 +/*
-+ * operations on reserved batches of grant references
++ * A number of GDT entries are reserved by Xen. These are not situated at the
++ * start of the GDT because some stupid OSes export hard-coded selector values
++ * in their ABI. These hard-coded values are always near the start of the GDT,
++ * so Xen places itself out of the way, at the far end of the GDT.
 + */
-+int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);
-+
-+void gnttab_free_grant_reference(grant_ref_t ref);
-+
-+void gnttab_free_grant_references(grant_ref_t head);
-+
-+int gnttab_empty_grant_references(const grant_ref_t *pprivate_head);
-+
-+int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);
-+
-+void gnttab_release_grant_reference(grant_ref_t *private_head,
-+				    grant_ref_t release);
-+
-+void gnttab_request_free_callback(struct gnttab_free_callback *callback,
-+				  void (*fn)(void *), void *arg, u16 count);
-+void gnttab_cancel_free_callback(struct gnttab_free_callback *callback);
-+
-+void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
-+				     unsigned long frame, int readonly);
-+
-+void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
-+				       unsigned long pfn);
-+
-+int gnttab_suspend(void);
-+int gnttab_resume(void);
-+
-+static inline void
-+gnttab_set_map_op(struct gnttab_map_grant_ref *map, maddr_t addr,
-+		  uint32_t flags, grant_ref_t ref, domid_t domid)
-+{
-+	if (flags & GNTMAP_contains_pte)
-+		map->host_addr = addr;
-+	else if (xen_feature(XENFEAT_auto_translated_physmap))
-+		map->host_addr = __pa(addr);
-+	else
-+		map->host_addr = addr;
-+
-+	map->flags = flags;
-+	map->ref = ref;
-+	map->dom = domid;
-+}
-+
-+static inline void
-+gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, maddr_t addr,
-+		    uint32_t flags, grant_handle_t handle)
-+{
-+	if (flags & GNTMAP_contains_pte)
-+		unmap->host_addr = addr;
-+	else if (xen_feature(XENFEAT_auto_translated_physmap))
-+		unmap->host_addr = __pa(addr);
-+	else
-+		unmap->host_addr = addr;
-+
-+	unmap->handle = handle;
-+	unmap->dev_bus_addr = 0;
-+}
++#define FIRST_RESERVED_GDT_PAGE  14
++#define FIRST_RESERVED_GDT_BYTE  (FIRST_RESERVED_GDT_PAGE * 4096)
++#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
 +
-+#endif /* __ASM_GNTTAB_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/hvm.h tmp-linux-2.6-xen.patch/include/xen/hvm.h
---- pristine-linux-2.6.18.2/include/xen/hvm.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/hvm.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,23 @@
-+/* Simple wrappers around HVM functions */
-+#ifndef XEN_HVM_H__
-+#define XEN_HVM_H__
++/* Maximum number of virtual CPUs in multi-processor guests. */
++#define MAX_VIRT_CPUS 32
 +
-+#include <xen/interface/hvm/params.h>
 +
-+static inline unsigned long hvm_get_parameter(int idx)
-+{
-+	struct xen_hvm_param xhv;
-+	int r;
++/* Machine check support */
++#include "xen-mca.h"
 +
-+	xhv.domid = DOMID_SELF;
-+	xhv.index = idx;
-+	r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
-+	if (r < 0) {
-+		printk(KERN_ERR "cannot get hvm parameter %d: %d.\n",
-+		       idx, r);
-+		return 0;
-+	}
-+	return xhv.value;
-+}
++#ifndef __ASSEMBLY__
 +
-+#endif /* XEN_HVM_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/hypercall.h tmp-linux-2.6-xen.patch/include/xen/hypercall.h
---- pristine-linux-2.6.18.2/include/xen/hypercall.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/hypercall.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,24 @@
-+#ifndef __XEN_HYPERCALL_H__
-+#define __XEN_HYPERCALL_H__
++typedef unsigned long xen_ulong_t;
 +
-+#include <asm/hypercall.h>
++/*
++ * Send an array of these to HYPERVISOR_set_trap_table().
++ * The privilege level specifies which modes may enter a trap via a software
++ * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate
++ * privilege levels as follows:
++ *  Level == 0: No one may enter
++ *  Level == 1: Kernel may enter
++ *  Level == 2: Kernel may enter
++ *  Level == 3: Everyone may enter
++ */
++#define TI_GET_DPL(_ti)      ((_ti)->flags & 3)
++#define TI_GET_IF(_ti)       ((_ti)->flags & 4)
++#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
++#define TI_SET_IF(_ti,_if)   ((_ti)->flags |= ((!!(_if))<<2))
++struct trap_info {
++    uint8_t       vector;  /* exception vector                              */
++    uint8_t       flags;   /* 0-3: privilege level; 4: clear event enable?  */
++    uint16_t      cs;      /* code selector                                 */
++    unsigned long address; /* code offset                                   */
++};
++typedef struct trap_info trap_info_t;
++DEFINE_XEN_GUEST_HANDLE(trap_info_t);
 +
-+static inline int
-+HYPERVISOR_multicall_check(
-+	multicall_entry_t *call_list, int nr_calls,
-+	const unsigned long *rc_list)
-+{
-+	int rc = HYPERVISOR_multicall(call_list, nr_calls);
++typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
 +
-+	if (unlikely(rc < 0))
-+		return rc;
-+	BUG_ON(rc);
++/*
++ * The following is all CPU context. Note that the fpu_ctxt block is filled 
++ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
++ */
++struct vcpu_guest_context {
++    /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
++    struct { char x[512]; } fpu_ctxt;       /* User-level FPU registers     */
++#define VGCF_I387_VALID                (1<<0)
++#define VGCF_IN_KERNEL                 (1<<2)
++#define _VGCF_i387_valid               0
++#define VGCF_i387_valid                (1<<_VGCF_i387_valid)
++#define _VGCF_in_kernel                2
++#define VGCF_in_kernel                 (1<<_VGCF_in_kernel)
++#define _VGCF_failsafe_disables_events 3
++#define VGCF_failsafe_disables_events  (1<<_VGCF_failsafe_disables_events)
++#define _VGCF_syscall_disables_events  4
++#define VGCF_syscall_disables_events   (1<<_VGCF_syscall_disables_events)
++#define _VGCF_online                   5
++#define VGCF_online                    (1<<_VGCF_online)
++    unsigned long flags;                    /* VGCF_* flags                 */
++    struct cpu_user_regs user_regs;         /* User-level CPU registers     */
++    struct trap_info trap_ctxt[256];        /* Virtual IDT                  */
++    unsigned long ldt_base, ldt_ents;       /* LDT (linear address, # ents) */
++    unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
++    unsigned long kernel_ss, kernel_sp;     /* Virtual TSS (only SS1/SP1)   */
++    /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */
++    unsigned long ctrlreg[8];               /* CR0-CR7 (control registers)  */
++    unsigned long debugreg[8];              /* DB0-DB7 (debug registers)    */
++#ifdef __i386__
++    unsigned long event_callback_cs;        /* CS:EIP of event callback     */
++    unsigned long event_callback_eip;
++    unsigned long failsafe_callback_cs;     /* CS:EIP of failsafe callback  */
++    unsigned long failsafe_callback_eip;
++#else
++    unsigned long event_callback_eip;
++    unsigned long failsafe_callback_eip;
++#ifdef __XEN__
++    union {
++        unsigned long syscall_callback_eip;
++        struct {
++            unsigned int event_callback_cs;    /* compat CS of event cb     */
++            unsigned int failsafe_callback_cs; /* compat CS of failsafe cb  */
++        };
++    };
++#else
++    unsigned long syscall_callback_eip;
++#endif
++#endif
++    unsigned long vm_assist;                /* VMASST_TYPE_* bitmap */
++#ifdef __x86_64__
++    /* Segment base addresses. */
++    uint64_t      fs_base;
++    uint64_t      gs_base_kernel;
++    uint64_t      gs_base_user;
++#endif
++};
++typedef struct vcpu_guest_context vcpu_guest_context_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
 +
-+	for ( ; nr_calls > 0; --nr_calls, ++call_list)
-+		if (unlikely(call_list->result != (rc_list ? *rc_list++ : 0)))
-+			return nr_calls;
++struct arch_shared_info {
++    unsigned long max_pfn;                  /* max pfn that appears in table */
++    /* Frame containing list of mfns containing list of mfns containing p2m. */
++    xen_pfn_t     pfn_to_mfn_frame_list_list;
++    unsigned long nmi_reason;
++    uint64_t pad[32];
++};
++typedef struct arch_shared_info arch_shared_info_t;
 +
-+	return 0;
-+}
++#endif /* !__ASSEMBLY__ */
 +
-+#endif /* __XEN_HYPERCALL_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/hypervisor_sysfs.h tmp-linux-2.6-xen.patch/include/xen/hypervisor_sysfs.h
---- pristine-linux-2.6.18.2/include/xen/hypervisor_sysfs.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/hypervisor_sysfs.h	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,30 @@
 +/*
-+ *  copyright (c) 2006 IBM Corporation
-+ *  Authored by: Mike D. Day <ncmike at us.ibm.com>
-+ *
-+ *  This program is free software; you can redistribute it and/or modify
-+ *  it under the terms of the GNU General Public License version 2 as
-+ *  published by the Free Software Foundation.
++ * Prefix forces emulation of some non-trapping instructions.
++ * Currently only CPUID.
 + */
++#ifdef __ASSEMBLY__
++#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
++#define XEN_CPUID          XEN_EMULATE_PREFIX cpuid
++#else
++#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
++#define XEN_CPUID          XEN_EMULATE_PREFIX "cpuid"
++#endif
 +
-+#ifndef _HYP_SYSFS_H_
-+#define _HYP_SYSFS_H_
-+
-+#include <linux/kobject.h>
-+#include <linux/sysfs.h>
-+
-+#define HYPERVISOR_ATTR_RO(_name) \
-+static struct hyp_sysfs_attr  _name##_attr = __ATTR_RO(_name)
++#endif /* __XEN_PUBLIC_ARCH_X86_XEN_H__ */
 +
-+#define HYPERVISOR_ATTR_RW(_name) \
-+static struct hyp_sysfs_attr _name##_attr = \
-+	__ATTR(_name, 0644, _name##_show, _name##_store)
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
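
As a rough illustration of the trap-table interface defined above (not part of the patch itself), a PV guest could fill one trap_info entry with the TI_SET_DPL/TI_SET_IF helpers and hand the array to Xen. This sketch assumes the HYPERVISOR_set_trap_table() wrapper available elsewhere in this tree and follows the usual convention that a zero-filled entry terminates the table; example_register_int80() and its arguments are hypothetical names.

    /* Minimal sketch, assuming HYPERVISOR_set_trap_table() is available. */
    static trap_info_t example_traps[2];   /* [1] stays zeroed: terminator */

    static void example_register_int80(unsigned long handler,
                                       uint16_t kernel_cs)
    {
        trap_info_t *ti = &example_traps[0];

        ti->vector  = 0x80;      /* software-interrupt (system call) vector */
        ti->flags   = 0;
        TI_SET_DPL(ti, 3);       /* level 3: everyone may enter */
        TI_SET_IF(ti, 1);        /* clear event delivery while handling */
        ti->cs      = kernel_cs;
        ti->address = handler;

        HYPERVISOR_set_trap_table(example_traps);
    }
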
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/arch-x86_32.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/arch-x86_32.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,27 @@
++/******************************************************************************
++ * arch-x86_32.h
++ * 
++ * Guest OS interface to x86 32-bit Xen.
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2004-2006, K A Fraser
++ */
 +
-+struct hyp_sysfs_attr {
-+	struct attribute attr;
-+	ssize_t (*show)(struct hyp_sysfs_attr *, char *);
-+	ssize_t (*store)(struct hyp_sysfs_attr *, const char *, size_t);
-+	void *hyp_attr_data;
-+};
++#include "arch-x86/xen.h"
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/arch-x86_64.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/arch-x86_64.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,27 @@
++/******************************************************************************
++ * arch-x86_64.h
++ * 
++ * Guest OS interface to x86 64-bit Xen.
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2004-2006, K A Fraser
++ */
 +
-+#endif /* _HYP_SYSFS_H_ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/acm.h tmp-linux-2.6-xen.patch/include/xen/interface/acm.h
---- pristine-linux-2.6.18.2/include/xen/interface/acm.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/acm.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,228 @@
-+/*
-+ * acm.h: Xen access control module interface defintions
++#include "arch-x86/xen.h"
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/callback.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/callback.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,121 @@
++/******************************************************************************
++ * callback.h
++ *
++ * Register guest OS callbacks with Xen.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
@@ -95937,204 +135191,95 @@
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
 + *
-+ * Reiner Sailer <sailer at watson.ibm.com>
-+ * Copyright (c) 2005, International Business Machines Corporation.
++ * Copyright (c) 2006, Ian Campbell
 + */
 +
-+#ifndef _XEN_PUBLIC_ACM_H
-+#define _XEN_PUBLIC_ACM_H
++#ifndef __XEN_PUBLIC_CALLBACK_H__
++#define __XEN_PUBLIC_CALLBACK_H__
 +
 +#include "xen.h"
 +
-+/* if ACM_DEBUG defined, all hooks should
-+ * print a short trace message (comment it out
-+ * when not in testing mode )
++/*
++ * Prototype for this hypercall is:
++ *   long callback_op(int cmd, void *extra_args)
++ * @cmd        == CALLBACKOP_??? (callback operation).
++ * @extra_args == Operation-specific extra arguments (NULL if none).
 + */
-+/* #define ACM_DEBUG */
-+
-+#ifdef ACM_DEBUG
-+#  define printkd(fmt, args...) printk(fmt,## args)
-+#else
-+#  define printkd(fmt, args...)
-+#endif
 +
-+/* default ssid reference value if not supplied */
-+#define ACM_DEFAULT_SSID  0x0
-+#define ACM_DEFAULT_LOCAL_SSID  0x0
++/* ia64, x86: Callback for event delivery. */
++#define CALLBACKTYPE_event                 0
 +
-+/* Internal ACM ERROR types */
-+#define ACM_OK     0
-+#define ACM_UNDEF   -1
-+#define ACM_INIT_SSID_ERROR  -2
-+#define ACM_INIT_SOID_ERROR  -3
-+#define ACM_ERROR          -4
++/* x86: Failsafe callback when guest state cannot be restored by Xen. */
++#define CALLBACKTYPE_failsafe              1
 +
-+/* External ACCESS DECISIONS */
-+#define ACM_ACCESS_PERMITTED        0
-+#define ACM_ACCESS_DENIED           -111
-+#define ACM_NULL_POINTER_ERROR      -200
++/* x86/64 hypervisor: Syscall by 64-bit guest app ('64-on-64-on-64'). */
++#define CALLBACKTYPE_syscall               2
 +
 +/*
-+   Error codes reported in when trying to test for a new policy
-+   These error codes are reported in an array of tuples where
-+   each error code is followed by a parameter describing the error
-+   more closely, such as a domain id.
-+*/
-+#define ACM_EVTCHN_SHARING_VIOLATION       0x100
-+#define ACM_GNTTAB_SHARING_VIOLATION       0x101
-+#define ACM_DOMAIN_LOOKUP                  0x102
-+#define ACM_CHWALL_CONFLICT                0x103
-+#define ACM_SSIDREF_IN_USE                 0x104
-+
-+
-+/* primary policy in lower 4 bits */
-+#define ACM_NULL_POLICY 0
-+#define ACM_CHINESE_WALL_POLICY 1
-+#define ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY 2
-+#define ACM_POLICY_UNDEFINED 15
-+
-+/* combinations have secondary policy component in higher 4bit */
-+#define ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY \
-+    ((ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY << 4) | ACM_CHINESE_WALL_POLICY)
-+
-+/* policy: */
-+#define ACM_POLICY_NAME(X) \
-+ ((X) == (ACM_NULL_POLICY)) ? "NULL" :                        \
-+    ((X) == (ACM_CHINESE_WALL_POLICY)) ? "CHINESE WALL" :        \
-+    ((X) == (ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "SIMPLE TYPE ENFORCEMENT" : \
-+    ((X) == (ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "CHINESE WALL AND SIMPLE TYPE ENFORCEMENT" : \
-+     "UNDEFINED"
-+
-+/* the following policy versions must be increased
-+ * whenever the interpretation of the related
-+ * policy's data structure changes
++ * x86/32 hypervisor: Only available on x86/32 when supervisor_mode_kernel
++ *     feature is enabled. Do not use this callback type in new code.
 + */
-+#define ACM_POLICY_VERSION 3
-+#define ACM_CHWALL_VERSION 1
-+#define ACM_STE_VERSION  1
-+
-+/* defines a ssid reference used by xen */
-+typedef uint32_t ssidref_t;
-+
-+/* hooks that are known to domains */
-+#define ACMHOOK_none    0
-+#define ACMHOOK_sharing 1
++#define CALLBACKTYPE_sysenter_deprecated   3
 +
-+/* -------security policy relevant type definitions-------- */
++/* x86: Callback for NMI delivery. */
++#define CALLBACKTYPE_nmi                   4
 +
-+/* type identifier; compares to "equal" or "not equal" */
-+typedef uint16_t domaintype_t;
++/*
++ * x86: sysenter is only available as follows:
++ * - 32-bit hypervisor: with the supervisor_mode_kernel feature enabled
++ * - 64-bit hypervisor: 32-bit guest applications on Intel CPUs
++ *                      ('32-on-32-on-64', '32-on-64-on-64')
++ *                      [nb. also 64-bit guest applications on Intel CPUs
++ *                           ('64-on-64-on-64'), but syscall is preferred]
++ */
++#define CALLBACKTYPE_sysenter              5
 +
-+/* CHINESE WALL POLICY DATA STRUCTURES
-+ *
-+ * current accumulated conflict type set:
-+ * When a domain is started and has a type that is in
-+ * a conflict set, the conflicting types are incremented in
-+ * the aggregate set. When a domain is destroyed, the 
-+ * conflicting types to its type are decremented.
-+ * If a domain has multiple types, this procedure works over
-+ * all those types.
-+ *
-+ * conflict_aggregate_set[i] holds the number of
-+ *   running domains that have a conflict with type i.
-+ *
-+ * running_types[i] holds the number of running domains
-+ *        that include type i in their ssidref-referenced type set
-+ *
-+ * conflict_sets[i][j] is "0" if type j has no conflict
-+ *    with type i and is "1" otherwise.
++/*
++ * x86/64 hypervisor: Syscall by 32-bit guest app on AMD CPUs
++ *                    ('32-on-32-on-64', '32-on-64-on-64')
 + */
-+/* high-16 = version, low-16 = check magic */
-+#define ACM_MAGIC  0x0001debc
++#define CALLBACKTYPE_syscall32             7
 +
-+/* each offset in bytes from start of the struct they
-+ * are part of */
++/*
++ * Disable event delivery during callback? This flag is ignored for event and
++ * NMI callbacks: event delivery is unconditionally disabled.
++ */
++#define _CALLBACKF_mask_events             0
++#define CALLBACKF_mask_events              (1U << _CALLBACKF_mask_events)
 +
-+/* V3 of the policy buffer aded a version structure */
-+struct acm_policy_version
-+{
-+    uint32_t major;
-+    uint32_t minor;
++/*
++ * Register a callback.
++ */
++#define CALLBACKOP_register                0
++struct callback_register {
++    uint16_t type;
++    uint16_t flags;
++    xen_callback_t address;
 +};
++typedef struct callback_register callback_register_t;
++DEFINE_XEN_GUEST_HANDLE(callback_register_t);
 +
-+
-+/* each buffer consists of all policy information for
-+ * the respective policy given in the policy code
++/*
++ * Unregister a callback.
 + *
-+ * acm_policy_buffer, acm_chwall_policy_buffer,
-+ * and acm_ste_policy_buffer need to stay 32-bit aligned
-+ * because we create binary policies also with external
-+ * tools that assume packed representations (e.g. the java tool)
++ * Not all callbacks can be unregistered. -EINVAL will be returned if
++ * you attempt to unregister such a callback.
 + */
-+struct acm_policy_buffer {
-+    uint32_t policy_version; /* ACM_POLICY_VERSION */
-+    uint32_t magic;
-+    uint32_t len;
-+    uint32_t policy_reference_offset;
-+    uint32_t primary_policy_code;
-+    uint32_t primary_buffer_offset;
-+    uint32_t secondary_policy_code;
-+    uint32_t secondary_buffer_offset;
-+    struct acm_policy_version xml_pol_version; /* add in V3 */
-+};
-+
-+
-+struct acm_policy_reference_buffer {
-+    uint32_t len;
-+};
-+
-+struct acm_chwall_policy_buffer {
-+    uint32_t policy_version; /* ACM_CHWALL_VERSION */
-+    uint32_t policy_code;
-+    uint32_t chwall_max_types;
-+    uint32_t chwall_max_ssidrefs;
-+    uint32_t chwall_max_conflictsets;
-+    uint32_t chwall_ssid_offset;
-+    uint32_t chwall_conflict_sets_offset;
-+    uint32_t chwall_running_types_offset;
-+    uint32_t chwall_conflict_aggregate_offset;
-+};
-+
-+struct acm_ste_policy_buffer {
-+    uint32_t policy_version; /* ACM_STE_VERSION */
-+    uint32_t policy_code;
-+    uint32_t ste_max_types;
-+    uint32_t ste_max_ssidrefs;
-+    uint32_t ste_ssid_offset;
-+};
-+
-+struct acm_stats_buffer {
-+    uint32_t magic;
-+    uint32_t len;
-+    uint32_t primary_policy_code;
-+    uint32_t primary_stats_offset;
-+    uint32_t secondary_policy_code;
-+    uint32_t secondary_stats_offset;
-+};
-+
-+struct acm_ste_stats_buffer {
-+    uint32_t ec_eval_count;
-+    uint32_t gt_eval_count;
-+    uint32_t ec_denied_count;
-+    uint32_t gt_denied_count;
-+    uint32_t ec_cachehit_count;
-+    uint32_t gt_cachehit_count;
-+};
-+
-+struct acm_ssid_buffer {
-+    uint32_t len;
-+    ssidref_t ssidref;
-+    uint32_t policy_reference_offset;
-+    uint32_t primary_policy_code;
-+    uint32_t primary_max_types;
-+    uint32_t primary_types_offset;
-+    uint32_t secondary_policy_code;
-+    uint32_t secondary_max_types;
-+    uint32_t secondary_types_offset;
++#define CALLBACKOP_unregister              1
++struct callback_unregister {
++    uint16_t type;
++    uint16_t _unused;
 +};
++typedef struct callback_unregister callback_unregister_t;
++DEFINE_XEN_GUEST_HANDLE(callback_unregister_t);
 +
++#if __XEN_INTERFACE_VERSION__ < 0x00030207
++#undef CALLBACKTYPE_sysenter
++#define CALLBACKTYPE_sysenter CALLBACKTYPE_sysenter_deprecated
 +#endif
 +
++#endif /* __XEN_PUBLIC_CALLBACK_H__ */
++
 +/*
 + * Local variables:
 + * mode: C
@@ -96144,13 +135289,15 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
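
To make the callback_register layout above concrete, here is a hypothetical sketch (not part of the patch) of registering an event callback on x86_64, where xen_callback_t is a plain address. It assumes the HYPERVISOR_callback_op() wrapper from this tree; the assembly entry point example_hypervisor_callback is invented for the example.

    /* Sketch only: register the event-delivery upcall with Xen. */
    extern void example_hypervisor_callback(void);  /* assumed asm stub */

    static int example_register_event_callback(void)
    {
        struct callback_register event = {
            .type    = CALLBACKTYPE_event,
            /* Event delivery is unconditionally masked for this type,
             * so CALLBACKF_mask_events is not needed here. */
            .flags   = 0,
            .address = (xen_callback_t)example_hypervisor_callback,
        };

        return HYPERVISOR_callback_op(CALLBACKOP_register, &event);
    }
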
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/acm_ops.h tmp-linux-2.6-xen.patch/include/xen/interface/acm_ops.h
---- pristine-linux-2.6.18.2/include/xen/interface/acm_ops.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/acm_ops.h	2007-09-30 18:06:19.000000000 +0200
-@@ -0,0 +1,159 @@
-+/*
-+ * acm_ops.h: Xen access control module hypervisor commands
-+ *
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/dom0_ops.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/dom0_ops.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,120 @@
++/******************************************************************************
++ * dom0_ops.h
++ * 
++ * Process command requests from domain-0 guest OS.
++ * 
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
 + * deal in the Software without restriction, including without limitation the
@@ -96169,134 +135316,93 @@
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
 + *
-+ * Reiner Sailer <sailer at watson.ibm.com>
-+ * Copyright (c) 2005,2006 International Business Machines Corporation.
++ * Copyright (c) 2002-2003, B Dragovic
++ * Copyright (c) 2002-2006, K Fraser
 + */
 +
-+#ifndef __XEN_PUBLIC_ACM_OPS_H__
-+#define __XEN_PUBLIC_ACM_OPS_H__
++#ifndef __XEN_PUBLIC_DOM0_OPS_H__
++#define __XEN_PUBLIC_DOM0_OPS_H__
 +
 +#include "xen.h"
-+#include "acm.h"
-+
-+/*
-+ * Make sure you increment the interface version whenever you modify this file!
-+ * This makes sure that old versions of acm tools will stop working in a
-+ * well-defined way (rather than crashing the machine, for instance).
-+ */
-+#define ACM_INTERFACE_VERSION   0xAAAA000A
-+
-+/************************************************************************/
-+
-+/*
-+ * Prototype for this hypercall is:
-+ *  int acm_op(int cmd, void *args)
-+ * @cmd  == ACMOP_??? (access control module operation).
-+ * @args == Operation-specific extra arguments (NULL if none).
-+ */
-+
++#include "platform.h"
 +
-+#define ACMOP_setpolicy         1
-+struct acm_setpolicy {
-+    /* IN */
-+    XEN_GUEST_HANDLE_64(void) pushcache;
-+    uint32_t pushcache_size;
-+};
++#if __XEN_INTERFACE_VERSION__ >= 0x00030204
++#error "dom0_ops.h is a compatibility interface only"
++#endif
 +
++#define DOM0_INTERFACE_VERSION XENPF_INTERFACE_VERSION
 +
-+#define ACMOP_getpolicy         2
-+struct acm_getpolicy {
-+    /* IN */
-+    XEN_GUEST_HANDLE_64(void) pullcache;
-+    uint32_t pullcache_size;
-+};
++#define DOM0_SETTIME          XENPF_settime
++#define dom0_settime          xenpf_settime
++#define dom0_settime_t        xenpf_settime_t
 +
++#define DOM0_ADD_MEMTYPE      XENPF_add_memtype
++#define dom0_add_memtype      xenpf_add_memtype
++#define dom0_add_memtype_t    xenpf_add_memtype_t
 +
-+#define ACMOP_dumpstats         3
-+struct acm_dumpstats {
-+    /* IN */
-+    XEN_GUEST_HANDLE_64(void) pullcache;
-+    uint32_t pullcache_size;
-+};
++#define DOM0_DEL_MEMTYPE      XENPF_del_memtype
++#define dom0_del_memtype      xenpf_del_memtype
++#define dom0_del_memtype_t    xenpf_del_memtype_t
 +
++#define DOM0_READ_MEMTYPE     XENPF_read_memtype
++#define dom0_read_memtype     xenpf_read_memtype
++#define dom0_read_memtype_t   xenpf_read_memtype_t
 +
-+#define ACMOP_getssid           4
-+#define ACM_GETBY_ssidref  1
-+#define ACM_GETBY_domainid 2
-+struct acm_getssid {
-+    /* IN */
-+    uint32_t get_ssid_by; /* ACM_GETBY_* */
-+    union {
-+        domaintype_t domainid;
-+        ssidref_t    ssidref;
-+    } id;
-+    XEN_GUEST_HANDLE_64(void) ssidbuf;
-+    uint32_t ssidbuf_size;
-+};
++#define DOM0_MICROCODE        XENPF_microcode_update
++#define dom0_microcode        xenpf_microcode_update
++#define dom0_microcode_t      xenpf_microcode_update_t
 +
-+#define ACMOP_getdecision      5
-+struct acm_getdecision {
-+    /* IN */
-+    uint32_t get_decision_by1; /* ACM_GETBY_* */
-+    uint32_t get_decision_by2; /* ACM_GETBY_* */
-+    union {
-+        domaintype_t domainid;
-+        ssidref_t    ssidref;
-+    } id1;
-+    union {
-+        domaintype_t domainid;
-+        ssidref_t    ssidref;
-+    } id2;
-+    uint32_t hook;
-+    /* OUT */
-+    uint32_t acm_decision;
-+};
++#define DOM0_PLATFORM_QUIRK   XENPF_platform_quirk
++#define dom0_platform_quirk   xenpf_platform_quirk
++#define dom0_platform_quirk_t xenpf_platform_quirk_t
 +
++typedef uint64_t cpumap_t;
 +
-+#define ACMOP_chgpolicy        6
-+struct acm_change_policy {
-+    /* IN */
-+    XEN_GUEST_HANDLE_64(void) policy_pushcache;
-+    uint32_t policy_pushcache_size;
-+    XEN_GUEST_HANDLE_64(void) del_array;
-+    uint32_t delarray_size;
-+    XEN_GUEST_HANDLE_64(void) chg_array;
-+    uint32_t chgarray_size;
-+    /* OUT */
-+    /* array with error code */
-+    XEN_GUEST_HANDLE_64(void) err_array;
-+    uint32_t errarray_size;
++/* Unsupported legacy operation -- defined for API compatibility. */
++#define DOM0_MSR                 15
++struct dom0_msr {
++    /* IN variables. */
++    uint32_t write;
++    cpumap_t cpu_mask;
++    uint32_t msr;
++    uint32_t in1;
++    uint32_t in2;
++    /* OUT variables. */
++    uint32_t out1;
++    uint32_t out2;
 +};
++typedef struct dom0_msr dom0_msr_t;
++DEFINE_XEN_GUEST_HANDLE(dom0_msr_t);
 +
-+#define ACMOP_relabeldoms       7
-+struct acm_relabel_doms {
-+    /* IN */
-+    XEN_GUEST_HANDLE_64(void) relabel_map;
-+    uint32_t relabel_map_size;
-+    /* OUT */
-+    XEN_GUEST_HANDLE_64(void) err_array;
-+    uint32_t errarray_size;
++/* Unsupported legacy operation -- defined for API compatibility. */
++#define DOM0_PHYSICAL_MEMORY_MAP 40
++struct dom0_memory_map_entry {
++    uint64_t start, end;
++    uint32_t flags; /* reserved */
++    uint8_t  is_ram;
 +};
++typedef struct dom0_memory_map_entry dom0_memory_map_entry_t;
++DEFINE_XEN_GUEST_HANDLE(dom0_memory_map_entry_t);
 +
-+/* future interface to Xen */
-+struct xen_acmctl {
++struct dom0_op {
 +    uint32_t cmd;
-+    uint32_t interface_version;
++    uint32_t interface_version; /* DOM0_INTERFACE_VERSION */
 +    union {
-+        struct acm_setpolicy     setpolicy;
-+        struct acm_getpolicy     getpolicy;
-+        struct acm_dumpstats     dumpstats;
-+        struct acm_getssid       getssid;
-+        struct acm_getdecision   getdecision;
-+        struct acm_change_policy change_policy;
-+        struct acm_relabel_doms  relabel_doms;
++        struct dom0_msr               msr;
++        struct dom0_settime           settime;
++        struct dom0_add_memtype       add_memtype;
++        struct dom0_del_memtype       del_memtype;
++        struct dom0_read_memtype      read_memtype;
++        struct dom0_microcode         microcode;
++        struct dom0_platform_quirk    platform_quirk;
++        struct dom0_memory_map_entry  physical_memory_map;
++        uint8_t                       pad[128];
 +    } u;
 +};
++typedef struct dom0_op dom0_op_t;
++DEFINE_XEN_GUEST_HANDLE(dom0_op_t);
 +
-+typedef struct xen_acmctl xen_acmctl_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_acmctl_t);
-+
-+#endif /* __XEN_PUBLIC_ACM_OPS_H__ */
++#endif /* __XEN_PUBLIC_DOM0_OPS_H__ */
 +
 +/*
 + * Local variables:
@@ -96307,15 +135413,15 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
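
The aliases above mean legacy dom0_ops callers compile unchanged against the platform-op interface: each DOM0_*/dom0_* name expands to its XENPF_*/xenpf_* counterpart from platform.h. A hypothetical sketch, assuming the xenpf_settime layout (secs, nsecs, system_time) from platform.h; the helper name is invented:

    /* Sketch only: fill a legacy settime request via the compat names. */
    static void example_fill_settime(struct dom0_op *op, uint32_t secs,
                                     uint32_t nsecs, uint64_t system_time)
    {
        op->cmd               = DOM0_SETTIME;  /* expands to XENPF_settime */
        op->interface_version = DOM0_INTERFACE_VERSION;
        op->u.settime.secs        = secs;      /* struct dom0_settime is */
        op->u.settime.nsecs       = nsecs;     /* really xenpf_settime   */
        op->u.settime.system_time = system_time;
    }
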
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/arch-ia64.h tmp-linux-2.6-xen.patch/include/xen/interface/arch-ia64.h
---- pristine-linux-2.6.18.2/include/xen/interface/arch-ia64.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/arch-ia64.h	2007-09-30 18:06:19.000000000 +0200
-@@ -0,0 +1,522 @@
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/domctl.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/domctl.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,676 @@
 +/******************************************************************************
-+ * arch-ia64/hypervisor-if.h
++ * domctl.h
++ * 
++ * Domain management operations. For use by node control stack.
 + * 
-+ * Guest OS interface to IA64 Xen.
-+ *
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
 + * deal in the Software without restriction, including without limitation the
@@ -96334,495 +135440,649 @@
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
 + *
++ * Copyright (c) 2002-2003, B Dragovic
++ * Copyright (c) 2002-2006, K Fraser
 + */
 +
-+#ifndef __HYPERVISOR_IF_IA64_H__
-+#define __HYPERVISOR_IF_IA64_H__
++#ifndef __XEN_PUBLIC_DOMCTL_H__
++#define __XEN_PUBLIC_DOMCTL_H__
 +
-+/* Structural guest handles introduced in 0x00030201. */
-+#if __XEN_INTERFACE_VERSION__ >= 0x00030201
-+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
-+    typedef struct { type *p; } __guest_handle_ ## name
-+#else
-+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
-+    typedef type * __guest_handle_ ## name
++#if !defined(__XEN__) && !defined(__XEN_TOOLS__)
++#error "domctl operations are intended for use by node control tools only"
 +#endif
 +
-+#define DEFINE_XEN_GUEST_HANDLE(name)   __DEFINE_XEN_GUEST_HANDLE(name, name)
-+#define XEN_GUEST_HANDLE(name)          __guest_handle_ ## name
-+#define XEN_GUEST_HANDLE_64(name)       XEN_GUEST_HANDLE(name)
-+#define uint64_aligned_t                uint64_t
-+#define set_xen_guest_handle(hnd, val)  do { (hnd).p = val; } while (0)
-+#ifdef __XEN_TOOLS__
-+#define get_xen_guest_handle(val, hnd)  do { val = (hnd).p; } while (0)
-+#endif
++#include "xen.h"
 +
-+#ifndef __ASSEMBLY__
-+/* Guest handles for primitive C types. */
-+__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
-+__DEFINE_XEN_GUEST_HANDLE(uint,  unsigned int);
-+__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
-+__DEFINE_XEN_GUEST_HANDLE(u64,   unsigned long);
-+DEFINE_XEN_GUEST_HANDLE(char);
-+DEFINE_XEN_GUEST_HANDLE(int);
-+DEFINE_XEN_GUEST_HANDLE(long);
-+DEFINE_XEN_GUEST_HANDLE(void);
++#define XEN_DOMCTL_INTERFACE_VERSION 0x00000005
 +
-+typedef unsigned long xen_pfn_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
-+#define PRI_xen_pfn "lx"
-+#endif
++struct xenctl_cpumap {
++    XEN_GUEST_HANDLE_64(uint8) bitmap;
++    uint32_t nr_cpus;
++};
 +
-+/* Arch specific VIRQs definition */
-+#define VIRQ_ITC        VIRQ_ARCH_0 /* V. Virtual itc timer */
-+#define VIRQ_MCA_CMC    VIRQ_ARCH_1 /* MCA cmc interrupt */
-+#define VIRQ_MCA_CPE    VIRQ_ARCH_2 /* MCA cpe interrupt */
++/*
++ * NB. xen_domctl.domain is an IN/OUT parameter for this operation.
++ * If it is specified as zero, an id is auto-allocated and returned.
++ */
++#define XEN_DOMCTL_createdomain       1
++struct xen_domctl_createdomain {
++    /* IN parameters */
++    uint32_t ssidref;
++    xen_domain_handle_t handle;
++ /* Is this an HVM guest (as opposed to a PV guest)? */
++#define _XEN_DOMCTL_CDF_hvm_guest 0
++#define XEN_DOMCTL_CDF_hvm_guest  (1U<<_XEN_DOMCTL_CDF_hvm_guest)
++ /* Use hardware-assisted paging if available? */
++#define _XEN_DOMCTL_CDF_hap       1
++#define XEN_DOMCTL_CDF_hap        (1U<<_XEN_DOMCTL_CDF_hap)
++    uint32_t flags;
++};
++typedef struct xen_domctl_createdomain xen_domctl_createdomain_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t);
 +
-+/* Maximum number of virtual CPUs in multi-processor guests. */
-+/* WARNING: before changing this, check that shared_info fits on a page */
-+#define MAX_VIRT_CPUS 64
++#define XEN_DOMCTL_destroydomain      2
++#define XEN_DOMCTL_pausedomain        3
++#define XEN_DOMCTL_unpausedomain      4
++#define XEN_DOMCTL_resumedomain      27
 +
-+#ifndef __ASSEMBLY__
++#define XEN_DOMCTL_getdomaininfo      5
++struct xen_domctl_getdomaininfo {
++    /* OUT variables. */
++    domid_t  domain;              /* Also echoed in domctl.domain */
++ /* Domain is scheduled to die. */
++#define _XEN_DOMINF_dying     0
++#define XEN_DOMINF_dying      (1U<<_XEN_DOMINF_dying)
++ /* Domain is an HVM guest (as opposed to a PV guest). */
++#define _XEN_DOMINF_hvm_guest 1
++#define XEN_DOMINF_hvm_guest  (1U<<_XEN_DOMINF_hvm_guest)
++ /* The guest OS has shut down. */
++#define _XEN_DOMINF_shutdown  2
++#define XEN_DOMINF_shutdown   (1U<<_XEN_DOMINF_shutdown)
++ /* Currently paused by control software. */
++#define _XEN_DOMINF_paused    3
++#define XEN_DOMINF_paused     (1U<<_XEN_DOMINF_paused)
++ /* Currently blocked pending an event.     */
++#define _XEN_DOMINF_blocked   4
++#define XEN_DOMINF_blocked    (1U<<_XEN_DOMINF_blocked)
++ /* Domain is currently running.            */
++#define _XEN_DOMINF_running   5
++#define XEN_DOMINF_running    (1U<<_XEN_DOMINF_running)
++ /* Being debugged.  */
++#define _XEN_DOMINF_debugged  6
++#define XEN_DOMINF_debugged   (1U<<_XEN_DOMINF_debugged)
++ /* CPU to which this domain is bound.      */
++#define XEN_DOMINF_cpumask      255
++#define XEN_DOMINF_cpushift       8
++ /* XEN_DOMINF_shutdown guest-supplied code.  */
++#define XEN_DOMINF_shutdownmask 255
++#define XEN_DOMINF_shutdownshift 16
++    uint32_t flags;              /* XEN_DOMINF_* */
++    uint64_aligned_t tot_pages;
++    uint64_aligned_t max_pages;
++    uint64_aligned_t shared_info_frame; /* GMFN of shared_info struct */
++    uint64_aligned_t cpu_time;
++    uint32_t nr_online_vcpus;    /* Number of VCPUs currently online. */
++    uint32_t max_vcpu_id;        /* Maximum VCPUID in use by this domain. */
++    uint32_t ssidref;
++    xen_domain_handle_t handle;
++};
++typedef struct xen_domctl_getdomaininfo xen_domctl_getdomaininfo_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdomaininfo_t);
 +
-+typedef unsigned long xen_ulong_t;
 +
-+#define INVALID_MFN       (~0UL)
++#define XEN_DOMCTL_getmemlist         6
++struct xen_domctl_getmemlist {
++    /* IN variables. */
++    /* Max entries to write to output buffer. */
++    uint64_aligned_t max_pfns;
++    /* Start index in guest's page list. */
++    uint64_aligned_t start_pfn;
++    XEN_GUEST_HANDLE_64(uint64) buffer;
++    /* OUT variables. */
++    uint64_aligned_t num_pfns;
++};
++typedef struct xen_domctl_getmemlist xen_domctl_getmemlist_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_getmemlist_t);
 +
-+#define MEM_G   (1UL << 30)
-+#define MEM_M   (1UL << 20)
-+#define MEM_K   (1UL << 10)
 +
-+#define MMIO_START       (3 * MEM_G)
-+#define MMIO_SIZE        (512 * MEM_M)
++#define XEN_DOMCTL_getpageframeinfo   7
 +
-+#define VGA_IO_START     0xA0000UL
-+#define VGA_IO_SIZE      0x20000
++#define XEN_DOMCTL_PFINFO_LTAB_SHIFT 28
++#define XEN_DOMCTL_PFINFO_NOTAB   (0x0U<<28)
++#define XEN_DOMCTL_PFINFO_L1TAB   (0x1U<<28)
++#define XEN_DOMCTL_PFINFO_L2TAB   (0x2U<<28)
++#define XEN_DOMCTL_PFINFO_L3TAB   (0x3U<<28)
++#define XEN_DOMCTL_PFINFO_L4TAB   (0x4U<<28)
++#define XEN_DOMCTL_PFINFO_LTABTYPE_MASK (0x7U<<28)
++#define XEN_DOMCTL_PFINFO_LPINTAB (0x1U<<31)
++#define XEN_DOMCTL_PFINFO_XTAB    (0xfU<<28) /* invalid page */
++#define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28)
 +
-+#define LEGACY_IO_START  (MMIO_START + MMIO_SIZE)
-+#define LEGACY_IO_SIZE   (64*MEM_M)
++struct xen_domctl_getpageframeinfo {
++    /* IN variables. */
++    uint64_aligned_t gmfn; /* GMFN to query */
++    /* OUT variables. */
++    /* Is the page PINNED to a type? */
++    uint32_t type;         /* see above type defs */
++};
++typedef struct xen_domctl_getpageframeinfo xen_domctl_getpageframeinfo_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo_t);
 +
-+#define IO_PAGE_START (LEGACY_IO_START + LEGACY_IO_SIZE)
-+#define IO_PAGE_SIZE  PAGE_SIZE
 +
-+#define STORE_PAGE_START (IO_PAGE_START + IO_PAGE_SIZE)
-+#define STORE_PAGE_SIZE  PAGE_SIZE
++#define XEN_DOMCTL_getpageframeinfo2  8
++struct xen_domctl_getpageframeinfo2 {
++    /* IN variables. */
++    uint64_aligned_t num;
++    /* IN/OUT variables. */
++    XEN_GUEST_HANDLE_64(uint32) array;
++};
++typedef struct xen_domctl_getpageframeinfo2 xen_domctl_getpageframeinfo2_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo2_t);
 +
-+#define BUFFER_IO_PAGE_START (STORE_PAGE_START+STORE_PAGE_SIZE)
-+#define BUFFER_IO_PAGE_SIZE PAGE_SIZE
 +
-+#define BUFFER_PIO_PAGE_START (BUFFER_IO_PAGE_START+BUFFER_IO_PAGE_SIZE)
-+#define BUFFER_PIO_PAGE_SIZE PAGE_SIZE
++/*
++ * Control shadow pagetables operation
++ */
++#define XEN_DOMCTL_shadow_op         10
 +
-+#define IO_SAPIC_START   0xfec00000UL
-+#define IO_SAPIC_SIZE    0x100000
++/* Disable shadow mode. */
++#define XEN_DOMCTL_SHADOW_OP_OFF         0
 +
-+#define PIB_START 0xfee00000UL
-+#define PIB_SIZE 0x200000
++/* Enable shadow mode (mode contains ORed XEN_DOMCTL_SHADOW_ENABLE_* flags). */
++#define XEN_DOMCTL_SHADOW_OP_ENABLE      32
 +
-+#define GFW_START        (4*MEM_G -16*MEM_M)
-+#define GFW_SIZE         (16*MEM_M)
++/* Log-dirty bitmap operations. */
++ /* Return the bitmap and clean internal copy for next round. */
++#define XEN_DOMCTL_SHADOW_OP_CLEAN       11
++ /* Return the bitmap but do not modify internal copy. */
++#define XEN_DOMCTL_SHADOW_OP_PEEK        12
 +
-+/* Nvram belongs to GFW memory space  */
-+#define NVRAM_SIZE       (MEM_K * 64)
-+#define NVRAM_START      (GFW_START + 10 * MEM_M)
++/* Memory allocation accessors. */
++#define XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION   30
++#define XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION   31
 +
-+#define NVRAM_VALID_SIG 0x4650494e45584948 		// "HIXENIPF"
-+struct nvram_save_addr {
-+    unsigned long addr;
-+    unsigned long signature;
++/* Legacy enable operations. */
++ /* Equiv. to ENABLE with no mode flags. */
++#define XEN_DOMCTL_SHADOW_OP_ENABLE_TEST       1
++ /* Equiv. to ENABLE with mode flag ENABLE_LOG_DIRTY. */
++#define XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY   2
++ /* Equiv. to ENABLE with mode flags ENABLE_REFCOUNT and ENABLE_TRANSLATE. */
++#define XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE  3
++
++/* Mode flags for XEN_DOMCTL_SHADOW_OP_ENABLE. */
++ /*
++  * Shadow pagetables are refcounted: guest does not use explicit mmu
++  * operations nor write-protect its pagetables.
++  */
++#define XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT  (1 << 1)
++ /*
++  * Log pages in a bitmap as they are dirtied.
++  * Used for live relocation to determine which pages must be re-sent.
++  */
++#define XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY (1 << 2)
++ /*
++  * Automatically translate GPFNs into MFNs.
++  */
++#define XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE (1 << 3)
++ /*
++  * Xen does not steal virtual address space from the guest.
++  * Requires HVM support.
++  */
++#define XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL  (1 << 4)
++
++struct xen_domctl_shadow_op_stats {
++    uint32_t fault_count;
++    uint32_t dirty_count;
 +};
++typedef struct xen_domctl_shadow_op_stats xen_domctl_shadow_op_stats_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_stats_t);
 +
-+struct pt_fpreg {
-+    union {
-+        unsigned long bits[2];
-+        long double __dummy;    /* force 16-byte alignment */
-+    } u;
++struct xen_domctl_shadow_op {
++    /* IN variables. */
++    uint32_t       op;       /* XEN_DOMCTL_SHADOW_OP_* */
++
++    /* OP_ENABLE */
++    uint32_t       mode;     /* XEN_DOMCTL_SHADOW_ENABLE_* */
++
++    /* OP_GET_ALLOCATION / OP_SET_ALLOCATION */
++    uint32_t       mb;       /* Shadow memory allocation in MB */
++
++    /* OP_PEEK / OP_CLEAN */
++    XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
++    uint64_aligned_t pages; /* Size of buffer. Updated with actual size. */
++    struct xen_domctl_shadow_op_stats stats;
 +};
++typedef struct xen_domctl_shadow_op xen_domctl_shadow_op_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_t);
 +
-+struct cpu_user_regs {
-+    /* The following registers are saved by SAVE_MIN: */
-+    unsigned long b6;  /* scratch */
-+    unsigned long b7;  /* scratch */
-+
-+    unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
-+    unsigned long ar_ssd; /* reserved for future use (scratch) */
-+
-+    unsigned long r8;  /* scratch (return value register 0) */
-+    unsigned long r9;  /* scratch (return value register 1) */
-+    unsigned long r10; /* scratch (return value register 2) */
-+    unsigned long r11; /* scratch (return value register 3) */
-+
-+    unsigned long cr_ipsr; /* interrupted task's psr */
-+    unsigned long cr_iip;  /* interrupted task's instruction pointer */
-+    unsigned long cr_ifs;  /* interrupted task's function state */
-+
-+    unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
-+    unsigned long ar_pfs;  /* prev function state  */
-+    unsigned long ar_rsc;  /* RSE configuration */
-+    /* The following two are valid only if cr_ipsr.cpl > 0: */
-+    unsigned long ar_rnat;  /* RSE NaT */
-+    unsigned long ar_bspstore; /* RSE bspstore */
-+
-+    unsigned long pr;  /* 64 predicate registers (1 bit each) */
-+    unsigned long b0;  /* return pointer (bp) */
-+    unsigned long loadrs;  /* size of dirty partition << 16 */
-+
-+    unsigned long r1;  /* the gp pointer */
-+    unsigned long r12; /* interrupted task's memory stack pointer */
-+    unsigned long r13; /* thread pointer */
-+
-+    unsigned long ar_fpsr;  /* floating point status (preserved) */
-+    unsigned long r15;  /* scratch */
-+
-+ /* The remaining registers are NOT saved for system calls.  */
-+
-+    unsigned long r14;  /* scratch */
-+    unsigned long r2;  /* scratch */
-+    unsigned long r3;  /* scratch */
-+    unsigned long r16;  /* scratch */
-+    unsigned long r17;  /* scratch */
-+    unsigned long r18;  /* scratch */
-+    unsigned long r19;  /* scratch */
-+    unsigned long r20;  /* scratch */
-+    unsigned long r21;  /* scratch */
-+    unsigned long r22;  /* scratch */
-+    unsigned long r23;  /* scratch */
-+    unsigned long r24;  /* scratch */
-+    unsigned long r25;  /* scratch */
-+    unsigned long r26;  /* scratch */
-+    unsigned long r27;  /* scratch */
-+    unsigned long r28;  /* scratch */
-+    unsigned long r29;  /* scratch */
-+    unsigned long r30;  /* scratch */
-+    unsigned long r31;  /* scratch */
-+    unsigned long ar_ccv;  /* compare/exchange value (scratch) */
 +
-+    /*
-+     * Floating point registers that the kernel considers scratch:
-+     */
-+    struct pt_fpreg f6;  /* scratch */
-+    struct pt_fpreg f7;  /* scratch */
-+    struct pt_fpreg f8;  /* scratch */
-+    struct pt_fpreg f9;  /* scratch */
-+    struct pt_fpreg f10;  /* scratch */
-+    struct pt_fpreg f11;  /* scratch */
-+    unsigned long r4;  /* preserved */
-+    unsigned long r5;  /* preserved */
-+    unsigned long r6;  /* preserved */
-+    unsigned long r7;  /* preserved */
-+    unsigned long eml_unat;    /* used for emulating instruction */
-+    unsigned long pad0;     /* alignment pad */
++#define XEN_DOMCTL_max_mem           11
++struct xen_domctl_max_mem {
++    /* IN variables. */
++    uint64_aligned_t max_memkb;
++};
++typedef struct xen_domctl_max_mem xen_domctl_max_mem_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_mem_t);
 +
++
++#define XEN_DOMCTL_setvcpucontext    12
++#define XEN_DOMCTL_getvcpucontext    13
++struct xen_domctl_vcpucontext {
++    uint32_t              vcpu;                  /* IN */
++    XEN_GUEST_HANDLE_64(vcpu_guest_context_t) ctxt; /* IN/OUT */
 +};
-+typedef struct cpu_user_regs cpu_user_regs_t;
++typedef struct xen_domctl_vcpucontext xen_domctl_vcpucontext_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpucontext_t);
 +
-+union vac {
-+    unsigned long value;
-+    struct {
-+        int a_int:1;
-+        int a_from_int_cr:1;
-+        int a_to_int_cr:1;
-+        int a_from_psr:1;
-+        int a_from_cpuid:1;
-+        int a_cover:1;
-+        int a_bsw:1;
-+        long reserved:57;
-+    };
++
++#define XEN_DOMCTL_getvcpuinfo       14
++struct xen_domctl_getvcpuinfo {
++    /* IN variables. */
++    uint32_t vcpu;
++    /* OUT variables. */
++    uint8_t  online;                  /* currently online (not hotplugged)? */
++    uint8_t  blocked;                 /* blocked waiting for an event? */
++    uint8_t  running;                 /* currently scheduled on its CPU? */
++    uint64_aligned_t cpu_time;        /* total cpu time consumed (ns) */
++    uint32_t cpu;                     /* current mapping   */
 +};
-+typedef union vac vac_t;
++typedef struct xen_domctl_getvcpuinfo xen_domctl_getvcpuinfo_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvcpuinfo_t);
 +
-+union vdc {
-+    unsigned long value;
-+    struct {
-+        int d_vmsw:1;
-+        int d_extint:1;
-+        int d_ibr_dbr:1;
-+        int d_pmc:1;
-+        int d_to_pmd:1;
-+        int d_itm:1;
-+        long reserved:58;
-+    };
++
++/* Get/set which physical cpus a vcpu can execute on. */
++#define XEN_DOMCTL_setvcpuaffinity    9
++#define XEN_DOMCTL_getvcpuaffinity   25
++struct xen_domctl_vcpuaffinity {
++    uint32_t  vcpu;              /* IN */
++    struct xenctl_cpumap cpumap; /* IN/OUT */
 +};
-+typedef union vdc vdc_t;
++typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t);
 +
-+struct mapped_regs {
-+    union vac   vac;
-+    union vdc   vdc;
-+    unsigned long  virt_env_vaddr;
-+    unsigned long  reserved1[29];
-+    unsigned long  vhpi;
-+    unsigned long  reserved2[95];
-+    union {
-+        unsigned long  vgr[16];
-+        unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0 active
-+    };
-+    union {
-+        unsigned long  vbgr[16];
-+        unsigned long bank0_regs[16]; // bank0 regs (r16-r31) when bank1 active
-+    };
-+    unsigned long  vnat;
-+    unsigned long  vbnat;
-+    unsigned long  vcpuid[5];
-+    unsigned long  reserved3[11];
-+    unsigned long  vpsr;
-+    unsigned long  vpr;
-+    unsigned long  reserved4[76];
-+    union {
-+        unsigned long  vcr[128];
-+        struct {
-+            unsigned long dcr;  // CR0
-+            unsigned long itm;
-+            unsigned long iva;
-+            unsigned long rsv1[5];
-+            unsigned long pta;  // CR8
-+            unsigned long rsv2[7];
-+            unsigned long ipsr;  // CR16
-+            unsigned long isr;
-+            unsigned long rsv3;
-+            unsigned long iip;
-+            unsigned long ifa;
-+            unsigned long itir;
-+            unsigned long iipa;
-+            unsigned long ifs;
-+            unsigned long iim;  // CR24
-+            unsigned long iha;
-+            unsigned long rsv4[38];
-+            unsigned long lid;  // CR64
-+            unsigned long ivr;
-+            unsigned long tpr;
-+            unsigned long eoi;
-+            unsigned long irr[4];
-+            unsigned long itv;  // CR72
-+            unsigned long pmv;
-+            unsigned long cmcv;
-+            unsigned long rsv5[5];
-+            unsigned long lrr0;  // CR80
-+            unsigned long lrr1;
-+            unsigned long rsv6[46];
-+        };
-+    };
++
++#define XEN_DOMCTL_max_vcpus         15
++struct xen_domctl_max_vcpus {
++    uint32_t max;           /* maximum number of vcpus */
++};
++typedef struct xen_domctl_max_vcpus xen_domctl_max_vcpus_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t);
++
++
++#define XEN_DOMCTL_scheduler_op      16
++/* Scheduler types. */
++#define XEN_SCHEDULER_SEDF     4
++#define XEN_SCHEDULER_CREDIT   5
++/* Set or get info? */
++#define XEN_DOMCTL_SCHEDOP_putinfo 0
++#define XEN_DOMCTL_SCHEDOP_getinfo 1
++struct xen_domctl_scheduler_op {
++    uint32_t sched_id;  /* XEN_SCHEDULER_* */
++    uint32_t cmd;       /* XEN_DOMCTL_SCHEDOP_* */
 +    union {
-+        unsigned long  reserved5[128];
-+        struct {
-+            unsigned long precover_ifs;
-+            unsigned long unat;  // not sure if this is needed until NaT arch is done
-+            int interrupt_collection_enabled; // virtual psr.ic
-+            /* virtual interrupt deliverable flag is evtchn_upcall_mask in
-+             * shared info area now. interrupt_mask_addr is the address
-+             * of evtchn_upcall_mask for current vcpu
-+             */
-+            unsigned char *interrupt_mask_addr;
-+            int pending_interruption;
-+            unsigned char vpsr_pp;
-+            unsigned char vpsr_dfh;
-+            unsigned char hpsr_dfh;
-+            unsigned char hpsr_mfh;
-+            unsigned long reserved5_1[4];
-+            int metaphysical_mode; // 1 = use metaphys mapping, 0 = use virtual
-+            int banknum; // 0 or 1, which virtual register bank is active
-+            unsigned long rrs[8]; // region registers
-+            unsigned long krs[8]; // kernel registers
-+            unsigned long pkrs[8]; // protection key registers
-+            unsigned long tmp[8]; // temp registers (e.g. for hyperprivops)
-+        };
-+    };
++        struct xen_domctl_sched_sedf {
++            uint64_aligned_t period;
++            uint64_aligned_t slice;
++            uint64_aligned_t latency;
++            uint32_t extratime;
++            uint32_t weight;
++        } sedf;
++        struct xen_domctl_sched_credit {
++            uint16_t weight;
++            uint16_t cap;
++        } credit;
++    } u;
 +};
-+typedef struct mapped_regs mapped_regs_t;
++typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_scheduler_op_t);
 +
-+struct vpd {
-+    struct mapped_regs vpd_low;
-+    unsigned long  reserved6[3456];
-+    unsigned long  vmm_avail[128];
-+    unsigned long  reserved7[4096];
++
++#define XEN_DOMCTL_setdomainhandle   17
++struct xen_domctl_setdomainhandle {
++    xen_domain_handle_t handle;
 +};
-+typedef struct vpd vpd_t;
++typedef struct xen_domctl_setdomainhandle xen_domctl_setdomainhandle_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdomainhandle_t);
 +
-+struct arch_vcpu_info {
++
++#define XEN_DOMCTL_setdebugging      18
++struct xen_domctl_setdebugging {
++    uint8_t enable;
 +};
-+typedef struct arch_vcpu_info arch_vcpu_info_t;
++typedef struct xen_domctl_setdebugging xen_domctl_setdebugging_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdebugging_t);
 +
-+struct arch_shared_info {
-+    /* PFN of the start_info page.  */
-+    unsigned long start_info_pfn;
 +
-+    /* Interrupt vector for event channel.  */
-+    int evtchn_vector;
++#define XEN_DOMCTL_irq_permission    19
++struct xen_domctl_irq_permission {
++    uint8_t pirq;
++    uint8_t allow_access;    /* flag to specify enable/disable of IRQ access */
++};
++typedef struct xen_domctl_irq_permission xen_domctl_irq_permission_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_irq_permission_t);
++
 +
-+    uint64_t pad[32];
++#define XEN_DOMCTL_iomem_permission  20
++struct xen_domctl_iomem_permission {
++    uint64_aligned_t first_mfn;/* first page (physical page number) in range */
++    uint64_aligned_t nr_mfns;  /* number of pages in range (>0) */
++    uint8_t  allow_access;     /* allow (!0) or deny (0) access to range? */
 +};
-+typedef struct arch_shared_info arch_shared_info_t;
++typedef struct xen_domctl_iomem_permission xen_domctl_iomem_permission_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_iomem_permission_t);
 +
-+typedef unsigned long xen_callback_t;
 +
-+struct ia64_tr_entry {
-+    unsigned long pte;
-+    unsigned long itir;
-+    unsigned long vadr;
-+    unsigned long rid;
++#define XEN_DOMCTL_ioport_permission 21
++struct xen_domctl_ioport_permission {
++    uint32_t first_port;              /* first port int range */
++    uint32_t nr_ports;                /* size of port range */
++    uint8_t  allow_access;            /* allow or deny access to range? */
 +};
++typedef struct xen_domctl_ioport_permission xen_domctl_ioport_permission_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_permission_t);
 +
-+struct vcpu_extra_regs {
-+    struct ia64_tr_entry itrs[8];
-+    struct ia64_tr_entry dtrs[8];
-+    unsigned long iva;
-+    unsigned long dcr;
-+    unsigned long event_callback_ip;
++
++#define XEN_DOMCTL_hypercall_init    22
++struct xen_domctl_hypercall_init {
++    uint64_aligned_t  gmfn;           /* GMFN to be initialised */
 +};
++typedef struct xen_domctl_hypercall_init xen_domctl_hypercall_init_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_hypercall_init_t);
 +
-+struct vcpu_guest_context {
-+#define VGCF_EXTRA_REGS (1<<1)	/* Get/Set extra regs.  */
-+    unsigned long flags;       /* VGCF_* flags */
 +
-+    struct cpu_user_regs user_regs;
-+    struct vcpu_extra_regs extra_regs;
-+    unsigned long privregs_pfn;
-+};
-+typedef struct vcpu_guest_context vcpu_guest_context_t;
-+DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
++#define XEN_DOMCTL_arch_setup        23
++#define _XEN_DOMAINSETUP_hvm_guest 0
++#define XEN_DOMAINSETUP_hvm_guest  (1UL<<_XEN_DOMAINSETUP_hvm_guest)
++#define _XEN_DOMAINSETUP_query 1 /* Get parameters (for save)  */
++#define XEN_DOMAINSETUP_query  (1UL<<_XEN_DOMAINSETUP_query)
++#define _XEN_DOMAINSETUP_sioemu_guest 2
++#define XEN_DOMAINSETUP_sioemu_guest  (1UL<<_XEN_DOMAINSETUP_sioemu_guest)
++typedef struct xen_domctl_arch_setup {
++    uint64_aligned_t flags;  /* XEN_DOMAINSETUP_* */
++#ifdef __ia64__
++    uint64_aligned_t bp;     /* mpaddr of boot param area */
++    uint64_aligned_t maxmem; /* Highest memory address for MDT.  */
++    uint64_aligned_t xsi_va; /* Xen shared_info area virtual address.  */
++    uint32_t hypercall_imm;  /* Break imm for Xen hypercalls.  */
++    int8_t vhpt_size_log2;   /* Log2 of VHPT size. */
++#endif
++} xen_domctl_arch_setup_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_arch_setup_t);
 +
-+/* dom0 vp op */
-+#define __HYPERVISOR_ia64_dom0vp_op     __HYPERVISOR_arch_0
-+/*  Map io space in machine address to dom0 physical address space.
-+    Currently physical assigned address equals to machine address.  */
-+#define IA64_DOM0VP_ioremap             0
 +
-+/* Convert a pseudo physical page frame number to the corresponding
-+   machine page frame number. If no page is assigned, INVALID_MFN or
-+   GPFN_INV_MASK is returned depending on domain's non-vti/vti mode.  */
-+#define IA64_DOM0VP_phystomach          1
++#define XEN_DOMCTL_settimeoffset     24
++struct xen_domctl_settimeoffset {
++    int32_t  time_offset_seconds; /* applied to domain wallclock time */
++};
++typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t);
 +
-+/* Convert a machine page frame number to the corresponding pseudo physical
-+   page frame number of the caller domain.  */
-+#define IA64_DOM0VP_machtophys          3
++ 
++#define XEN_DOMCTL_gethvmcontext     33
++#define XEN_DOMCTL_sethvmcontext     34
++typedef struct xen_domctl_hvmcontext {
++    uint32_t size; /* IN/OUT: size of buffer / bytes filled */
++    XEN_GUEST_HANDLE_64(uint8) buffer; /* IN/OUT: data, or call
++                                        * gethvmcontext with NULL
++                                        * buffer to get size req'd */
++} xen_domctl_hvmcontext_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t);
 +
-+/* Reserved for future use.  */
-+#define IA64_DOM0VP_iounmap             4
 +
-+/* Unmap and free pages contained in the specified pseudo physical region.  */
-+#define IA64_DOM0VP_zap_physmap         5
++#define XEN_DOMCTL_set_address_size  35
++#define XEN_DOMCTL_get_address_size  36
++typedef struct xen_domctl_address_size {
++    uint32_t size;
++} xen_domctl_address_size_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_address_size_t);
 +
-+/* Assign machine page frame to dom0's pseudo physical address space.  */
-+#define IA64_DOM0VP_add_physmap         6
 +
-+/* expose the p2m table into domain */
-+#define IA64_DOM0VP_expose_p2m          7
++#define XEN_DOMCTL_real_mode_area    26
++struct xen_domctl_real_mode_area {
++    uint32_t log; /* log2 of Real Mode Area size */
++};
++typedef struct xen_domctl_real_mode_area xen_domctl_real_mode_area_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_real_mode_area_t);
 +
-+/* xen perfmon */
-+#define IA64_DOM0VP_perfmon             8
 +
-+/* gmfn version of IA64_DOM0VP_add_physmap */
-+#define IA64_DOM0VP_add_physmap_with_gmfn       9
++#define XEN_DOMCTL_sendtrigger       28
++#define XEN_DOMCTL_SENDTRIGGER_NMI    0
++#define XEN_DOMCTL_SENDTRIGGER_RESET  1
++#define XEN_DOMCTL_SENDTRIGGER_INIT   2
++struct xen_domctl_sendtrigger {
++    uint32_t  trigger;  /* IN */
++    uint32_t  vcpu;     /* IN */
++};
++typedef struct xen_domctl_sendtrigger xen_domctl_sendtrigger_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_sendtrigger_t);
 +
-+/* Add an I/O port space range */
-+#define IA64_DOM0VP_add_io_space        11
 +
-+// flags for page assignement to pseudo physical address space
-+#define _ASSIGN_readonly                0
-+#define ASSIGN_readonly                 (1UL << _ASSIGN_readonly)
-+#define ASSIGN_writable                 (0UL << _ASSIGN_readonly) // dummy flag
-+/* Internal only: memory attribute must be WC/UC/UCE.  */
-+#define _ASSIGN_nocache                 1
-+#define ASSIGN_nocache                  (1UL << _ASSIGN_nocache)
-+// tlb tracking
-+#define _ASSIGN_tlb_track               2
-+#define ASSIGN_tlb_track                (1UL << _ASSIGN_tlb_track)
-+/* Internal only: associated with PGC_allocated bit */
-+#define _ASSIGN_pgc_allocated           3
-+#define ASSIGN_pgc_allocated            (1UL << _ASSIGN_pgc_allocated)
++/* Assign PCI device to HVM guest. Sets up IOMMU structures. */
++#define XEN_DOMCTL_assign_device      37
++#define XEN_DOMCTL_test_assign_device 45
++#define XEN_DOMCTL_deassign_device 47
++struct xen_domctl_assign_device {
++    uint32_t  machine_bdf;   /* machine PCI ID of assigned device */
++};
++typedef struct xen_domctl_assign_device xen_domctl_assign_device_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_assign_device_t);
++
++/* Retrieve sibling device information for machine_bdf */
++#define XEN_DOMCTL_get_device_group 50
++struct xen_domctl_get_device_group {
++    uint32_t  machine_bdf;      /* IN */
++    uint32_t  max_sdevs;        /* IN */
++    uint32_t  num_sdevs;        /* OUT */
++    XEN_GUEST_HANDLE_64(uint32)  sdev_array;   /* OUT */
++};
++typedef struct xen_domctl_get_device_group xen_domctl_get_device_group_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_get_device_group_t);
++
++/* Pass-through interrupts: bind real irq -> hvm devfn. */
++#define XEN_DOMCTL_bind_pt_irq       38
++#define XEN_DOMCTL_unbind_pt_irq     48
++typedef enum pt_irq_type_e {
++    PT_IRQ_TYPE_PCI,
++    PT_IRQ_TYPE_ISA,
++    PT_IRQ_TYPE_MSI,
++} pt_irq_type_t;
++struct xen_domctl_bind_pt_irq {
++    uint32_t machine_irq;
++    pt_irq_type_t irq_type;
++    uint32_t hvm_domid;
 +
-+/* This structure has the same layout of struct ia64_boot_param, defined in
-+   <asm/system.h>.  It is redefined here to ease use.  */
-+struct xen_ia64_boot_param {
-+	unsigned long command_line;	/* physical address of cmd line args */
-+	unsigned long efi_systab;	/* physical address of EFI system table */
-+	unsigned long efi_memmap;	/* physical address of EFI memory map */
-+	unsigned long efi_memmap_size;	/* size of EFI memory map */
-+	unsigned long efi_memdesc_size;	/* size of an EFI memory map descriptor */
-+	unsigned int  efi_memdesc_version;	/* memory descriptor version */
-+	struct {
-+		unsigned short num_cols;	/* number of columns on console.  */
-+		unsigned short num_rows;	/* number of rows on console.  */
-+		unsigned short orig_x;	/* cursor's x position */
-+		unsigned short orig_y;	/* cursor's y position */
-+	} console_info;
-+	unsigned long fpswa;		/* physical address of the fpswa interface */
-+	unsigned long initrd_start;
-+	unsigned long initrd_size;
-+	unsigned long domain_start;	/* va where the boot time domain begins */
-+	unsigned long domain_size;	/* how big is the boot domain */
++    union {
++        struct {
++            uint8_t isa_irq;
++        } isa;
++        struct {
++            uint8_t bus;
++            uint8_t device;
++            uint8_t intx;
++        } pci;
++        struct {
++            uint8_t gvec;
++            uint32_t gflags;
++        } msi;
++    } u;
 +};
++typedef struct xen_domctl_bind_pt_irq xen_domctl_bind_pt_irq_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_bind_pt_irq_t);
 +
-+#endif /* !__ASSEMBLY__ */
 +
-+/* Size of the shared_info area (this is not related to page size).  */
-+#define XSI_SHIFT			14
-+#define XSI_SIZE			(1 << XSI_SHIFT)
-+/* Log size of mapped_regs area (64 KB - only 4KB is used).  */
-+#define XMAPPEDREGS_SHIFT		12
-+#define XMAPPEDREGS_SIZE		(1 << XMAPPEDREGS_SHIFT)
-+/* Offset of XASI (Xen arch shared info) wrt XSI_BASE.  */
-+#define XMAPPEDREGS_OFS			XSI_SIZE
++/* Bind machine I/O address range -> HVM address range. */
++#define XEN_DOMCTL_memory_mapping    39
++#define DPCI_ADD_MAPPING         1
++#define DPCI_REMOVE_MAPPING      0
++struct xen_domctl_memory_mapping {
++    uint64_aligned_t first_gfn; /* first page (hvm guest phys page) in range */
++    uint64_aligned_t first_mfn; /* first page (machine page) in range */
++    uint64_aligned_t nr_mfns;   /* number of pages in range (>0) */
++    uint32_t add_mapping;       /* add or remove mapping */
++    uint32_t padding;           /* padding for 64-bit aligned structure */
++};
++typedef struct xen_domctl_memory_mapping xen_domctl_memory_mapping_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_memory_mapping_t);
++
++
++/* Bind machine I/O port range -> HVM I/O port range. */
++#define XEN_DOMCTL_ioport_mapping    40
++struct xen_domctl_ioport_mapping {
++    uint32_t first_gport;     /* first guest IO port */
++    uint32_t first_mport;     /* first machine IO port */
++    uint32_t nr_ports;        /* size of port range */
++    uint32_t add_mapping;     /* add or remove mapping */
++};
++typedef struct xen_domctl_ioport_mapping xen_domctl_ioport_mapping_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_mapping_t);
 +
-+/* Hyperprivops.  */
-+#define HYPERPRIVOP_START		0x1
-+#define HYPERPRIVOP_RFI			(HYPERPRIVOP_START + 0x0)
-+#define HYPERPRIVOP_RSM_DT		(HYPERPRIVOP_START + 0x1)
-+#define HYPERPRIVOP_SSM_DT		(HYPERPRIVOP_START + 0x2)
-+#define HYPERPRIVOP_COVER		(HYPERPRIVOP_START + 0x3)
-+#define HYPERPRIVOP_ITC_D		(HYPERPRIVOP_START + 0x4)
-+#define HYPERPRIVOP_ITC_I		(HYPERPRIVOP_START + 0x5)
-+#define HYPERPRIVOP_SSM_I		(HYPERPRIVOP_START + 0x6)
-+#define HYPERPRIVOP_GET_IVR		(HYPERPRIVOP_START + 0x7)
-+#define HYPERPRIVOP_GET_TPR		(HYPERPRIVOP_START + 0x8)
-+#define HYPERPRIVOP_SET_TPR		(HYPERPRIVOP_START + 0x9)
-+#define HYPERPRIVOP_EOI			(HYPERPRIVOP_START + 0xa)
-+#define HYPERPRIVOP_SET_ITM		(HYPERPRIVOP_START + 0xb)
-+#define HYPERPRIVOP_THASH		(HYPERPRIVOP_START + 0xc)
-+#define HYPERPRIVOP_PTC_GA		(HYPERPRIVOP_START + 0xd)
-+#define HYPERPRIVOP_ITR_D		(HYPERPRIVOP_START + 0xe)
-+#define HYPERPRIVOP_GET_RR		(HYPERPRIVOP_START + 0xf)
-+#define HYPERPRIVOP_SET_RR		(HYPERPRIVOP_START + 0x10)
-+#define HYPERPRIVOP_SET_KR		(HYPERPRIVOP_START + 0x11)
-+#define HYPERPRIVOP_FC			(HYPERPRIVOP_START + 0x12)
-+#define HYPERPRIVOP_GET_CPUID		(HYPERPRIVOP_START + 0x13)
-+#define HYPERPRIVOP_GET_PMD		(HYPERPRIVOP_START + 0x14)
-+#define HYPERPRIVOP_GET_EFLAG		(HYPERPRIVOP_START + 0x15)
-+#define HYPERPRIVOP_SET_EFLAG		(HYPERPRIVOP_START + 0x16)
-+#define HYPERPRIVOP_RSM_BE		(HYPERPRIVOP_START + 0x17)
-+#define HYPERPRIVOP_GET_PSR		(HYPERPRIVOP_START + 0x18)
-+#define HYPERPRIVOP_MAX			(0x19)
 +
-+/* Fast and light hypercalls.  */
-+#define __HYPERVISOR_ia64_fast_eoi	__HYPERVISOR_arch_1
++/*
++ * Pin caching type of RAM space for x86 HVM domU.
++ */
++#define XEN_DOMCTL_pin_mem_cacheattr 41
++/* Caching types: these happen to be the same as x86 MTRR/PAT type codes. */
++#define XEN_DOMCTL_MEM_CACHEATTR_UC  0
++#define XEN_DOMCTL_MEM_CACHEATTR_WC  1
++#define XEN_DOMCTL_MEM_CACHEATTR_WT  4
++#define XEN_DOMCTL_MEM_CACHEATTR_WP  5
++#define XEN_DOMCTL_MEM_CACHEATTR_WB  6
++#define XEN_DOMCTL_MEM_CACHEATTR_UCM 7
++struct xen_domctl_pin_mem_cacheattr {
++    uint64_aligned_t start, end;
++    unsigned int type; /* XEN_DOMCTL_MEM_CACHEATTR_* */
++};
++typedef struct xen_domctl_pin_mem_cacheattr xen_domctl_pin_mem_cacheattr_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_pin_mem_cacheattr_t);
++
++
++#define XEN_DOMCTL_set_ext_vcpucontext 42
++#define XEN_DOMCTL_get_ext_vcpucontext 43
++struct xen_domctl_ext_vcpucontext {
++    /* IN: VCPU that this call applies to. */
++    uint32_t         vcpu;
++    /*
++     * SET: Size of struct (IN)
++     * GET: Size of struct (OUT)
++     */
++    uint32_t         size;
++#if defined(__i386__) || defined(__x86_64__)
++    /* SYSCALL from 32-bit mode and SYSENTER callback information. */
++    /* NB. SYSCALL from 64-bit mode is contained in vcpu_guest_context_t */
++    uint64_aligned_t syscall32_callback_eip;
++    uint64_aligned_t sysenter_callback_eip;
++    uint16_t         syscall32_callback_cs;
++    uint16_t         sysenter_callback_cs;
++    uint8_t          syscall32_disables_events;
++    uint8_t          sysenter_disables_events;
++#endif
++};
++typedef struct xen_domctl_ext_vcpucontext xen_domctl_ext_vcpucontext_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_ext_vcpucontext_t);
 +
-+/* Xencomm macros.  */
-+#define XENCOMM_INLINE_MASK 0xf800000000000000UL
-+#define XENCOMM_INLINE_FLAG 0x8000000000000000UL
++/*
++ * Set optimization features for a domain
++ */
++#define XEN_DOMCTL_set_opt_feature    44
++struct xen_domctl_set_opt_feature {
++#if defined(__ia64__)
++    struct xen_ia64_opt_feature optf;
++#else
++    /* Make struct non-empty: do not depend on this field name! */
++    uint64_t dummy;
++#endif
++};
++typedef struct xen_domctl_set_opt_feature xen_domctl_set_opt_feature_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_opt_feature_t);
 +
-+#define XENCOMM_IS_INLINE(addr) \
-+  (((unsigned long)(addr) & XENCOMM_INLINE_MASK) == XENCOMM_INLINE_FLAG)
-+#define XENCOMM_INLINE_ADDR(addr) \
-+  ((unsigned long)(addr) & ~XENCOMM_INLINE_MASK)
++/*
++ * Set the target domain for a domain
++ */
++#define XEN_DOMCTL_set_target    46
++struct xen_domctl_set_target {
++    domid_t target;
++};
++typedef struct xen_domctl_set_target xen_domctl_set_target_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_target_t);
 +
-+/* xen perfmon */
-+#ifdef XEN
-+#ifndef __ASSEMBLY__
-+#ifndef _ASM_IA64_PERFMON_H
++#if defined(__i386__) || defined(__x86_64__)
++# define XEN_CPUID_INPUT_UNUSED  0xFFFFFFFF
++# define XEN_DOMCTL_set_cpuid 49
++struct xen_domctl_cpuid {
++  unsigned int  input[2];
++  unsigned int  eax;
++  unsigned int  ebx;
++  unsigned int  ecx;
++  unsigned int  edx;
++};
++typedef struct xen_domctl_cpuid xen_domctl_cpuid_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpuid_t);
++#endif
 +
-+#include <xen/list.h>   // asm/perfmon.h requires struct list_head
-+#include <asm/perfmon.h>
-+// for PFM_xxx and pfarg_features_t, pfarg_context_t, pfarg_reg_t, pfarg_load_t
++#define XEN_DOMCTL_subscribe          29
++struct xen_domctl_subscribe {
++    uint32_t port; /* IN */
++};
++typedef struct xen_domctl_subscribe xen_domctl_subscribe_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_subscribe_t);
 +
-+#endif /* _ASM_IA64_PERFMON_H */
++/*
++ * Define the maximum machine address size which should be allocated
++ * to a guest.
++ */
++#define XEN_DOMCTL_set_machine_address_size  51
++#define XEN_DOMCTL_get_machine_address_size  52
 +
-+DEFINE_XEN_GUEST_HANDLE(pfarg_features_t);
-+DEFINE_XEN_GUEST_HANDLE(pfarg_context_t);
-+DEFINE_XEN_GUEST_HANDLE(pfarg_reg_t);
-+DEFINE_XEN_GUEST_HANDLE(pfarg_load_t);
-+#endif /* __ASSEMBLY__ */
-+#endif /* XEN */
 +
-+#endif /* __HYPERVISOR_IF_IA64_H__ */
++struct xen_domctl {
++    uint32_t cmd;
++    uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */
++    domid_t  domain;
++    union {
++        struct xen_domctl_createdomain      createdomain;
++        struct xen_domctl_getdomaininfo     getdomaininfo;
++        struct xen_domctl_getmemlist        getmemlist;
++        struct xen_domctl_getpageframeinfo  getpageframeinfo;
++        struct xen_domctl_getpageframeinfo2 getpageframeinfo2;
++        struct xen_domctl_vcpuaffinity      vcpuaffinity;
++        struct xen_domctl_shadow_op         shadow_op;
++        struct xen_domctl_max_mem           max_mem;
++        struct xen_domctl_vcpucontext       vcpucontext;
++        struct xen_domctl_getvcpuinfo       getvcpuinfo;
++        struct xen_domctl_max_vcpus         max_vcpus;
++        struct xen_domctl_scheduler_op      scheduler_op;
++        struct xen_domctl_setdomainhandle   setdomainhandle;
++        struct xen_domctl_setdebugging      setdebugging;
++        struct xen_domctl_irq_permission    irq_permission;
++        struct xen_domctl_iomem_permission  iomem_permission;
++        struct xen_domctl_ioport_permission ioport_permission;
++        struct xen_domctl_hypercall_init    hypercall_init;
++        struct xen_domctl_arch_setup        arch_setup;
++        struct xen_domctl_settimeoffset     settimeoffset;
++        struct xen_domctl_real_mode_area    real_mode_area;
++        struct xen_domctl_hvmcontext        hvmcontext;
++        struct xen_domctl_address_size      address_size;
++        struct xen_domctl_sendtrigger       sendtrigger;
++        struct xen_domctl_get_device_group  get_device_group;
++        struct xen_domctl_assign_device     assign_device;
++        struct xen_domctl_bind_pt_irq       bind_pt_irq;
++        struct xen_domctl_memory_mapping    memory_mapping;
++        struct xen_domctl_ioport_mapping    ioport_mapping;
++        struct xen_domctl_pin_mem_cacheattr pin_mem_cacheattr;
++        struct xen_domctl_ext_vcpucontext   ext_vcpucontext;
++        struct xen_domctl_set_opt_feature   set_opt_feature;
++        struct xen_domctl_set_target        set_target;
++        struct xen_domctl_subscribe         subscribe;
++#if defined(__i386__) || defined(__x86_64__)
++        struct xen_domctl_cpuid             cpuid;
++#endif
++        uint8_t                             pad[128];
++    } u;
++};
++typedef struct xen_domctl xen_domctl_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_t);
++
++#endif /* __XEN_PUBLIC_DOMCTL_H__ */
 +
 +/*
 + * Local variables:
@@ -96833,11 +136093,15 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
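
All of the operations above are funnelled through the single struct xen_domctl
at the end of this header, with cmd selecting the union member. A minimal
sketch of how a toolstack-side caller might fill it in for
XEN_DOMCTL_settimeoffset, assuming a do_domctl() delivery routine in the
spirit of libxc's; the wrapper name and its return convention are illustrative
assumptions, not the exact library API:

    #include <string.h>
    /* Assumes the xen/interface/domctl.h definitions from the hunk
     * above are in scope. do_domctl() stands in for whatever actually
     * issues the hypercall (libxc goes through the dom0 privcmd device). */
    static int set_time_offset(int (*do_domctl)(struct xen_domctl *),
                               domid_t domid, int32_t offset_seconds)
    {
        struct xen_domctl domctl;

        memset(&domctl, 0, sizeof(domctl));
        domctl.cmd = XEN_DOMCTL_settimeoffset;
        domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
        domctl.domain = domid;
        domctl.u.settimeoffset.time_offset_seconds = offset_seconds;

        return do_domctl(&domctl);
    }

The 128-byte pad member keeps sizeof(struct xen_domctl) stable as sub-ops
are added, so new operations such as set_cpuid can join the union without
changing the structure size seen by existing callers.
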
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/arch-powerpc.h tmp-linux-2.6-xen.patch/include/xen/interface/arch-powerpc.h
---- pristine-linux-2.6.18.2/include/xen/interface/arch-powerpc.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/arch-powerpc.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,125 @@
-+/*
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/elfnote.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/elfnote.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,233 @@
++/******************************************************************************
++ * elfnote.h
++ *
++ * Definitions used for the Xen ELF notes.
++ *
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
 + * deal in the Software without restriction, including without limitation the
@@ -96856,311 +136120,206 @@
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
 + *
-+ * Copyright (C) IBM Corp. 2005, 2006
-+ *
-+ * Authors: Hollis Blanchard <hollisb at us.ibm.com>
++ * Copyright (c) 2006, Ian Campbell, XenSource Ltd.
 + */
 +
-+#ifndef __XEN_PUBLIC_ARCH_PPC_64_H__
-+#define __XEN_PUBLIC_ARCH_PPC_64_H__
-+
-+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
-+    typedef struct { \
-+        int __pad[(sizeof (long long) - sizeof (void *)) / sizeof (int)]; \
-+        type *p; \
-+    } __attribute__((__aligned__(8))) __guest_handle_ ## name
-+
-+#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
-+#define XEN_GUEST_HANDLE(name)        __guest_handle_ ## name
-+#define set_xen_guest_handle(hnd, val) \
-+    do { \
-+        if (sizeof ((hnd).__pad)) \
-+            (hnd).__pad[0] = 0; \
-+        (hnd).p = val; \
-+    } while (0)
-+
-+#ifdef __XEN_TOOLS__
-+#define get_xen_guest_handle(val, hnd)  do { val = (hnd).p; } while (0)
-+#endif
-+
-+#ifndef __ASSEMBLY__
-+/* Guest handles for primitive C types. */
-+__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
-+__DEFINE_XEN_GUEST_HANDLE(uint,  unsigned int);
-+__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
-+DEFINE_XEN_GUEST_HANDLE(char);
-+DEFINE_XEN_GUEST_HANDLE(int);
-+DEFINE_XEN_GUEST_HANDLE(long);
-+DEFINE_XEN_GUEST_HANDLE(void);
-+
-+typedef unsigned long long xen_pfn_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
-+#define PRI_xen_pfn "llx"
-+#endif
++#ifndef __XEN_PUBLIC_ELFNOTE_H__
++#define __XEN_PUBLIC_ELFNOTE_H__
 +
 +/*
-+ * Pointers and other address fields inside interface structures are padded to
-+ * 64 bits. This means that field alignments aren't different between 32- and
-+ * 64-bit architectures. 
++ * The notes should live in a PT_NOTE segment and have "Xen" in the
++ * name field.
++ *
++ * Numeric types are either 4 or 8 bytes depending on the content of
++ * the desc field.
++ *
++ * LEGACY indicates the fields in the legacy __xen_guest string which
++ * this note type replaces.
 + */
-+/* NB. Multi-level macro ensures __LINE__ is expanded before concatenation. */
-+#define __MEMORY_PADDING(_X)
-+#define _MEMORY_PADDING(_X)  __MEMORY_PADDING(_X)
-+#define MEMORY_PADDING       _MEMORY_PADDING(__LINE__)
-+
-+/* And the trap vector is... */
-+#define TRAP_INSTR "li 0,-1; sc" /* XXX just "sc"? */
-+
-+#ifndef __ASSEMBLY__
-+
-+#define XENCOMM_INLINE_FLAG (1UL << 63)
-+
-+typedef uint64_t xen_ulong_t;
-+
-+/* User-accessible registers: nost of these need to be saved/restored
-+ * for every nested Xen invocation. */
-+struct cpu_user_regs
-+{
-+    uint64_t gprs[32];
-+    uint64_t lr;
-+    uint64_t ctr;
-+    uint64_t srr0;
-+    uint64_t srr1;
-+    uint64_t pc;
-+    uint64_t msr;
-+    uint64_t fpscr;             /* XXX Is this necessary */
-+    uint64_t xer;
-+    uint64_t hid4;              /* debug only */
-+    uint64_t dar;               /* debug only */
-+    uint32_t dsisr;             /* debug only */
-+    uint32_t cr;
-+    uint32_t __pad;             /* good spot for another 32bit reg */
-+    uint32_t entry_vector;
-+};
-+typedef struct cpu_user_regs cpu_user_regs_t;
 +
-+typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ /* XXX timebase */
-+
-+/* ONLY used to communicate with dom0! See also struct exec_domain. */
-+struct vcpu_guest_context {
-+    cpu_user_regs_t user_regs;         /* User-level CPU registers     */
-+    uint64_t sdr1;                     /* Pagetable base               */
-+    /* XXX etc */
-+};
-+typedef struct vcpu_guest_context vcpu_guest_context_t;
-+DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
++/*
++ * NAME=VALUE pair (string).
++ */
++#define XEN_ELFNOTE_INFO           0
 +
-+struct arch_shared_info {
-+    uint64_t boot_timebase;
-+};
++/*
++ * The virtual address of the entry point (numeric).
++ *
++ * LEGACY: VIRT_ENTRY
++ */
++#define XEN_ELFNOTE_ENTRY          1
 +
-+struct arch_vcpu_info {
-+};
++/* The virtual address of the hypercall transfer page (numeric).
++ *
++ * LEGACY: HYPERCALL_PAGE. (n.b. legacy value is a physical page
++ * number not a virtual address)
++ */
++#define XEN_ELFNOTE_HYPERCALL_PAGE 2
 +
-+/* Support for multi-processor guests. */
-+#define MAX_VIRT_CPUS 32
-+#endif
++/* The virtual address where the kernel image should be mapped (numeric).
++ *
++ * Defaults to 0.
++ *
++ * LEGACY: VIRT_BASE
++ */
++#define XEN_ELFNOTE_VIRT_BASE      3
 +
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/arch-x86/xen.h tmp-linux-2.6-xen.patch/include/xen/interface/arch-x86/xen.h
---- pristine-linux-2.6.18.2/include/xen/interface/arch-x86/xen.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/arch-x86/xen.h	2007-09-30 18:06:19.000000000 +0200
-@@ -0,0 +1,205 @@
-+/******************************************************************************
-+ * arch-x86/xen.h
-+ * 
-+ * Guest OS interface to x86 Xen.
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
++/*
++ * The offset of the ELF paddr field from the actual required
++ * pseudo-physical address (numeric).
 + *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
++ * This is used to maintain backwards compatibility with older kernels
++ * which wrote __PAGE_OFFSET into that field. This field defaults to 0
++ * if not present.
 + *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
++ * LEGACY: ELF_PADDR_OFFSET. (n.b. legacy default is VIRT_BASE)
++ */
++#define XEN_ELFNOTE_PADDR_OFFSET   4
++
++/*
++ * The version of Xen that we work with (string).
 + *
-+ * Copyright (c) 2004-2006, K A Fraser
++ * LEGACY: XEN_VER
 + */
++#define XEN_ELFNOTE_XEN_VERSION    5
 +
-+#ifndef __XEN_PUBLIC_ARCH_X86_XEN_H__
-+#define __XEN_PUBLIC_ARCH_X86_XEN_H__
++/*
++ * The name of the guest operating system (string).
++ *
++ * LEGACY: GUEST_OS
++ */
++#define XEN_ELFNOTE_GUEST_OS       6
 +
-+/* Structural guest handles introduced in 0x00030201. */
-+#if __XEN_INTERFACE_VERSION__ >= 0x00030201
-+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
-+    typedef struct { type *p; } __guest_handle_ ## name
-+#else
-+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
-+    typedef type * __guest_handle_ ## name
-+#endif
++/*
++ * The version of the guest operating system (string).
++ *
++ * LEGACY: GUEST_VER
++ */
++#define XEN_ELFNOTE_GUEST_VERSION  7
 +
-+#define DEFINE_XEN_GUEST_HANDLE(name)   __DEFINE_XEN_GUEST_HANDLE(name, name)
-+#define __XEN_GUEST_HANDLE(name)        __guest_handle_ ## name
-+#define XEN_GUEST_HANDLE(name)          __XEN_GUEST_HANDLE(name)
-+#define set_xen_guest_handle(hnd, val)  do { (hnd).p = val; } while (0)
-+#ifdef __XEN_TOOLS__
-+#define get_xen_guest_handle(val, hnd)  do { val = (hnd).p; } while (0)
-+#endif
++/*
++ * The loader type (string).
++ *
++ * LEGACY: LOADER
++ */
++#define XEN_ELFNOTE_LOADER         8
 +
-+#if defined(__i386__)
-+#include "xen-x86_32.h"
-+#elif defined(__x86_64__)
-+#include "xen-x86_64.h"
-+#endif
++/*
++ * The kernel supports PAE (x86/32 only, string = "yes", "no" or
++ * "bimodal").
++ *
++ * For compatibility with Xen 3.0.3 and earlier the "bimodal" setting
++ * may be given as "yes,bimodal" which will cause older Xen to treat
++ * this kernel as PAE.
++ *
++ * LEGACY: PAE (n.b. The legacy interface included a provision to
++ * indicate 'extended-cr3' support allowing L3 page tables to be
++ * placed above 4G. It is assumed that any kernel new enough to use
++ * these ELF notes will include this and therefore "yes" here is
++ * equivalent to "yes[entended-cr3]" in the __xen_guest interface.
++ */
++#define XEN_ELFNOTE_PAE_MODE       9
 +
-+#ifndef __ASSEMBLY__
-+/* Guest handles for primitive C types. */
-+__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
-+__DEFINE_XEN_GUEST_HANDLE(uint,  unsigned int);
-+__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
-+DEFINE_XEN_GUEST_HANDLE(char);
-+DEFINE_XEN_GUEST_HANDLE(int);
-+DEFINE_XEN_GUEST_HANDLE(long);
-+DEFINE_XEN_GUEST_HANDLE(void);
++/*
++ * The features supported/required by this kernel (string).
++ *
++ * The string must consist of a list of feature names (as given in
++ * features.h, without the "XENFEAT_" prefix) separated by '|'
++ * characters. If a feature is required for the kernel to function
++ * then the feature name must be preceded by a '!' character.
++ *
++ * LEGACY: FEATURES
++ */
++#define XEN_ELFNOTE_FEATURES      10
 +
-+typedef unsigned long xen_pfn_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
-+#define PRI_xen_pfn "lx"
-+#endif
++/*
++ * The kernel requires the symbol table to be loaded (string = "yes" or "no").
++ * LEGACY: BSD_SYMTAB (n.b. the legacy interface treated the presence or
++ * absence of this string as a boolean flag rather than requiring "yes" or
++ * "no".)
++ */
++#define XEN_ELFNOTE_BSD_SYMTAB    11
 +
 +/*
-+ * SEGMENT DESCRIPTOR TABLES
++ * The lowest address the hypervisor hole can begin at (numeric).
++ *
++ * This must not be set higher than HYPERVISOR_VIRT_START. Its presence
++ * also indicates to the hypervisor that the kernel can deal with the
++ * hole starting at a higher address.
 + */
++#define XEN_ELFNOTE_HV_START_LOW  12
++
 +/*
-+ * A number of GDT entries are reserved by Xen. These are not situated at the
-+ * start of the GDT because some stupid OSes export hard-coded selector values
-+ * in their ABI. These hard-coded values are always near the start of the GDT,
-+ * so Xen places itself out of the way, at the far end of the GDT.
++ * List of maddr_t-sized mask/value pairs describing how to recognize
++ * (non-present) L1 page table entries carrying valid MFNs (numeric).
 + */
-+#define FIRST_RESERVED_GDT_PAGE  14
-+#define FIRST_RESERVED_GDT_BYTE  (FIRST_RESERVED_GDT_PAGE * 4096)
-+#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
++#define XEN_ELFNOTE_L1_MFN_VALID  13
 +
-+/* Maximum number of virtual CPUs in multi-processor guests. */
-+#define MAX_VIRT_CPUS 32
++/*
++ * Whether or not the guest supports cooperative suspend cancellation.
++ */
++#define XEN_ELFNOTE_SUSPEND_CANCEL 14
 +
-+#ifndef __ASSEMBLY__
++/*
++ * The number of the highest elfnote defined.
++ */
++#define XEN_ELFNOTE_MAX XEN_ELFNOTE_SUSPEND_CANCEL
 +
-+typedef unsigned long xen_ulong_t;
++/*
++ * System information exported through crash notes.
++ *
++ * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO 
++ * note in case of a system crash. This note will contain various
++ * information about the system, see xen/include/xen/elfcore.h.
++ */
++#define XEN_ELFNOTE_CRASH_INFO 0x1000001
 +
 +/*
-+ * Send an array of these to HYPERVISOR_set_trap_table().
-+ * The privilege level specifies which modes may enter a trap via a software
-+ * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate
-+ * privilege levels as follows:
-+ *  Level == 0: Noone may enter
-+ *  Level == 1: Kernel may enter
-+ *  Level == 2: Kernel may enter
-+ *  Level == 3: Everyone may enter
++ * System registers exported through crash notes.
++ *
++ * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS 
++ * note per cpu in case of a system crash. This note is architecture
++ * specific and will contain registers not saved in the "CORE" note.
++ * See xen/include/xen/elfcore.h for more information.
 + */
-+#define TI_GET_DPL(_ti)      ((_ti)->flags & 3)
-+#define TI_GET_IF(_ti)       ((_ti)->flags & 4)
-+#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
-+#define TI_SET_IF(_ti,_if)   ((_ti)->flags |= ((!!(_if))<<2))
-+struct trap_info {
-+    uint8_t       vector;  /* exception vector                              */
-+    uint8_t       flags;   /* 0-3: privilege level; 4: clear event enable?  */
-+    uint16_t      cs;      /* code selector                                 */
-+    unsigned long address; /* code offset                                   */
-+};
-+typedef struct trap_info trap_info_t;
-+DEFINE_XEN_GUEST_HANDLE(trap_info_t);
++#define XEN_ELFNOTE_CRASH_REGS 0x1000002
 +
-+typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
 +
 +/*
-+ * The following is all CPU context. Note that the fpu_ctxt block is filled 
-+ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
++ * xen dump-core none note.
++ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_NONE
++ * in its dump file to indicate that the file is a xen dump-core
++ * file. This note doesn't have any other information.
++ * See tools/libxc/xc_core.h for more information.
 + */
-+struct vcpu_guest_context {
-+    /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
-+    struct { char x[512]; } fpu_ctxt;       /* User-level FPU registers     */
-+#define VGCF_I387_VALID                (1<<0)
-+#define VGCF_IN_KERNEL                 (1<<2)
-+#define _VGCF_i387_valid               0
-+#define VGCF_i387_valid                (1<<_VGCF_i387_valid)
-+#define _VGCF_in_kernel                2
-+#define VGCF_in_kernel                 (1<<_VGCF_in_kernel)
-+#define _VGCF_failsafe_disables_events 3
-+#define VGCF_failsafe_disables_events  (1<<_VGCF_failsafe_disables_events)
-+#define _VGCF_syscall_disables_events  4
-+#define VGCF_syscall_disables_events   (1<<_VGCF_syscall_disables_events)
-+#define _VGCF_online                   5
-+#define VGCF_online                    (1<<_VGCF_online)
-+    unsigned long flags;                    /* VGCF_* flags                 */
-+    struct cpu_user_regs user_regs;         /* User-level CPU registers     */
-+    struct trap_info trap_ctxt[256];        /* Virtual IDT                  */
-+    unsigned long ldt_base, ldt_ents;       /* LDT (linear address, # ents) */
-+    unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
-+    unsigned long kernel_ss, kernel_sp;     /* Virtual TSS (only SS1/SP1)   */
-+    /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */
-+    unsigned long ctrlreg[8];               /* CR0-CR7 (control registers)  */
-+    unsigned long debugreg[8];              /* DB0-DB7 (debug registers)    */
-+#ifdef __i386__
-+    unsigned long event_callback_cs;        /* CS:EIP of event callback     */
-+    unsigned long event_callback_eip;
-+    unsigned long failsafe_callback_cs;     /* CS:EIP of failsafe callback  */
-+    unsigned long failsafe_callback_eip;
-+#else
-+    unsigned long event_callback_eip;
-+    unsigned long failsafe_callback_eip;
-+#ifdef __XEN__
-+    union {
-+        unsigned long syscall_callback_eip;
-+        struct {
-+            unsigned int event_callback_cs;    /* compat CS of event cb     */
-+            unsigned int failsafe_callback_cs; /* compat CS of failsafe cb  */
-+        };
-+    };
-+#else
-+    unsigned long syscall_callback_eip;
-+#endif
-+#endif
-+    unsigned long vm_assist;                /* VMASST_TYPE_* bitmap */
-+#ifdef __x86_64__
-+    /* Segment base addresses. */
-+    uint64_t      fs_base;
-+    uint64_t      gs_base_kernel;
-+    uint64_t      gs_base_user;
-+#endif
-+};
-+typedef struct vcpu_guest_context vcpu_guest_context_t;
-+DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
++#define XEN_ELFNOTE_DUMPCORE_NONE               0x2000000
 +
-+struct arch_shared_info {
-+    unsigned long max_pfn;                  /* max pfn that appears in table */
-+    /* Frame containing list of mfns containing list of mfns containing p2m. */
-+    xen_pfn_t     pfn_to_mfn_frame_list_list;
-+    unsigned long nmi_reason;
-+    uint64_t pad[32];
-+};
-+typedef struct arch_shared_info arch_shared_info_t;
++/*
++ * xen dump-core header note.
++ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_HEADER
++ * in its dump file.
++ * See tools/libxc/xc_core.h for more information.
++ */
++#define XEN_ELFNOTE_DUMPCORE_HEADER             0x2000001
 +
-+#endif /* !__ASSEMBLY__ */
++/*
++ * xen dump-core xen version note.
++ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_XEN_VERSION
++ * in its dump file. It contains the xen version obtained via the
++ * XENVER hypercall.
++ * See tools/libxc/xc_core.h for more information.
++ */
++#define XEN_ELFNOTE_DUMPCORE_XEN_VERSION        0x2000002
 +
 +/*
-+ * Prefix forces emulation of some non-trapping instructions.
-+ * Currently only CPUID.
++ * xen dump-core format version note.
++ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION
++ * in its dump file. It contains a format version identifier.
++ * See tools/libxc/xc_core.h for more information.
 + */
-+#ifdef __ASSEMBLY__
-+#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
-+#define XEN_CPUID          XEN_EMULATE_PREFIX cpuid
-+#else
-+#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
-+#define XEN_CPUID          XEN_EMULATE_PREFIX "cpuid"
-+#endif
++#define XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION     0x2000003
 +
-+#endif /* __XEN_PUBLIC_ARCH_X86_XEN_H__ */
++#endif /* __XEN_PUBLIC_ELFNOTE_H__ */
 +
 +/*
 + * Local variables:
@@ -97171,448 +136330,545 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
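
As a concrete illustration of the note format this header describes, a guest
image can advertise XEN_ELFNOTE_GUEST_OS by emitting a standard ELF note with
"Xen" in the name field. A hedged C sketch follows; kernels normally do this
with an assembler ELFNOTE macro instead, and the section name .note.Xen and
the explicit padding are assumptions based on the generic ELF note layout,
not something this header mandates:

    #include <stdint.h>

    /* Generic ELF note: namesz/descsz/type header followed by the
     * 4-byte-padded name and desc fields. */
    struct xen_guest_os_note {
        uint32_t namesz;   /* length of name incl. NUL: sizeof("Xen") */
        uint32_t descsz;   /* payload length before padding */
        uint32_t type;     /* XEN_ELFNOTE_GUEST_OS */
        char     name[4];  /* "Xen\0" */
        char     desc[8];  /* "linux\0" padded to a 4-byte boundary */
    };

    static const struct xen_guest_os_note guest_os_note
        __attribute__((used, aligned(4), section(".note.Xen"))) = {
        .namesz = 4,
        .descsz = 6,
        .type   = 6,       /* XEN_ELFNOTE_GUEST_OS */
        .name   = "Xen",
        .desc   = "linux",
    };

A loader then walks the PT_NOTE segment and picks out entries by name and
type, falling back to the legacy __xen_guest string when no notes are present.
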
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/arch-x86/xen-x86_32.h tmp-linux-2.6-xen.patch/include/xen/interface/arch-x86/xen-x86_32.h
---- pristine-linux-2.6.18.2/include/xen/interface/arch-x86/xen-x86_32.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/arch-x86/xen-x86_32.h	2007-09-30 18:06:19.000000000 +0200
-@@ -0,0 +1,184 @@
-+/******************************************************************************
-+ * xen-x86_32.h
-+ * 
-+ * Guest OS interface to x86 32-bit Xen.
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/elfstructs.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/elfstructs.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,527 @@
++#ifndef __XEN_PUBLIC_ELFSTRUCTS_H__
++#define __XEN_PUBLIC_ELFSTRUCTS_H__ 1
++/*
++ * Copyright (c) 1995, 1996 Erik Theisen.  All rights reserved.
 + *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ * 3. The name of the author may not be used to endorse or promote products
++ *    derived from this software without specific prior written permission
 + *
-+ * Copyright (c) 2004-2007, K A Fraser
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
++ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 + */
 +
-+#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__
-+#define __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__
++typedef uint8_t		Elf_Byte;
 +
-+/*
-+ * Hypercall interface:
-+ *  Input:  %ebx, %ecx, %edx, %esi, %edi (arguments 1-5)
-+ *  Output: %eax
-+ * Access is via hypercall page (set up by guest loader or via a Xen MSR):
-+ *  call hypercall_page + hypercall-number * 32
-+ * Clobbered: Argument registers (e.g., 2-arg hypercall clobbers %ebx,%ecx)
-+ */
++typedef uint32_t	Elf32_Addr;	/* Unsigned program address */
++typedef uint32_t	Elf32_Off;	/* Unsigned file offset */
++typedef int32_t		Elf32_Sword;	/* Signed large integer */
++typedef uint32_t	Elf32_Word;	/* Unsigned large integer */
++typedef uint16_t	Elf32_Half;	/* Unsigned medium integer */
++
++typedef uint64_t	Elf64_Addr;
++typedef uint64_t	Elf64_Off;
++typedef int32_t		Elf64_Shalf;
++
++typedef int32_t		Elf64_Sword;
++typedef uint32_t	Elf64_Word;
++
++typedef int64_t		Elf64_Sxword;
++typedef uint64_t	Elf64_Xword;
++
++typedef uint32_t	Elf64_Half;
++typedef uint16_t	Elf64_Quarter;
 +
-+#if __XEN_INTERFACE_VERSION__ < 0x00030203
 +/*
-+ * Legacy hypercall interface:
-+ * As above, except the entry sequence to the hypervisor is:
-+ *  mov $hypercall-number*32,%eax ; int $0x82
++ * e_ident[] identification indexes
++ * See http://www.caldera.com/developers/gabi/2000-07-17/ch4.eheader.html
 + */
-+#define TRAP_INSTR "int $0x82"
-+#endif
++#define EI_MAG0		0		/* file ID */
++#define EI_MAG1		1		/* file ID */
++#define EI_MAG2		2		/* file ID */
++#define EI_MAG3		3		/* file ID */
++#define EI_CLASS	4		/* file class */
++#define EI_DATA		5		/* data encoding */
++#define EI_VERSION	6		/* ELF header version */
++#define EI_OSABI	7		/* OS/ABI ID */
++#define EI_ABIVERSION	8		/* ABI version */
++#define EI_PAD		9		/* start of pad bytes */
++#define EI_NIDENT	16		/* Size of e_ident[] */
++
++/* e_ident[] magic number */
++#define	ELFMAG0		0x7f		/* e_ident[EI_MAG0] */
++#define	ELFMAG1		'E'		/* e_ident[EI_MAG1] */
++#define	ELFMAG2		'L'		/* e_ident[EI_MAG2] */
++#define	ELFMAG3		'F'		/* e_ident[EI_MAG3] */
++#define	ELFMAG		"\177ELF"	/* magic */
++#define	SELFMAG		4		/* size of magic */
++
++/* e_ident[] file class */
++#define	ELFCLASSNONE	0		/* invalid */
++#define	ELFCLASS32	1		/* 32-bit objs */
++#define	ELFCLASS64	2		/* 64-bit objs */
++#define	ELFCLASSNUM	3		/* number of classes */
++
++/* e_ident[] data encoding */
++#define ELFDATANONE	0		/* invalid */
++#define ELFDATA2LSB	1		/* Little-Endian */
++#define ELFDATA2MSB	2		/* Big-Endian */
++#define ELFDATANUM	3		/* number of data encode defines */
++
++/* e_ident[] Operating System/ABI */
++#define ELFOSABI_SYSV		0	/* UNIX System V ABI */
++#define ELFOSABI_HPUX		1	/* HP-UX operating system */
++#define ELFOSABI_NETBSD		2	/* NetBSD */
++#define ELFOSABI_LINUX		3	/* GNU/Linux */
++#define ELFOSABI_HURD		4	/* GNU/Hurd */
++#define ELFOSABI_86OPEN		5	/* 86Open common IA32 ABI */
++#define ELFOSABI_SOLARIS	6	/* Solaris */
++#define ELFOSABI_MONTEREY	7	/* Monterey */
++#define ELFOSABI_IRIX		8	/* IRIX */
++#define ELFOSABI_FREEBSD	9	/* FreeBSD */
++#define ELFOSABI_TRU64		10	/* TRU64 UNIX */
++#define ELFOSABI_MODESTO	11	/* Novell Modesto */
++#define ELFOSABI_OPENBSD	12	/* OpenBSD */
++#define ELFOSABI_ARM		97	/* ARM */
++#define ELFOSABI_STANDALONE	255	/* Standalone (embedded) application */
++
++/* e_ident */
++#define IS_ELF(ehdr) ((ehdr).e_ident[EI_MAG0] == ELFMAG0 && \
++                      (ehdr).e_ident[EI_MAG1] == ELFMAG1 && \
++                      (ehdr).e_ident[EI_MAG2] == ELFMAG2 && \
++                      (ehdr).e_ident[EI_MAG3] == ELFMAG3)
++
++/* ELF Header */
++typedef struct elfhdr {
++	unsigned char	e_ident[EI_NIDENT]; /* ELF Identification */
++	Elf32_Half	e_type;		/* object file type */
++	Elf32_Half	e_machine;	/* machine */
++	Elf32_Word	e_version;	/* object file version */
++	Elf32_Addr	e_entry;	/* virtual entry point */
++	Elf32_Off	e_phoff;	/* program header table offset */
++	Elf32_Off	e_shoff;	/* section header table offset */
++	Elf32_Word	e_flags;	/* processor-specific flags */
++	Elf32_Half	e_ehsize;	/* ELF header size */
++	Elf32_Half	e_phentsize;	/* program header entry size */
++	Elf32_Half	e_phnum;	/* number of program header entries */
++	Elf32_Half	e_shentsize;	/* section header entry size */
++	Elf32_Half	e_shnum;	/* number of section header entries */
++	Elf32_Half	e_shstrndx;	/* section header table's "section
++					   header string table" entry offset */
++} Elf32_Ehdr;
++
++typedef struct {
++	unsigned char	e_ident[EI_NIDENT];	/* Id bytes */
++	Elf64_Quarter	e_type;			/* file type */
++	Elf64_Quarter	e_machine;		/* machine type */
++	Elf64_Half	e_version;		/* version number */
++	Elf64_Addr	e_entry;		/* entry point */
++	Elf64_Off	e_phoff;		/* Program hdr offset */
++	Elf64_Off	e_shoff;		/* Section hdr offset */
++	Elf64_Half	e_flags;		/* Processor flags */
++	Elf64_Quarter	e_ehsize;		/* sizeof ehdr */
++	Elf64_Quarter	e_phentsize;		/* Program header entry size */
++	Elf64_Quarter	e_phnum;		/* Number of program headers */
++	Elf64_Quarter	e_shentsize;		/* Section header entry size */
++	Elf64_Quarter	e_shnum;		/* Number of section headers */
++	Elf64_Quarter	e_shstrndx;		/* String table index */
++} Elf64_Ehdr;
++
++/* e_type */
++#define ET_NONE		0		/* No file type */
++#define ET_REL		1		/* relocatable file */
++#define ET_EXEC		2		/* executable file */
++#define ET_DYN		3		/* shared object file */
++#define ET_CORE		4		/* core file */
++#define ET_NUM		5		/* number of types */
++#define ET_LOPROC	0xff00		/* reserved range for processor */
++#define ET_HIPROC	0xffff		/*  specific e_type */
 +
++/* e_machine */
++#define EM_NONE		0		/* No Machine */
++#define EM_M32		1		/* AT&T WE 32100 */
++#define EM_SPARC	2		/* SPARC */
++#define EM_386		3		/* Intel 80386 */
++#define EM_68K		4		/* Motorola 68000 */
++#define EM_88K		5		/* Motorola 88000 */
++#define EM_486		6		/* Intel 80486 - unused? */
++#define EM_860		7		/* Intel 80860 */
++#define EM_MIPS		8		/* MIPS R3000 Big-Endian only */
 +/*
-+ * These flat segments are in the Xen-private section of every GDT. Since these
-+ * are also present in the initial GDT, many OSes will be able to avoid
-+ * installing their own GDT.
++ * Don't know if EM_MIPS_RS4_BE,
++ * EM_SPARC64, EM_PARISC,
++ * or EM_PPC are ABI compliant
 + */
-+#define FLAT_RING1_CS 0xe019    /* GDT index 259 */
-+#define FLAT_RING1_DS 0xe021    /* GDT index 260 */
-+#define FLAT_RING1_SS 0xe021    /* GDT index 260 */
-+#define FLAT_RING3_CS 0xe02b    /* GDT index 261 */
-+#define FLAT_RING3_DS 0xe033    /* GDT index 262 */
-+#define FLAT_RING3_SS 0xe033    /* GDT index 262 */
++#define EM_MIPS_RS4_BE	10		/* MIPS R4000 Big-Endian */
++#define EM_SPARC64	11		/* SPARC v9 64-bit unofficial */
++#define EM_PARISC	15		/* HPPA */
++#define EM_SPARC32PLUS	18		/* Enhanced instruction set SPARC */
++#define EM_PPC		20		/* PowerPC */
++#define EM_PPC64	21		/* PowerPC 64-bit */
++#define EM_ARM		40		/* Advanced RISC Machines ARM */
++#define EM_ALPHA	41		/* DEC ALPHA */
++#define EM_SPARCV9	43		/* SPARC version 9 */
++#define EM_ALPHA_EXP	0x9026		/* DEC ALPHA */
++#define EM_IA_64	50		/* Intel Merced */
++#define EM_X86_64	62		/* AMD x86-64 architecture */
++#define EM_VAX		75		/* DEC VAX */
 +
-+#define FLAT_KERNEL_CS FLAT_RING1_CS
-+#define FLAT_KERNEL_DS FLAT_RING1_DS
-+#define FLAT_KERNEL_SS FLAT_RING1_SS
-+#define FLAT_USER_CS    FLAT_RING3_CS
-+#define FLAT_USER_DS    FLAT_RING3_DS
-+#define FLAT_USER_SS    FLAT_RING3_SS
++/* Version */
++#define EV_NONE		0		/* Invalid */
++#define EV_CURRENT	1		/* Current */
++#define EV_NUM		2		/* number of versions */
 +
-+#define __HYPERVISOR_VIRT_START_PAE    0xF5800000
-+#define __MACH2PHYS_VIRT_START_PAE     0xF5800000
-+#define __MACH2PHYS_VIRT_END_PAE       0xF6800000
-+#define HYPERVISOR_VIRT_START_PAE      \
-+    mk_unsigned_long(__HYPERVISOR_VIRT_START_PAE)
-+#define MACH2PHYS_VIRT_START_PAE       \
-+    mk_unsigned_long(__MACH2PHYS_VIRT_START_PAE)
-+#define MACH2PHYS_VIRT_END_PAE         \
-+    mk_unsigned_long(__MACH2PHYS_VIRT_END_PAE)
++/* Section Header */
++typedef struct {
++	Elf32_Word	sh_name;	/* name - index into section header
++					   string table section */
++	Elf32_Word	sh_type;	/* type */
++	Elf32_Word	sh_flags;	/* flags */
++	Elf32_Addr	sh_addr;	/* address */
++	Elf32_Off	sh_offset;	/* file offset */
++	Elf32_Word	sh_size;	/* section size */
++	Elf32_Word	sh_link;	/* section header table index link */
++	Elf32_Word	sh_info;	/* extra information */
++	Elf32_Word	sh_addralign;	/* address alignment */
++	Elf32_Word	sh_entsize;	/* section entry size */
++} Elf32_Shdr;
 +
-+#define __HYPERVISOR_VIRT_START_NONPAE 0xFC000000
-+#define __MACH2PHYS_VIRT_START_NONPAE  0xFC000000
-+#define __MACH2PHYS_VIRT_END_NONPAE    0xFC400000
-+#define HYPERVISOR_VIRT_START_NONPAE   \
-+    mk_unsigned_long(__HYPERVISOR_VIRT_START_NONPAE)
-+#define MACH2PHYS_VIRT_START_NONPAE    \
-+    mk_unsigned_long(__MACH2PHYS_VIRT_START_NONPAE)
-+#define MACH2PHYS_VIRT_END_NONPAE      \
-+    mk_unsigned_long(__MACH2PHYS_VIRT_END_NONPAE)
++typedef struct {
++	Elf64_Half	sh_name;	/* section name */
++	Elf64_Half	sh_type;	/* section type */
++	Elf64_Xword	sh_flags;	/* section flags */
++	Elf64_Addr	sh_addr;	/* virtual address */
++	Elf64_Off	sh_offset;	/* file offset */
++	Elf64_Xword	sh_size;	/* section size */
++	Elf64_Half	sh_link;	/* link to another */
++	Elf64_Half	sh_info;	/* misc info */
++	Elf64_Xword	sh_addralign;	/* memory alignment */
++	Elf64_Xword	sh_entsize;	/* table entry size */
++} Elf64_Shdr;
 +
-+#ifdef CONFIG_X86_PAE
-+#define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_PAE
-+#define __MACH2PHYS_VIRT_START  __MACH2PHYS_VIRT_START_PAE
-+#define __MACH2PHYS_VIRT_END    __MACH2PHYS_VIRT_END_PAE
-+#else
-+#define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_NONPAE
-+#define __MACH2PHYS_VIRT_START  __MACH2PHYS_VIRT_START_NONPAE
-+#define __MACH2PHYS_VIRT_END    __MACH2PHYS_VIRT_END_NONPAE
-+#endif
++/* Special Section Indexes */
++#define SHN_UNDEF	0		/* undefined */
++#define SHN_LORESERVE	0xff00		/* lower bounds of reserved indexes */
++#define SHN_LOPROC	0xff00		/* reserved range for processor */
++#define SHN_HIPROC	0xff1f		/*   specific section indexes */
++#define SHN_ABS		0xfff1		/* absolute value */
++#define SHN_COMMON	0xfff2		/* common symbol */
++#define SHN_HIRESERVE	0xffff		/* upper bounds of reserved indexes */
++
++/* sh_type */
++#define SHT_NULL	0		/* inactive */
++#define SHT_PROGBITS	1		/* program defined information */
++#define SHT_SYMTAB	2		/* symbol table section */
++#define SHT_STRTAB	3		/* string table section */
++#define SHT_RELA	4		/* relocation section with addends */
++#define SHT_HASH	5		/* symbol hash table section */
++#define SHT_DYNAMIC	6		/* dynamic section */
++#define SHT_NOTE	7		/* note section */
++#define SHT_NOBITS	8		/* no space section */
++#define SHT_REL		9		/* relocation section without addends */
++#define SHT_SHLIB	10		/* reserved - purpose unknown */
++#define SHT_DYNSYM	11		/* dynamic symbol table section */
++#define SHT_NUM		12		/* number of section types */
++#define SHT_LOPROC	0x70000000	/* reserved range for processor */
++#define SHT_HIPROC	0x7fffffff	/*  specific section header types */
++#define SHT_LOUSER	0x80000000	/* reserved range for application */
++#define SHT_HIUSER	0xffffffff	/*  specific indexes */
++
++/* Section names */
++#define ELF_BSS         ".bss"		/* uninitialized data */
++#define ELF_DATA        ".data"		/* initialized data */
++#define ELF_DEBUG       ".debug"	/* debug */
++#define ELF_DYNAMIC     ".dynamic"	/* dynamic linking information */
++#define ELF_DYNSTR      ".dynstr"	/* dynamic string table */
++#define ELF_DYNSYM      ".dynsym"	/* dynamic symbol table */
++#define ELF_FINI        ".fini"		/* termination code */
++#define ELF_GOT         ".got"		/* global offset table */
++#define ELF_HASH        ".hash"		/* symbol hash table */
++#define ELF_INIT        ".init"		/* initialization code */
++#define ELF_REL_DATA    ".rel.data"	/* relocation data */
++#define ELF_REL_FINI    ".rel.fini"	/* relocation termination code */
++#define ELF_REL_INIT    ".rel.init"	/* relocation initialization code */
++#define ELF_REL_DYN     ".rel.dyn"	/* relocation dynamic link info */
++#define ELF_REL_RODATA  ".rel.rodata"	/* relocation read-only data */
++#define ELF_REL_TEXT    ".rel.text"	/* relocation code */
++#define ELF_RODATA      ".rodata"	/* read-only data */
++#define ELF_SHSTRTAB    ".shstrtab"	/* section header string table */
++#define ELF_STRTAB      ".strtab"	/* string table */
++#define ELF_SYMTAB      ".symtab"	/* symbol table */
++#define ELF_TEXT        ".text"		/* code */
 +
-+#ifndef HYPERVISOR_VIRT_START
-+#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
-+#endif
 +
-+#define MACH2PHYS_VIRT_START  mk_unsigned_long(__MACH2PHYS_VIRT_START)
-+#define MACH2PHYS_VIRT_END    mk_unsigned_long(__MACH2PHYS_VIRT_END)
-+#define MACH2PHYS_NR_ENTRIES  ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2)
-+#ifndef machine_to_phys_mapping
-+#define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START)
-+#endif
++/* Section Attribute Flags - sh_flags */
++#define SHF_WRITE	0x1		/* Writable */
++#define SHF_ALLOC	0x2		/* occupies memory */
++#define SHF_EXECINSTR	0x4		/* executable */
++#define SHF_MASKPROC	0xf0000000	/* reserved bits for processor */
++					/*  specific section attributes */
 +
-+/* 32-/64-bit invariability for control interfaces (domctl/sysctl). */
-+#if defined(__XEN__) || defined(__XEN_TOOLS__)
-+#undef __DEFINE_XEN_GUEST_HANDLE
-+#define __DEFINE_XEN_GUEST_HANDLE(name, type)                   \
-+    typedef struct { type *p; }                                 \
-+        __guest_handle_ ## name;                                \
-+    typedef struct { union { type *p; uint64_aligned_t q; }; }  \
-+        __guest_handle_64_ ## name
-+#undef set_xen_guest_handle
-+#define set_xen_guest_handle(hnd, val)                      \
-+    do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0;   \
-+         (hnd).p = val;                                     \
-+    } while ( 0 )
-+#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
-+#define XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name
-+#endif
++/* Symbol Table Entry */
++typedef struct elf32_sym {
++	Elf32_Word	st_name;	/* name - index into string table */
++	Elf32_Addr	st_value;	/* symbol value */
++	Elf32_Word	st_size;	/* symbol size */
++	unsigned char	st_info;	/* type and binding */
++	unsigned char	st_other;	/* 0 - no defined meaning */
++	Elf32_Half	st_shndx;	/* section header index */
++} Elf32_Sym;
 +
-+#ifndef __ASSEMBLY__
++typedef struct {
++	Elf64_Half	st_name;	/* Symbol name index in str table */
++	Elf_Byte	st_info;	/* type / binding attrs */
++	Elf_Byte	st_other;	/* unused */
++	Elf64_Quarter	st_shndx;	/* section index of symbol */
++	Elf64_Xword	st_value;	/* value of symbol */
++	Elf64_Xword	st_size;	/* size of symbol */
++} Elf64_Sym;
 +
-+struct cpu_user_regs {
-+    uint32_t ebx;
-+    uint32_t ecx;
-+    uint32_t edx;
-+    uint32_t esi;
-+    uint32_t edi;
-+    uint32_t ebp;
-+    uint32_t eax;
-+    uint16_t error_code;    /* private */
-+    uint16_t entry_vector;  /* private */
-+    uint32_t eip;
-+    uint16_t cs;
-+    uint8_t  saved_upcall_mask;
-+    uint8_t  _pad0;
-+    uint32_t eflags;        /* eflags.IF == !saved_upcall_mask */
-+    uint32_t esp;
-+    uint16_t ss, _pad1;
-+    uint16_t es, _pad2;
-+    uint16_t ds, _pad3;
-+    uint16_t fs, _pad4;
-+    uint16_t gs, _pad5;
-+};
-+typedef struct cpu_user_regs cpu_user_regs_t;
-+DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
++/* Symbol table index */
++#define STN_UNDEF	0		/* undefined */
 +
-+/*
-+ * Page-directory addresses above 4GB do not fit into architectural %cr3.
-+ * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
-+ * must use the following accessor macros to pack/unpack valid MFNs.
-+ */
-+#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
-+#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
++/* Extract symbol info - st_info */
++#define ELF32_ST_BIND(x)	((x) >> 4)
++#define ELF32_ST_TYPE(x)	(((unsigned int) x) & 0xf)
++#define ELF32_ST_INFO(b,t)	(((b) << 4) + ((t) & 0xf))
 +
-+struct arch_vcpu_info {
-+    unsigned long cr2;
-+    unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */
-+};
-+typedef struct arch_vcpu_info arch_vcpu_info_t;
++#define ELF64_ST_BIND(x)	((x) >> 4)
++#define ELF64_ST_TYPE(x)	(((unsigned int) x) & 0xf)
++#define ELF64_ST_INFO(b,t)	(((b) << 4) + ((t) & 0xf))
 +
-+struct xen_callback {
-+    unsigned long cs;
-+    unsigned long eip;
-+};
-+typedef struct xen_callback xen_callback_t;
++/* Symbol Binding - ELF32_ST_BIND - st_info */
++#define STB_LOCAL	0		/* Local symbol */
++#define STB_GLOBAL	1		/* Global symbol */
++#define STB_WEAK	2		/* like global - lower precedence */
++#define STB_NUM		3		/* number of symbol bindings */
++#define STB_LOPROC	13		/* reserved range for processor */
++#define STB_HIPROC	15		/*  specific symbol bindings */
 +
-+#endif /* !__ASSEMBLY__ */
++/* Symbol type - ELF32_ST_TYPE - st_info */
++#define STT_NOTYPE	0		/* not specified */
++#define STT_OBJECT	1		/* data object */
++#define STT_FUNC	2		/* function */
++#define STT_SECTION	3		/* section */
++#define STT_FILE	4		/* file */
++#define STT_NUM		5		/* number of symbol types */
++#define STT_LOPROC	13		/* reserved range for processor */
++#define STT_HIPROC	15		/*  specific symbol types */
 +
-+#endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ */
++/* Relocation entry with implicit addend */
++typedef struct {
++	Elf32_Addr	r_offset;	/* offset of relocation */
++	Elf32_Word	r_info;		/* symbol table index and type */
++} Elf32_Rel;
 +
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/arch-x86/xen-x86_64.h tmp-linux-2.6-xen.patch/include/xen/interface/arch-x86/xen-x86_64.h
---- pristine-linux-2.6.18.2/include/xen/interface/arch-x86/xen-x86_64.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/arch-x86/xen-x86_64.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,211 @@
-+/******************************************************************************
-+ * xen-x86_64.h
-+ * 
-+ * Guest OS interface to x86 64-bit Xen.
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2004-2006, K A Fraser
-+ */
++/* Relocation entry with explicit addend */
++typedef struct {
++	Elf32_Addr	r_offset;	/* offset of relocation */
++	Elf32_Word	r_info;		/* symbol table index and type */
++	Elf32_Sword	r_addend;
++} Elf32_Rela;
 +
-+#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__
-+#define __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__
++/* Extract relocation info - r_info */
++#define ELF32_R_SYM(i)		((i) >> 8)
++#define ELF32_R_TYPE(i)		((unsigned char) (i))
++#define ELF32_R_INFO(s,t) 	(((s) << 8) + (unsigned char)(t))
 +
-+/*
-+ * Hypercall interface:
-+ *  Input:  %rdi, %rsi, %rdx, %r10, %r8 (arguments 1-5)
-+ *  Output: %rax
-+ * Access is via hypercall page (set up by guest loader or via a Xen MSR):
-+ *  call hypercall_page + hypercall-number * 32
-+ * Clobbered: argument registers (e.g., 2-arg hypercall clobbers %rdi,%rsi)
-+ */
++typedef struct {
++	Elf64_Xword	r_offset;	/* where to do it */
++	Elf64_Xword	r_info;		/* index & type of relocation */
++} Elf64_Rel;
 +
-+#if __XEN_INTERFACE_VERSION__ < 0x00030203
-+/*
-+ * Legacy hypercall interface:
-+ * As above, except the entry sequence to the hypervisor is:
-+ *  mov $hypercall-number*32,%eax ; syscall
-+ * Clobbered: %rcx, %r11, argument registers (as above)
-+ */
-+#define TRAP_INSTR "syscall"
-+#endif
++typedef struct {
++	Elf64_Xword	r_offset;	/* where to do it */
++	Elf64_Xword	r_info;		/* index & type of relocation */
++	Elf64_Sxword	r_addend;	/* adjustment value */
++} Elf64_Rela;
 +
-+/*
-+ * 64-bit segment selectors
-+ * These flat segments are in the Xen-private section of every GDT. Since these
-+ * are also present in the initial GDT, many OSes will be able to avoid
-+ * installing their own GDT.
-+ */
++#define	ELF64_R_SYM(info)	((info) >> 32)
++#define	ELF64_R_TYPE(info)	((info) & 0xFFFFFFFF)
++#define ELF64_R_INFO(s,t) 	(((s) << 32) + (u_int32_t)(t))
 +
-+#define FLAT_RING3_CS32 0xe023  /* GDT index 260 */
-+#define FLAT_RING3_CS64 0xe033  /* GDT index 261 */
-+#define FLAT_RING3_DS32 0xe02b  /* GDT index 262 */
-+#define FLAT_RING3_DS64 0x0000  /* NULL selector */
-+#define FLAT_RING3_SS32 0xe02b  /* GDT index 262 */
-+#define FLAT_RING3_SS64 0xe02b  /* GDT index 262 */
++/* Program Header */
++typedef struct {
++	Elf32_Word	p_type;		/* segment type */
++	Elf32_Off	p_offset;	/* segment offset */
++	Elf32_Addr	p_vaddr;	/* virtual address of segment */
++	Elf32_Addr	p_paddr;	/* physical address - ignored? */
++	Elf32_Word	p_filesz;	/* number of bytes in file for seg. */
++	Elf32_Word	p_memsz;	/* number of bytes in mem. for seg. */
++	Elf32_Word	p_flags;	/* flags */
++	Elf32_Word	p_align;	/* memory alignment */
++} Elf32_Phdr;
 +
-+#define FLAT_KERNEL_DS64 FLAT_RING3_DS64
-+#define FLAT_KERNEL_DS32 FLAT_RING3_DS32
-+#define FLAT_KERNEL_DS   FLAT_KERNEL_DS64
-+#define FLAT_KERNEL_CS64 FLAT_RING3_CS64
-+#define FLAT_KERNEL_CS32 FLAT_RING3_CS32
-+#define FLAT_KERNEL_CS   FLAT_KERNEL_CS64
-+#define FLAT_KERNEL_SS64 FLAT_RING3_SS64
-+#define FLAT_KERNEL_SS32 FLAT_RING3_SS32
-+#define FLAT_KERNEL_SS   FLAT_KERNEL_SS64
++typedef struct {
++	Elf64_Half	p_type;		/* entry type */
++	Elf64_Half	p_flags;	/* flags */
++	Elf64_Off	p_offset;	/* offset */
++	Elf64_Addr	p_vaddr;	/* virtual address */
++	Elf64_Addr	p_paddr;	/* physical address */
++	Elf64_Xword	p_filesz;	/* file size */
++	Elf64_Xword	p_memsz;	/* memory size */
++	Elf64_Xword	p_align;	/* memory & file alignment */
++} Elf64_Phdr;
 +
-+#define FLAT_USER_DS64 FLAT_RING3_DS64
-+#define FLAT_USER_DS32 FLAT_RING3_DS32
-+#define FLAT_USER_DS   FLAT_USER_DS64
-+#define FLAT_USER_CS64 FLAT_RING3_CS64
-+#define FLAT_USER_CS32 FLAT_RING3_CS32
-+#define FLAT_USER_CS   FLAT_USER_CS64
-+#define FLAT_USER_SS64 FLAT_RING3_SS64
-+#define FLAT_USER_SS32 FLAT_RING3_SS32
-+#define FLAT_USER_SS   FLAT_USER_SS64
++/* Segment types - p_type */
++#define PT_NULL		0		/* unused */
++#define PT_LOAD		1		/* loadable segment */
++#define PT_DYNAMIC	2		/* dynamic linking section */
++#define PT_INTERP	3		/* the RTLD */
++#define PT_NOTE		4		/* auxiliary information */
++#define PT_SHLIB	5		/* reserved - purpose undefined */
++#define PT_PHDR		6		/* program header */
++#define PT_NUM		7		/* Number of segment types */
++#define PT_LOPROC	0x70000000	/* reserved range for processor */
++#define PT_HIPROC	0x7fffffff	/*  specific segment types */
 +
-+#define __HYPERVISOR_VIRT_START 0xFFFF800000000000
-+#define __HYPERVISOR_VIRT_END   0xFFFF880000000000
-+#define __MACH2PHYS_VIRT_START  0xFFFF800000000000
-+#define __MACH2PHYS_VIRT_END    0xFFFF804000000000
++/* Segment flags - p_flags */
++#define PF_X		0x1		/* Executable */
++#define PF_W		0x2		/* Writable */
++#define PF_R		0x4		/* Readable */
++#define PF_MASKPROC	0xf0000000	/* reserved bits for processor */
++					/*  specific segment flags */
 +
-+#ifndef HYPERVISOR_VIRT_START
-+#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
-+#define HYPERVISOR_VIRT_END   mk_unsigned_long(__HYPERVISOR_VIRT_END)
-+#endif
++/* Dynamic structure */
++typedef struct {
++	Elf32_Sword	d_tag;		/* controls meaning of d_val */
++	union {
++		Elf32_Word	d_val;	/* Multiple meanings - see d_tag */
++		Elf32_Addr	d_ptr;	/* program virtual address */
++	} d_un;
++} Elf32_Dyn;
 +
-+#define MACH2PHYS_VIRT_START  mk_unsigned_long(__MACH2PHYS_VIRT_START)
-+#define MACH2PHYS_VIRT_END    mk_unsigned_long(__MACH2PHYS_VIRT_END)
-+#define MACH2PHYS_NR_ENTRIES  ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3)
-+#ifndef machine_to_phys_mapping
-+#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
-+#endif
++typedef struct {
++	Elf64_Xword	d_tag;		/* controls meaning of d_val */
++	union {
++		Elf64_Addr	d_ptr;
++		Elf64_Xword	d_val;
++	} d_un;
++} Elf64_Dyn;
 +
-+#ifndef __ASSEMBLY__
++/* Dynamic Array Tags - d_tag */
++#define DT_NULL		0		/* marks end of _DYNAMIC array */
++#define DT_NEEDED	1		/* string table offset of needed lib */
++#define DT_PLTRELSZ	2		/* size of relocation entries in PLT */
++#define DT_PLTGOT	3		/* address PLT/GOT */
++#define DT_HASH		4		/* address of symbol hash table */
++#define DT_STRTAB	5		/* address of string table */
++#define DT_SYMTAB	6		/* address of symbol table */
++#define DT_RELA		7		/* address of relocation table */
++#define DT_RELASZ	8		/* size of relocation table */
++#define DT_RELAENT	9		/* size of relocation entry */
++#define DT_STRSZ	10		/* size of string table */
++#define DT_SYMENT	11		/* size of symbol table entry */
++#define DT_INIT		12		/* address of initialization func. */
++#define DT_FINI		13		/* address of termination function */
++#define DT_SONAME	14		/* string table offset of shared obj */
++#define DT_RPATH	15		/* string table offset of library
++					   search path */
++#define DT_SYMBOLIC	16		/* start sym search in shared obj. */
++#define DT_REL		17		/* address of rel. tbl. w implicit addends */
++#define DT_RELSZ	18		/* size of DT_REL relocation table */
++#define DT_RELENT	19		/* size of DT_REL relocation entry */
++#define DT_PLTREL	20		/* PLT referenced relocation entry */
++#define DT_DEBUG	21		/* debugging */
++#define DT_TEXTREL	22		/* Allow rel. mod. to unwritable seg */
++#define DT_JMPREL	23		/* add. of PLT's relocation entries */
++#define DT_BIND_NOW	24		/* Bind now regardless of env setting */
++#define DT_NUM		25		/* Number used. */
++#define DT_LOPROC	0x70000000	/* reserved range for processor */
++#define DT_HIPROC	0x7fffffff	/*  specific dynamic array tags */
 +
-+/*
-+ * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
-+ *  @which == SEGBASE_*  ;  @base == 64-bit base address
-+ * Returns 0 on success.
-+ */
-+#define SEGBASE_FS          0
-+#define SEGBASE_GS_USER     1
-+#define SEGBASE_GS_KERNEL   2
-+#define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */
++/* Standard ELF hashing function */
++unsigned int elf_hash(const unsigned char *name);
 +
 +/*
-+ * int HYPERVISOR_iret(void)
-+ * All arguments are on the kernel stack, in the following format.
-+ * Never returns if successful. Current kernel context is lost.
-+ * The saved CS is mapped as follows:
-+ *   RING0 -> RING3 kernel mode.
-+ *   RING1 -> RING3 kernel mode.
-+ *   RING2 -> RING3 kernel mode.
-+ *   RING3 -> RING3 user mode.
-+ * However RING0 indicates that the guest kernel should return to iteself
-+ * directly with
-+ *      orb   $3,1*8(%rsp)
-+ *      iretq
-+ * If flags contains VGCF_in_syscall:
-+ *   Restore RAX, RIP, RFLAGS, RSP.
-+ *   Discard R11, RCX, CS, SS.
-+ * Otherwise:
-+ *   Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP.
-+ * All other registers are saved on hypercall entry and restored to user.
++ * Note Definitions
 + */
-+/* Guest exited in SYSCALL context? Return to guest with SYSRET? */
-+#define _VGCF_in_syscall 8
-+#define VGCF_in_syscall  (1<<_VGCF_in_syscall)
-+#define VGCF_IN_SYSCALL  VGCF_in_syscall
-+struct iret_context {
-+    /* Top of stack (%rsp at point of hypercall). */
-+    uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
-+    /* Bottom of iret stack frame. */
-+};
++typedef struct {
++	Elf32_Word namesz;
++	Elf32_Word descsz;
++	Elf32_Word type;
++} Elf32_Note;
 +
-+#ifdef __GNUC__
-+/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */
-+#define __DECL_REG(name) union { \
-+    uint64_t r ## name, e ## name; \
-+    uint32_t _e ## name; \
-+}
-+#else
-+/* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */
-+#define __DECL_REG(name) uint64_t r ## name
-+#endif
++typedef struct {
++	Elf64_Half namesz;
++	Elf64_Half descsz;
++	Elf64_Half type;
++} Elf64_Note;
 +
-+struct cpu_user_regs {
-+    uint64_t r15;
-+    uint64_t r14;
-+    uint64_t r13;
-+    uint64_t r12;
-+    __DECL_REG(bp);
-+    __DECL_REG(bx);
-+    uint64_t r11;
-+    uint64_t r10;
-+    uint64_t r9;
-+    uint64_t r8;
-+    __DECL_REG(ax);
-+    __DECL_REG(cx);
-+    __DECL_REG(dx);
-+    __DECL_REG(si);
-+    __DECL_REG(di);
-+    uint32_t error_code;    /* private */
-+    uint32_t entry_vector;  /* private */
-+    __DECL_REG(ip);
-+    uint16_t cs, _pad0[1];
-+    uint8_t  saved_upcall_mask;
-+    uint8_t  _pad1[3];
-+    __DECL_REG(flags);      /* rflags.IF == !saved_upcall_mask */
-+    __DECL_REG(sp);
-+    uint16_t ss, _pad2[3];
-+    uint16_t es, _pad3[3];
-+    uint16_t ds, _pad4[3];
-+    uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base.     */
-+    uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_usr. */
-+};
-+typedef struct cpu_user_regs cpu_user_regs_t;
-+DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
 +
-+#undef __DECL_REG
++#if defined(ELFSIZE)
++#define CONCAT(x,y)	__CONCAT(x,y)
++#define ELFNAME(x)	CONCAT(elf,CONCAT(ELFSIZE,CONCAT(_,x)))
++#define ELFNAME2(x,y)	CONCAT(x,CONCAT(_elf,CONCAT(ELFSIZE,CONCAT(_,y))))
++#define ELFNAMEEND(x)	CONCAT(x,CONCAT(_elf,ELFSIZE))
++#define ELFDEFNNAME(x)	CONCAT(ELF,CONCAT(ELFSIZE,CONCAT(_,x)))
++#endif
 +
-+#define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12)
-+#define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12)
++#if defined(ELFSIZE) && (ELFSIZE == 32)
++#define Elf_Ehdr	Elf32_Ehdr
++#define Elf_Phdr	Elf32_Phdr
++#define Elf_Shdr	Elf32_Shdr
++#define Elf_Sym		Elf32_Sym
++#define Elf_Rel		Elf32_Rel
++#define Elf_RelA	Elf32_Rela
++#define Elf_Dyn		Elf32_Dyn
++#define Elf_Word	Elf32_Word
++#define Elf_Sword	Elf32_Sword
++#define Elf_Addr	Elf32_Addr
++#define Elf_Off		Elf32_Off
++#define Elf_Nhdr	Elf32_Nhdr
++#define Elf_Note	Elf32_Note
 +
-+struct arch_vcpu_info {
-+    unsigned long cr2;
-+    unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
-+};
-+typedef struct arch_vcpu_info arch_vcpu_info_t;
++#define ELF_R_SYM	ELF32_R_SYM
++#define ELF_R_TYPE	ELF32_R_TYPE
++#define ELF_R_INFO	ELF32_R_INFO
++#define ELFCLASS	ELFCLASS32
 +
-+typedef unsigned long xen_callback_t;
++#define ELF_ST_BIND	ELF32_ST_BIND
++#define ELF_ST_TYPE	ELF32_ST_TYPE
++#define ELF_ST_INFO	ELF32_ST_INFO
 +
-+#endif /* !__ASSEMBLY__ */
++#define AuxInfo		Aux32Info
++#elif defined(ELFSIZE) && (ELFSIZE == 64)
++#define Elf_Ehdr	Elf64_Ehdr
++#define Elf_Phdr	Elf64_Phdr
++#define Elf_Shdr	Elf64_Shdr
++#define Elf_Sym		Elf64_Sym
++#define Elf_Rel		Elf64_Rel
++#define Elf_RelA	Elf64_Rela
++#define Elf_Dyn		Elf64_Dyn
++#define Elf_Word	Elf64_Word
++#define Elf_Sword	Elf64_Sword
++#define Elf_Addr	Elf64_Addr
++#define Elf_Off		Elf64_Off
++#define Elf_Nhdr	Elf64_Nhdr
++#define Elf_Note	Elf64_Note
 +
-+#endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ */
++#define ELF_R_SYM	ELF64_R_SYM
++#define ELF_R_TYPE	ELF64_R_TYPE
++#define ELF_R_INFO	ELF64_R_INFO
++#define ELFCLASS	ELFCLASS64
 +
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/arch-x86_32.h tmp-linux-2.6-xen.patch/include/xen/interface/arch-x86_32.h
---- pristine-linux-2.6.18.2/include/xen/interface/arch-x86_32.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/arch-x86_32.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,27 @@
-+/******************************************************************************
-+ * arch-x86_32.h
-+ * 
-+ * Guest OS interface to x86 32-bit Xen.
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2004-2006, K A Fraser
-+ */
++#define ELF_ST_BIND	ELF64_ST_BIND
++#define ELF_ST_TYPE	ELF64_ST_TYPE
++#define ELF_ST_INFO	ELF64_ST_INFO
++
++#define AuxInfo		Aux64Info
++#endif
 +
-+#include "arch-x86/xen.h"
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/arch-x86_64.h tmp-linux-2.6-xen.patch/include/xen/interface/arch-x86_64.h
---- pristine-linux-2.6.18.2/include/xen/interface/arch-x86_64.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/arch-x86_64.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,27 @@
++#endif /* __XEN_PUBLIC_ELFSTRUCTS_H__ */
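A quick way to sanity-check the definitions above is a tiny standalone program; this is only a sketch, assuming the header ends up reachable as <xen/interface/elfstructs.h> and that the Elf32_* typedefs appear earlier in the same file (the sample values are made up):

    /* sketch.c -- exercises the ELF macros defined above */
    #include <stdio.h>
    #include <xen/interface/elfstructs.h>

    int main(void)
    {
        /* Pack a relocation info word, then take it apart again. */
        Elf32_Word info = ELF32_R_INFO(7, 2);   /* symbol index 7, reloc type 2 */
        printf("sym=%u type=%u\n",
               (unsigned)ELF32_R_SYM(info), (unsigned)ELF32_R_TYPE(info));

        /* Decode the p_flags of a hypothetical loadable segment. */
        Elf32_Phdr phdr = { .p_type = PT_LOAD, .p_flags = PF_R | PF_X };
        printf("PT_LOAD %c%c%c\n",
               (phdr.p_flags & PF_R) ? 'r' : '-',
               (phdr.p_flags & PF_W) ? 'w' : '-',
               (phdr.p_flags & PF_X) ? 'x' : '-');
        return 0;
    }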
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/event_channel.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/event_channel.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,264 @@
 +/******************************************************************************
-+ * arch-x86_64.h
++ * event_channel.h
 + * 
-+ * Guest OS interface to x86 64-bit Xen.
++ * Event channels between domains.
 + * 
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
@@ -97632,262 +136888,237 @@
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
 + *
-+ * Copyright (c) 2004-2006, K A Fraser
-+ */
-+
-+#include "arch-x86/xen.h"
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/callback.h tmp-linux-2.6-xen.patch/include/xen/interface/callback.h
---- pristine-linux-2.6.18.2/include/xen/interface/callback.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/callback.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,92 @@
-+/******************************************************************************
-+ * callback.h
-+ *
-+ * Register guest OS callbacks with Xen.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2006, Ian Campbell
++ * Copyright (c) 2003-2004, K A Fraser.
 + */
 +
-+#ifndef __XEN_PUBLIC_CALLBACK_H__
-+#define __XEN_PUBLIC_CALLBACK_H__
-+
-+#include "xen.h"
++#ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__
++#define __XEN_PUBLIC_EVENT_CHANNEL_H__
 +
 +/*
 + * Prototype for this hypercall is:
-+ *   long callback_op(int cmd, void *extra_args)
-+ * @cmd        == CALLBACKOP_??? (callback operation).
-+ * @extra_args == Operation-specific extra arguments (NULL if none).
++ *  int event_channel_op(int cmd, void *args)
++ * @cmd  == EVTCHNOP_??? (event-channel operation).
++ * @args == Operation-specific extra arguments (NULL if none).
 + */
 +
-+#define CALLBACKTYPE_event                 0
-+#define CALLBACKTYPE_failsafe              1
-+#define CALLBACKTYPE_syscall               2 /* x86_64 only */
++typedef uint32_t evtchn_port_t;
++DEFINE_XEN_GUEST_HANDLE(evtchn_port_t);
++
 +/*
-+ * sysenter is only available on x86_32 with the
-+ * supervisor_mode_kernel option enabled.
++ * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
++ * accepting interdomain bindings from domain <remote_dom>. A fresh port
++ * is allocated in <dom> and returned as <port>.
++ * NOTES:
++ *  1. If the caller is unprivileged then <dom> must be DOMID_SELF.
++ *  2. <remote_dom> may be DOMID_SELF, allowing loopback connections.
 + */
-+#define CALLBACKTYPE_sysenter              3
-+#define CALLBACKTYPE_nmi                   4
++#define EVTCHNOP_alloc_unbound    6
++struct evtchn_alloc_unbound {
++    /* IN parameters */
++    domid_t dom, remote_dom;
++    /* OUT parameters */
++    evtchn_port_t port;
++};
++typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t;
 +
 +/*
-+ * Disable event deliver during callback? This flag is ignored for event and
-+ * NMI callbacks: event delivery is unconditionally disabled.
++ * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between
++ * the calling domain and <remote_dom>. <remote_dom,remote_port> must identify
++ * a port that is unbound and marked as accepting bindings from the calling
++ * domain. A fresh port is allocated in the calling domain and returned as
++ * <local_port>.
++ * NOTES:
++ *  1. <remote_dom> may be DOMID_SELF, allowing loopback connections.
 + */
-+#define _CALLBACKF_mask_events             0
-+#define CALLBACKF_mask_events              (1U << _CALLBACKF_mask_events)
++#define EVTCHNOP_bind_interdomain 0
++struct evtchn_bind_interdomain {
++    /* IN parameters. */
++    domid_t remote_dom;
++    evtchn_port_t remote_port;
++    /* OUT parameters. */
++    evtchn_port_t local_port;
++};
++typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t;
 +
 +/*
-+ * Register a callback.
++ * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <virq> on the
++ * specified vcpu.
++ * NOTES:
++ *  1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list
++ *     in xen.h for the classification of each VIRQ.
++ *  2. Global VIRQs must be allocated on VCPU0 but can subsequently be
++ *     re-bound via EVTCHNOP_bind_vcpu.
++ *  3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu.
++ *     The allocated event channel is bound to the specified vcpu and the
++ *     binding cannot be changed.
 + */
-+#define CALLBACKOP_register                0
-+struct callback_register {
-+    uint16_t type;
-+    uint16_t flags;
-+    xen_callback_t address;
++#define EVTCHNOP_bind_virq        1
++struct evtchn_bind_virq {
++    /* IN parameters. */
++    uint32_t virq;
++    uint32_t vcpu;
++    /* OUT parameters. */
++    evtchn_port_t port;
 +};
-+typedef struct callback_register callback_register_t;
-+DEFINE_XEN_GUEST_HANDLE(callback_register_t);
++typedef struct evtchn_bind_virq evtchn_bind_virq_t;
 +
 +/*
-+ * Unregister a callback.
-+ *
-+ * Not all callbacks can be unregistered. -EINVAL will be returned if
-+ * you attempt to unregister such a callback.
++ * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ <pirq>.
++ * NOTES:
++ *  1. A physical IRQ may be bound to at most one event channel per domain.
++ *  2. Only a sufficiently-privileged domain may bind to a physical IRQ.
 + */
-+#define CALLBACKOP_unregister              1
-+struct callback_unregister {
-+    uint16_t type;
-+    uint16_t _unused;
++#define EVTCHNOP_bind_pirq        2
++struct evtchn_bind_pirq {
++    /* IN parameters. */
++    uint32_t pirq;
++#define BIND_PIRQ__WILL_SHARE 1
++    uint32_t flags; /* BIND_PIRQ__* */
++    /* OUT parameters. */
++    evtchn_port_t port;
 +};
-+typedef struct callback_unregister callback_unregister_t;
-+DEFINE_XEN_GUEST_HANDLE(callback_unregister_t);
-+
-+#endif /* __XEN_PUBLIC_CALLBACK_H__ */
++typedef struct evtchn_bind_pirq evtchn_bind_pirq_t;
 +
 +/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
++ * EVTCHNOP_bind_ipi: Bind a local event channel to receive events.
++ * NOTES:
++ *  1. The allocated event channel is bound to the specified vcpu. The binding
++ *     may not be changed.
 + */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/COPYING tmp-linux-2.6-xen.patch/include/xen/interface/COPYING
---- pristine-linux-2.6.18.2/include/xen/interface/COPYING	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/COPYING	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,38 @@
-+XEN NOTICE
-+==========
-+
-+This copyright applies to all files within this subdirectory and its
-+subdirectories:
-+  include/public/*.h
-+  include/public/hvm/*.h
-+  include/public/io/*.h
-+
-+The intention is that these files can be freely copied into the source
-+tree of an operating system when porting that OS to run on Xen. Doing
-+so does *not* cause the OS to become subject to the terms of the GPL.
-+
-+All other files in the Xen source distribution are covered by version
-+2 of the GNU General Public License except where explicitly stated
-+otherwise within individual source files.
-+
-+ -- Keir Fraser (on behalf of the Xen team)
-+
-+=====================================================================
-+
-+Permission is hereby granted, free of charge, to any person obtaining a copy
-+of this software and associated documentation files (the "Software"), to
-+deal in the Software without restriction, including without limitation the
-+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+sell copies of the Software, and to permit persons to whom the Software is
-+furnished to do so, subject to the following conditions:
-+
-+The above copyright notice and this permission notice shall be included in
-+all copies or substantial portions of the Software.
++#define EVTCHNOP_bind_ipi         7
++struct evtchn_bind_ipi {
++    uint32_t vcpu;
++    /* OUT parameters. */
++    evtchn_port_t port;
++};
++typedef struct evtchn_bind_ipi evtchn_bind_ipi_t;
 +
-+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 
-+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 
-+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 
-+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 
-+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 
-+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 
-+DEALINGS IN THE SOFTWARE.
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/dom0_ops.h tmp-linux-2.6-xen.patch/include/xen/interface/dom0_ops.h
---- pristine-linux-2.6.18.2/include/xen/interface/dom0_ops.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/dom0_ops.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,120 @@
-+/******************************************************************************
-+ * dom0_ops.h
-+ * 
-+ * Process command requests from domain-0 guest OS.
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2002-2003, B Dragovic
-+ * Copyright (c) 2002-2006, K Fraser
++/*
++ * EVTCHNOP_close: Close a local event channel <port>. If the channel is
++ * interdomain then the remote end is placed in the unbound state
++ * (EVTCHNSTAT_unbound), awaiting a new connection.
 + */
++#define EVTCHNOP_close            3
++struct evtchn_close {
++    /* IN parameters. */
++    evtchn_port_t port;
++};
++typedef struct evtchn_close evtchn_close_t;
 +
-+#ifndef __XEN_PUBLIC_DOM0_OPS_H__
-+#define __XEN_PUBLIC_DOM0_OPS_H__
-+
-+#include "xen.h"
-+#include "platform.h"
-+
-+#if __XEN_INTERFACE_VERSION__ >= 0x00030204
-+#error "dom0_ops.h is a compatibility interface only"
-+#endif
-+
-+#define DOM0_INTERFACE_VERSION XENPF_INTERFACE_VERSION
-+
-+#define DOM0_SETTIME          XENPF_settime
-+#define dom0_settime          xenpf_settime
-+#define dom0_settime_t        xenpf_settime_t
-+
-+#define DOM0_ADD_MEMTYPE      XENPF_add_memtype
-+#define dom0_add_memtype      xenpf_add_memtype
-+#define dom0_add_memtype_t    xenpf_add_memtype_t
-+
-+#define DOM0_DEL_MEMTYPE      XENPF_del_memtype
-+#define dom0_del_memtype      xenpf_del_memtype
-+#define dom0_del_memtype_t    xenpf_del_memtype_t
-+
-+#define DOM0_READ_MEMTYPE     XENPF_read_memtype
-+#define dom0_read_memtype     xenpf_read_memtype
-+#define dom0_read_memtype_t   xenpf_read_memtype_t
-+
-+#define DOM0_MICROCODE        XENPF_microcode_update
-+#define dom0_microcode        xenpf_microcode_update
-+#define dom0_microcode_t      xenpf_microcode_update_t
++/*
++ * EVTCHNOP_send: Send an event to the remote end of the channel whose local
++ * endpoint is <port>.
++ */
++#define EVTCHNOP_send             4
++struct evtchn_send {
++    /* IN parameters. */
++    evtchn_port_t port;
++};
++typedef struct evtchn_send evtchn_send_t;
 +
-+#define DOM0_PLATFORM_QUIRK   XENPF_platform_quirk
-+#define dom0_platform_quirk   xenpf_platform_quirk
-+#define dom0_platform_quirk_t xenpf_platform_quirk_t
++/*
++ * EVTCHNOP_status: Get the current status of the communication channel which
++ * has an endpoint at <dom, port>.
++ * NOTES:
++ *  1. <dom> may be specified as DOMID_SELF.
++ *  2. Only a sufficiently-privileged domain may obtain the status of an event
++ *     channel for which <dom> is not DOMID_SELF.
++ */
++#define EVTCHNOP_status           5
++struct evtchn_status {
++    /* IN parameters */
++    domid_t  dom;
++    evtchn_port_t port;
++    /* OUT parameters */
++#define EVTCHNSTAT_closed       0  /* Channel is not in use.                 */
++#define EVTCHNSTAT_unbound      1  /* Channel awaits interdomain connection. */
++#define EVTCHNSTAT_interdomain  2  /* Channel is connected to remote domain. */
++#define EVTCHNSTAT_pirq         3  /* Channel is bound to a phys IRQ line.   */
++#define EVTCHNSTAT_virq         4  /* Channel is bound to a virtual IRQ line */
++#define EVTCHNSTAT_ipi          5  /* Channel is bound to a virtual IPI line */
++    uint32_t status;
++    uint32_t vcpu;                 /* VCPU to which this channel is bound.   */
++    union {
++        struct {
++            domid_t dom;
++        } unbound; /* EVTCHNSTAT_unbound */
++        struct {
++            domid_t dom;
++            evtchn_port_t port;
++        } interdomain; /* EVTCHNSTAT_interdomain */
++        uint32_t pirq;      /* EVTCHNSTAT_pirq        */
++        uint32_t virq;      /* EVTCHNSTAT_virq        */
++    } u;
++};
++typedef struct evtchn_status evtchn_status_t;
 +
-+typedef uint64_t cpumap_t;
++/*
++ * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an
++ * event is pending.
++ * NOTES:
++ *  1. IPI-bound channels always notify the vcpu specified at bind time.
++ *     This binding cannot be changed.
++ *  2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time.
++ *     This binding cannot be changed.
++ *  3. All other channels notify vcpu0 by default. This default is set when
++ *     the channel is allocated (a port that is freed and subsequently reused
++ *     has its binding reset to vcpu0).
++ */
++#define EVTCHNOP_bind_vcpu        8
++struct evtchn_bind_vcpu {
++    /* IN parameters. */
++    evtchn_port_t port;
++    uint32_t vcpu;
++};
++typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t;
 +
-+/* Unsupported legacy operation -- defined for API compatibility. */
-+#define DOM0_MSR                 15
-+struct dom0_msr {
-+    /* IN variables. */
-+    uint32_t write;
-+    cpumap_t cpu_mask;
-+    uint32_t msr;
-+    uint32_t in1;
-+    uint32_t in2;
-+    /* OUT variables. */
-+    uint32_t out1;
-+    uint32_t out2;
++/*
++ * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver
++ * a notification to the appropriate VCPU if an event is pending.
++ */
++#define EVTCHNOP_unmask           9
++struct evtchn_unmask {
++    /* IN parameters. */
++    evtchn_port_t port;
 +};
-+typedef struct dom0_msr dom0_msr_t;
-+DEFINE_XEN_GUEST_HANDLE(dom0_msr_t);
++typedef struct evtchn_unmask evtchn_unmask_t;
 +
-+/* Unsupported legacy operation -- defined for API compatibility. */
-+#define DOM0_PHYSICAL_MEMORY_MAP 40
-+struct dom0_memory_map_entry {
-+    uint64_t start, end;
-+    uint32_t flags; /* reserved */
-+    uint8_t  is_ram;
++/*
++ * EVTCHNOP_reset: Close all event channels associated with specified domain.
++ * NOTES:
++ *  1. <dom> may be specified as DOMID_SELF.
++ *  2. Only a sufficiently-privileged domain may specify other than DOMID_SELF.
++ */
++#define EVTCHNOP_reset           10
++struct evtchn_reset {
++    /* IN parameters. */
++    domid_t dom;
 +};
-+typedef struct dom0_memory_map_entry dom0_memory_map_entry_t;
-+DEFINE_XEN_GUEST_HANDLE(dom0_memory_map_entry_t);
++typedef struct evtchn_reset evtchn_reset_t;
 +
-+struct dom0_op {
-+    uint32_t cmd;
-+    uint32_t interface_version; /* DOM0_INTERFACE_VERSION */
++/*
++ * Argument to event_channel_op_compat() hypercall. Superseded by the new
++ * event_channel_op() hypercall since interface version 0x00030202.
++ */
++struct evtchn_op {
++    uint32_t cmd; /* EVTCHNOP_* */
 +    union {
-+        struct dom0_msr               msr;
-+        struct dom0_settime           settime;
-+        struct dom0_add_memtype       add_memtype;
-+        struct dom0_del_memtype       del_memtype;
-+        struct dom0_read_memtype      read_memtype;
-+        struct dom0_microcode         microcode;
-+        struct dom0_platform_quirk    platform_quirk;
-+        struct dom0_memory_map_entry  physical_memory_map;
-+        uint8_t                       pad[128];
++        struct evtchn_alloc_unbound    alloc_unbound;
++        struct evtchn_bind_interdomain bind_interdomain;
++        struct evtchn_bind_virq        bind_virq;
++        struct evtchn_bind_pirq        bind_pirq;
++        struct evtchn_bind_ipi         bind_ipi;
++        struct evtchn_close            close;
++        struct evtchn_send             send;
++        struct evtchn_status           status;
++        struct evtchn_bind_vcpu        bind_vcpu;
++        struct evtchn_unmask           unmask;
 +    } u;
 +};
-+typedef struct dom0_op dom0_op_t;
-+DEFINE_XEN_GUEST_HANDLE(dom0_op_t);
++typedef struct evtchn_op evtchn_op_t;
++DEFINE_XEN_GUEST_HANDLE(evtchn_op_t);
 +
-+#endif /* __XEN_PUBLIC_DOM0_OPS_H__ */
++#endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */
 +
 +/*
 + * Local variables:
@@ -97898,14 +137129,14 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
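The alloc_unbound/bind_interdomain pair above is the complete connection handshake. A minimal sketch of a loopback setup (note 2 of EVTCHNOP_alloc_unbound), assuming HYPERVISOR_event_channel_op() is the guest-side hypercall wrapper and DOMID_SELF comes from xen.h, neither of which is part of this file:

    /* Sketch only; error paths abbreviated. */
    static int make_loopback_channel(evtchn_port_t *tx, evtchn_port_t *rx)
    {
        struct evtchn_alloc_unbound alloc = {
            .dom        = DOMID_SELF,
            .remote_dom = DOMID_SELF,   /* loopback is explicitly allowed */
        };
        struct evtchn_bind_interdomain bind;
        int rc;

        rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc);
        if (rc)
            return rc;

        bind.remote_dom  = DOMID_SELF;
        bind.remote_port = alloc.port;
        rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, &bind);
        if (rc)
            return rc;  /* a real caller would EVTCHNOP_close alloc.port here */

        *tx = alloc.port;       /* notify with EVTCHNOP_send on either end */
        *rx = bind.local_port;
        return 0;
    }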
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/domctl.h tmp-linux-2.6-xen.patch/include/xen/interface/domctl.h
---- pristine-linux-2.6.18.2/include/xen/interface/domctl.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/domctl.h	2007-09-30 18:06:19.000000000 +0200
-@@ -0,0 +1,481 @@
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/features.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/features.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,74 @@
 +/******************************************************************************
-+ * domctl.h
++ * features.h
 + * 
-+ * Domain management operations. For use by node control stack.
++ * Feature flags, reported by XENVER_get_features.
 + * 
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
@@ -97925,473 +137156,425 @@
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
 + *
-+ * Copyright (c) 2002-2003, B Dragovic
-+ * Copyright (c) 2002-2006, K Fraser
++ * Copyright (c) 2006, Keir Fraser <keir at xensource.com>
 + */
 +
-+#ifndef __XEN_PUBLIC_DOMCTL_H__
-+#define __XEN_PUBLIC_DOMCTL_H__
++#ifndef __XEN_PUBLIC_FEATURES_H__
++#define __XEN_PUBLIC_FEATURES_H__
 +
-+#if !defined(__XEN__) && !defined(__XEN_TOOLS__)
-+#error "domctl operations are intended for use by node control tools only"
-+#endif
++/*
++ * If set, the guest does not need to write-protect its pagetables, and can
++ * update them via direct writes.
++ */
++#define XENFEAT_writable_page_tables       0
 +
-+#include "xen.h"
++/*
++ * If set, the guest does not need to write-protect its segment descriptor
++ * tables, and can update them via direct writes.
++ */
++#define XENFEAT_writable_descriptor_tables 1
 +
-+#define XEN_DOMCTL_INTERFACE_VERSION 0x00000005
++/*
++ * If set, translation between the guest's 'pseudo-physical' address space
++ * and the host's machine address space is handled by the hypervisor. In this
++ * mode the guest does not need to perform phys-to/from-machine translations
++ * when performing page table operations.
++ */
++#define XENFEAT_auto_translated_physmap    2
 +
-+struct xenctl_cpumap {
-+    XEN_GUEST_HANDLE_64(uint8_t) bitmap;
-+    uint32_t nr_cpus;
-+};
++/* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */
++#define XENFEAT_supervisor_mode_kernel     3
 +
 +/*
-+ * NB. xen_domctl.domain is an IN/OUT parameter for this operation.
-+ * If it is specified as zero, an id is auto-allocated and returned.
++ * If set, the guest does not need to allocate x86 PAE page directories
++ * below 4GB. This flag is usually implied by auto_translated_physmap.
 + */
-+#define XEN_DOMCTL_createdomain       1
-+struct xen_domctl_createdomain {
-+    /* IN parameters */
-+    uint32_t ssidref;
-+    xen_domain_handle_t handle;
-+ /* Is this an HVM guest (as opposed to a PV guest)? */
-+#define _XEN_DOMCTL_CDF_hvm_guest 0
-+#define XEN_DOMCTL_CDF_hvm_guest  (1U<<_XEN_DOMCTL_CDF_hvm_guest)
-+    uint32_t flags;
-+};
-+typedef struct xen_domctl_createdomain xen_domctl_createdomain_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t);
++#define XENFEAT_pae_pgdir_above_4gb        4
 +
-+#define XEN_DOMCTL_destroydomain      2
-+#define XEN_DOMCTL_pausedomain        3
-+#define XEN_DOMCTL_unpausedomain      4
-+#define XEN_DOMCTL_resumedomain      27
++/* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */
++#define XENFEAT_mmu_pt_update_preserve_ad  5
 +
-+#define XEN_DOMCTL_getdomaininfo      5
-+struct xen_domctl_getdomaininfo {
-+    /* OUT variables. */
-+    domid_t  domain;              /* Also echoed in domctl.domain */
-+ /* Domain is scheduled to die. */
-+#define _XEN_DOMINF_dying     0
-+#define XEN_DOMINF_dying      (1U<<_XEN_DOMINF_dying)
-+ /* Domain is an HVM guest (as opposed to a PV guest). */
-+#define _XEN_DOMINF_hvm_guest 1
-+#define XEN_DOMINF_hvm_guest  (1U<<_XEN_DOMINF_hvm_guest)
-+ /* The guest OS has shut down. */
-+#define _XEN_DOMINF_shutdown  2
-+#define XEN_DOMINF_shutdown   (1U<<_XEN_DOMINF_shutdown)
-+ /* Currently paused by control software. */
-+#define _XEN_DOMINF_paused    3
-+#define XEN_DOMINF_paused     (1U<<_XEN_DOMINF_paused)
-+ /* Currently blocked pending an event.     */
-+#define _XEN_DOMINF_blocked   4
-+#define XEN_DOMINF_blocked    (1U<<_XEN_DOMINF_blocked)
-+ /* Domain is currently running.            */
-+#define _XEN_DOMINF_running   5
-+#define XEN_DOMINF_running    (1U<<_XEN_DOMINF_running)
-+ /* Being debugged.  */
-+#define _XEN_DOMINF_debugged  6
-+#define XEN_DOMINF_debugged   (1U<<_XEN_DOMINF_debugged)
-+ /* CPU to which this domain is bound.      */
-+#define XEN_DOMINF_cpumask      255
-+#define XEN_DOMINF_cpushift       8
-+ /* XEN_DOMINF_shutdown guest-supplied code.  */
-+#define XEN_DOMINF_shutdownmask 255
-+#define XEN_DOMINF_shutdownshift 16
-+    uint32_t flags;              /* XEN_DOMINF_* */
-+    uint64_aligned_t tot_pages;
-+    uint64_aligned_t max_pages;
-+    uint64_aligned_t shared_info_frame; /* GMFN of shared_info struct */
-+    uint64_aligned_t cpu_time;
-+    uint32_t nr_online_vcpus;    /* Number of VCPUs currently online. */
-+    uint32_t max_vcpu_id;        /* Maximum VCPUID in use by this domain. */
-+    uint32_t ssidref;
-+    xen_domain_handle_t handle;
-+};
-+typedef struct xen_domctl_getdomaininfo xen_domctl_getdomaininfo_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdomaininfo_t);
++#define XENFEAT_NR_SUBMAPS 1
 +
++#endif /* __XEN_PUBLIC_FEATURES_H__ */
 +
-+#define XEN_DOMCTL_getmemlist         6
-+struct xen_domctl_getmemlist {
-+    /* IN variables. */
-+    /* Max entries to write to output buffer. */
-+    uint64_aligned_t max_pfns;
-+    /* Start index in guest's page list. */
-+    uint64_aligned_t start_pfn;
-+    XEN_GUEST_HANDLE_64(uint64_t) buffer;
-+    /* OUT variables. */
-+    uint64_aligned_t num_pfns;
-+};
-+typedef struct xen_domctl_getmemlist xen_domctl_getmemlist_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_getmemlist_t);
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
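Guests probe these bits through XENVER_get_features; a brief sketch follows. XENVER_get_features and struct xen_feature_info (fields submap_idx in, submap out) live in the companion version.h rather than this hunk, and HYPERVISOR_xen_version() is the assumed guest-side wrapper:

    /* Sketch: test one feature flag at start-of-day. */
    static int xen_feature_supported(unsigned int feature)
    {
        struct xen_feature_info fi = { .submap_idx = feature / 32 };

        if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
            return 0;
        return (fi.submap >> (feature % 32)) & 1;
    }

    /* e.g.: if (xen_feature_supported(XENFEAT_writable_page_tables)) ... */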
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/foreign/Makefile
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/foreign/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,37 @@
++XEN_ROOT=../../../..
++include $(XEN_ROOT)/Config.mk
 +
++architectures := x86_32 x86_64 ia64
++headers := $(patsubst %, %.h, $(architectures))
++scripts := $(wildcard *.py)
 +
-+#define XEN_DOMCTL_getpageframeinfo   7
++.PHONY: all clean check-headers
++all: $(headers) check-headers
 +
-+#define XEN_DOMCTL_PFINFO_LTAB_SHIFT 28
-+#define XEN_DOMCTL_PFINFO_NOTAB   (0x0U<<28)
-+#define XEN_DOMCTL_PFINFO_L1TAB   (0x1U<<28)
-+#define XEN_DOMCTL_PFINFO_L2TAB   (0x2U<<28)
-+#define XEN_DOMCTL_PFINFO_L3TAB   (0x3U<<28)
-+#define XEN_DOMCTL_PFINFO_L4TAB   (0x4U<<28)
-+#define XEN_DOMCTL_PFINFO_LTABTYPE_MASK (0x7U<<28)
-+#define XEN_DOMCTL_PFINFO_LPINTAB (0x1U<<31)
-+#define XEN_DOMCTL_PFINFO_XTAB    (0xfU<<28) /* invalid page */
-+#define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28)
++clean:
++	rm -f $(headers)
++	rm -f checker checker.c $(XEN_TARGET_ARCH).size
++	rm -f *.pyc *.o *~
 +
-+struct xen_domctl_getpageframeinfo {
-+    /* IN variables. */
-+    uint64_aligned_t gmfn; /* GMFN to query */
-+    /* OUT variables. */
-+    /* Is the page PINNED to a type? */
-+    uint32_t type;         /* see above type defs */
-+};
-+typedef struct xen_domctl_getpageframeinfo xen_domctl_getpageframeinfo_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo_t);
++ifeq ($(CROSS_COMPILE)$(XEN_TARGET_ARCH),$(XEN_COMPILE_ARCH))
++check-headers: checker
++	./checker > $(XEN_TARGET_ARCH).size
++	diff -u reference.size $(XEN_TARGET_ARCH).size
++checker: checker.c $(headers)
++	$(HOSTCC) $(HOSTCFLAGS) -o $@ $<
++else
++check-headers:
++	@echo "cross build: skipping check"
++endif
 +
++x86_32.h: ../arch-x86/xen-x86_32.h ../arch-x86/xen.h ../xen.h $(scripts)
++	python mkheader.py $* $@ $(filter %.h,$^)
 +
-+#define XEN_DOMCTL_getpageframeinfo2  8
-+struct xen_domctl_getpageframeinfo2 {
-+    /* IN variables. */
-+    uint64_aligned_t num;
-+    /* IN/OUT variables. */
-+    XEN_GUEST_HANDLE_64(uint32_t) array;
-+};
-+typedef struct xen_domctl_getpageframeinfo2 xen_domctl_getpageframeinfo2_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo2_t);
++x86_64.h: ../arch-x86/xen-x86_64.h ../arch-x86/xen.h ../xen.h $(scripts)
++	python mkheader.py $* $@ $(filter %.h,$^)
 +
++ia64.h: ../arch-ia64.h ../xen.h $(scripts)
++	python mkheader.py $* $@ $(filter %.h,$^)
 +
-+/*
-+ * Control shadow pagetables operation
-+ */
-+#define XEN_DOMCTL_shadow_op         10
++checker.c: $(scripts)
++	python mkchecker.py $(XEN_TARGET_ARCH) $@ $(architectures)
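For reference, with XEN_TARGET_ARCH set to x86_32 the pattern rules above expand to plain script invocations:

    python mkheader.py x86_32 x86_32.h ../arch-x86/xen-x86_32.h ../arch-x86/xen.h ../xen.h
    python mkchecker.py x86_32 checker.c x86_32 x86_64 ia64

check-headers then compiles checker.c with $(HOSTCC) and diffs its output against the reference.size file added further down; on cross builds the check is skipped.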
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/foreign/mkchecker.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/foreign/mkchecker.py	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,58 @@
++#!/usr/bin/python
 +
-+/* Disable shadow mode. */
-+#define XEN_DOMCTL_SHADOW_OP_OFF         0
++import sys;
++from structs import structs;
 +
-+/* Enable shadow mode (mode contains ORed XEN_DOMCTL_SHADOW_ENABLE_* flags). */
-+#define XEN_DOMCTL_SHADOW_OP_ENABLE      32
++# command line arguments
++arch    = sys.argv[1];
++outfile = sys.argv[2];
++archs   = sys.argv[3:];
 +
-+/* Log-dirty bitmap operations. */
-+ /* Return the bitmap and clean internal copy for next round. */
-+#define XEN_DOMCTL_SHADOW_OP_CLEAN       11
-+ /* Return the bitmap but do not modify internal copy. */
-+#define XEN_DOMCTL_SHADOW_OP_PEEK        12
++f = open(outfile, "w");
++f.write('''
++/*
++ * sanity checks for generated foreign headers:
++ *  - verify struct sizes
++ *
++ * generated by %s -- DO NOT EDIT
++ */
++#include <stdio.h>
++#include <stdlib.h>
++#include <stddef.h>
++#include <inttypes.h>
++#include "../xen.h"
++''' % sys.argv[0]);
 +
-+/* Memory allocation accessors. */
-+#define XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION   30
-+#define XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION   31
++for a in archs:
++    f.write('#include "%s.h"\n' % a);
 +
-+/* Legacy enable operations. */
-+ /* Equiv. to ENABLE with no mode flags. */
-+#define XEN_DOMCTL_SHADOW_OP_ENABLE_TEST       1
-+ /* Equiv. to ENABLE with mode flag ENABLE_LOG_DIRTY. */
-+#define XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY   2
-+ /* Equiv. to ENABLE with mode flags ENABLE_REFCOUNT and ENABLE_TRANSLATE. */
-+#define XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE  3
++f.write('int main(int argc, char *argv[])\n{\n');
 +
-+/* Mode flags for XEN_DOMCTL_SHADOW_OP_ENABLE. */
-+ /*
-+  * Shadow pagetables are refcounted: guest does not use explicit mmu
-+  * operations nor write-protect its pagetables.
-+  */
-+#define XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT  (1 << 1)
-+ /*
-+  * Log pages in a bitmap as they are dirtied.
-+  * Used for live relocation to determine which pages must be re-sent.
-+  */
-+#define XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY (1 << 2)
-+ /*
-+  * Automatically translate GPFNs into MFNs.
-+  */
-+#define XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE (1 << 3)
-+ /*
-+  * Xen does not steal virtual address space from the guest.
-+  * Requires HVM support.
-+  */
-+#define XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL  (1 << 4)
++f.write('\tprintf("\\n");');
++f.write('printf("%-25s |", "structs");\n');
++for a in archs:
++    f.write('\tprintf("%%8s", "%s");\n' % a);
++f.write('\tprintf("\\n");');
 +
-+struct xen_domctl_shadow_op_stats {
-+    uint32_t fault_count;
-+    uint32_t dirty_count;
-+};
-+typedef struct xen_domctl_shadow_op_stats xen_domctl_shadow_op_stats_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_stats_t);
++f.write('\tprintf("\\n");');
++for struct in structs:
++    f.write('\tprintf("%%-25s |", "%s");\n' % struct);
++    for a in archs:
++        if a == arch:
++            s = struct; # native
++        else:
++            s = struct + "_" + a;
++        f.write('#ifdef %s_has_no_%s\n' % (a, struct));
++        f.write('\tprintf("%8s", "-");\n');
++        f.write("#else\n");
++        f.write('\tprintf("%%8zd", sizeof(struct %s));\n' % s);
++        f.write("#endif\n");
 +
-+struct xen_domctl_shadow_op {
-+    /* IN variables. */
-+    uint32_t       op;       /* XEN_DOMCTL_SHADOW_OP_* */
++    f.write('\tprintf("\\n");\n\n');
 +
-+    /* OP_ENABLE */
-+    uint32_t       mode;     /* XEN_DOMCTL_SHADOW_ENABLE_* */
++f.write('\tprintf("\\n");\n');
++f.write('\texit(0);\n');
++f.write('}\n');
 +
-+    /* OP_GET_ALLOCATION / OP_SET_ALLOCATION */
-+    uint32_t       mb;       /* Shadow memory allocation in MB */
++f.close();
 +
-+    /* OP_PEEK / OP_CLEAN */
-+    XEN_GUEST_HANDLE_64(uint8_t) dirty_bitmap;
-+    uint64_aligned_t pages; /* Size of buffer. Updated with actual size. */
-+    struct xen_domctl_shadow_op_stats stats;
-+};
-+typedef struct xen_domctl_shadow_op xen_domctl_shadow_op_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_t);
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/foreign/mkheader.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/foreign/mkheader.py	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,167 @@
++#!/usr/bin/python
 +
++import sys, re;
++from structs import unions, structs, defines;
 +
-+#define XEN_DOMCTL_max_mem           11
-+struct xen_domctl_max_mem {
-+    /* IN variables. */
-+    uint64_aligned_t max_memkb;
-+};
-+typedef struct xen_domctl_max_mem xen_domctl_max_mem_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_mem_t);
++# command line arguments
++arch    = sys.argv[1];
++outfile = sys.argv[2];
++infiles = sys.argv[3:];
 +
 +
-+#define XEN_DOMCTL_setvcpucontext    12
-+#define XEN_DOMCTL_getvcpucontext    13
-+struct xen_domctl_vcpucontext {
-+    uint32_t              vcpu;                  /* IN */
-+    XEN_GUEST_HANDLE_64(vcpu_guest_context_t) ctxt; /* IN/OUT */
-+};
-+typedef struct xen_domctl_vcpucontext xen_domctl_vcpucontext_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpucontext_t);
++###########################################################################
++# configuration #2: architecture information
 +
++inttypes = {};
++header = {};
++footer = {};
 +
-+#define XEN_DOMCTL_getvcpuinfo       14
-+struct xen_domctl_getvcpuinfo {
-+    /* IN variables. */
-+    uint32_t vcpu;
-+    /* OUT variables. */
-+    uint8_t  online;                  /* currently online (not hotplugged)? */
-+    uint8_t  blocked;                 /* blocked waiting for an event? */
-+    uint8_t  running;                 /* currently scheduled on its CPU? */
-+    uint64_aligned_t cpu_time;        /* total cpu time consumed (ns) */
-+    uint32_t cpu;                     /* current mapping   */
++# x86_32
++inttypes["x86_32"] = {
++    "unsigned long" : "uint32_t",
++    "long"          : "uint32_t",
++    "xen_pfn_t"     : "uint32_t",
 +};
-+typedef struct xen_domctl_getvcpuinfo xen_domctl_getvcpuinfo_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvcpuinfo_t);
-+
++header["x86_32"] = """
++#define __i386___X86_32 1
++#pragma pack(4)
++""";
++footer["x86_32"] = """
++#pragma pack()
++""";
 +
-+/* Get/set which physical cpus a vcpu can execute on. */
-+#define XEN_DOMCTL_setvcpuaffinity    9
-+#define XEN_DOMCTL_getvcpuaffinity   25
-+struct xen_domctl_vcpuaffinity {
-+    uint32_t  vcpu;              /* IN */
-+    struct xenctl_cpumap cpumap; /* IN/OUT */
++# x86_64
++inttypes["x86_64"] = {
++    "unsigned long" : "__align8__ uint64_t",
++    "long"          : "__align8__ uint64_t",
++    "xen_pfn_t"     : "__align8__ uint64_t",
 +};
-+typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t);
-+
++header["x86_64"] = """
++#ifdef __GNUC__
++# define __DECL_REG(name) union { uint64_t r ## name, e ## name; }
++# define __align8__ __attribute__((aligned (8)))
++#else
++# define __DECL_REG(name) uint64_t r ## name
++# define __align8__ FIXME
++#endif
++#define __x86_64___X86_64 1
++""";
 +
-+#define XEN_DOMCTL_max_vcpus         15
-+struct xen_domctl_max_vcpus {
-+    uint32_t max;           /* maximum number of vcpus */
++# ia64
++inttypes["ia64"] = {
++    "unsigned long" : "__align8__ uint64_t",
++    "long"          : "__align8__ uint64_t",
++    "xen_pfn_t"     : "__align8__ uint64_t",
++    "long double"   : "__align16__ ldouble_t",
 +};
-+typedef struct xen_domctl_max_vcpus xen_domctl_max_vcpus_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t);
++header["ia64"] = """
++#define __align8__ __attribute__((aligned (8)))
++#define __align16__ __attribute__((aligned (16)))
++typedef unsigned char ldouble_t[16];
++""";
 +
 +
-+#define XEN_DOMCTL_scheduler_op      16
-+/* Scheduler types. */
-+#define XEN_SCHEDULER_SEDF     4
-+#define XEN_SCHEDULER_CREDIT   5
-+/* Set or get info? */
-+#define XEN_DOMCTL_SCHEDOP_putinfo 0
-+#define XEN_DOMCTL_SCHEDOP_getinfo 1
-+struct xen_domctl_scheduler_op {
-+    uint32_t sched_id;  /* XEN_SCHEDULER_* */
-+    uint32_t cmd;       /* XEN_DOMCTL_SCHEDOP_* */
-+    union {
-+        struct xen_domctl_sched_sedf {
-+            uint64_aligned_t period;
-+            uint64_aligned_t slice;
-+            uint64_aligned_t latency;
-+            uint32_t extratime;
-+            uint32_t weight;
-+        } sedf;
-+        struct xen_domctl_sched_credit {
-+            uint16_t weight;
-+            uint16_t cap;
-+        } credit;
-+    } u;
-+};
-+typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_scheduler_op_t);
++###########################################################################
++# main
 +
++input  = "";
++output = "";
++fileid = re.sub("[-.]", "_", "__FOREIGN_%s__" % outfile.upper());
 +
-+#define XEN_DOMCTL_setdomainhandle   17
-+struct xen_domctl_setdomainhandle {
-+    xen_domain_handle_t handle;
-+};
-+typedef struct xen_domctl_setdomainhandle xen_domctl_setdomainhandle_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdomainhandle_t);
++# read input header files
++for name in infiles:
++    f = open(name, "r");
++    input += f.read();
++    f.close();
 +
++# add header
++output += """
++/*
++ * public xen defines and struct for %s
++ * generated by %s -- DO NOT EDIT
++ */
 +
-+#define XEN_DOMCTL_setdebugging      18
-+struct xen_domctl_setdebugging {
-+    uint8_t enable;
-+};
-+typedef struct xen_domctl_setdebugging xen_domctl_setdebugging_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdebugging_t);
++#ifndef %s
++#define %s 1
 +
++""" % (arch, sys.argv[0], fileid, fileid)
 +
-+#define XEN_DOMCTL_irq_permission    19
-+struct xen_domctl_irq_permission {
-+    uint8_t pirq;
-+    uint8_t allow_access;    /* flag to specify enable/disable of IRQ access */
-+};
-+typedef struct xen_domctl_irq_permission xen_domctl_irq_permission_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_irq_permission_t);
++if arch in header:
++    output += header[arch];
++    output += "\n";
 +
++# add defines to output
++for line in re.findall("#define[^\n]+", input):
++    for define in defines:
++        regex = "#define\s+%s\\b" % define;
++        match = re.search(regex, line);
++        if None == match:
++            continue;
++        if define.upper()[0] == define[0]:
++            replace = define + "_" + arch.upper();
++        else:
++            replace = define + "_" + arch;
++        regex = "\\b%s\\b" % define;
++        output += re.sub(regex, replace, line) + "\n";
++output += "\n";
 +
-+#define XEN_DOMCTL_iomem_permission  20
-+struct xen_domctl_iomem_permission {
-+    uint64_aligned_t first_mfn;/* first page (physical page number) in range */
-+    uint64_aligned_t nr_mfns;  /* number of pages in range (>0) */
-+    uint8_t  allow_access;     /* allow (!0) or deny (0) access to range? */
-+};
-+typedef struct xen_domctl_iomem_permission xen_domctl_iomem_permission_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_iomem_permission_t);
++# delete defines, comments, empty lines
++input = re.sub("#define[^\n]+\n", "", input);
++input = re.compile("/\*(.*?)\*/", re.S).sub("", input)
++input = re.compile("\n\s*\n", re.S).sub("\n", input);
 +
++# add unions to output
++for union in unions:
++    regex = "union\s+%s\s*\{(.*?)\n\};" % union;
++    match = re.search(regex, input, re.S)
++    if None == match:
++        output += "#define %s_has_no_%s 1\n" % (arch, union);
++    else:
++        output += "union %s_%s {%s\n};\n" % (union, arch, match.group(1));
++    output += "\n";
 +
-+#define XEN_DOMCTL_ioport_permission 21
-+struct xen_domctl_ioport_permission {
-+    uint32_t first_port;              /* first port int range */
-+    uint32_t nr_ports;                /* size of port range */
-+    uint8_t  allow_access;            /* allow or deny access to range? */
-+};
-+typedef struct xen_domctl_ioport_permission xen_domctl_ioport_permission_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_permission_t);
++# add structs to output
++for struct in structs:
++    regex = "struct\s+%s\s*\{(.*?)\n\};" % struct;
++    match = re.search(regex, input, re.S)
++    if None == match:
++        output += "#define %s_has_no_%s 1\n" % (arch, struct);
++    else:
++        output += "struct %s_%s {%s\n};\n" % (struct, arch, match.group(1));
++        output += "typedef struct %s_%s %s_%s_t;\n" % (struct, arch, struct, arch);
++    output += "\n";
 +
++# add footer
++if arch in footer:
++    output += footer[arch];
++    output += "\n";
++output += "#endif /* %s */\n" % fileid;
 +
-+#define XEN_DOMCTL_hypercall_init    22
-+struct xen_domctl_hypercall_init {
-+    uint64_aligned_t  gmfn;           /* GMFN to be initialised */
-+};
-+typedef struct xen_domctl_hypercall_init xen_domctl_hypercall_init_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_hypercall_init_t);
++# replace: defines
++for define in defines:
++    if define.upper()[0] == define[0]:
++        replace = define + "_" + arch.upper();
++    else:
++        replace = define + "_" + arch;
++    output = re.sub("\\b%s\\b" % define, replace, output);
 +
++# replace: unions
++for union in unions:
++    output = re.sub("\\b(union\s+%s)\\b" % union, "\\1_%s" % arch, output);
 +
-+#define XEN_DOMCTL_arch_setup        23
-+#define _XEN_DOMAINSETUP_hvm_guest 0
-+#define XEN_DOMAINSETUP_hvm_guest  (1UL<<_XEN_DOMAINSETUP_hvm_guest)
-+#define _XEN_DOMAINSETUP_query 1 /* Get parameters (for save)  */
-+#define XEN_DOMAINSETUP_query  (1UL<<_XEN_DOMAINSETUP_query)
-+typedef struct xen_domctl_arch_setup {
-+    uint64_aligned_t flags;  /* XEN_DOMAINSETUP_* */
-+#ifdef __ia64__
-+    uint64_aligned_t bp;     /* mpaddr of boot param area */
-+    uint64_aligned_t maxmem; /* Highest memory address for MDT.  */
-+    uint64_aligned_t xsi_va; /* Xen shared_info area virtual address.  */
-+    uint32_t hypercall_imm;  /* Break imm for Xen hypercalls.  */
-+#endif
-+} xen_domctl_arch_setup_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_arch_setup_t);
++# replace: structs + struct typedefs
++for struct in structs:
++    output = re.sub("\\b(struct\s+%s)\\b" % struct, "\\1_%s" % arch, output);
++    output = re.sub("\\b(%s)_t\\b" % struct, "\\1_%s_t" % arch, output);
 +
++# replace: integer types
++integers = inttypes[arch].keys();
++integers.sort(lambda a, b: cmp(len(b),len(a)));
++for type in integers:
++    output = re.sub("\\b%s\\b" % type, inttypes[arch][type], output);
 +
-+#define XEN_DOMCTL_settimeoffset     24
-+struct xen_domctl_settimeoffset {
-+    int32_t  time_offset_seconds; /* applied to domain wallclock time */
-+};
-+typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t);
++# print results
++f = open(outfile, "w");
++f.write(output);
++f.close();
 +
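The net effect of the substitution passes is easiest to see on a small struct. For arch x86_32, a struct such as trap_info from the public xen.h would come out roughly like the excerpt below; this is reconstructed from the rules above, not actual output, and note the script strips comments and #defines from the struct bodies (the one comment here is editorial):

    #define __i386___X86_32 1
    #pragma pack(4)

    struct trap_info_x86_32 {
        uint8_t vector;
        uint8_t flags;
        uint16_t cs;
        uint32_t address;    /* was "unsigned long"; mapped via inttypes["x86_32"] */
    };
    typedef struct trap_info_x86_32 trap_info_x86_32_t;

    #pragma pack()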
-+ 
-+#define XEN_DOMCTL_gethvmcontext     33
-+#define XEN_DOMCTL_sethvmcontext     34
-+typedef struct xen_domctl_hvmcontext {
-+    uint32_t size; /* IN/OUT: size of buffer / bytes filled */
-+    XEN_GUEST_HANDLE_64(uint8_t) buffer; /* IN/OUT: data, or call
-+                                          * gethvmcontext with NULL
-+                                          * buffer to get size
-+                                          * req'd */
-+} xen_domctl_hvmcontext_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t);
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/foreign/reference.size
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/foreign/reference.size	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,18 @@
++
++structs                   |  x86_32  x86_64    ia64
++
++start_info                |    1104    1152    1152
++trap_info                 |       8      16       -
++pt_fpreg                  |       -       -      16
++cpu_user_regs             |      68     200       -
++xen_ia64_boot_param       |       -       -      96
++ia64_tr_entry             |       -       -      32
++vcpu_tr_regs              |       -       -     768
++vcpu_guest_context_regs   |       -       -   22176
++vcpu_guest_context        |    2800    5168   22208
++arch_vcpu_info            |      24      16       0
++vcpu_time_info            |      32      32      32
++vcpu_info                 |      64      64      48
++arch_shared_info          |     268     280     272
++shared_info               |    2584    3368    4384
++
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/foreign/structs.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/foreign/structs.py	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,58 @@
++# configuration: what needs translation
 +
++unions  = [ "vcpu_cr_regs",
++            "vcpu_ar_regs" ];
 +
-+#define XEN_DOMCTL_set_address_size  35
-+#define XEN_DOMCTL_get_address_size  36
-+typedef struct xen_domctl_address_size {
-+    uint32_t size;
-+} xen_domctl_address_size_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_address_size_t);
++structs = [ "start_info",
++            "trap_info",
++            "pt_fpreg",
++            "cpu_user_regs",
++            "xen_ia64_boot_param",
++            "ia64_tr_entry",
++            "vcpu_tr_regs",
++            "vcpu_guest_context_regs",
++            "vcpu_guest_context",
++            "arch_vcpu_info",
++            "vcpu_time_info",
++            "vcpu_info",
++            "arch_shared_info",
++            "shared_info" ];
 +
++defines = [ "__i386__",
++            "__x86_64__",
 +
-+#define XEN_DOMCTL_real_mode_area    26
-+struct xen_domctl_real_mode_area {
-+    uint32_t log; /* log2 of Real Mode Area size */
-+};
-+typedef struct xen_domctl_real_mode_area xen_domctl_real_mode_area_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_real_mode_area_t);
++            "FLAT_RING1_CS",
++            "FLAT_RING1_DS",
++            "FLAT_RING1_SS",
 +
++            "FLAT_RING3_CS64",
++            "FLAT_RING3_DS64",
++            "FLAT_RING3_SS64",
++            "FLAT_KERNEL_CS64",
++            "FLAT_KERNEL_DS64",
++            "FLAT_KERNEL_SS64",
 +
-+#define XEN_DOMCTL_sendtrigger       28
-+#define XEN_DOMCTL_SENDTRIGGER_NMI    0
-+#define XEN_DOMCTL_SENDTRIGGER_RESET  1
-+#define XEN_DOMCTL_SENDTRIGGER_INIT   2
-+struct xen_domctl_sendtrigger {
-+    uint32_t  trigger;  /* IN */
-+    uint32_t  vcpu;     /* IN */
-+};
-+typedef struct xen_domctl_sendtrigger xen_domctl_sendtrigger_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_sendtrigger_t);
++            "FLAT_KERNEL_CS",
++            "FLAT_KERNEL_DS",
++            "FLAT_KERNEL_SS",
++
++            # x86_{32,64}
++            "_VGCF_i387_valid",
++            "VGCF_i387_valid",
++            "_VGCF_in_kernel",
++            "VGCF_in_kernel",
++            "_VGCF_failsafe_disables_events",
++            "VGCF_failsafe_disables_events",
++            "_VGCF_syscall_disables_events",
++            "VGCF_syscall_disables_events",
++            "_VGCF_online",
++            "VGCF_online",
 +
-+ 
-+struct xen_domctl {
-+    uint32_t cmd;
-+    uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */
-+    domid_t  domain;
-+    union {
-+        struct xen_domctl_createdomain      createdomain;
-+        struct xen_domctl_getdomaininfo     getdomaininfo;
-+        struct xen_domctl_getmemlist        getmemlist;
-+        struct xen_domctl_getpageframeinfo  getpageframeinfo;
-+        struct xen_domctl_getpageframeinfo2 getpageframeinfo2;
-+        struct xen_domctl_vcpuaffinity      vcpuaffinity;
-+        struct xen_domctl_shadow_op         shadow_op;
-+        struct xen_domctl_max_mem           max_mem;
-+        struct xen_domctl_vcpucontext       vcpucontext;
-+        struct xen_domctl_getvcpuinfo       getvcpuinfo;
-+        struct xen_domctl_max_vcpus         max_vcpus;
-+        struct xen_domctl_scheduler_op      scheduler_op;
-+        struct xen_domctl_setdomainhandle   setdomainhandle;
-+        struct xen_domctl_setdebugging      setdebugging;
-+        struct xen_domctl_irq_permission    irq_permission;
-+        struct xen_domctl_iomem_permission  iomem_permission;
-+        struct xen_domctl_ioport_permission ioport_permission;
-+        struct xen_domctl_hypercall_init    hypercall_init;
-+        struct xen_domctl_arch_setup        arch_setup;
-+        struct xen_domctl_settimeoffset     settimeoffset;
-+        struct xen_domctl_real_mode_area    real_mode_area;
-+        struct xen_domctl_hvmcontext        hvmcontext;
-+        struct xen_domctl_address_size      address_size;
-+        struct xen_domctl_sendtrigger       sendtrigger;
-+        uint8_t                             pad[128];
-+    } u;
-+};
-+typedef struct xen_domctl xen_domctl_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_t);
++            # ia64
++            "VGCF_EXTRA_REGS",
 +
-+#endif /* __XEN_PUBLIC_DOMCTL_H__ */
++            # all archs
++            "xen_pfn_to_cr3",
++            "MAX_VIRT_CPUS",
++            "MAX_GUEST_CMDLINE" ];
 +
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/elfnote.h tmp-linux-2.6-xen.patch/include/xen/interface/elfnote.h
---- pristine-linux-2.6.18.2/include/xen/interface/elfnote.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/elfnote.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,233 @@
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/grant_table.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/grant_table.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,431 @@
 +/******************************************************************************
-+ * elfnote.h
-+ *
-+ * Definitions used for the Xen ELF notes.
-+ *
++ * grant_table.h
++ * 
++ * Interface for granting foreign access to page frames, and receiving
++ * page-ownership transfers.
++ * 
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
 + * deal in the Software without restriction, including without limitation the
@@ -98410,755 +137593,638 @@
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
 + *
-+ * Copyright (c) 2006, Ian Campbell, XenSource Ltd.
-+ */
-+
-+#ifndef __XEN_PUBLIC_ELFNOTE_H__
-+#define __XEN_PUBLIC_ELFNOTE_H__
-+
-+/*
-+ * The notes should live in a PT_NOTE segment and have "Xen" in the
-+ * name field.
-+ *
-+ * Numeric types are either 4 or 8 bytes depending on the content of
-+ * the desc field.
-+ *
-+ * LEGACY indicated the fields in the legacy __xen_guest string which
-+ * this note type replaces.
-+ */
-+
-+/*
-+ * NAME=VALUE pair (string).
-+ */
-+#define XEN_ELFNOTE_INFO           0
-+
-+/*
-+ * The virtual address of the entry point (numeric).
-+ *
-+ * LEGACY: VIRT_ENTRY
-+ */
-+#define XEN_ELFNOTE_ENTRY          1
-+
-+/* The virtual address of the hypercall transfer page (numeric).
-+ *
-+ * LEGACY: HYPERCALL_PAGE. (n.b. legacy value is a physical page
-+ * number not a virtual address)
-+ */
-+#define XEN_ELFNOTE_HYPERCALL_PAGE 2
-+
-+/* The virtual address where the kernel image should be mapped (numeric).
-+ *
-+ * Defaults to 0.
-+ *
-+ * LEGACY: VIRT_BASE
-+ */
-+#define XEN_ELFNOTE_VIRT_BASE      3
-+
-+/*
-+ * The offset of the ELF paddr field from the actual required
-+ * pseudo-physical address (numeric).
-+ *
-+ * This is used to maintain backwards compatibility with older kernels
-+ * which wrote __PAGE_OFFSET into that field. This field defaults to 0
-+ * if not present.
-+ *
-+ * LEGACY: ELF_PADDR_OFFSET. (n.b. legacy default is VIRT_BASE)
-+ */
-+#define XEN_ELFNOTE_PADDR_OFFSET   4
-+
-+/*
-+ * The version of Xen that we work with (string).
-+ *
-+ * LEGACY: XEN_VER
-+ */
-+#define XEN_ELFNOTE_XEN_VERSION    5
-+
-+/*
-+ * The name of the guest operating system (string).
-+ *
-+ * LEGACY: GUEST_OS
-+ */
-+#define XEN_ELFNOTE_GUEST_OS       6
-+
-+/*
-+ * The version of the guest operating system (string).
-+ *
-+ * LEGACY: GUEST_VER
-+ */
-+#define XEN_ELFNOTE_GUEST_VERSION  7
-+
-+/*
-+ * The loader type (string).
-+ *
-+ * LEGACY: LOADER
++ * Copyright (c) 2004, K A Fraser
 + */
-+#define XEN_ELFNOTE_LOADER         8
 +
-+/*
-+ * The kernel supports PAE (x86/32 only, string = "yes", "no" or
-+ * "bimodal").
-+ *
-+ * For compatibility with Xen 3.0.3 and earlier the "bimodal" setting
-+ * may be given as "yes,bimodal" which will cause older Xen to treat
-+ * this kernel as PAE.
-+ *
-+ * LEGACY: PAE (n.b. The legacy interface included a provision to
-+ * indicate 'extended-cr3' support allowing L3 page tables to be
-+ * placed above 4G. It is assumed that any kernel new enough to use
-+ * these ELF notes will include this and therefore "yes" here is
-+ * equivalent to "yes[entended-cr3]" in the __xen_guest interface.
-+ */
-+#define XEN_ELFNOTE_PAE_MODE       9
++#ifndef __XEN_PUBLIC_GRANT_TABLE_H__
++#define __XEN_PUBLIC_GRANT_TABLE_H__
 +
-+/*
-+ * The features supported/required by this kernel (string).
-+ *
-+ * The string must consist of a list of feature names (as given in
-+ * features.h, without the "XENFEAT_" prefix) separated by '|'
-+ * characters. If a feature is required for the kernel to function
-+ * then the feature name must be preceded by a '!' character.
-+ *
-+ * LEGACY: FEATURES
-+ */
-+#define XEN_ELFNOTE_FEATURES      10
 +
-+/*
-+ * The kernel requires the symbol table to be loaded (string = "yes" or "no")
-+ * LEGACY: BSD_SYMTAB (n.b. The legacy treated the presence or absence
-+ * of this string as a boolean flag rather than requiring "yes" or
-+ * "no".
++/***********************************
++ * GRANT TABLE REPRESENTATION
 + */
-+#define XEN_ELFNOTE_BSD_SYMTAB    11
 +
-+/*
-+ * The lowest address the hypervisor hole can begin at (numeric).
++/* Some rough guidelines on accessing and updating grant-table entries
++ * in a concurrency-safe manner. For more information, Linux contains a
++ * reference implementation for guest OSes (arch/xen/kernel/grant_table.c).
++ * 
++ * NB. WMB is a no-op on current-generation x86 processors. However, a
++ *     compiler barrier will still be required.
++ * 
++ * Introducing a valid entry into the grant table:
++ *  1. Write ent->domid.
++ *  2. Write ent->frame:
++ *      GTF_permit_access:   Frame to which access is permitted.
++ *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
++ *                           frame, or zero if none.
++ *  3. Write memory barrier (WMB).
++ *  4. Write ent->flags, inc. valid type.
++ * 
++ * Invalidating an unused GTF_permit_access entry:
++ *  1. flags = ent->flags.
++ *  2. Observe that !(flags & (GTF_reading|GTF_writing)).
++ *  3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
++ *  NB. No need for WMB as reuse of entry is control-dependent on success of
++ *      step 3, and all architectures guarantee ordering of ctrl-dep writes.
 + *
-+ * This must not be set higher than HYPERVISOR_VIRT_START. Its presence
-+ * also indicates to the hypervisor that the kernel can deal with the
-+ * hole starting at a higher address.
-+ */
-+#define XEN_ELFNOTE_HV_START_LOW  12
-+
-+/*
-+ * List of maddr_t-sized mask/value pairs describing how to recognize
-+ * (non-present) L1 page table entries carrying valid MFNs (numeric).
-+ */
-+#define XEN_ELFNOTE_L1_MFN_VALID  13
-+
-+/*
-+ * Whether or not the guest supports cooperative suspend cancellation.
-+ */
-+#define XEN_ELFNOTE_SUSPEND_CANCEL 14
-+
-+/*
-+ * The number of the highest elfnote defined.
-+ */
-+#define XEN_ELFNOTE_MAX XEN_ELFNOTE_SUSPEND_CANCEL
-+
-+/*
-+ * System information exported through crash notes.
++ * Invalidating an in-use GTF_permit_access entry:
++ *  This cannot be done directly. Request assistance from the domain controller
++ *  which can set a timeout on the use of a grant entry and take necessary
++ *  action. (NB. This is not yet implemented!).
++ * 
++ * Invalidating an unused GTF_accept_transfer entry:
++ *  1. flags = ent->flags.
++ *  2. Observe that !(flags & GTF_transfer_committed). [*]
++ *  3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
++ *  NB. No need for WMB as reuse of entry is control-dependent on success of
++ *      step 3, and all architectures guarantee ordering of ctrl-dep writes.
++ *  [*] If GTF_transfer_committed is set then the grant entry is 'committed'.
++ *      The guest must /not/ modify the grant entry until the address of the
++ *      transferred frame is written. It is safe for the guest to spin waiting
++ *      for this to occur (detect by observing GTF_transfer_completed in
++ *      ent->flags).
 + *
-+ * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO 
-+ * note in case of a system crash. This note will contain various
-+ * information about the system, see xen/include/xen/elfcore.h.
-+ */
-+#define XEN_ELFNOTE_CRASH_INFO 0x1000001
-+
-+/*
-+ * System registers exported through crash notes.
++ * Invalidating a committed GTF_accept_transfer entry:
++ *  1. Wait for (ent->flags & GTF_transfer_completed).
 + *
-+ * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS 
-+ * note per cpu in case of a system crash. This note is architecture
-+ * specific and will contain registers not saved in the "CORE" note.
-+ * See xen/include/xen/elfcore.h for more information.
-+ */
-+#define XEN_ELFNOTE_CRASH_REGS 0x1000002
-+
-+
-+/*
-+ * xen dump-core none note.
-+ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_NONE
-+ * in its dump file to indicate that the file is xen dump-core
-+ * file. This note doesn't have any other information.
-+ * See tools/libxc/xc_core.h for more information.
-+ */
-+#define XEN_ELFNOTE_DUMPCORE_NONE               0x2000000
-+
-+/*
-+ * xen dump-core header note.
-+ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_HEADER
-+ * in its dump file.
-+ * See tools/libxc/xc_core.h for more information.
-+ */
-+#define XEN_ELFNOTE_DUMPCORE_HEADER             0x2000001
-+
-+/*
-+ * xen dump-core xen version note.
-+ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_XEN_VERSION
-+ * in its dump file. It contains the xen version obtained via the
-+ * XENVER hypercall.
-+ * See tools/libxc/xc_core.h for more information.
++ * Changing a GTF_permit_access from writable to read-only:
++ *  Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing.
++ * 
++ * Changing a GTF_permit_access from read-only to writable:
++ *  Use SMP-safe bit-setting instruction.
 + */
-+#define XEN_ELFNOTE_DUMPCORE_XEN_VERSION        0x2000002
 +
 +/*
-+ * xen dump-core format version note.
-+ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION
-+ * in its dump file. It contains a format version identifier.
-+ * See tools/libxc/xc_core.h for more information.
++ * A grant table comprises a packed array of grant entries in one or more
++ * page frames shared between Xen and a guest.
++ * [XEN]: This field is written by Xen and read by the sharing guest.
++ * [GST]: This field is written by the guest and read by Xen.
 + */
-+#define XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION     0x2000003
-+
-+#endif /* __XEN_PUBLIC_ELFNOTE_H__ */
++struct grant_entry {
++    /* GTF_xxx: various type and flag information.  [XEN,GST] */
++    uint16_t flags;
++    /* The domain being granted foreign privileges. [GST] */
++    domid_t  domid;
++    /*
++     * GTF_permit_access: Frame that @domid is allowed to map and access. [GST]
++     * GTF_accept_transfer: Frame whose ownership is transferred by @domid. [XEN]
++     */
++    uint32_t frame;
++};
++typedef struct grant_entry grant_entry_t;
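Applied to this layout, the update rules above come down to a few ordered
stores. A minimal sketch, not taken from the patch itself, assuming the Linux
wmb()/cmpxchg() primitives and the grant_ref_t/GTF_* definitions from further
down in this header ('gnttab' is the guest's mapping of the shared array):

    static void grant_publish(grant_entry_t *gnttab, grant_ref_t ref,
                              domid_t domid, uint32_t frame, int readonly)
    {
        gnttab[ref].domid = domid;                          /* step 1 */
        gnttab[ref].frame = frame;                          /* step 2 */
        wmb();                                              /* step 3 */
        gnttab[ref].flags = GTF_permit_access |
                            (readonly ? GTF_readonly : 0);  /* step 4 */
    }

    static int grant_revoke(grant_entry_t *gnttab, grant_ref_t ref)
    {
        uint16_t flags = gnttab[ref].flags;
        if (flags & (GTF_reading | GTF_writing))
            return -1;              /* still mapped by the grantee */
        /* the entry may be reused only if nobody raced with us */
        return cmpxchg(&gnttab[ref].flags, flags, 0) == flags ? 0 : -1;
    }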
 +
 +/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/elfstructs.h tmp-linux-2.6-xen.patch/include/xen/interface/elfstructs.h
---- pristine-linux-2.6.18.2/include/xen/interface/elfstructs.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/elfstructs.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,527 @@
-+#ifndef __XEN_PUBLIC_ELFSTRUCTS_H__
-+#define __XEN_PUBLIC_ELFSTRUCTS_H__ 1
-+/*
-+ * Copyright (c) 1995, 1996 Erik Theisen.  All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions
-+ * are met:
-+ * 1. Redistributions of source code must retain the above copyright
-+ *    notice, this list of conditions and the following disclaimer.
-+ * 2. Redistributions in binary form must reproduce the above copyright
-+ *    notice, this list of conditions and the following disclaimer in the
-+ *    documentation and/or other materials provided with the distribution.
-+ * 3. The name of the author may not be used to endorse or promote products
-+ *    derived from this software without specific prior written permission
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ * Type of grant entry.
++ *  GTF_invalid: This grant entry grants no privileges.
++ *  GTF_permit_access: Allow @domid to map/access @frame.
++ *  GTF_accept_transfer: Allow @domid to transfer ownership of one page frame
++ *                       to this guest. Xen writes the page number to @frame.
 + */
-+
-+typedef uint8_t		Elf_Byte;
-+
-+typedef uint32_t	Elf32_Addr;	/* Unsigned program address */
-+typedef uint32_t	Elf32_Off;	/* Unsigned file offset */
-+typedef int32_t		Elf32_Sword;	/* Signed large integer */
-+typedef uint32_t	Elf32_Word;	/* Unsigned large integer */
-+typedef uint16_t	Elf32_Half;	/* Unsigned medium integer */
-+
-+typedef uint64_t	Elf64_Addr;
-+typedef uint64_t	Elf64_Off;
-+typedef int32_t		Elf64_Shalf;
-+
-+typedef int32_t		Elf64_Sword;
-+typedef uint32_t	Elf64_Word;
-+
-+typedef int64_t		Elf64_Sxword;
-+typedef uint64_t	Elf64_Xword;
-+
-+typedef uint32_t	Elf64_Half;
-+typedef uint16_t	Elf64_Quarter;
++#define GTF_invalid         (0U<<0)
++#define GTF_permit_access   (1U<<0)
++#define GTF_accept_transfer (2U<<0)
++#define GTF_type_mask       (3U<<0)
 +
 +/*
-+ * e_ident[] identification indexes
-+ * See http://www.caldera.com/developers/gabi/2000-07-17/ch4.eheader.html
++ * Subflags for GTF_permit_access.
++ *  GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
++ *  GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
++ *  GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
++ *  GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags for the grant [GST]
 + */
-+#define EI_MAG0		0		/* file ID */
-+#define EI_MAG1		1		/* file ID */
-+#define EI_MAG2		2		/* file ID */
-+#define EI_MAG3		3		/* file ID */
-+#define EI_CLASS	4		/* file class */
-+#define EI_DATA		5		/* data encoding */
-+#define EI_VERSION	6		/* ELF header version */
-+#define EI_OSABI	7		/* OS/ABI ID */
-+#define EI_ABIVERSION	8		/* ABI version */
-+#define EI_PAD		9		/* start of pad bytes */
-+#define EI_NIDENT	16		/* Size of e_ident[] */
-+
-+/* e_ident[] magic number */
-+#define	ELFMAG0		0x7f		/* e_ident[EI_MAG0] */
-+#define	ELFMAG1		'E'		/* e_ident[EI_MAG1] */
-+#define	ELFMAG2		'L'		/* e_ident[EI_MAG2] */
-+#define	ELFMAG3		'F'		/* e_ident[EI_MAG3] */
-+#define	ELFMAG		"\177ELF"	/* magic */
-+#define	SELFMAG		4		/* size of magic */
-+
-+/* e_ident[] file class */
-+#define	ELFCLASSNONE	0		/* invalid */
-+#define	ELFCLASS32	1		/* 32-bit objs */
-+#define	ELFCLASS64	2		/* 64-bit objs */
-+#define	ELFCLASSNUM	3		/* number of classes */
-+
-+/* e_ident[] data encoding */
-+#define ELFDATANONE	0		/* invalid */
-+#define ELFDATA2LSB	1		/* Little-Endian */
-+#define ELFDATA2MSB	2		/* Big-Endian */
-+#define ELFDATANUM	3		/* number of data encode defines */
-+
-+/* e_ident[] Operating System/ABI */
-+#define ELFOSABI_SYSV		0	/* UNIX System V ABI */
-+#define ELFOSABI_HPUX		1	/* HP-UX operating system */
-+#define ELFOSABI_NETBSD		2	/* NetBSD */
-+#define ELFOSABI_LINUX		3	/* GNU/Linux */
-+#define ELFOSABI_HURD		4	/* GNU/Hurd */
-+#define ELFOSABI_86OPEN		5	/* 86Open common IA32 ABI */
-+#define ELFOSABI_SOLARIS	6	/* Solaris */
-+#define ELFOSABI_MONTEREY	7	/* Monterey */
-+#define ELFOSABI_IRIX		8	/* IRIX */
-+#define ELFOSABI_FREEBSD	9	/* FreeBSD */
-+#define ELFOSABI_TRU64		10	/* TRU64 UNIX */
-+#define ELFOSABI_MODESTO	11	/* Novell Modesto */
-+#define ELFOSABI_OPENBSD	12	/* OpenBSD */
-+#define ELFOSABI_ARM		97	/* ARM */
-+#define ELFOSABI_STANDALONE	255	/* Standalone (embedded) application */
-+
-+/* e_ident */
-+#define IS_ELF(ehdr) ((ehdr).e_ident[EI_MAG0] == ELFMAG0 && \
-+                      (ehdr).e_ident[EI_MAG1] == ELFMAG1 && \
-+                      (ehdr).e_ident[EI_MAG2] == ELFMAG2 && \
-+                      (ehdr).e_ident[EI_MAG3] == ELFMAG3)
-+
-+/* ELF Header */
-+typedef struct elfhdr {
-+	unsigned char	e_ident[EI_NIDENT]; /* ELF Identification */
-+	Elf32_Half	e_type;		/* object file type */
-+	Elf32_Half	e_machine;	/* machine */
-+	Elf32_Word	e_version;	/* object file version */
-+	Elf32_Addr	e_entry;	/* virtual entry point */
-+	Elf32_Off	e_phoff;	/* program header table offset */
-+	Elf32_Off	e_shoff;	/* section header table offset */
-+	Elf32_Word	e_flags;	/* processor-specific flags */
-+	Elf32_Half	e_ehsize;	/* ELF header size */
-+	Elf32_Half	e_phentsize;	/* program header entry size */
-+	Elf32_Half	e_phnum;	/* number of program header entries */
-+	Elf32_Half	e_shentsize;	/* section header entry size */
-+	Elf32_Half	e_shnum;	/* number of section header entries */
-+	Elf32_Half	e_shstrndx;	/* section header table's "section
-+					   header string table" entry offset */
-+} Elf32_Ehdr;
-+
-+typedef struct {
-+	unsigned char	e_ident[EI_NIDENT];	/* Id bytes */
-+	Elf64_Quarter	e_type;			/* file type */
-+	Elf64_Quarter	e_machine;		/* machine type */
-+	Elf64_Half	e_version;		/* version number */
-+	Elf64_Addr	e_entry;		/* entry point */
-+	Elf64_Off	e_phoff;		/* Program hdr offset */
-+	Elf64_Off	e_shoff;		/* Section hdr offset */
-+	Elf64_Half	e_flags;		/* Processor flags */
-+	Elf64_Quarter	e_ehsize;		/* sizeof ehdr */
-+	Elf64_Quarter	e_phentsize;		/* Program header entry size */
-+	Elf64_Quarter	e_phnum;		/* Number of program headers */
-+	Elf64_Quarter	e_shentsize;		/* Section header entry size */
-+	Elf64_Quarter	e_shnum;		/* Number of section headers */
-+	Elf64_Quarter	e_shstrndx;		/* String table index */
-+} Elf64_Ehdr;
-+
-+/* e_type */
-+#define ET_NONE		0		/* No file type */
-+#define ET_REL		1		/* relocatable file */
-+#define ET_EXEC		2		/* executable file */
-+#define ET_DYN		3		/* shared object file */
-+#define ET_CORE		4		/* core file */
-+#define ET_NUM		5		/* number of types */
-+#define ET_LOPROC	0xff00		/* reserved range for processor */
-+#define ET_HIPROC	0xffff		/*  specific e_type */
++#define _GTF_readonly       (2)
++#define GTF_readonly        (1U<<_GTF_readonly)
++#define _GTF_reading        (3)
++#define GTF_reading         (1U<<_GTF_reading)
++#define _GTF_writing        (4)
++#define GTF_writing         (1U<<_GTF_writing)
++#define _GTF_PWT            (5)
++#define GTF_PWT             (1U<<_GTF_PWT)
++#define _GTF_PCD            (6)
++#define GTF_PCD             (1U<<_GTF_PCD)
++#define _GTF_PAT            (7)
++#define GTF_PAT             (1U<<_GTF_PAT)
 +
-+/* e_machine */
-+#define EM_NONE		0		/* No Machine */
-+#define EM_M32		1		/* AT&T WE 32100 */
-+#define EM_SPARC	2		/* SPARC */
-+#define EM_386		3		/* Intel 80386 */
-+#define EM_68K		4		/* Motorola 68000 */
-+#define EM_88K		5		/* Motorola 88000 */
-+#define EM_486		6		/* Intel 80486 - unused? */
-+#define EM_860		7		/* Intel 80860 */
-+#define EM_MIPS		8		/* MIPS R3000 Big-Endian only */
 +/*
-+ * Don't know if EM_MIPS_RS4_BE,
-+ * EM_SPARC64, EM_PARISC,
-+ * or EM_PPC are ABI compliant
++ * Subflags for GTF_accept_transfer:
++ *  GTF_transfer_committed: Xen sets this flag to indicate that it is committed
++ *      to transferring ownership of a page frame. When a guest sees this flag
++ *      it must /not/ modify the grant entry until GTF_transfer_completed is
++ *      set by Xen.
++ *  GTF_transfer_completed: It is safe for the guest to spin-wait on this flag
++ *      after reading GTF_transfer_committed. Xen will always write the frame
++ *      address, followed by ORing this flag, in a timely manner.
 + */
-+#define EM_MIPS_RS4_BE	10		/* MIPS R4000 Big-Endian */
-+#define EM_SPARC64	11		/* SPARC v9 64-bit unofficial */
-+#define EM_PARISC	15		/* HPPA */
-+#define EM_SPARC32PLUS	18		/* Enhanced instruction set SPARC */
-+#define EM_PPC		20		/* PowerPC */
-+#define EM_PPC64	21		/* PowerPC 64-bit */
-+#define EM_ARM		40		/* Advanced RISC Machines ARM */
-+#define EM_ALPHA	41		/* DEC ALPHA */
-+#define EM_SPARCV9	43		/* SPARC version 9 */
-+#define EM_ALPHA_EXP	0x9026		/* DEC ALPHA */
-+#define EM_IA_64	50		/* Intel Merced */
-+#define EM_X86_64	62		/* AMD x86-64 architecture */
-+#define EM_VAX		75		/* DEC VAX */
++#define _GTF_transfer_committed (2)
++#define GTF_transfer_committed  (1U<<_GTF_transfer_committed)
++#define _GTF_transfer_completed (3)
++#define GTF_transfer_completed  (1U<<_GTF_transfer_completed)
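On the receiving side, the spin-wait described above is short; a sketch,
assuming 'ent' is a volatile-qualified pointer to the accepting grant entry
and cpu_relax() is the usual Linux busy-wait hint:

    /* only valid once GTF_transfer_committed has been observed */
    while (!(ent->flags & GTF_transfer_completed))
        cpu_relax();               /* Xen completes in a timely manner */
    new_frame = ent->frame;        /* written by Xen before the flag */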
 +
-+/* Version */
-+#define EV_NONE		0		/* Invalid */
-+#define EV_CURRENT	1		/* Current */
-+#define EV_NUM		2		/* number of versions */
 +
-+/* Section Header */
-+typedef struct {
-+	Elf32_Word	sh_name;	/* name - index into section header
-+					   string table section */
-+	Elf32_Word	sh_type;	/* type */
-+	Elf32_Word	sh_flags;	/* flags */
-+	Elf32_Addr	sh_addr;	/* address */
-+	Elf32_Off	sh_offset;	/* file offset */
-+	Elf32_Word	sh_size;	/* section size */
-+	Elf32_Word	sh_link;	/* section header table index link */
-+	Elf32_Word	sh_info;	/* extra information */
-+	Elf32_Word	sh_addralign;	/* address alignment */
-+	Elf32_Word	sh_entsize;	/* section entry size */
-+} Elf32_Shdr;
++/***********************************
++ * GRANT TABLE QUERIES AND USES
++ */
 +
-+typedef struct {
-+	Elf64_Half	sh_name;	/* section name */
-+	Elf64_Half	sh_type;	/* section type */
-+	Elf64_Xword	sh_flags;	/* section flags */
-+	Elf64_Addr	sh_addr;	/* virtual address */
-+	Elf64_Off	sh_offset;	/* file offset */
-+	Elf64_Xword	sh_size;	/* section size */
-+	Elf64_Half	sh_link;	/* link to another */
-+	Elf64_Half	sh_info;	/* misc info */
-+	Elf64_Xword	sh_addralign;	/* memory alignment */
-+	Elf64_Xword	sh_entsize;	/* table entry size */
-+} Elf64_Shdr;
++/*
++ * Reference to a grant entry in a specified domain's grant table.
++ */
++typedef uint32_t grant_ref_t;
 +
-+/* Special Section Indexes */
-+#define SHN_UNDEF	0		/* undefined */
-+#define SHN_LORESERVE	0xff00		/* lower bounds of reserved indexes */
-+#define SHN_LOPROC	0xff00		/* reserved range for processor */
-+#define SHN_HIPROC	0xff1f		/*   specific section indexes */
-+#define SHN_ABS		0xfff1		/* absolute value */
-+#define SHN_COMMON	0xfff2		/* common symbol */
-+#define SHN_HIRESERVE	0xffff		/* upper bounds of reserved indexes */
++/*
++ * Handle to track a mapping created via a grant reference.
++ */
++typedef uint32_t grant_handle_t;
 +
-+/* sh_type */
-+#define SHT_NULL	0		/* inactive */
-+#define SHT_PROGBITS	1		/* program defined information */
-+#define SHT_SYMTAB	2		/* symbol table section */
-+#define SHT_STRTAB	3		/* string table section */
-+#define SHT_RELA	4		/* relocation section with addends*/
-+#define SHT_HASH	5		/* symbol hash table section */
-+#define SHT_DYNAMIC	6		/* dynamic section */
-+#define SHT_NOTE	7		/* note section */
-+#define SHT_NOBITS	8		/* no space section */
-+#define SHT_REL		9		/* relocation section without addends */
-+#define SHT_SHLIB	10		/* reserved - purpose unknown */
-+#define SHT_DYNSYM	11		/* dynamic symbol table section */
-+#define SHT_NUM		12		/* number of section types */
-+#define SHT_LOPROC	0x70000000	/* reserved range for processor */
-+#define SHT_HIPROC	0x7fffffff	/*  specific section header types */
-+#define SHT_LOUSER	0x80000000	/* reserved range for application */
-+#define SHT_HIUSER	0xffffffff	/*  specific indexes */
++/*
++ * GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access
++ * by devices and/or host CPUs. If successful, <handle> is a tracking number
++ * that must be presented later to destroy the mapping(s). On error, <handle>
++ * is a negative status code.
++ * NOTES:
++ *  1. If GNTMAP_device_map is specified then <dev_bus_addr> is the address
++ *     via which I/O devices may access the granted frame.
++ *  2. If GNTMAP_host_map is specified then a mapping will be added at
++ *     either a host virtual address in the current address space, or at
++ *     a PTE at the specified machine address.  The type of mapping to
++ *     perform is selected through the GNTMAP_contains_pte flag, and the 
++ *     address is specified in <host_addr>.
++ *  3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a
++ *     host mapping is destroyed by other means then it is *NOT* guaranteed
++ *     to be accounted to the correct grant reference!
++ */
++#define GNTTABOP_map_grant_ref        0
++struct gnttab_map_grant_ref {
++    /* IN parameters. */
++    uint64_t host_addr;
++    uint32_t flags;               /* GNTMAP_* */
++    grant_ref_t ref;
++    domid_t  dom;
++    /* OUT parameters. */
++    int16_t  status;              /* GNTST_* */
++    grant_handle_t handle;
++    uint64_t dev_bus_addr;
++};
++typedef struct gnttab_map_grant_ref gnttab_map_grant_ref_t;
++DEFINE_XEN_GUEST_HANDLE(gnttab_map_grant_ref_t);
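A hypothetical caller might fill the request like this, using the
HYPERVISOR_grant_table_op() wrapper from this tree's hypercall headers
(vaddr, ref and dom are assumed inputs):

    grant_handle_t handle;
    struct gnttab_map_grant_ref op = {
        .host_addr = vaddr,             /* host virtual address to map at */
        .flags     = GNTMAP_host_map,   /* see GNTMAP_* flags below */
        .ref       = ref,               /* grant reference from the grantor */
        .dom       = dom,               /* granting domain */
    };

    if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) == 0 &&
        op.status == GNTST_okay)
        handle = op.handle;             /* keep for GNTTABOP_unmap_grant_ref */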
 +
-+/* Section names */
-+#define ELF_BSS         ".bss"		/* uninitialized data */
-+#define ELF_DATA        ".data"		/* initialized data */
-+#define ELF_DEBUG       ".debug"	/* debug */
-+#define ELF_DYNAMIC     ".dynamic"	/* dynamic linking information */
-+#define ELF_DYNSTR      ".dynstr"	/* dynamic string table */
-+#define ELF_DYNSYM      ".dynsym"	/* dynamic symbol table */
-+#define ELF_FINI        ".fini"		/* termination code */
-+#define ELF_GOT         ".got"		/* global offset table */
-+#define ELF_HASH        ".hash"		/* symbol hash table */
-+#define ELF_INIT        ".init"		/* initialization code */
-+#define ELF_REL_DATA    ".rel.data"	/* relocation data */
-+#define ELF_REL_FINI    ".rel.fini"	/* relocation termination code */
-+#define ELF_REL_INIT    ".rel.init"	/* relocation initialization code */
-+#define ELF_REL_DYN     ".rel.dyn"	/* relocation dynamic link info */
-+#define ELF_REL_RODATA  ".rel.rodata"	/* relocation read-only data */
-+#define ELF_REL_TEXT    ".rel.text"	/* relocation code */
-+#define ELF_RODATA      ".rodata"	/* read-only data */
-+#define ELF_SHSTRTAB    ".shstrtab"	/* section header string table */
-+#define ELF_STRTAB      ".strtab"	/* string table */
-+#define ELF_SYMTAB      ".symtab"	/* symbol table */
-+#define ELF_TEXT        ".text"		/* code */
++/*
++ * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings
++ * tracked by <handle>. If <host_addr> or <dev_bus_addr> is zero, that
++ * field is ignored. If non-zero, it must refer to a device/host mapping
++ * that is tracked by <handle>.
++ * NOTES:
++ *  1. The call may fail in an undefined manner if either mapping is not
++ *     tracked by <handle>.
++ *  2. After executing a batch of unmaps, it is guaranteed that no stale
++ *     mappings will remain in the device or host TLBs.
++ */
++#define GNTTABOP_unmap_grant_ref      1
++struct gnttab_unmap_grant_ref {
++    /* IN parameters. */
++    uint64_t host_addr;
++    uint64_t dev_bus_addr;
++    grant_handle_t handle;
++    /* OUT parameters. */
++    int16_t  status;              /* GNTST_* */
++};
++typedef struct gnttab_unmap_grant_ref gnttab_unmap_grant_ref_t;
++DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t);
 +
++/*
++ * GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least
++ * <nr_frames> pages. The frame addresses are written to the <frame_list>.
++ * Only <nr_frames> addresses are written, even if the table is larger.
++ * NOTES:
++ *  1. <dom> may be specified as DOMID_SELF.
++ *  2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
++ *  3. Xen may not support more than a single grant-table page per domain.
++ */
++#define GNTTABOP_setup_table          2
++struct gnttab_setup_table {
++    /* IN parameters. */
++    domid_t  dom;
++    uint32_t nr_frames;
++    /* OUT parameters. */
++    int16_t  status;              /* GNTST_* */
++    XEN_GUEST_HANDLE(ulong) frame_list;
++};
++typedef struct gnttab_setup_table gnttab_setup_table_t;
++DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t);
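For example, a guest could fetch the frames backing its own table roughly as
follows (a sketch; set_xen_guest_handle() is the handle-initialising helper
from these interface headers):

    unsigned long frames[4];
    struct gnttab_setup_table setup = {
        .dom       = DOMID_SELF,
        .nr_frames = 4,
    };
    set_xen_guest_handle(setup.frame_list, frames);

    if (HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1) == 0 &&
        setup.status == GNTST_okay) {
        /* frames[0..3] now hold the machine frames of the grant table */
    }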
 +
-+/* Section Attribute Flags - sh_flags */
-+#define SHF_WRITE	0x1		/* Writable */
-+#define SHF_ALLOC	0x2		/* occupies memory */
-+#define SHF_EXECINSTR	0x4		/* executable */
-+#define SHF_MASKPROC	0xf0000000	/* reserved bits for processor */
-+					/*  specific section attributes */
++/*
++ * GNTTABOP_dump_table: Dump the contents of the grant table to the
++ * xen console. Debugging use only.
++ */
++#define GNTTABOP_dump_table           3
++struct gnttab_dump_table {
++    /* IN parameters. */
++    domid_t dom;
++    /* OUT parameters. */
++    int16_t status;               /* GNTST_* */
++};
++typedef struct gnttab_dump_table gnttab_dump_table_t;
++DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t);
 +
-+/* Symbol Table Entry */
-+typedef struct elf32_sym {
-+	Elf32_Word	st_name;	/* name - index into string table */
-+	Elf32_Addr	st_value;	/* symbol value */
-+	Elf32_Word	st_size;	/* symbol size */
-+	unsigned char	st_info;	/* type and binding */
-+	unsigned char	st_other;	/* 0 - no defined meaning */
-+	Elf32_Half	st_shndx;	/* section header index */
-+} Elf32_Sym;
++/*
++ * GNTTABOP_transfer: Transfer <frame> to a foreign domain. The
++ * foreign domain has previously registered its interest in the transfer via
++ * <domid, ref>.
++ * 
++ * Note that, even if the transfer fails, the specified page no longer belongs
++ * to the calling domain *unless* the error is GNTST_bad_page.
++ */
++#define GNTTABOP_transfer                4
++struct gnttab_transfer {
++    /* IN parameters. */
++    xen_pfn_t     mfn;
++    domid_t       domid;
++    grant_ref_t   ref;
++    /* OUT parameters. */
++    int16_t       status;
++};
++typedef struct gnttab_transfer gnttab_transfer_t;
++DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t);
 +
-+typedef struct {
-+	Elf64_Half	st_name;	/* Symbol name index in str table */
-+	Elf_Byte	st_info;	/* type / binding attrs */
-+	Elf_Byte	st_other;	/* unused */
-+	Elf64_Quarter	st_shndx;	/* section index of symbol */
-+	Elf64_Xword	st_value;	/* value of symbol */
-+	Elf64_Xword	st_size;	/* size of symbol */
-+} Elf64_Sym;
 +
-+/* Symbol table index */
-+#define STN_UNDEF	0		/* undefined */
++/*
++ * GNTTABOP_copy: Hypervisor-based copy.
++ * Source and destination can each be either an MFN or, for foreign
++ * domains, a grant reference. The foreign domain has to grant
++ * read/write access in its grant table.
++ *
++ * The flags specify whether the source and destination are MFNs or
++ * grant references.
++ *
++ * Note that this can also be used to copy data between two domains
++ * via a third party if the source and destination domains have
++ * previously granted appropriate access to their pages to the third
++ * party.
++ *
++ * source_offset specifies an offset in the source frame, dest_offset
++ * the offset in the target frame, and len the number of bytes to be
++ * copied.
++ */
 +
-+/* Extract symbol info - st_info */
-+#define ELF32_ST_BIND(x)	((x) >> 4)
-+#define ELF32_ST_TYPE(x)	(((unsigned int) x) & 0xf)
-+#define ELF32_ST_INFO(b,t)	(((b) << 4) + ((t) & 0xf))
++#define _GNTCOPY_source_gref      (0)
++#define GNTCOPY_source_gref       (1<<_GNTCOPY_source_gref)
++#define _GNTCOPY_dest_gref        (1)
++#define GNTCOPY_dest_gref         (1<<_GNTCOPY_dest_gref)
 +
-+#define ELF64_ST_BIND(x)	((x) >> 4)
-+#define ELF64_ST_TYPE(x)	(((unsigned int) x) & 0xf)
-+#define ELF64_ST_INFO(b,t)	(((b) << 4) + ((t) & 0xf))
++#define GNTTABOP_copy                 5
++typedef struct gnttab_copy {
++    /* IN parameters. */
++    struct {
++        union {
++            grant_ref_t ref;
++            xen_pfn_t   gmfn;
++        } u;
++        domid_t  domid;
++        uint16_t offset;
++    } source, dest;
++    uint16_t      len;
++    uint16_t      flags;          /* GNTCOPY_* */
++    /* OUT parameters. */
++    int16_t       status;
++} gnttab_copy_t;
++DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t);
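As a sketch, copying out of a foreign grant into a locally owned frame could
look like this (gref, src_dom, local_gmfn and len are assumed inputs; neither
side's offset+len may cross a page boundary):

    gnttab_copy_t copy = {
        .source.u.ref  = gref,           /* foreign page, by grant reference */
        .source.domid  = src_dom,
        .source.offset = 0,
        .dest.u.gmfn   = local_gmfn,     /* local page, by frame number */
        .dest.domid    = DOMID_SELF,
        .dest.offset   = 0,
        .len           = len,
        .flags         = GNTCOPY_source_gref,
    };

    HYPERVISOR_grant_table_op(GNTTABOP_copy, &copy, 1);
    /* copy.status is GNTST_okay on success, or e.g. GNTST_bad_copy_arg
     * if offset+len would cross a page boundary */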
 +
-+/* Symbol Binding - ELF32_ST_BIND - st_info */
-+#define STB_LOCAL	0		/* Local symbol */
-+#define STB_GLOBAL	1		/* Global symbol */
-+#define STB_WEAK	2		/* like global - lower precedence */
-+#define STB_NUM		3		/* number of symbol bindings */
-+#define STB_LOPROC	13		/* reserved range for processor */
-+#define STB_HIPROC	15		/*  specific symbol bindings */
++/*
++ * GNTTABOP_query_size: Query the current and maximum sizes of the shared
++ * grant table.
++ * NOTES:
++ *  1. <dom> may be specified as DOMID_SELF.
++ *  2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
++ */
++#define GNTTABOP_query_size           6
++struct gnttab_query_size {
++    /* IN parameters. */
++    domid_t  dom;
++    /* OUT parameters. */
++    uint32_t nr_frames;
++    uint32_t max_nr_frames;
++    int16_t  status;              /* GNTST_* */
++};
++typedef struct gnttab_query_size gnttab_query_size_t;
++DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t);
 +
-+/* Symbol type - ELF32_ST_TYPE - st_info */
-+#define STT_NOTYPE	0		/* not specified */
-+#define STT_OBJECT	1		/* data object */
-+#define STT_FUNC	2		/* function */
-+#define STT_SECTION	3		/* section */
-+#define STT_FILE	4		/* file */
-+#define STT_NUM		5		/* number of symbol types */
-+#define STT_LOPROC	13		/* reserved range for processor */
-+#define STT_HIPROC	15		/*  specific symbol types */
++/*
++ * GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings
++ * tracked by <handle> but atomically replace the page table entry with one
++ * pointing to the machine address under <new_addr>.  <new_addr> will be
++ * redirected to the null entry.
++ * NOTES:
++ *  1. The call may fail in an undefined manner if either mapping is not
++ *     tracked by <handle>.
++ *  2. After executing a batch of unmaps, it is guaranteed that no stale
++ *     mappings will remain in the device or host TLBs.
++ */
++#define GNTTABOP_unmap_and_replace    7
++struct gnttab_unmap_and_replace {
++    /* IN parameters. */
++    uint64_t host_addr;
++    uint64_t new_addr;
++    grant_handle_t handle;
++    /* OUT parameters. */
++    int16_t  status;              /* GNTST_* */
++};
++typedef struct gnttab_unmap_and_replace gnttab_unmap_and_replace_t;
++DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t);
 +
-+/* Relocation entry with implicit addend */
-+typedef struct {
-+	Elf32_Addr	r_offset;	/* offset of relocation */
-+	Elf32_Word	r_info;		/* symbol table index and type */
-+} Elf32_Rel;
 +
-+/* Relocation entry with explicit addend */
-+typedef struct {
-+	Elf32_Addr	r_offset;	/* offset of relocation */
-+	Elf32_Word	r_info;		/* symbol table index and type */
-+	Elf32_Sword	r_addend;
-+} Elf32_Rela;
++/*
++ * Bitfield values for update_pin_status.flags.
++ */
++ /* Map the grant entry for access by I/O devices. */
++#define _GNTMAP_device_map      (0)
++#define GNTMAP_device_map       (1<<_GNTMAP_device_map)
++ /* Map the grant entry for access by host CPUs. */
++#define _GNTMAP_host_map        (1)
++#define GNTMAP_host_map         (1<<_GNTMAP_host_map)
++ /* Accesses to the granted frame will be restricted to read-only access. */
++#define _GNTMAP_readonly        (2)
++#define GNTMAP_readonly         (1<<_GNTMAP_readonly)
++ /*
++  * GNTMAP_host_map subflag:
++  *  0 => The host mapping is usable only by the guest OS.
++  *  1 => The host mapping is usable by guest OS + current application.
++  */
++#define _GNTMAP_application_map (3)
++#define GNTMAP_application_map  (1<<_GNTMAP_application_map)
 +
-+/* Extract relocation info - r_info */
-+#define ELF32_R_SYM(i)		((i) >> 8)
-+#define ELF32_R_TYPE(i)		((unsigned char) (i))
-+#define ELF32_R_INFO(s,t) 	(((s) << 8) + (unsigned char)(t))
++ /*
++  * GNTMAP_contains_pte subflag:
++  *  0 => This map request contains a host virtual address.
++  *  1 => This map request contains the machine address of the PTE to update.
++  */
++#define _GNTMAP_contains_pte    (4)
++#define GNTMAP_contains_pte     (1<<_GNTMAP_contains_pte)
 +
-+typedef struct {
-+	Elf64_Xword	r_offset;	/* where to do it */
-+	Elf64_Xword	r_info;		/* index & type of relocation */
-+} Elf64_Rel;
++/*
++ * Values for error status returns. All errors are -ve.
++ */
++#define GNTST_okay             (0)  /* Normal return.                        */
++#define GNTST_general_error    (-1) /* General undefined error.              */
++#define GNTST_bad_domain       (-2) /* Unrecognised domain id.               */
++#define GNTST_bad_gntref       (-3) /* Unrecognised or inappropriate gntref. */
++#define GNTST_bad_handle       (-4) /* Unrecognised or inappropriate handle. */
++#define GNTST_bad_virt_addr    (-5) /* Inappropriate virtual address to map. */
++#define GNTST_bad_dev_addr     (-6) /* Inappropriate device address to unmap.*/
++#define GNTST_no_device_space  (-7) /* Out of space in I/O MMU.              */
++#define GNTST_permission_denied (-8) /* Not enough privilege for operation.  */
++#define GNTST_bad_page         (-9) /* Specified page was invalid for op.    */
++#define GNTST_bad_copy_arg    (-10) /* copy arguments cross page boundary.   */
++#define GNTST_address_too_big (-11) /* transfer page address too large.      */
 +
-+typedef struct {
-+	Elf64_Xword	r_offset;	/* where to do it */
-+	Elf64_Xword	r_info;		/* index & type of relocation */
-+	Elf64_Sxword	r_addend;	/* adjustment value */
-+} Elf64_Rela;
++#define GNTTABOP_error_msgs {                   \
++    "okay",                                     \
++    "undefined error",                          \
++    "unrecognised domain id",                   \
++    "invalid grant reference",                  \
++    "invalid mapping handle",                   \
++    "invalid virtual address",                  \
++    "invalid device address",                   \
++    "no spare translation slot in the I/O MMU", \
++    "permission denied",                        \
++    "bad page",                                 \
++    "copy arguments cross page boundary",       \
++    "page address size too large"               \
++}
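Since the strings are indexed by the negated status value, a small helper is
enough to map a GNTST_* code to its message; a sketch:

    static const char *gntst_str(int16_t status)
    {
        static const char *msgs[] = GNTTABOP_error_msgs;
        int idx = -status;          /* errors are negative, "okay" is 0 */
        if (idx < 0 || idx >= (int)(sizeof(msgs) / sizeof(msgs[0])))
            return "unknown grant status";
        return msgs[idx];
    }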
 +
-+#define	ELF64_R_SYM(info)	((info) >> 32)
-+#define	ELF64_R_TYPE(info)	((info) & 0xFFFFFFFF)
-+#define ELF64_R_INFO(s,t) 	(((s) << 32) + (u_int32_t)(t))
++#endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */
 +
-+/* Program Header */
-+typedef struct {
-+	Elf32_Word	p_type;		/* segment type */
-+	Elf32_Off	p_offset;	/* segment offset */
-+	Elf32_Addr	p_vaddr;	/* virtual address of segment */
-+	Elf32_Addr	p_paddr;	/* physical address - ignored? */
-+	Elf32_Word	p_filesz;	/* number of bytes in file for seg. */
-+	Elf32_Word	p_memsz;	/* number of bytes in mem. for seg. */
-+	Elf32_Word	p_flags;	/* flags */
-+	Elf32_Word	p_align;	/* memory alignment */
-+} Elf32_Phdr;
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/hvm/e820.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/hvm/e820.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,34 @@
 +
-+typedef struct {
-+	Elf64_Half	p_type;		/* entry type */
-+	Elf64_Half	p_flags;	/* flags */
-+	Elf64_Off	p_offset;	/* offset */
-+	Elf64_Addr	p_vaddr;	/* virtual address */
-+	Elf64_Addr	p_paddr;	/* physical address */
-+	Elf64_Xword	p_filesz;	/* file size */
-+	Elf64_Xword	p_memsz;	/* memory size */
-+	Elf64_Xword	p_align;	/* memory & file alignment */
-+} Elf64_Phdr;
++/*
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
 +
-+/* Segment types - p_type */
-+#define PT_NULL		0		/* unused */
-+#define PT_LOAD		1		/* loadable segment */
-+#define PT_DYNAMIC	2		/* dynamic linking section */
-+#define PT_INTERP	3		/* the RTLD */
-+#define PT_NOTE		4		/* auxiliary information */
-+#define PT_SHLIB	5		/* reserved - purpose undefined */
-+#define PT_PHDR		6		/* program header */
-+#define PT_NUM		7		/* Number of segment types */
-+#define PT_LOPROC	0x70000000	/* reserved range for processor */
-+#define PT_HIPROC	0x7fffffff	/*  specific segment types */
++#ifndef __XEN_PUBLIC_HVM_E820_H__
++#define __XEN_PUBLIC_HVM_E820_H__
 +
-+/* Segment flags - p_flags */
-+#define PF_X		0x1		/* Executable */
-+#define PF_W		0x2		/* Writable */
-+#define PF_R		0x4		/* Readable */
-+#define PF_MASKPROC	0xf0000000	/* reserved bits for processor */
-+					/*  specific segment flags */
++/* E820 location in HVM virtual address space. */
++#define HVM_E820_PAGE        0x00090000
++#define HVM_E820_NR_OFFSET   0x000001E8
++#define HVM_E820_OFFSET      0x000002D0
 +
-+/* Dynamic structure */
-+typedef struct {
-+	Elf32_Sword	d_tag;		/* controls meaning of d_val */
-+	union {
-+		Elf32_Word	d_val;	/* Multiple meanings - see d_tag */
-+		Elf32_Addr	d_ptr;	/* program virtual address */
-+	} d_un;
-+} Elf32_Dyn;
++#define HVM_BELOW_4G_RAM_END        0xF0000000
++#define HVM_BELOW_4G_MMIO_START     HVM_BELOW_4G_RAM_END
++#define HVM_BELOW_4G_MMIO_LENGTH    ((1ULL << 32) - HVM_BELOW_4G_MMIO_START)
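With these defaults the below-4G MMIO hole runs from 0xF0000000 up to the 4GB
boundary, so HVM_BELOW_4G_MMIO_LENGTH evaluates to 0x10000000 (256MB).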
++
++#endif /* __XEN_PUBLIC_HVM_E820_H__ */
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/hvm/hvm_info_table.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/hvm/hvm_info_table.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,41 @@
++/******************************************************************************
++ * hvm/hvm_info_table.h
++ * 
++ * HVM parameter and information table, written into guest memory map.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
 +
-+typedef struct {
-+	Elf64_Xword	d_tag;		/* controls meaning of d_val */
-+	union {
-+		Elf64_Addr	d_ptr;
-+		Elf64_Xword	d_val;
-+	} d_un;
-+} Elf64_Dyn;
++#ifndef __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__
++#define __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__
 +
-+/* Dynamic Array Tags - d_tag */
-+#define DT_NULL		0		/* marks end of _DYNAMIC array */
-+#define DT_NEEDED	1		/* string table offset of needed lib */
-+#define DT_PLTRELSZ	2		/* size of relocation entries in PLT */
-+#define DT_PLTGOT	3		/* address PLT/GOT */
-+#define DT_HASH		4		/* address of symbol hash table */
-+#define DT_STRTAB	5		/* address of string table */
-+#define DT_SYMTAB	6		/* address of symbol table */
-+#define DT_RELA		7		/* address of relocation table */
-+#define DT_RELASZ	8		/* size of relocation table */
-+#define DT_RELAENT	9		/* size of relocation entry */
-+#define DT_STRSZ	10		/* size of string table */
-+#define DT_SYMENT	11		/* size of symbol table entry */
-+#define DT_INIT		12		/* address of initialization func. */
-+#define DT_FINI		13		/* address of termination function */
-+#define DT_SONAME	14		/* string table offset of shared obj */
-+#define DT_RPATH	15		/* string table offset of library
-+					   search path */
-+#define DT_SYMBOLIC	16		/* start sym search in shared obj. */
-+#define DT_REL		17		/* address of rel. tbl. w addends */
-+#define DT_RELSZ	18		/* size of DT_REL relocation table */
-+#define DT_RELENT	19		/* size of DT_REL relocation entry */
-+#define DT_PLTREL	20		/* PLT referenced relocation entry */
-+#define DT_DEBUG	21		/* debugger */
-+#define DT_TEXTREL	22		/* Allow rel. mod. to unwritable seg */
-+#define DT_JMPREL	23		/* add. of PLT's relocation entries */
-+#define DT_BIND_NOW	24		/* Bind now regardless of env setting */
-+#define DT_NUM		25		/* Number used. */
-+#define DT_LOPROC	0x70000000	/* reserved range for processor */
-+#define DT_HIPROC	0x7fffffff	/*  specific dynamic array tags */
++#define HVM_INFO_PFN         0x09F
++#define HVM_INFO_OFFSET      0x800
++#define HVM_INFO_PADDR       ((HVM_INFO_PFN << 12) + HVM_INFO_OFFSET)
 +
-+/* Standard ELF hashing function */
-+unsigned int elf_hash(const unsigned char *name);
++struct hvm_info_table {
++    char        signature[8]; /* "HVM INFO" */
++    uint32_t    length;
++    uint8_t     checksum;
++    uint8_t     acpi_enabled;
++    uint8_t     apic_mode;
++    uint32_t    nr_vcpus;
++};
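Per the defines above, the table sits at guest-physical address 0x9F800. A
consumer-side sanity check might look like the following sketch, on the
assumption (not stated in this header) that the byte-wise sum over the first
'length' bytes, checksum included, is built to be zero:

    static int hvm_info_valid(const struct hvm_info_table *t)
    {
        const uint8_t *p = (const uint8_t *)t;
        uint8_t sum = 0;
        uint32_t i;

        if (memcmp(t->signature, "HVM INFO", 8) != 0)
            return 0;
        for (i = 0; i < t->length; i++)
            sum += p[i];            /* covers the checksum byte itself */
        return sum == 0;
    }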
 +
++#endif /* __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ */
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/hvm/hvm_op.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/hvm/hvm_op.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,131 @@
 +/*
-+ * Note Definitions
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
 + */
-+typedef struct {
-+	Elf32_Word namesz;
-+	Elf32_Word descsz;
-+	Elf32_Word type;
-+} Elf32_Note;
 +
-+typedef struct {
-+	Elf64_Half namesz;
-+	Elf64_Half descsz;
-+	Elf64_Half type;
-+} Elf64_Note;
++#ifndef __XEN_PUBLIC_HVM_HVM_OP_H__
++#define __XEN_PUBLIC_HVM_HVM_OP_H__
 +
++/* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
++#define HVMOP_set_param           0
++#define HVMOP_get_param           1
++struct xen_hvm_param {
++    domid_t  domid;    /* IN */
++    uint32_t index;    /* IN */
++    uint64_t value;    /* IN/OUT */
++};
++typedef struct xen_hvm_param xen_hvm_param_t;
++DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t);
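Reading one parameter for the current domain then reduces to a sketch like
this, via the HYPERVISOR_hvm_op() wrapper (param_index stands in for one of
the HVM_PARAM_* constants from hvm/params.h):

    uint64_t value;
    struct xen_hvm_param p = {
        .domid = DOMID_SELF,
        .index = param_index,
    };

    if (HYPERVISOR_hvm_op(HVMOP_get_param, &p) == 0)
        value = p.value;    /* p.value is IN/OUT: set it for HVMOP_set_param */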
 +
-+#if defined(ELFSIZE)
-+#define CONCAT(x,y)	__CONCAT(x,y)
-+#define ELFNAME(x)	CONCAT(elf,CONCAT(ELFSIZE,CONCAT(_,x)))
-+#define ELFNAME2(x,y)	CONCAT(x,CONCAT(_elf,CONCAT(ELFSIZE,CONCAT(_,y))))
-+#define ELFNAMEEND(x)	CONCAT(x,CONCAT(_elf,ELFSIZE))
-+#define ELFDEFNNAME(x)	CONCAT(ELF,CONCAT(ELFSIZE,CONCAT(_,x)))
-+#endif
++/* Set the logical level of one of a domain's PCI INTx wires. */
++#define HVMOP_set_pci_intx_level  2
++struct xen_hvm_set_pci_intx_level {
++    /* Domain to be updated. */
++    domid_t  domid;
++    /* PCI INTx identification in PCI topology (domain:bus:device:intx). */
++    uint8_t  domain, bus, device, intx;
++    /* Assertion level (0 = unasserted, 1 = asserted). */
++    uint8_t  level;
++};
++typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t;
++DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t);
 +
-+#if defined(ELFSIZE) && (ELFSIZE == 32)
-+#define Elf_Ehdr	Elf32_Ehdr
-+#define Elf_Phdr	Elf32_Phdr
-+#define Elf_Shdr	Elf32_Shdr
-+#define Elf_Sym		Elf32_Sym
-+#define Elf_Rel		Elf32_Rel
-+#define Elf_RelA	Elf32_Rela
-+#define Elf_Dyn		Elf32_Dyn
-+#define Elf_Word	Elf32_Word
-+#define Elf_Sword	Elf32_Sword
-+#define Elf_Addr	Elf32_Addr
-+#define Elf_Off		Elf32_Off
-+#define Elf_Nhdr	Elf32_Nhdr
-+#define Elf_Note	Elf32_Note
++/* Set the logical level of one of a domain's ISA IRQ wires. */
++#define HVMOP_set_isa_irq_level   3
++struct xen_hvm_set_isa_irq_level {
++    /* Domain to be updated. */
++    domid_t  domid;
++    /* ISA device identification, by ISA IRQ (0-15). */
++    uint8_t  isa_irq;
++    /* Assertion level (0 = unasserted, 1 = asserted). */
++    uint8_t  level;
++};
++typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t;
++DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t);
 +
-+#define ELF_R_SYM	ELF32_R_SYM
-+#define ELF_R_TYPE	ELF32_R_TYPE
-+#define ELF_R_INFO	ELF32_R_INFO
-+#define ELFCLASS	ELFCLASS32
++#define HVMOP_set_pci_link_route  4
++struct xen_hvm_set_pci_link_route {
++    /* Domain to be updated. */
++    domid_t  domid;
++    /* PCI link identifier (0-3). */
++    uint8_t  link;
++    /* ISA IRQ (1-15), or 0 (disable link). */
++    uint8_t  isa_irq;
++};
++typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t;
++DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);
 +
-+#define ELF_ST_BIND	ELF32_ST_BIND
-+#define ELF_ST_TYPE	ELF32_ST_TYPE
-+#define ELF_ST_INFO	ELF32_ST_INFO
++/* Flushes all VCPU TLBs: @arg must be NULL. */
++#define HVMOP_flush_tlbs          5
 +
-+#define AuxInfo		Aux32Info
-+#elif defined(ELFSIZE) && (ELFSIZE == 64)
-+#define Elf_Ehdr	Elf64_Ehdr
-+#define Elf_Phdr	Elf64_Phdr
-+#define Elf_Shdr	Elf64_Shdr
-+#define Elf_Sym		Elf64_Sym
-+#define Elf_Rel		Elf64_Rel
-+#define Elf_RelA	Elf64_Rela
-+#define Elf_Dyn		Elf64_Dyn
-+#define Elf_Word	Elf64_Word
-+#define Elf_Sword	Elf64_Sword
-+#define Elf_Addr	Elf64_Addr
-+#define Elf_Off		Elf64_Off
-+#define Elf_Nhdr	Elf64_Nhdr
-+#define Elf_Note	Elf64_Note
++/* Following tools-only interfaces may change in future. */
++#if defined(__XEN__) || defined(__XEN_TOOLS__)
 +
-+#define ELF_R_SYM	ELF64_R_SYM
-+#define ELF_R_TYPE	ELF64_R_TYPE
-+#define ELF_R_INFO	ELF64_R_INFO
-+#define ELFCLASS	ELFCLASS64
++/* Track dirty VRAM. */
++#define HVMOP_track_dirty_vram    6
++struct xen_hvm_track_dirty_vram {
++    /* Domain to be tracked. */
++    domid_t  domid;
++    /* First pfn to track. */
++    uint64_aligned_t first_pfn;
++    /* Number of pages to track. */
++    uint64_aligned_t nr;
++    /* OUT variable. */
++    /* Dirty bitmap buffer. */
++    XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
++};
++typedef struct xen_hvm_track_dirty_vram xen_hvm_track_dirty_vram_t;
++DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t);
++
++/* Notify that some pages got modified by the Device Model. */
++#define HVMOP_modified_memory    7
++struct xen_hvm_modified_memory {
++    /* Domain to be updated. */
++    domid_t  domid;
++    /* First pfn. */
++    uint64_aligned_t first_pfn;
++    /* Number of pages. */
++    uint64_aligned_t nr;
++};
++typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t;
++DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);
++
++#define HVMOP_set_mem_type    8
++typedef enum {
++    HVMMEM_ram_rw,             /* Normal read/write guest RAM */
++    HVMMEM_ram_ro,             /* Read-only; writes are discarded */
++    HVMMEM_mmio_dm,            /* Reads and writes go to the device model */
++} hvmmem_type_t;
++/* Notify that a region of memory is to be treated in a specific way. */
++struct xen_hvm_set_mem_type {
++    /* Domain to be updated. */
++    domid_t domid;
++    /* Memory type */
++    hvmmem_type_t hvmmem_type;
++    /* First pfn. */
++    uint64_aligned_t first_pfn;
++    /* Number of pages. */
++    uint64_aligned_t nr;
++};
++typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t;
++DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t);
 +
-+#define ELF_ST_BIND	ELF64_ST_BIND
-+#define ELF_ST_TYPE	ELF64_ST_TYPE
-+#define ELF_ST_INFO	ELF64_ST_INFO
 +
-+#define AuxInfo		Aux64Info
-+#endif
++#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
 +
-+#endif /* __XEN_PUBLIC_ELFSTRUCTS_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/event_channel.h tmp-linux-2.6-xen.patch/include/xen/interface/event_channel.h
---- pristine-linux-2.6.18.2/include/xen/interface/event_channel.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/event_channel.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,264 @@
-+/******************************************************************************
-+ * event_channel.h
-+ * 
-+ * Event channels between domains.
++#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
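For reference, a guest drives the interface above by passing a xen_hvm_param
through the hvm_op hypercall. A minimal sketch, not part of the patch; it
assumes the HYPERVISOR_hvm_op() wrapper and DOMID_SELF that the guest's
hypercall layer elsewhere in this tree provides:

/* Sketch only: set an HVM parameter for the calling domain. */
static int set_hvm_param(uint32_t index, uint64_t value)
{
    struct xen_hvm_param p = {
        .domid = DOMID_SELF,  /* IN: operate on ourselves */
        .index = index,       /* IN: which parameter */
        .value = value,       /* IN/OUT: the new value */
    };

    return HYPERVISOR_hvm_op(HVMOP_set_param, &p);
}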
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/hvm/ioreq.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/hvm/ioreq.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,127 @@
++/*
++ * ioreq.h: I/O request definitions for device models
++ * Copyright (c) 2004, Intel Corporation.
 + * 
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
@@ -99177,257 +138243,226 @@
 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2003-2004, K A Fraser.
 + */
 +
-+#ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__
-+#define __XEN_PUBLIC_EVENT_CHANNEL_H__
++#ifndef _IOREQ_H_
++#define _IOREQ_H_
 +
-+/*
-+ * Prototype for this hypercall is:
-+ *  int event_channel_op(int cmd, void *args)
-+ * @cmd  == EVTCHNOP_??? (event-channel operation).
-+ * @args == Operation-specific extra arguments (NULL if none).
-+ */
++#define IOREQ_READ      1
++#define IOREQ_WRITE     0
 +
-+typedef uint32_t evtchn_port_t;
-+DEFINE_XEN_GUEST_HANDLE(evtchn_port_t);
++#define STATE_IOREQ_NONE        0
++#define STATE_IOREQ_READY       1
++#define STATE_IOREQ_INPROCESS   2
++#define STATE_IORESP_READY      3
 +
-+/*
-+ * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
-+ * accepting interdomain bindings from domain <remote_dom>. A fresh port
-+ * is allocated in <dom> and returned as <port>.
-+ * NOTES:
-+ *  1. If the caller is unprivileged then <dom> must be DOMID_SELF.
-+ *  2. <rdom> may be DOMID_SELF, allowing loopback connections.
-+ */
-+#define EVTCHNOP_alloc_unbound    6
-+struct evtchn_alloc_unbound {
-+    /* IN parameters */
-+    domid_t dom, remote_dom;
-+    /* OUT parameters */
-+    evtchn_port_t port;
-+};
-+typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t;
++#define IOREQ_TYPE_PIO          0 /* pio */
++#define IOREQ_TYPE_COPY         1 /* mmio ops */
++#define IOREQ_TYPE_TIMEOFFSET   7
++#define IOREQ_TYPE_INVALIDATE   8 /* mapcache */
 +
 +/*
-+ * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between
-+ * the calling domain and <remote_dom>. <remote_dom,remote_port> must identify
-+ * a port that is unbound and marked as accepting bindings from the calling
-+ * domain. A fresh port is allocated in the calling domain and returned as
-+ * <local_port>.
-+ * NOTES:
-+ *  2. <remote_dom> may be DOMID_SELF, allowing loopback connections.
++ * The VMExit dispatcher should cooperate with the instruction decoder
++ * to prepare this structure and notify the service OS and DM by
++ * sending a virq.
 + */
-+#define EVTCHNOP_bind_interdomain 0
-+struct evtchn_bind_interdomain {
-+    /* IN parameters. */
-+    domid_t remote_dom;
-+    evtchn_port_t remote_port;
-+    /* OUT parameters. */
-+    evtchn_port_t local_port;
++struct ioreq {
++    uint64_t addr;          /*  physical address            */
++    uint64_t size;          /*  size in bytes               */
++    uint64_t count;         /*  for rep prefixes            */
++    uint64_t data;          /*  data (or paddr of data)     */
++    uint8_t state:4;
++    uint8_t data_is_ptr:1;  /*  if 1, data above is the guest paddr 
++                             *   of the real data to use.   */
++    uint8_t dir:1;          /*  1=read, 0=write             */
++    uint8_t df:1;
++    uint8_t pad:1;
++    uint8_t type;           /* I/O type                     */
++    uint8_t _pad0[6];
++    uint64_t io_count;      /* How many IO done on a vcpu   */
 +};
-+typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t;
++typedef struct ioreq ioreq_t;
 +
-+/*
-+ * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <irq> on specified
-+ * vcpu.
-+ * NOTES:
-+ *  1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list
-+ *     in xen.h for the classification of each VIRQ.
-+ *  2. Global VIRQs must be allocated on VCPU0 but can subsequently be
-+ *     re-bound via EVTCHNOP_bind_vcpu.
-+ *  3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu.
-+ *     The allocated event channel is bound to the specified vcpu and the
-+ *     binding cannot be changed.
-+ */
-+#define EVTCHNOP_bind_virq        1
-+struct evtchn_bind_virq {
-+    /* IN parameters. */
-+    uint32_t virq;
-+    uint32_t vcpu;
-+    /* OUT parameters. */
-+    evtchn_port_t port;
++struct vcpu_iodata {
++    struct ioreq vp_ioreq;
++    /* Event channel port, used for notifications to/from the device model. */
++    uint32_t vp_eport;
++    uint32_t _pad0;
 +};
-+typedef struct evtchn_bind_virq evtchn_bind_virq_t;
++typedef struct vcpu_iodata vcpu_iodata_t;
 +
-+/*
-+ * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ <irq>.
-+ * NOTES:
-+ *  1. A physical IRQ may be bound to at most one event channel per domain.
-+ *  2. Only a sufficiently-privileged domain may bind to a physical IRQ.
-+ */
-+#define EVTCHNOP_bind_pirq        2
-+struct evtchn_bind_pirq {
-+    /* IN parameters. */
-+    uint32_t pirq;
-+#define BIND_PIRQ__WILL_SHARE 1
-+    uint32_t flags; /* BIND_PIRQ__* */
-+    /* OUT parameters. */
-+    evtchn_port_t port;
++struct shared_iopage {
++    struct vcpu_iodata   vcpu_iodata[1];
 +};
-+typedef struct evtchn_bind_pirq evtchn_bind_pirq_t;
++typedef struct shared_iopage shared_iopage_t;
 +
-+/*
-+ * EVTCHNOP_bind_ipi: Bind a local event channel to receive events.
-+ * NOTES:
-+ *  1. The allocated event channel is bound to the specified vcpu. The binding
-+ *     may not be changed.
-+ */
-+#define EVTCHNOP_bind_ipi         7
-+struct evtchn_bind_ipi {
-+    uint32_t vcpu;
-+    /* OUT parameters. */
-+    evtchn_port_t port;
++struct buf_ioreq {
++    uint8_t  type;   /* I/O type                    */
++    uint8_t  pad:1;
++    uint8_t  dir:1;  /* 1=read, 0=write             */
++    uint8_t  size:2; /* 0=>1, 1=>2, 2=>4, 3=>8. If 8, use two buf_ioreqs */
++    uint32_t addr:20;/* physical address            */
++    uint32_t data;   /* data                        */
 +};
-+typedef struct evtchn_bind_ipi evtchn_bind_ipi_t;
++typedef struct buf_ioreq buf_ioreq_t;
 +
-+/*
-+ * EVTCHNOP_close: Close a local event channel <port>. If the channel is
-+ * interdomain then the remote end is placed in the unbound state
-+ * (EVTCHNSTAT_unbound), awaiting a new connection.
-+ */
-+#define EVTCHNOP_close            3
-+struct evtchn_close {
-+    /* IN parameters. */
-+    evtchn_port_t port;
++#define IOREQ_BUFFER_SLOT_NUM     511 /* 8 bytes each, plus 2 4-byte indexes */
++struct buffered_iopage {
++    unsigned int read_pointer;
++    unsigned int write_pointer;
++    buf_ioreq_t buf_ioreq[IOREQ_BUFFER_SLOT_NUM];
++}; /* NB. Size of this structure must be no greater than one page. */
++typedef struct buffered_iopage buffered_iopage_t;
++
++#if defined(__ia64__)
++struct pio_buffer {
++    uint32_t page_offset;
++    uint32_t pointer;
++    uint32_t data_end;
++    uint32_t buf_size;
++    void *opaque;
 +};
-+typedef struct evtchn_close evtchn_close_t;
 +
-+/*
-+ * EVTCHNOP_send: Send an event to the remote end of the channel whose local
-+ * endpoint is <port>.
-+ */
-+#define EVTCHNOP_send             4
-+struct evtchn_send {
-+    /* IN parameters. */
-+    evtchn_port_t port;
++#define PIO_BUFFER_IDE_PRIMARY   0 /* I/O port = 0x1F0 */
++#define PIO_BUFFER_IDE_SECONDARY 1 /* I/O port = 0x170 */
++#define PIO_BUFFER_ENTRY_NUM     2
++struct buffered_piopage {
++    struct pio_buffer pio[PIO_BUFFER_ENTRY_NUM];
++    uint8_t buffer[1];
 +};
-+typedef struct evtchn_send evtchn_send_t;
++#endif /* defined(__ia64__) */
++
++#define ACPI_PM1A_EVT_BLK_ADDRESS           0x0000000000001f40
++#define ACPI_PM1A_CNT_BLK_ADDRESS           (ACPI_PM1A_EVT_BLK_ADDRESS + 0x04)
++#define ACPI_PM_TMR_BLK_ADDRESS             (ACPI_PM1A_EVT_BLK_ADDRESS + 0x08)
++#define ACPI_GPE0_BLK_ADDRESS               (ACPI_PM_TMR_BLK_ADDRESS + 0x20)
++#define ACPI_GPE0_BLK_LEN                   0x08
++
++#endif /* _IOREQ_H_ */
 +
 +/*
-+ * EVTCHNOP_status: Get the current status of the communication channel which
-+ * has an endpoint at <dom, port>.
-+ * NOTES:
-+ *  1. <dom> may be specified as DOMID_SELF.
-+ *  2. Only a sufficiently-privileged domain may obtain the status of an event
-+ *     channel for which <dom> is not DOMID_SELF.
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
 + */
-+#define EVTCHNOP_status           5
-+struct evtchn_status {
-+    /* IN parameters */
-+    domid_t  dom;
-+    evtchn_port_t port;
-+    /* OUT parameters */
-+#define EVTCHNSTAT_closed       0  /* Channel is not in use.                 */
-+#define EVTCHNSTAT_unbound      1  /* Channel is waiting interdom connection.*/
-+#define EVTCHNSTAT_interdomain  2  /* Channel is connected to remote domain. */
-+#define EVTCHNSTAT_pirq         3  /* Channel is bound to a phys IRQ line.   */
-+#define EVTCHNSTAT_virq         4  /* Channel is bound to a virtual IRQ line */
-+#define EVTCHNSTAT_ipi          5  /* Channel is bound to a virtual IPI line */
-+    uint32_t status;
-+    uint32_t vcpu;                 /* VCPU to which this channel is bound.   */
-+    union {
-+        struct {
-+            domid_t dom;
-+        } unbound; /* EVTCHNSTAT_unbound */
-+        struct {
-+            domid_t dom;
-+            evtchn_port_t port;
-+        } interdomain; /* EVTCHNSTAT_interdomain */
-+        uint32_t pirq;      /* EVTCHNSTAT_pirq        */
-+        uint32_t virq;      /* EVTCHNSTAT_virq        */
-+    } u;
-+};
-+typedef struct evtchn_status evtchn_status_t;
-+
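The ioreq.h interface added above is a per-VCPU state machine: the device
model waits on vp_eport, picks up the request once state reaches
STATE_IOREQ_READY, emulates it, and flips the state to STATE_IORESP_READY.
A hedged sketch of the device-model side follows (memory barriers around the
state transitions and the event-channel notification are required in real
code but omitted here; note shared_iopage declares vcpu_iodata[1] yet the
page holds one slot per VCPU):

/* Sketch only: service one ioreq slot. */
static void service_ioreq(struct shared_iopage *iopage, int vcpu)
{
    struct ioreq *req = &iopage->vcpu_iodata[vcpu].vp_ioreq;

    if (req->state != STATE_IOREQ_READY)
        return;                           /* nothing pending */
    req->state = STATE_IOREQ_INPROCESS;

    if (req->type == IOREQ_TYPE_PIO && req->dir == IOREQ_READ)
        req->data = 0xff;                 /* emulated port-read result */
    /* ... handle IOREQ_TYPE_COPY (mmio), writes, rep counts, etc. ... */

    req->state = STATE_IORESP_READY;      /* hand the result back */
}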
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/hvm/params.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/hvm/params.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,98 @@
 +/*
-+ * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an
-+ * event is pending.
-+ * NOTES:
-+ *  1. IPI-bound channels always notify the vcpu specified at bind time.
-+ *     This binding cannot be changed.
-+ *  2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time.
-+ *     This binding cannot be changed.
-+ *  3. All other channels notify vcpu0 by default. This default is set when
-+ *     the channel is allocated (a port that is freed and subsequently reused
-+ *     has its binding reset to vcpu0).
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
 + */
-+#define EVTCHNOP_bind_vcpu        8
-+struct evtchn_bind_vcpu {
-+    /* IN parameters. */
-+    evtchn_port_t port;
-+    uint32_t vcpu;
-+};
-+typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t;
++
++#ifndef __XEN_PUBLIC_HVM_PARAMS_H__
++#define __XEN_PUBLIC_HVM_PARAMS_H__
++
++#include "hvm_op.h"
 +
 +/*
-+ * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver
-+ * a notification to the appropriate VCPU if an event is pending.
++ * Parameter space for HVMOP_{set,get}_param.
 + */
-+#define EVTCHNOP_unmask           9
-+struct evtchn_unmask {
-+    /* IN parameters. */
-+    evtchn_port_t port;
-+};
-+typedef struct evtchn_unmask evtchn_unmask_t;
 +
 +/*
-+ * EVTCHNOP_reset: Close all event channels associated with specified domain.
-+ * NOTES:
-+ *  1. <dom> may be specified as DOMID_SELF.
-+ *  2. Only a sufficiently-privileged domain may specify other than DOMID_SELF.
++ * How should CPU0 event-channel notifications be delivered?
++ * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt).
++ * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows:
++ *                  Domain = val[47:32], Bus  = val[31:16],
++ *                  DevFn  = val[15: 8], IntX = val[ 1: 0]
++ * If val == 0 then CPU0 event-channel notifications are not delivered.
 + */
-+#define EVTCHNOP_reset           10
-+struct evtchn_reset {
-+    /* IN parameters. */
-+    domid_t dom;
-+};
-+typedef struct evtchn_reset evtchn_reset_t;
++#define HVM_PARAM_CALLBACK_IRQ 0
 +
 +/*
-+ * Argument to event_channel_op_compat() hypercall. Superseded by new
-+ * event_channel_op() hypercall since 0x00030202.
++ * These are not used by Xen. They are here for convenience of HVM-guest
++ * xenbus implementations.
 + */
-+struct evtchn_op {
-+    uint32_t cmd; /* EVTCHNOP_* */
-+    union {
-+        struct evtchn_alloc_unbound    alloc_unbound;
-+        struct evtchn_bind_interdomain bind_interdomain;
-+        struct evtchn_bind_virq        bind_virq;
-+        struct evtchn_bind_pirq        bind_pirq;
-+        struct evtchn_bind_ipi         bind_ipi;
-+        struct evtchn_close            close;
-+        struct evtchn_send             send;
-+        struct evtchn_status           status;
-+        struct evtchn_bind_vcpu        bind_vcpu;
-+        struct evtchn_unmask           unmask;
-+    } u;
-+};
-+typedef struct evtchn_op evtchn_op_t;
-+DEFINE_XEN_GUEST_HANDLE(evtchn_op_t);
++#define HVM_PARAM_STORE_PFN    1
++#define HVM_PARAM_STORE_EVTCHN 2
++
++#define HVM_PARAM_PAE_ENABLED  4
++
++#define HVM_PARAM_IOREQ_PFN    5
 +
-+#endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */
++#define HVM_PARAM_BUFIOREQ_PFN 6
++
++#ifdef __ia64__
++#define HVM_PARAM_NVRAM_FD     7
++#define HVM_PARAM_VHPT_SIZE    8
++#define HVM_PARAM_BUFPIOREQ_PFN	9
++#endif
 +
 +/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/features.h tmp-linux-2.6-xen.patch/include/xen/interface/features.h
---- pristine-linux-2.6.18.2/include/xen/interface/features.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/features.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,71 @@
-+/******************************************************************************
-+ * features.h
-+ * 
-+ * Feature flags, reported by XENVER_get_features.
++ * Set mode for virtual timers (currently x86 only):
++ *  delay_for_missed_ticks (default):
++ *   Do not advance a vcpu's time beyond the correct delivery time for
++ *   interrupts that have been missed due to preemption. Deliver missed
++ *   interrupts when the vcpu is rescheduled and advance the vcpu's virtual
++ *   time stepwise for each one.
++ *  no_delay_for_missed_ticks:
++ *   As above, missed interrupts are delivered, but guest time always tracks
++ *   wallclock (i.e., real) time while doing so.
++ *  no_missed_ticks_pending:
++ *   No missed interrupts are held pending. Instead, to ensure ticks are
++ *   delivered at some non-zero rate, if we detect missed ticks then the
++ *   internal tick alarm is not disabled if the VCPU is preempted during the
++ *   next tick period.
++ *  one_missed_tick_pending:
++ *   Missed interrupts are collapsed together and delivered as one 'late tick'.
++ *   Guest time always tracks wallclock (i.e., real) time.
++ */
++#define HVM_PARAM_TIMER_MODE   10
++#define HVMPTM_delay_for_missed_ticks    0
++#define HVMPTM_no_delay_for_missed_ticks 1
++#define HVMPTM_no_missed_ticks_pending   2
++#define HVMPTM_one_missed_tick_pending   3
++
++/* Boolean: Enable virtual HPET (high-precision event timer)? (x86-only) */
++#define HVM_PARAM_HPET_ENABLED 11
++
++/* Identity-map page directory used by Intel EPT when CR0.PG=0. */
++#define HVM_PARAM_IDENT_PT     12
++
++/* Device Model domain, defaults to 0. */
++#define HVM_PARAM_DM_DOMAIN    13
++
++/* ACPI S state: currently support S0 and S3 on x86. */
++#define HVM_PARAM_ACPI_S_STATE 14
++
++#define HVM_NR_PARAMS          15
++
++#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
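The HVM_PARAM_CALLBACK_IRQ encoding documented above packs a type tag into
val[63:56] and the delivery details below it. Two illustrative helpers,
assuming nothing beyond <stdint.h>; they are not part of the patch:

#include <stdint.h>

static inline uint64_t callback_via_gsi(uint32_t gsi)
{
    return (uint64_t)gsi;                  /* val[63:56] == 0 => GSI */
}

static inline uint64_t callback_via_pci_intx(uint16_t seg, uint8_t bus,
                                             uint8_t devfn, uint8_t intx)
{
    return ((uint64_t)1     << 56) |       /* type 1: PCI INTx */
           ((uint64_t)seg   << 32) |       /* PCI domain, val[47:32] */
           ((uint64_t)bus   << 16) |       /* val[31:16] */
           ((uint64_t)devfn <<  8) |       /* val[15:8] */
           (uint64_t)(intx & 3);           /* val[1:0] */
}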
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/hvm/save.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/hvm/save.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,88 @@
++/* 
++ * hvm/save.h
++ *
++ * Structure definitions for HVM state that is held by Xen and must
++ * be saved along with the domain's memory and device-model state.
 + * 
++ * Copyright (c) 2007 XenSource Ltd.
++ *
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
 + * deal in the Software without restriction, including without limitation the
@@ -99445,403 +138480,348 @@
 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2006, Keir Fraser <keir at xensource.com>
-+ */
-+
-+#ifndef __XEN_PUBLIC_FEATURES_H__
-+#define __XEN_PUBLIC_FEATURES_H__
-+
-+/*
-+ * If set, the guest does not need to write-protect its pagetables, and can
-+ * update them via direct writes.
 + */
-+#define XENFEAT_writable_page_tables       0
 +
-+/*
-+ * If set, the guest does not need to write-protect its segment descriptor
-+ * tables, and can update them via direct writes.
-+ */
-+#define XENFEAT_writable_descriptor_tables 1
++#ifndef __XEN_PUBLIC_HVM_SAVE_H__
++#define __XEN_PUBLIC_HVM_SAVE_H__
 +
 +/*
-+ * If set, translation between the guest's 'pseudo-physical' address space
-+ * and the host's machine address space are handled by the hypervisor. In this
-+ * mode the guest does not need to perform phys-to/from-machine translations
-+ * when performing page table operations.
++ * Structures in this header *must* have the same layout in 32bit 
++ * and 64bit environments: this means that all fields must be explicitly 
++ * sized types and aligned to their sizes, and the structs must be 
++ * a multiple of eight bytes long.
++ *
++ * Only the state necessary for saving and restoring (i.e. fields 
++ * that are analogous to actual hardware state) should go in this file. 
++ * Internal mechanisms should be kept in Xen-private headers.
 + */
-+#define XENFEAT_auto_translated_physmap    2
 +
-+/* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */
-+#define XENFEAT_supervisor_mode_kernel     3
++#if !defined(__GNUC__) || defined(__STRICT_ANSI__)
++#error "Anonymous structs/unions are a GNU extension."
++#endif
 +
-+/*
-+ * If set, the guest does not need to allocate x86 PAE page directories
-+ * below 4GB. This flag is usually implied by auto_translated_physmap.
++/* 
++ * Each entry is preceded by a descriptor giving its type and length
 + */
-+#define XENFEAT_pae_pgdir_above_4gb        4
-+
-+#define XENFEAT_NR_SUBMAPS 1
++struct hvm_save_descriptor {
++    uint16_t typecode;          /* Used to demux the various types below */
++    uint16_t instance;          /* Further demux within a type */
++    uint32_t length;            /* In bytes, *not* including this descriptor */
++};
 +
-+#endif /* __XEN_PUBLIC_FEATURES_H__ */
 +
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
++/* 
++ * Each entry has a datatype associated with it: for example, the CPU state 
++ * is saved as a HVM_SAVE_TYPE(CPU), which has HVM_SAVE_LENGTH(CPU), 
++ * and is identified by a descriptor with typecode HVM_SAVE_CODE(CPU).
++ * DECLARE_HVM_SAVE_TYPE binds these things together with some type-system
++ * ugliness.
 + */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/foreign/Makefile tmp-linux-2.6-xen.patch/include/xen/interface/foreign/Makefile
---- pristine-linux-2.6.18.2/include/xen/interface/foreign/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/foreign/Makefile	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,37 @@
-+XEN_ROOT=../../../..
-+include $(XEN_ROOT)/Config.mk
-+
-+architectures := x86_32 x86_64 ia64
-+headers := $(patsubst %, %.h, $(architectures))
-+scripts := $(wildcard *.py)
-+
-+.PHONY: all clean check-headers
-+all: $(headers) check-headers
-+
-+clean:
-+	rm -f $(headers)
-+	rm -f checker checker.c $(XEN_TARGET_ARCH).size
-+	rm -f *.pyc *.o *~
-+
-+ifeq ($(CROSS_COMPILE)$(XEN_TARGET_ARCH),$(XEN_COMPILE_ARCH))
-+check-headers: checker
-+	./checker > $(XEN_TARGET_ARCH).size
-+	diff -u reference.size $(XEN_TARGET_ARCH).size
-+checker: checker.c $(headers)
-+	$(HOSTCC) $(HOSTCFLAGS) -o $@ $<
-+else
-+check-headers:
-+	@echo "cross build: skipping check"
-+endif
 +
-+x86_32.h: ../arch-x86/xen-x86_32.h ../arch-x86/xen.h ../xen.h $(scripts)
-+	python mkheader.py $* $@ $(filter %.h,$^)
++#define DECLARE_HVM_SAVE_TYPE(_x, _code, _type)                   \
++  struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; }
 +
-+x86_64.h: ../arch-x86/xen-x86_64.h ../arch-x86/xen.h ../xen.h $(scripts)
-+	python mkheader.py $* $@ $(filter %.h,$^)
++#define HVM_SAVE_TYPE(_x) typeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->t)
++#define HVM_SAVE_LENGTH(_x) (sizeof (HVM_SAVE_TYPE(_x)))
++#define HVM_SAVE_CODE(_x) (sizeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->c))
 +
-+ia64.h: ../arch-ia64.h ../xen.h $(scripts)
-+	python mkheader.py $* $@ $(filter %.h,$^)
 +
-+checker.c: $(scripts)
-+	python mkchecker.py $(XEN_TARGET_ARCH) $@ $(architectures)
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/foreign/mkchecker.py tmp-linux-2.6-xen.patch/include/xen/interface/foreign/mkchecker.py
---- pristine-linux-2.6.18.2/include/xen/interface/foreign/mkchecker.py	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/foreign/mkchecker.py	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,58 @@
-+#!/usr/bin/python
++/* 
++ * The series of save records is terminated by a zero-type, zero-length
++ * descriptor.
++ */
 +
-+import sys;
-+from structs import structs;
++struct hvm_save_end {};
++DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end);
 +
-+# command line arguments
-+arch    = sys.argv[1];
-+outfile = sys.argv[2];
-+archs   = sys.argv[3:];
++#if defined(__i386__) || defined(__x86_64__)
++#include "../arch-x86/hvm/save.h"
++#elif defined(__ia64__)
++#include "../arch-ia64/hvm/save.h"
++#else
++#error "unsupported architecture"
++#endif
 +
-+f = open(outfile, "w");
-+f.write('''
++#endif /* __XEN_PUBLIC_HVM_SAVE_H__ */
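To make the macro machinery above concrete: DECLARE_HVM_SAVE_TYPE smuggles
the typecode into the length of a char array, so HVM_SAVE_CODE can recover it
with sizeof. A sketch with a hypothetical record type; EXAMPLE, its payload
and the typecode 3 are invented here purely for illustration:

struct hvm_hw_example { uint64_t dummy; };     /* stand-in payload */
DECLARE_HVM_SAVE_TYPE(EXAMPLE, 3, struct hvm_hw_example);

/* HVM_SAVE_TYPE(EXAMPLE)   -> struct hvm_hw_example (via typeof)
 * HVM_SAVE_LENGTH(EXAMPLE) -> sizeof(struct hvm_hw_example) == 8
 * HVM_SAVE_CODE(EXAMPLE)   -> sizeof(char[3]) == 3                 */
static const struct hvm_save_descriptor example_desc = {
    .typecode = HVM_SAVE_CODE(EXAMPLE),    /* 3 */
    .instance = 0,
    .length   = HVM_SAVE_LENGTH(EXAMPLE),  /* payload only, excl. descriptor */
};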
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/hvm/vmx_assist.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/hvm/vmx_assist.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,122 @@
 +/*
-+ * sanity checks for generated foreign headers:
-+ *  - verify struct sizes
++ * vmx_assist.h: Context definitions for the VMXASSIST world switch.
 + *
-+ * generated by %s -- DO NOT EDIT
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Leendert van Doorn, leendert at watson.ibm.com
++ * Copyright (c) 2005, International Business Machines Corporation.
 + */
-+#include <stdio.h>
-+#include <stdlib.h>
-+#include <stddef.h>
-+#include <inttypes.h>
-+#include "../xen.h"
-+''');
-+
-+for a in archs:
-+    f.write('#include "%s.h"\n' % a);
-+
-+f.write('int main(int argc, char *argv[])\n{\n');
-+
-+f.write('\tprintf("\\n");');
-+f.write('printf("%-20s |", "structs");\n');
-+for a in archs:
-+    f.write('\tprintf("%%8s", "%s");\n' % a);
-+f.write('\tprintf("\\n");');
-+
-+f.write('\tprintf("\\n");');
-+for struct in structs:
-+    f.write('\tprintf("%%-20s |", "%s");\n' % struct);
-+    for a in archs:
-+        if a == arch:
-+            s = struct; # native
-+        else:
-+            s = struct + "_" + a;
-+        f.write('#ifdef %s_has_no_%s\n' % (a, struct));
-+        f.write('\tprintf("%8s", "-");\n');
-+        f.write("#else\n");
-+        f.write('\tprintf("%%8zd", sizeof(struct %s));\n' % s);
-+        f.write("#endif\n");
-+
-+    f.write('\tprintf("\\n");\n\n');
-+
-+f.write('\tprintf("\\n");\n');
-+f.write('\texit(0);\n');
-+f.write('}\n');
-+
-+f.close();
-+
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/foreign/mkheader.py tmp-linux-2.6-xen.patch/include/xen/interface/foreign/mkheader.py
---- pristine-linux-2.6.18.2/include/xen/interface/foreign/mkheader.py	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/foreign/mkheader.py	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,153 @@
-+#!/usr/bin/python
 +
-+import sys, re;
-+from structs import structs, defines;
++#ifndef _VMX_ASSIST_H_
++#define _VMX_ASSIST_H_
 +
-+# command line arguments
-+arch    = sys.argv[1];
-+outfile = sys.argv[2];
-+infiles = sys.argv[3:];
++#define VMXASSIST_BASE         0xD0000
++#define VMXASSIST_MAGIC        0x17101966
++#define VMXASSIST_MAGIC_OFFSET (VMXASSIST_BASE+8)
 +
++#define VMXASSIST_NEW_CONTEXT (VMXASSIST_BASE + 12)
++#define VMXASSIST_OLD_CONTEXT (VMXASSIST_NEW_CONTEXT + 4)
 +
-+###########################################################################
-+# configuration #2: architecture information
++#ifndef __ASSEMBLY__
 +
-+inttypes = {};
-+header = {};
-+footer = {};
++#define NR_EXCEPTION_HANDLER    32
++#define NR_INTERRUPT_HANDLERS   16
++#define NR_TRAPS        (NR_EXCEPTION_HANDLER+NR_INTERRUPT_HANDLERS)
 +
-+# x86_32
-+inttypes["x86_32"] = {
-+    "unsigned long" : "uint32_t",
-+    "long"          : "uint32_t",
-+    "xen_pfn_t"     : "uint32_t",
++union vmcs_arbytes {
++    struct arbyte_fields {
++        unsigned int seg_type : 4,
++            s         : 1,
++            dpl       : 2,
++            p         : 1,
++            reserved0 : 4,
++            avl       : 1,
++            reserved1 : 1,
++            default_ops_size: 1,
++            g         : 1,
++            null_bit  : 1,
++            reserved2 : 15;
++    } fields;
++    unsigned int bytes;
 +};
-+header["x86_32"] = """
-+#define __i386___X86_32 1
-+#pragma pack(4)
-+""";
-+footer["x86_32"] = """
-+#pragma pack()
-+""";
 +
-+# x86_64
-+inttypes["x86_64"] = {
-+    "unsigned long" : "__align8__ uint64_t",
-+    "long"          : "__align8__ uint64_t",
-+    "xen_pfn_t"     : "__align8__ uint64_t",
-+};
-+header["x86_64"] = """
-+#ifdef __GNUC__
-+# define __DECL_REG(name) union { uint64_t r ## name, e ## name; }
-+# define __align8__ __attribute__((aligned (8)))
-+#else
-+# define __DECL_REG(name) uint64_t r ## name
-+# define __align8__ FIXME
-+#endif
-+#define __x86_64___X86_64 1
-+""";
++/*
++ * World switch state
++ */
++struct vmx_assist_context {
++    uint32_t  eip;        /* execution pointer */
++    uint32_t  esp;        /* stack pointer */
++    uint32_t  eflags;     /* flags register */
++    uint32_t  cr0;
++    uint32_t  cr3;        /* page table directory */
++    uint32_t  cr4;
++    uint32_t  idtr_limit; /* idt */
++    uint32_t  idtr_base;
++    uint32_t  gdtr_limit; /* gdt */
++    uint32_t  gdtr_base;
++    uint32_t  cs_sel;     /* cs selector */
++    uint32_t  cs_limit;
++    uint32_t  cs_base;
++    union vmcs_arbytes cs_arbytes;
++    uint32_t  ds_sel;     /* ds selector */
++    uint32_t  ds_limit;
++    uint32_t  ds_base;
++    union vmcs_arbytes ds_arbytes;
++    uint32_t  es_sel;     /* es selector */
++    uint32_t  es_limit;
++    uint32_t  es_base;
++    union vmcs_arbytes es_arbytes;
++    uint32_t  ss_sel;     /* ss selector */
++    uint32_t  ss_limit;
++    uint32_t  ss_base;
++    union vmcs_arbytes ss_arbytes;
++    uint32_t  fs_sel;     /* fs selector */
++    uint32_t  fs_limit;
++    uint32_t  fs_base;
++    union vmcs_arbytes fs_arbytes;
++    uint32_t  gs_sel;     /* gs selector */
++    uint32_t  gs_limit;
++    uint32_t  gs_base;
++    union vmcs_arbytes gs_arbytes;
++    uint32_t  tr_sel;     /* task selector */
++    uint32_t  tr_limit;
++    uint32_t  tr_base;
++    union vmcs_arbytes tr_arbytes;
++    uint32_t  ldtr_sel;   /* ldtr selector */
++    uint32_t  ldtr_limit;
++    uint32_t  ldtr_base;
++    union vmcs_arbytes ldtr_arbytes;
 +
-+# ia64
-+inttypes["ia64"] = {
-+    "unsigned long" : "__align8__ uint64_t",
-+    "long"          : "__align8__ uint64_t",
-+    "xen_pfn_t"     : "__align8__ uint64_t",
-+    "long double"   : "__align16__ ldouble_t",
++    unsigned char rm_irqbase[2];
 +};
-+header["ia64"] = """
-+#define __align8__ __attribute__((aligned (8)))
-+#define __align16__ __attribute__((aligned (16)))
-+typedef unsigned char ldouble_t[16];
-+""";
-+
-+
-+###########################################################################
-+# main
++typedef struct vmx_assist_context vmx_assist_context_t;
 +
-+input  = "";
-+output = "";
-+fileid = re.sub("[-.]", "_", "__FOREIGN_%s__" % outfile.upper());
++#endif /* __ASSEMBLY__ */
 +
-+# read input header files
-+for name in infiles:
-+    f = open(name, "r");
-+    input += f.read();
-+    f.close();
++#endif /* _VMX_ASSIST_H_ */
 +
-+# add header
-+output += """
 +/*
-+ * public xen defines and struct for %s
-+ * generated by %s -- DO NOT EDIT
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
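For orientation, the VMXASSIST_* constants above name fixed guest-physical
slots: a magic word at VMXASSIST_MAGIC_OFFSET and, at VMXASSIST_NEW_CONTEXT
and VMXASSIST_OLD_CONTEXT, what appear to be the addresses of the two context
blocks. A hedged sketch of how firmware-side code might locate them, assuming
flat access to low guest memory and that each slot holds a context address:

#include <stdint.h>

static int vmxassist_present(void)
{
    return *(volatile uint32_t *)(uintptr_t)VMXASSIST_MAGIC_OFFSET
           == VMXASSIST_MAGIC;
}

static struct vmx_assist_context *vmxassist_new_context(void)
{
    uint32_t addr = *(volatile uint32_t *)(uintptr_t)VMXASSIST_NEW_CONTEXT;
    return (struct vmx_assist_context *)(uintptr_t)addr;
}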
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/io/blkif.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/io/blkif.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,141 @@
++/******************************************************************************
++ * blkif.h
++ * 
++ * Unified block-device I/O interface for Xen guest OSes.
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2003-2004, Keir Fraser
 + */
 +
-+#ifndef %s
-+#define %s 1
-+
-+""" % (arch, sys.argv[0], fileid, fileid)
-+
-+if arch in header:
-+    output += header[arch];
-+    output += "\n";
-+
-+# add defines to output
-+for line in re.findall("#define[^\n]+", input):
-+    for define in defines:
-+        regex = "#define\s+%s\\b" % define;
-+        match = re.search(regex, line);
-+        if None == match:
-+            continue;
-+        if define.upper()[0] == define[0]:
-+            replace = define + "_" + arch.upper();
-+        else:
-+            replace = define + "_" + arch;
-+        regex = "\\b%s\\b" % define;
-+        output += re.sub(regex, replace, line) + "\n";
-+output += "\n";
-+
-+# delete defines, comments, empty lines
-+input = re.sub("#define[^\n]+\n", "", input);
-+input = re.compile("/\*(.*?)\*/", re.S).sub("", input)
-+input = re.compile("\n\s*\n", re.S).sub("\n", input);
-+
-+# add structs to output
-+for struct in structs:
-+    regex = "struct\s+%s\s*\{(.*?)\n\};" % struct;
-+    match = re.search(regex, input, re.S)
-+    if None == match:
-+        output += "#define %s_has_no_%s 1\n" % (arch, struct);
-+    else:
-+        output += "struct %s_%s {%s\n};\n" % (struct, arch, match.group(1));
-+        output += "typedef struct %s_%s %s_%s_t;\n" % (struct, arch, struct, arch);
-+    output += "\n";
-+
-+# add footer
-+if arch in footer:
-+    output += footer[arch];
-+    output += "\n";
-+output += "#endif /* %s */\n" % fileid;
-+
-+# replace: defines
-+for define in defines:
-+    if define.upper()[0] == define[0]:
-+        replace = define + "_" + arch.upper();
-+    else:
-+        replace = define + "_" + arch;
-+    output = re.sub("\\b%s\\b" % define, replace, output);
-+
-+# replace: structs + struct typedefs
-+for struct in structs:
-+    output = re.sub("\\b(struct\s+%s)\\b" % struct, "\\1_%s" % arch, output);
-+    output = re.sub("\\b(%s)_t\\b" % struct, "\\1_%s_t" % arch, output);
-+
-+# replace: integer types
-+integers = inttypes[arch].keys();
-+integers.sort(lambda a, b: cmp(len(b),len(a)));
-+for type in integers:
-+    output = re.sub("\\b%s\\b" % type, inttypes[arch][type], output);
-+
-+# print results
-+f = open(outfile, "w");
-+f.write(output);
-+f.close;
++#ifndef __XEN_PUBLIC_IO_BLKIF_H__
++#define __XEN_PUBLIC_IO_BLKIF_H__
 +
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/foreign/reference.size tmp-linux-2.6-xen.patch/include/xen/interface/foreign/reference.size
---- pristine-linux-2.6.18.2/include/xen/interface/foreign/reference.size	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/foreign/reference.size	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,17 @@
++#include "ring.h"
++#include "../grant_table.h"
 +
-+structs              |  x86_32  x86_64    ia64
++/*
++ * Front->back notifications: When enqueuing a new request, sending a
++ * notification can be made conditional on req_event (i.e., the generic
++ * hold-off mechanism provided by the ring macros). Backends must set
++ * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
++ * 
++ * Back->front notifications: When enqueuing a new response, sending a
++ * notification can be made conditional on rsp_event (i.e., the generic
++ * hold-off mechanism provided by the ring macros). Frontends must set
++ * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
++ */
 +
-+start_info           |    1104    1152    1152
-+trap_info            |       8      16       -
-+pt_fpreg             |       -       -      16
-+cpu_user_regs        |      68     200     496
-+xen_ia64_boot_param  |       -       -      96
-+ia64_tr_entry        |       -       -      32
-+vcpu_extra_regs      |       -       -     536
-+vcpu_guest_context   |    2800    5168    1056
-+arch_vcpu_info       |      24      16       0
-+vcpu_time_info       |      32      32      32
-+vcpu_info            |      64      64      48
-+arch_shared_info     |     268     280     272
-+shared_info          |    2584    3368    4384
-+
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/foreign/structs.py tmp-linux-2.6-xen.patch/include/xen/interface/foreign/structs.py
---- pristine-linux-2.6.18.2/include/xen/interface/foreign/structs.py	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/foreign/structs.py	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,54 @@
-+# configuration: what needs translation
++#ifndef blkif_vdev_t
++#define blkif_vdev_t   uint16_t
++#endif
++#define blkif_sector_t uint64_t
 +
-+structs = [ "start_info",
-+            "trap_info",
-+            "pt_fpreg",
-+            "cpu_user_regs",
-+            "xen_ia64_boot_param",
-+            "ia64_tr_entry",
-+            "vcpu_extra_regs",
-+            "vcpu_guest_context",
-+            "arch_vcpu_info",
-+            "vcpu_time_info",
-+            "vcpu_info",
-+            "arch_shared_info",
-+            "shared_info" ];
++/*
++ * REQUEST CODES.
++ */
++#define BLKIF_OP_READ              0
++#define BLKIF_OP_WRITE             1
++/*
++ * Recognised only if "feature-barrier" is present in backend xenbus info.
++ * The "feature-barrier" node contains a boolean indicating whether barrier
++ * requests are likely to succeed or fail. Either way, a barrier request
++ * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by
++ * the underlying block-device hardware. The boolean simply indicates whether
++ * or not it is worthwhile for the frontend to attempt barrier requests.
++ * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not*
++ * create the "feature-barrier" node!
++ */
++#define BLKIF_OP_WRITE_BARRIER     2
++/*
++ * Recognised if "feature-flush-cache" is present in backend xenbus
++ * info.  A flush will ask the underlying storage hardware to flush its
++ * non-volatile caches as appropriate.  The "feature-flush-cache" node
++ * contains a boolean indicating whether flush requests are likely to
++ * succeed or fail. Either way, a flush request may fail at any time
++ * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying
++ * block-device hardware. The boolean simply indicates whether or not it
++ * is worthwhile for the frontend to attempt flushes.  If a backend does
++ * not recognise BLKIF_OP_WRITE_FLUSH_CACHE, it should *not* create the
++ * "feature-flush-cache" node!
++ */
++#define BLKIF_OP_FLUSH_DISKCACHE   3
 +
-+defines = [ "__i386__",
-+            "__x86_64__",
++/*
++ * Maximum scatter/gather segments per request.
++ * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE.
++ * NB. This could be 12 if the ring indexes weren't stored in the same page.
++ */
++#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
 +
-+            "FLAT_RING1_CS",
-+            "FLAT_RING1_DS",
-+            "FLAT_RING1_SS",
++struct blkif_request_segment {
++    grant_ref_t gref;        /* reference to I/O buffer frame        */
++    /* @first_sect: first sector in frame to transfer (inclusive).   */
++    /* @last_sect: last sector in frame to transfer (inclusive).     */
++    uint8_t     first_sect, last_sect;
++};
 +
-+            "FLAT_RING3_CS64",
-+            "FLAT_RING3_DS64",
-+            "FLAT_RING3_SS64",
-+            "FLAT_KERNEL_CS64",
-+            "FLAT_KERNEL_DS64",
-+            "FLAT_KERNEL_SS64",
++struct blkif_request {
++    uint8_t        operation;    /* BLKIF_OP_???                         */
++    uint8_t        nr_segments;  /* number of segments                   */
++    blkif_vdev_t   handle;       /* only for read/write requests         */
++    uint64_t       id;           /* private guest value, echoed in resp  */
++    blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
++    struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++};
++typedef struct blkif_request blkif_request_t;
 +
-+            "FLAT_KERNEL_CS",
-+            "FLAT_KERNEL_DS",
-+            "FLAT_KERNEL_SS",
++struct blkif_response {
++    uint64_t        id;              /* copied from request */
++    uint8_t         operation;       /* copied from request */
++    int16_t         status;          /* BLKIF_RSP_???       */
++};
++typedef struct blkif_response blkif_response_t;
 +
-+            # x86_{32,64}
-+            "_VGCF_i387_valid",
-+            "VGCF_i387_valid",
-+            "_VGCF_in_kernel",
-+            "VGCF_in_kernel",
-+            "_VGCF_failsafe_disables_events",
-+            "VGCF_failsafe_disables_events",
-+            "_VGCF_syscall_disables_events",
-+            "VGCF_syscall_disables_events",
-+            "_VGCF_online",
-+            "VGCF_online",
++/*
++ * STATUS RETURN CODES.
++ */
++ /* Operation not supported (only happens on barrier writes). */
++#define BLKIF_RSP_EOPNOTSUPP  -2
++ /* Operation failed for some unspecified reason (-EIO). */
++#define BLKIF_RSP_ERROR       -1
++ /* Operation completed successfully. */
++#define BLKIF_RSP_OKAY         0
 +
-+            # ia64
-+            "VGCF_EXTRA_REGS",
++/*
++ * Generate blkif ring structures and types.
++ */
 +
-+            # all archs
-+            "xen_pfn_to_cr3",
-+            "MAX_VIRT_CPUS",
-+            "MAX_GUEST_CMDLINE" ];
++DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);
 +
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/grant_table.h tmp-linux-2.6-xen.patch/include/xen/interface/grant_table.h
---- pristine-linux-2.6.18.2/include/xen/interface/grant_table.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/grant_table.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,399 @@
++#define VDISK_CDROM        0x1
++#define VDISK_REMOVABLE    0x2
++#define VDISK_READONLY     0x4
++
++#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
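Tying the blkif definitions together: a frontend fills in a blkif_request
whose id is echoed back in blkif_response, carrying up to
BLKIF_MAX_SEGMENTS_PER_REQUEST grant-backed segments. A sketch of a
single-segment read; ring insertion via the ring.h macros and the grant
setup are assumed to happen elsewhere:

/* Sketch only: one full 4kB frame, i.e. 512-byte sectors 0..7 inclusive. */
static void fill_read_request(struct blkif_request *req, grant_ref_t gref,
                              blkif_vdev_t handle, blkif_sector_t sector,
                              uint64_t id)
{
    req->operation     = BLKIF_OP_READ;
    req->nr_segments   = 1;
    req->handle        = handle;
    req->id            = id;        /* echoed in blkif_response.id */
    req->sector_number = sector;    /* start sector on the disk */
    req->seg[0].gref       = gref;  /* grant covering the data frame */
    req->seg[0].first_sect = 0;
    req->seg[0].last_sect  = 7;
}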
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/io/console.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/io/console.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,51 @@
 +/******************************************************************************
-+ * grant_table.h
++ * console.h
 + * 
-+ * Interface for granting foreign access to page frames, and receiving
-+ * page-ownership transfers.
++ * Console I/O interface for Xen guest OSes.
 + * 
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
@@ -99861,371 +138841,204 @@
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
 + *
-+ * Copyright (c) 2004, K A Fraser
++ * Copyright (c) 2005, Keir Fraser
 + */
 +
-+#ifndef __XEN_PUBLIC_GRANT_TABLE_H__
-+#define __XEN_PUBLIC_GRANT_TABLE_H__
-+
++#ifndef __XEN_PUBLIC_IO_CONSOLE_H__
++#define __XEN_PUBLIC_IO_CONSOLE_H__
 +
-+/***********************************
-+ * GRANT TABLE REPRESENTATION
-+ */
++typedef uint32_t XENCONS_RING_IDX;
 +
-+/* Some rough guidelines on accessing and updating grant-table entries
-+ * in a concurrency-safe manner. For more information, Linux contains a
-+ * reference implementation for guest OSes (arch/xen/kernel/grant_table.c).
-+ * 
-+ * NB. WMB is a no-op on current-generation x86 processors. However, a
-+ *     compiler barrier will still be required.
-+ * 
-+ * Introducing a valid entry into the grant table:
-+ *  1. Write ent->domid.
-+ *  2. Write ent->frame:
-+ *      GTF_permit_access:   Frame to which access is permitted.
-+ *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
-+ *                           frame, or zero if none.
-+ *  3. Write memory barrier (WMB).
-+ *  4. Write ent->flags, inc. valid type.
-+ * 
-+ * Invalidating an unused GTF_permit_access entry:
-+ *  1. flags = ent->flags.
-+ *  2. Observe that !(flags & (GTF_reading|GTF_writing)).
-+ *  3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
-+ *  NB. No need for WMB as reuse of entry is control-dependent on success of
-+ *      step 3, and all architectures guarantee ordering of ctrl-dep writes.
-+ *
-+ * Invalidating an in-use GTF_permit_access entry:
-+ *  This cannot be done directly. Request assistance from the domain controller
-+ *  which can set a timeout on the use of a grant entry and take necessary
-+ *  action. (NB. This is not yet implemented!).
-+ * 
-+ * Invalidating an unused GTF_accept_transfer entry:
-+ *  1. flags = ent->flags.
-+ *  2. Observe that !(flags & GTF_transfer_committed). [*]
-+ *  3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
-+ *  NB. No need for WMB as reuse of entry is control-dependent on success of
-+ *      step 3, and all architectures guarantee ordering of ctrl-dep writes.
-+ *  [*] If GTF_transfer_committed is set then the grant entry is 'committed'.
-+ *      The guest must /not/ modify the grant entry until the address of the
-+ *      transferred frame is written. It is safe for the guest to spin waiting
-+ *      for this to occur (detect by observing GTF_transfer_completed in
-+ *      ent->flags).
-+ *
-+ * Invalidating a committed GTF_accept_transfer entry:
-+ *  1. Wait for (ent->flags & GTF_transfer_completed).
-+ *
-+ * Changing a GTF_permit_access from writable to read-only:
-+ *  Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing.
-+ * 
-+ * Changing a GTF_permit_access from read-only to writable:
-+ *  Use SMP-safe bit-setting instruction.
-+ */
++#define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1))
 +
-+/*
-+ * A grant table comprises a packed array of grant entries in one or more
-+ * page frames shared between Xen and a guest.
-+ * [XEN]: This field is written by Xen and read by the sharing guest.
-+ * [GST]: This field is written by the guest and read by Xen.
-+ */
-+struct grant_entry {
-+    /* GTF_xxx: various type and flag information.  [XEN,GST] */
-+    uint16_t flags;
-+    /* The domain being granted foreign privileges. [GST] */
-+    domid_t  domid;
-+    /*
-+     * GTF_permit_access: Frame that @domid is allowed to map and access. [GST]
-+     * GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN]
-+     */
-+    uint32_t frame;
++struct xencons_interface {
++    char in[1024];
++    char out[2048];
++    XENCONS_RING_IDX in_cons, in_prod;
++    XENCONS_RING_IDX out_cons, out_prod;
 +};
-+typedef struct grant_entry grant_entry_t;
 +
-+/*
-+ * Type of grant entry.
-+ *  GTF_invalid: This grant entry grants no privileges.
-+ *  GTF_permit_access: Allow @domid to map/access @frame.
-+ *  GTF_accept_transfer: Allow @domid to transfer ownership of one page frame
-+ *                       to this guest. Xen writes the page number to @frame.
-+ */
-+#define GTF_invalid         (0U<<0)
-+#define GTF_permit_access   (1U<<0)
-+#define GTF_accept_transfer (2U<<0)
-+#define GTF_type_mask       (3U<<0)
++#endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */
 +
 +/*
-+ * Subflags for GTF_permit_access.
-+ *  GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
-+ *  GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
-+ *  GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
 + */
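The console rings above are a classic producer/consumer pair;
MASK_XENCONS_IDX wraps a free-running index into the buffer, which is why
the buffer sizes are powers of two. A sketch of the frontend's transmit
path; a real implementation issues a write barrier before publishing
out_prod and then notifies the backend over the console event channel:

/* Sketch only: enqueue one byte on the "out" ring. */
static int xencons_put_char(struct xencons_interface *intf, char c)
{
    XENCONS_RING_IDX prod = intf->out_prod;

    if ((prod - intf->out_cons) >= sizeof(intf->out))
        return 0;                                  /* ring is full */
    intf->out[MASK_XENCONS_IDX(prod, intf->out)] = c;
    intf->out_prod = prod + 1;                     /* publish (after wmb) */
    return 1;
}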
-+#define _GTF_readonly       (2)
-+#define GTF_readonly        (1U<<_GTF_readonly)
-+#define _GTF_reading        (3)
-+#define GTF_reading         (1U<<_GTF_reading)
-+#define _GTF_writing        (4)
-+#define GTF_writing         (1U<<_GTF_writing)
-+
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/io/fbif.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/io/fbif.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,176 @@
 +/*
-+ * Subflags for GTF_accept_transfer:
-+ *  GTF_transfer_committed: Xen sets this flag to indicate that it is committed
-+ *      to transferring ownership of a page frame. When a guest sees this flag
-+ *      it must /not/ modify the grant entry until GTF_transfer_completed is
-+ *      set by Xen.
-+ *  GTF_transfer_completed: It is safe for the guest to spin-wait on this flag
-+ *      after reading GTF_transfer_committed. Xen will always write the frame
-+ *      address, followed by ORing this flag, in a timely manner.
++ * fbif.h -- Xen virtual frame buffer device
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (C) 2005 Anthony Liguori <aliguori at us.ibm.com>
++ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru at redhat.com>
 + */
-+#define _GTF_transfer_committed (2)
-+#define GTF_transfer_committed  (1U<<_GTF_transfer_committed)
-+#define _GTF_transfer_completed (3)
-+#define GTF_transfer_completed  (1U<<_GTF_transfer_completed)
 +
++#ifndef __XEN_PUBLIC_IO_FBIF_H__
++#define __XEN_PUBLIC_IO_FBIF_H__
 +
-+/***********************************
-+ * GRANT TABLE QUERIES AND USES
-+ */
++/* Out events (frontend -> backend) */
 +
 +/*
-+ * Reference to a grant entry in a specified domain's grant table.
++ * Out events may be sent only when requested by backend, and receipt
++ * of an unknown out event is an error.
 + */
-+typedef uint32_t grant_ref_t;
 +
++/* Event type 1 currently not used */
 +/*
-+ * Handle to track a mapping created via a grant reference.
++ * Framebuffer update notification event
++ * Capable frontend sets feature-update in xenstore.
++ * Backend requests it by setting request-update in xenstore.
 + */
-+typedef uint32_t grant_handle_t;
++#define XENFB_TYPE_UPDATE 2
 +
-+/*
-+ * GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access
-+ * by devices and/or host CPUs. If successful, <handle> is a tracking number
-+ * that must be presented later to destroy the mapping(s). On error, <handle>
-+ * is a negative status code.
-+ * NOTES:
-+ *  1. If GNTMAP_device_map is specified then <dev_bus_addr> is the address
-+ *     via which I/O devices may access the granted frame.
-+ *  2. If GNTMAP_host_map is specified then a mapping will be added at
-+ *     either a host virtual address in the current address space, or at
-+ *     a PTE at the specified machine address.  The type of mapping to
-+ *     perform is selected through the GNTMAP_contains_pte flag, and the 
-+ *     address is specified in <host_addr>.
-+ *  3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a
-+ *     host mapping is destroyed by other means then it is *NOT* guaranteed
-+ *     to be accounted to the correct grant reference!
-+ */
-+#define GNTTABOP_map_grant_ref        0
-+struct gnttab_map_grant_ref {
-+    /* IN parameters. */
-+    uint64_t host_addr;
-+    uint32_t flags;               /* GNTMAP_* */
-+    grant_ref_t ref;
-+    domid_t  dom;
-+    /* OUT parameters. */
-+    int16_t  status;              /* GNTST_* */
-+    grant_handle_t handle;
-+    uint64_t dev_bus_addr;
++struct xenfb_update
++{
++    uint8_t type;    /* XENFB_TYPE_UPDATE */
++    int32_t x;      /* source x */
++    int32_t y;      /* source y */
++    int32_t width;  /* rect width */
++    int32_t height; /* rect height */
 +};
-+typedef struct gnttab_map_grant_ref gnttab_map_grant_ref_t;
-+DEFINE_XEN_GUEST_HANDLE(gnttab_map_grant_ref_t);
 +
 +/*
-+ * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings
-+ * tracked by <handle>. If <host_addr> or <dev_bus_addr> is zero, that
-+ * field is ignored. If non-zero, they must refer to a device/host mapping
-+ * that is tracked by <handle>
-+ * NOTES:
-+ *  1. The call may fail in an undefined manner if either mapping is not
-+ *     tracked by <handle>.
-+ *  3. After executing a batch of unmaps, it is guaranteed that no stale
-+ *     mappings will remain in the device or host TLBs.
++ * Framebuffer resize notification event
++ * Capable backend sets feature-resize in xenstore.
 + */
-+#define GNTTABOP_unmap_grant_ref      1
-+struct gnttab_unmap_grant_ref {
-+    /* IN parameters. */
-+    uint64_t host_addr;
-+    uint64_t dev_bus_addr;
-+    grant_handle_t handle;
-+    /* OUT parameters. */
-+    int16_t  status;              /* GNTST_* */
-+};
-+typedef struct gnttab_unmap_grant_ref gnttab_unmap_grant_ref_t;
-+DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t);
++#define XENFB_TYPE_RESIZE 3
 +
-+/*
-+ * GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least
-+ * <nr_frames> pages. The frame addresses are written to the <frame_list>.
-+ * Only <nr_frames> addresses are written, even if the table is larger.
-+ * NOTES:
-+ *  1. <dom> may be specified as DOMID_SELF.
-+ *  2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
-+ *  3. Xen may not support more than a single grant-table page per domain.
-+ */
-+#define GNTTABOP_setup_table          2
-+struct gnttab_setup_table {
-+    /* IN parameters. */
-+    domid_t  dom;
-+    uint32_t nr_frames;
-+    /* OUT parameters. */
-+    int16_t  status;              /* GNTST_* */
-+    XEN_GUEST_HANDLE(ulong) frame_list;
++struct xenfb_resize
++{
++    uint8_t type;    /* XENFB_TYPE_RESIZE */
++    int32_t width;   /* width in pixels */
++    int32_t height;  /* height in pixels */
++    int32_t stride;  /* stride in bytes */
++    int32_t depth;   /* depth in bits */
++    int32_t offset;  /* offset of the framebuffer in bytes */
 +};
-+typedef struct gnttab_setup_table gnttab_setup_table_t;
-+DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t);
 +
-+/*
-+ * GNTTABOP_dump_table: Dump the contents of the grant table to the
-+ * xen console. Debugging use only.
-+ */
-+#define GNTTABOP_dump_table           3
-+struct gnttab_dump_table {
-+    /* IN parameters. */
-+    domid_t dom;
-+    /* OUT parameters. */
-+    int16_t status;               /* GNTST_* */
-+};
-+typedef struct gnttab_dump_table gnttab_dump_table_t;
-+DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t);
++#define XENFB_OUT_EVENT_SIZE 40
 +
-+/*
-+ * GNTTABOP_transfer_grant_ref: Transfer <frame> to a foreign domain. The
-+ * foreign domain has previously registered its interest in the transfer via
-+ * <domid, ref>.
-+ * 
-+ * Note that, even if the transfer fails, the specified page no longer belongs
-+ * to the calling domain *unless* the error is GNTST_bad_page.
-+ */
-+#define GNTTABOP_transfer                4
-+struct gnttab_transfer {
-+    /* IN parameters. */
-+    xen_pfn_t     mfn;
-+    domid_t       domid;
-+    grant_ref_t   ref;
-+    /* OUT parameters. */
-+    int16_t       status;
++union xenfb_out_event
++{
++    uint8_t type;
++    struct xenfb_update update;
++    struct xenfb_resize resize;
++    char pad[XENFB_OUT_EVENT_SIZE];
 +};
-+typedef struct gnttab_transfer gnttab_transfer_t;
-+DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t);
 +
++/* In events (backend -> frontend) */
 +
 +/*
-+ * GNTTABOP_copy: Hypervisor based copy
-+ * source and destinations can be eithers MFNs or, for foreign domains,
-+ * grant references. the foreign domain has to grant read/write access
-+ * in its grant table.
-+ *
-+ * The flags specify what type source and destinations are (either MFN
-+ * or grant reference).
-+ *
-+ * Note that this can also be used to copy data between two domains
-+ * via a third party if the source and destination domains had previously
-+ * grant appropriate access to their pages to the third party.
-+ *
-+ * source_offset specifies an offset in the source frame, dest_offset
-+ * the offset in the target frame and  len specifies the number of
-+ * bytes to be copied.
++ * Frontends should ignore unknown in events.
 + */
 +
-+#define _GNTCOPY_source_gref      (0)
-+#define GNTCOPY_source_gref       (1<<_GNTCOPY_source_gref)
-+#define _GNTCOPY_dest_gref        (1)
-+#define GNTCOPY_dest_gref         (1<<_GNTCOPY_dest_gref)
++/*
++ * Framebuffer refresh period advice
++ * The backend sends it to advise the frontend of its preferred refresh
++ * period.  Frontends that keep the framebuffer constantly up-to-date
++ * just ignore it.  Frontends that use the advice should immediately
++ * refresh the framebuffer (and send an update notification event if
++ * one has been requested), then use the advised frequency to guide
++ * their periodic refreshes.
++ */
++#define XENFB_TYPE_REFRESH_PERIOD 1
++#define XENFB_NO_REFRESH 0
++
++struct xenfb_refresh_period
++{
++    uint8_t type;    /* XENFB_TYPE_REFRESH_PERIOD */
++    uint32_t period; /* period of refresh, in ms,
++                      * XENFB_NO_REFRESH if no refresh is needed */
++};
 +
-+#define GNTTABOP_copy                 5
-+typedef struct gnttab_copy {
-+    /* IN parameters. */
-+    struct {
-+        union {
-+            grant_ref_t ref;
-+            xen_pfn_t   gmfn;
-+        } u;
-+        domid_t  domid;
-+        uint16_t offset;
-+    } source, dest;
-+    uint16_t      len;
-+    uint16_t      flags;          /* GNTCOPY_* */
-+    /* OUT parameters. */
-+    int16_t       status;
-+} gnttab_copy_t;
-+DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t);
++#define XENFB_IN_EVENT_SIZE 40
 +
-+/*
-+ * GNTTABOP_query_size: Query the current and maximum sizes of the shared
-+ * grant table.
-+ * NOTES:
-+ *  1. <dom> may be specified as DOMID_SELF.
-+ *  2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
-+ */
-+#define GNTTABOP_query_size           6
-+struct gnttab_query_size {
-+    /* IN parameters. */
-+    domid_t  dom;
-+    /* OUT parameters. */
-+    uint32_t nr_frames;
-+    uint32_t max_nr_frames;
-+    int16_t  status;              /* GNTST_* */
++union xenfb_in_event
++{
++    uint8_t type;
++    struct xenfb_refresh_period refresh_period;
++    char pad[XENFB_IN_EVENT_SIZE];
 +};
-+typedef struct gnttab_query_size gnttab_query_size_t;
-+DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t);
 +
++/* shared page */
 +
-+/*
-+ * Bitfield values for update_pin_status.flags.
-+ */
-+ /* Map the grant entry for access by I/O devices. */
-+#define _GNTMAP_device_map      (0)
-+#define GNTMAP_device_map       (1<<_GNTMAP_device_map)
-+ /* Map the grant entry for access by host CPUs. */
-+#define _GNTMAP_host_map        (1)
-+#define GNTMAP_host_map         (1<<_GNTMAP_host_map)
-+ /* Accesses to the granted frame will be restricted to read-only access. */
-+#define _GNTMAP_readonly        (2)
-+#define GNTMAP_readonly         (1<<_GNTMAP_readonly)
-+ /*
-+  * GNTMAP_host_map subflag:
-+  *  0 => The host mapping is usable only by the guest OS.
-+  *  1 => The host mapping is usable by guest OS + current application.
-+  */
-+#define _GNTMAP_application_map (3)
-+#define GNTMAP_application_map  (1<<_GNTMAP_application_map)
++#define XENFB_IN_RING_SIZE 1024
++#define XENFB_IN_RING_LEN (XENFB_IN_RING_SIZE / XENFB_IN_EVENT_SIZE)
++#define XENFB_IN_RING_OFFS 1024
++#define XENFB_IN_RING(page) \
++    ((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS))
++#define XENFB_IN_RING_REF(page, idx) \
++    (XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN])
 +
-+ /*
-+  * GNTMAP_contains_pte subflag:
-+  *  0 => This map request contains a host virtual address.
-+  *  1 => This map request contains the machine addess of the PTE to update.
-+  */
-+#define _GNTMAP_contains_pte    (4)
-+#define GNTMAP_contains_pte     (1<<_GNTMAP_contains_pte)
++#define XENFB_OUT_RING_SIZE 2048
++#define XENFB_OUT_RING_LEN (XENFB_OUT_RING_SIZE / XENFB_OUT_EVENT_SIZE)
++#define XENFB_OUT_RING_OFFS (XENFB_IN_RING_OFFS + XENFB_IN_RING_SIZE)
++#define XENFB_OUT_RING(page) \
++    ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS))
++#define XENFB_OUT_RING_REF(page, idx) \
++    (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN])
 +
-+/*
-+ * Values for error status returns. All errors are -ve.
-+ */
-+#define GNTST_okay             (0)  /* Normal return.                        */
-+#define GNTST_general_error    (-1) /* General undefined error.              */
-+#define GNTST_bad_domain       (-2) /* Unrecognsed domain id.                */
-+#define GNTST_bad_gntref       (-3) /* Unrecognised or inappropriate gntref. */
-+#define GNTST_bad_handle       (-4) /* Unrecognised or inappropriate handle. */
-+#define GNTST_bad_virt_addr    (-5) /* Inappropriate virtual address to map. */
-+#define GNTST_bad_dev_addr     (-6) /* Inappropriate device address to unmap.*/
-+#define GNTST_no_device_space  (-7) /* Out of space in I/O MMU.              */
-+#define GNTST_permission_denied (-8) /* Not enough privilege for operation.  */
-+#define GNTST_bad_page         (-9) /* Specified page was invalid for op.    */
-+#define GNTST_bad_copy_arg    (-10) /* copy arguments cross page boundary */
++struct xenfb_page
++{
++    uint32_t in_cons, in_prod;
++    uint32_t out_cons, out_prod;
 +
-+#define GNTTABOP_error_msgs {                   \
-+    "okay",                                     \
-+    "undefined error",                          \
-+    "unrecognised domain id",                   \
-+    "invalid grant reference",                  \
-+    "invalid mapping handle",                   \
-+    "invalid virtual address",                  \
-+    "invalid device address",                   \
-+    "no spare translation slot in the I/O MMU", \
-+    "permission denied",                        \
-+    "bad page",                                 \
-+    "copy arguments cross page boundary"        \
-+}
++    int32_t width;          /* the width of the framebuffer (in pixels) */
++    int32_t height;         /* the height of the framebuffer (in pixels) */
++    uint32_t line_length;   /* the length of a row of pixels (in bytes) */
++    uint32_t mem_length;    /* the length of the framebuffer (in bytes) */
++    uint8_t depth;          /* the depth of a pixel (in bits) */
++
++    /*
++     * Framebuffer page directory
++     *
++     * Each directory page holds PAGE_SIZE / sizeof(*pd)
++     * framebuffer pages, and can thus map up to PAGE_SIZE *
++     * PAGE_SIZE / sizeof(*pd) bytes.  With PAGE_SIZE == 4096 and
++     * sizeof(unsigned long) == 4/8, that is 4 MiB per directory
++     * page on 32 bit and 2 MiB on 64 bit.  256 directories give
++     * enough room for a 512 MiB framebuffer with a maximum
++     * resolution of 12,800x10,240.  Should be enough for a while,
++     * with room left over for expansion.
++     */
++    unsigned long pd[256];
++};
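
An illustrative sketch (not part of the patch) of how a frontend might
produce an update event with the out-ring macros above; it assumes
`page` maps the shared page and that a write barrier (spelled xen_wmb()
here, as in ring.h below) is available:

    static void xenfb_send_update(struct xenfb_page *page,
                                  int x, int y, int w, int h)
    {
        union xenfb_out_event *event =
            &XENFB_OUT_RING_REF(page, page->out_prod);

        /* Caller must first check the ring is not full:
         * page->out_prod - page->out_cons < XENFB_OUT_RING_LEN. */
        event->update.type = XENFB_TYPE_UPDATE;
        event->update.x = x;
        event->update.y = y;
        event->update.width = w;
        event->update.height = h;

        xen_wmb();          /* complete the event before... */
        page->out_prod++;   /* ...publishing the producer index */
    }
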
 +
-+#endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */
++/*
++ * Wart: xenkbd needs to know default resolution.  Put it here until a
++ * better solution is found, but don't leak it to the backend.
++ */
++#ifdef __KERNEL__
++#define XENFB_WIDTH 800
++#define XENFB_HEIGHT 600
++#define XENFB_DEPTH 32
++#endif
++
++#endif
 +
 +/*
 + * Local variables:
@@ -100236,12 +139049,15 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/hvm/e820.h tmp-linux-2.6-xen.patch/include/xen/interface/hvm/e820.h
---- pristine-linux-2.6.18.2/include/xen/interface/hvm/e820.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/hvm/e820.h	2007-09-30 18:06:19.000000000 +0200
-@@ -0,0 +1,34 @@
-+
-+/*
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/io/fsif.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/io/fsif.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,191 @@
++/******************************************************************************
++ * fsif.h
++ * 
++ * Interface to FS level split device drivers.
++ * 
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
 + * deal in the Software without restriction, including without limitation the
@@ -100259,29 +139075,181 @@
 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2007, Grzegorz Milos, <gm281 at cam.ac.uk>.
 + */
 +
-+#ifndef __XEN_PUBLIC_HVM_E820_H__
-+#define __XEN_PUBLIC_HVM_E820_H__
++#ifndef __XEN_PUBLIC_IO_FSIF_H__
++#define __XEN_PUBLIC_IO_FSIF_H__
 +
-+/* E820 location in HVM virtual address space. */
-+#define HVM_E820_PAGE        0x00090000
-+#define HVM_E820_NR_OFFSET   0x000001E8
-+#define HVM_E820_OFFSET      0x000002D0
++#include "ring.h"
++#include "../grant_table.h"
 +
-+#define HVM_BELOW_4G_RAM_END        0xF0000000
-+#define HVM_BELOW_4G_MMIO_START     HVM_BELOW_4G_RAM_END
-+#define HVM_BELOW_4G_MMIO_LENGTH    ((1ULL << 32) - HVM_BELOW_4G_MMIO_START)
++#define REQ_FILE_OPEN        1
++#define REQ_FILE_CLOSE       2
++#define REQ_FILE_READ        3
++#define REQ_FILE_WRITE       4
++#define REQ_STAT             5
++#define REQ_FILE_TRUNCATE    6
++#define REQ_REMOVE           7
++#define REQ_RENAME           8
++#define REQ_CREATE           9
++#define REQ_DIR_LIST        10
++#define REQ_CHMOD           11
++#define REQ_FS_SPACE        12
++#define REQ_FILE_SYNC       13
 +
-+#endif /* __XEN_PUBLIC_HVM_E820_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/hvm/hvm_info_table.h tmp-linux-2.6-xen.patch/include/xen/interface/hvm/hvm_info_table.h
---- pristine-linux-2.6.18.2/include/xen/interface/hvm/hvm_info_table.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/hvm/hvm_info_table.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,41 @@
-+/******************************************************************************
-+ * hvm/hvm_info_table.h
-+ * 
-+ * HVM parameter and information table, written into guest memory map.
++struct fsif_open_request {
++    grant_ref_t gref;
++};
++
++struct fsif_close_request {
++    uint32_t fd;
++};
++
++struct fsif_read_request {
++    uint32_t fd;
++    int32_t pad;
++    uint64_t len;
++    uint64_t offset;
++    grant_ref_t grefs[1];  /* Variable length */
++};
++
++struct fsif_write_request {
++    uint32_t fd;
++    int32_t pad;
++    uint64_t len;
++    uint64_t offset;
++    grant_ref_t grefs[1];  /* Variable length */
++};
++
++struct fsif_stat_request {
++    uint32_t fd;
++};
++
++/* This structure holds a copy of some fields of the stat structure,
++ * returned via the ring. */
++struct fsif_stat_response {
++    int32_t  stat_mode;
++    uint32_t stat_uid;
++    uint32_t stat_gid;
++    int32_t  stat_ret;
++    int64_t  stat_size;
++    int64_t  stat_atime;
++    int64_t  stat_mtime;
++    int64_t  stat_ctime;
++};
++
++struct fsif_truncate_request {
++    uint32_t fd;
++    int32_t pad;
++    int64_t length;
++};
++
++struct fsif_remove_request {
++    grant_ref_t gref;
++};
++
++struct fsif_rename_request {
++    uint16_t old_name_offset;
++    uint16_t new_name_offset;
++    grant_ref_t gref;
++};
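
A sketch of how the two offsets are presumably used (map_granted_page()
is a hypothetical helper, not part of this interface): the granted page
carries both NUL-terminated paths, and the offsets locate them.

    char *page = map_granted_page(req->gref);           /* hypothetical */
    const char *old_name = page + req->old_name_offset;
    const char *new_name = page + req->new_name_offset;
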
++
++struct fsif_create_request {
++    int8_t directory;
++    int8_t pad;
++    int16_t pad2;
++    int32_t mode;
++    grant_ref_t gref;
++};
++
++struct fsif_list_request {
++    uint32_t offset;
++    grant_ref_t gref;
++};
++
++/* Packing of the 64-bit ret_val returned for a REQ_DIR_LIST request. */
++#define NR_FILES_SHIFT  0
++#define NR_FILES_SIZE   16   /* 16-bit field holding the number of files */
++#define NR_FILES_MASK   (((1ULL << NR_FILES_SIZE) - 1) << NR_FILES_SHIFT)
++#define ERROR_SIZE      32   /* 32-bit field holding the error code */
++#define ERROR_SHIFT     (NR_FILES_SIZE + NR_FILES_SHIFT)
++#define ERROR_MASK      (((1ULL << ERROR_SIZE) - 1) << ERROR_SHIFT)
++#define HAS_MORE_SHIFT  (ERROR_SHIFT + ERROR_SIZE)
++#define HAS_MORE_FLAG   (1ULL << HAS_MORE_SHIFT)
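
Given that packing, a frontend could unpack a directory-list response
along these lines (a sketch; `rsp` points at the struct fsif_response
defined further down):

    uint64_t ret      = rsp->ret_val;
    uint32_t nr_files = (ret & NR_FILES_MASK) >> NR_FILES_SHIFT;
    int32_t  error    = (int32_t)((ret & ERROR_MASK) >> ERROR_SHIFT);
    int      has_more = !!(ret & HAS_MORE_FLAG);
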
++
++struct fsif_chmod_request {
++    uint32_t fd;
++    int32_t mode;
++};
++
++struct fsif_space_request {
++    grant_ref_t gref;
++};
++
++struct fsif_sync_request {
++    uint32_t fd;
++};
++
++
++/* FS operation request */
++struct fsif_request {
++    uint8_t type;                 /* Type of the request                  */
++    uint8_t pad;
++    uint16_t id;                  /* Request ID, copied to the response   */
++    uint32_t pad2;
++    union {
++        struct fsif_open_request     fopen;
++        struct fsif_close_request    fclose;
++        struct fsif_read_request     fread;
++        struct fsif_write_request    fwrite;
++        struct fsif_stat_request     fstat;
++        struct fsif_truncate_request ftruncate;
++        struct fsif_remove_request   fremove;
++        struct fsif_rename_request   frename;
++        struct fsif_create_request   fcreate;
++        struct fsif_list_request     flist;
++        struct fsif_chmod_request    fchmod;
++        struct fsif_space_request    fspace;
++        struct fsif_sync_request     fsync;
++    } u;
++};
++typedef struct fsif_request fsif_request_t;
++
++/* FS operation response */
++struct fsif_response {
++    uint16_t id;
++    uint16_t pad1;
++    uint32_t pad2;
++    union {
++        uint64_t ret_val;
++        struct fsif_stat_response fstat;
++    };
++};
++
++typedef struct fsif_response fsif_response_t;
++
++#define FSIF_RING_ENTRY_SIZE   64
++
++#define FSIF_NR_READ_GNTS  ((FSIF_RING_ENTRY_SIZE - sizeof(struct fsif_read_request)) /  \
++                                sizeof(grant_ref_t) + 1)
++#define FSIF_NR_WRITE_GNTS ((FSIF_RING_ENTRY_SIZE - sizeof(struct fsif_write_request)) / \
++                                sizeof(grant_ref_t) + 1)
++
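A worked example of the two macros above (assuming a typical ABI where
the read/write request structs pad to 32 bytes and grant_ref_t is 4
bytes):

    /* (64 - 32) / 4 + 1 = 9: one 64-byte ring entry carries up to 9
     * grant references, i.e. up to 9 granted pages per read or write. */
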
++DEFINE_RING_TYPES(fsif, struct fsif_request, struct fsif_response);
++
++#define STATE_INITIALISED     "init"
++#define STATE_READY           "ready"
++
++#endif
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/io/kbdif.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/io/kbdif.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,132 @@
++/*
++ * kbdif.h -- Xen virtual keyboard/mouse
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
@@ -100300,30 +139268,127 @@
 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (C) 2005 Anthony Liguori <aliguori at us.ibm.com>
++ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru at redhat.com>
 + */
 +
-+#ifndef __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__
-+#define __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__
++#ifndef __XEN_PUBLIC_IO_KBDIF_H__
++#define __XEN_PUBLIC_IO_KBDIF_H__
 +
-+#define HVM_INFO_PFN         0x09F
-+#define HVM_INFO_OFFSET      0x800
-+#define HVM_INFO_PADDR       ((HVM_INFO_PFN << 12) + HVM_INFO_OFFSET)
++/* In events (backend -> frontend) */
 +
-+struct hvm_info_table {
-+    char        signature[8]; /* "HVM INFO" */
-+    uint32_t    length;
-+    uint8_t     checksum;
-+    uint8_t     acpi_enabled;
-+    uint8_t     apic_mode;
-+    uint32_t    nr_vcpus;
++/*
++ * Frontends should ignore unknown in events.
++ */
++
++/* Pointer movement event */
++#define XENKBD_TYPE_MOTION  1
++/* Event type 2 currently not used */
++/* Key event (includes pointer buttons) */
++#define XENKBD_TYPE_KEY     3
++/*
++ * Pointer position event
++ * Capable backend sets feature-abs-pointer in xenstore.
++ * Frontend requests it instead of XENKBD_TYPE_MOTION by setting
++ * request-abs-update in xenstore.
++ */
++#define XENKBD_TYPE_POS     4
++
++struct xenkbd_motion
++{
++    uint8_t type;        /* XENKBD_TYPE_MOTION */
++    int32_t rel_x;       /* relative X motion */
++    int32_t rel_y;       /* relative Y motion */
++    int32_t rel_z;       /* relative Z motion (wheel) */
 +};
 +
-+#endif /* __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/hvm/hvm_op.h tmp-linux-2.6-xen.patch/include/xen/interface/hvm/hvm_op.h
---- pristine-linux-2.6.18.2/include/xen/interface/hvm/hvm_op.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/hvm/hvm_op.h	2007-09-30 18:06:19.000000000 +0200
-@@ -0,0 +1,76 @@
++struct xenkbd_key
++{
++    uint8_t type;         /* XENKBD_TYPE_KEY */
++    uint8_t pressed;      /* 1 if pressed; 0 otherwise */
++    uint32_t keycode;     /* KEY_* from linux/input.h */
++};
++
++struct xenkbd_position
++{
++    uint8_t type;        /* XENKBD_TYPE_POS */
++    int32_t abs_x;       /* absolute X position (in FB pixels) */
++    int32_t abs_y;       /* absolute Y position (in FB pixels) */
++    int32_t rel_z;       /* relative Z motion (wheel) */
++};
++
++#define XENKBD_IN_EVENT_SIZE 40
++
++union xenkbd_in_event
++{
++    uint8_t type;
++    struct xenkbd_motion motion;
++    struct xenkbd_key key;
++    struct xenkbd_position pos;
++    char pad[XENKBD_IN_EVENT_SIZE];
++};
++
++/* Out events (frontend -> backend) */
++
++/*
++ * Out events may be sent only when requested by backend, and receipt
++ * of an unknown out event is an error.
++ * No out events currently defined.
++ */
++
++#define XENKBD_OUT_EVENT_SIZE 40
++
++union xenkbd_out_event
++{
++    uint8_t type;
++    char pad[XENKBD_OUT_EVENT_SIZE];
++};
++
++/* shared page */
++
++#define XENKBD_IN_RING_SIZE 2048
++#define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE)
++#define XENKBD_IN_RING_OFFS 1024
++#define XENKBD_IN_RING(page) \
++    ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS))
++#define XENKBD_IN_RING_REF(page, idx) \
++    (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN])
++
++#define XENKBD_OUT_RING_SIZE 1024
++#define XENKBD_OUT_RING_LEN (XENKBD_OUT_RING_SIZE / XENKBD_OUT_EVENT_SIZE)
++#define XENKBD_OUT_RING_OFFS (XENKBD_IN_RING_OFFS + XENKBD_IN_RING_SIZE)
++#define XENKBD_OUT_RING(page) \
++    ((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS))
++#define XENKBD_OUT_RING_REF(page, idx) \
++    (XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN])
++
++struct xenkbd_page
++{
++    uint32_t in_cons, in_prod;
++    uint32_t out_cons, out_prod;
++};
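
An illustrative sketch (not part of the patch) of a frontend draining
the in ring, assuming `page` maps the shared page and that rmb()/mb()
barrier macros are available:

    static void xenkbd_handle_input(struct xenkbd_page *page)
    {
        uint32_t cons = page->in_cons;

        while (cons != page->in_prod) {
            union xenkbd_in_event *event;

            rmb();      /* see the producer index before the event */
            event = &XENKBD_IN_RING_REF(page, cons);
            switch (event->type) {
            case XENKBD_TYPE_MOTION:
                /* event->motion.rel_x / rel_y / rel_z */
                break;
            case XENKBD_TYPE_KEY:
                /* event->key.keycode, event->key.pressed */
                break;
            case XENKBD_TYPE_POS:
                /* event->pos.abs_x / abs_y */
                break;
            default:
                break;  /* unknown in events are ignored */
            }
            cons++;
        }
        mb();           /* finish reading events before freeing slots */
        page->in_cons = cons;
    }
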
++
++#endif
++
 +/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/io/netif.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/io/netif.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,205 @@
++/******************************************************************************
++ * netif.h
++ * 
++ * Unified network-device I/O interface for Xen guest OSes.
++ * 
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
 + * deal in the Software without restriction, including without limitation the
@@ -100341,72 +139406,196 @@
 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2003-2004, Keir Fraser
 + */
 +
-+#ifndef __XEN_PUBLIC_HVM_HVM_OP_H__
-+#define __XEN_PUBLIC_HVM_HVM_OP_H__
++#ifndef __XEN_PUBLIC_IO_NETIF_H__
++#define __XEN_PUBLIC_IO_NETIF_H__
 +
-+/* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
-+#define HVMOP_set_param           0
-+#define HVMOP_get_param           1
-+struct xen_hvm_param {
-+    domid_t  domid;    /* IN */
-+    uint32_t index;    /* IN */
-+    uint64_t value;    /* IN/OUT */
++#include "ring.h"
++#include "../grant_table.h"
++
++/*
++ * Notifications after enqueuing any type of message should be conditional on
++ * the appropriate req_event or rsp_event field in the shared ring.
++ * If the client sends notification for rx requests then it should specify
++ * feature 'feature-rx-notify' via xenbus. Otherwise the backend will assume
++ * that it cannot safely queue packets (as it may not be kicked to send them).
++ */
++
++/*
++ * This is the 'wire' format for packets:
++ *  Request 1: netif_tx_request -- NETTXF_* (any flags)
++ * [Request 2: netif_tx_extra]  (only if request 1 has NETTXF_extra_info)
++ * [Request 3: netif_tx_extra]  (only if request 2 has XEN_NETIF_EXTRA_MORE)
++ *  Request 4: netif_tx_request -- NETTXF_more_data
++ *  Request 5: netif_tx_request -- NETTXF_more_data
++ *  ...
++ *  Request N: netif_tx_request -- 0
++ */
++
++/* Protocol checksum field is blank in the packet (hardware offload)? */
++#define _NETTXF_csum_blank     (0)
++#define  NETTXF_csum_blank     (1U<<_NETTXF_csum_blank)
++
++/* Packet data has been validated against protocol checksum. */
++#define _NETTXF_data_validated (1)
++#define  NETTXF_data_validated (1U<<_NETTXF_data_validated)
++
++/* Packet continues in the next request descriptor. */
++#define _NETTXF_more_data      (2)
++#define  NETTXF_more_data      (1U<<_NETTXF_more_data)
++
++/* Packet to be followed by extra descriptor(s). */
++#define _NETTXF_extra_info     (3)
++#define  NETTXF_extra_info     (1U<<_NETTXF_extra_info)
++
++struct netif_tx_request {
++    grant_ref_t gref;      /* Reference to buffer page */
++    uint16_t offset;       /* Offset within buffer page */
++    uint16_t flags;        /* NETTXF_* */
++    uint16_t id;           /* Echoed in response message. */
++    uint16_t size;         /* Packet size in bytes.       */
 +};
-+typedef struct xen_hvm_param xen_hvm_param_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t);
++typedef struct netif_tx_request netif_tx_request_t;
 +
-+/* Set the logical level of one of a domain's PCI INTx wires. */
-+#define HVMOP_set_pci_intx_level  2
-+struct xen_hvm_set_pci_intx_level {
-+    /* Domain to be updated. */
-+    domid_t  domid;
-+    /* PCI INTx identification in PCI topology (domain:bus:device:intx). */
-+    uint8_t  domain, bus, device, intx;
-+    /* Assertion level (0 = unasserted, 1 = asserted). */
-+    uint8_t  level;
++/* Types of netif_extra_info descriptors. */
++#define XEN_NETIF_EXTRA_TYPE_NONE      (0)  /* Never used - invalid */
++#define XEN_NETIF_EXTRA_TYPE_GSO       (1)  /* u.gso */
++#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2)  /* u.mcast */
++#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3)  /* u.mcast */
++#define XEN_NETIF_EXTRA_TYPE_MAX       (4)
++
++/* netif_extra_info flags. */
++#define _XEN_NETIF_EXTRA_FLAG_MORE (0)
++#define XEN_NETIF_EXTRA_FLAG_MORE  (1U<<_XEN_NETIF_EXTRA_FLAG_MORE)
++
++/* GSO types - only TCPv4 currently supported. */
++#define XEN_NETIF_GSO_TYPE_TCPV4        (1)
++
++/*
++ * This structure needs to fit within both netif_tx_request and
++ * netif_rx_response for compatibility.
++ */
++struct netif_extra_info {
++    uint8_t type;  /* XEN_NETIF_EXTRA_TYPE_* */
++    uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */
++
++    union {
++        /*
++         * XEN_NETIF_EXTRA_TYPE_GSO:
++         */
++        struct {
++            /*
++             * Maximum payload size of each segment. For example, for TCP this
++             * is just the path MSS.
++             */
++            uint16_t size;
++
++            /*
++             * GSO type. This determines the protocol of the packet and any
++             * extra features required to segment the packet properly.
++             */
++            uint8_t type; /* XEN_NETIF_GSO_TYPE_* */
++
++            /* Future expansion. */
++            uint8_t pad;
++
++            /*
++             * GSO features. This specifies any extra GSO features required
++             * to process this packet, such as ECN support for TCPv4.
++             */
++            uint16_t features; /* XEN_NETIF_GSO_FEAT_* */
++        } gso;
++
++        /*
++         * XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}:
++         * Backend advertises availability via 'feature-multicast-control'
++         * xenbus node containing value '1'.
++         * Frontend requests this feature by advertising
++         * 'request-multicast-control' xenbus node containing value '1'.
++         * If multicast control is requested then multicast flooding is
++         * disabled and the frontend must explicitly register its interest
++         * in multicast groups using dummy transmit requests containing
++         * MCAST_{ADD,DEL} extra-info fragments.
++         */
++        struct {
++            uint8_t addr[6]; /* Address to add/remove. */
++        } mcast;
++
++        uint16_t pad[3];
++    } u;
 +};
-+typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t);
++typedef struct netif_extra_info netif_extra_info_t;
 +
-+/* Set the logical level of one of a domain's ISA IRQ wires. */
-+#define HVMOP_set_isa_irq_level   3
-+struct xen_hvm_set_isa_irq_level {
-+    /* Domain to be updated. */
-+    domid_t  domid;
-+    /* ISA device identification, by ISA IRQ (0-15). */
-+    uint8_t  isa_irq;
-+    /* Assertion level (0 = unasserted, 1 = asserted). */
-+    uint8_t  level;
++struct netif_tx_response {
++    uint16_t id;
++    int16_t  status;       /* NETIF_RSP_* */
 +};
-+typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t);
++typedef struct netif_tx_response netif_tx_response_t;
 +
-+#define HVMOP_set_pci_link_route  4
-+struct xen_hvm_set_pci_link_route {
-+    /* Domain to be updated. */
-+    domid_t  domid;
-+    /* PCI link identifier (0-3). */
-+    uint8_t  link;
-+    /* ISA IRQ (1-15), or 0 (disable link). */
-+    uint8_t  isa_irq;
++struct netif_rx_request {
++    uint16_t    id;        /* Echoed in response message.        */
++    grant_ref_t gref;      /* Reference to incoming granted frame */
 +};
-+typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);
++typedef struct netif_rx_request netif_rx_request_t;
 +
-+/* Flushes all VCPU TLBs: @arg must be NULL. */
-+#define HVMOP_flush_tlbs          5
++/* Packet data has been validated against protocol checksum. */
++#define _NETRXF_data_validated (0)
++#define  NETRXF_data_validated (1U<<_NETRXF_data_validated)
++
++/* Protocol checksum field is blank in the packet (hardware offload)? */
++#define _NETRXF_csum_blank     (1)
++#define  NETRXF_csum_blank     (1U<<_NETRXF_csum_blank)
++
++/* Packet continues in the next request descriptor. */
++#define _NETRXF_more_data      (2)
++#define  NETRXF_more_data      (1U<<_NETRXF_more_data)
++
++/* Packet to be followed by extra descriptor(s). */
++#define _NETRXF_extra_info     (3)
++#define  NETRXF_extra_info     (1U<<_NETRXF_extra_info)
++
++struct netif_rx_response {
++    uint16_t id;
++    uint16_t offset;       /* Offset in page of start of received packet  */
++    uint16_t flags;        /* NETRXF_* */
++    int16_t  status;       /* -ve: NETIF_RSP_* ; +ve: Rx'ed pkt size. */
++};
++typedef struct netif_rx_response netif_rx_response_t;
 +
-+#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/hvm/ioreq.h tmp-linux-2.6-xen.patch/include/xen/interface/hvm/ioreq.h
---- pristine-linux-2.6.18.2/include/xen/interface/hvm/ioreq.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/hvm/ioreq.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,122 @@
 +/*
-+ * ioreq.h: I/O request definitions for device models
-+ * Copyright (c) 2004, Intel Corporation.
-+ * 
++ * Generate netif ring structures and types.
++ */
++
++DEFINE_RING_TYPES(netif_tx, struct netif_tx_request, struct netif_tx_response);
++DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response);
++
++#define NETIF_RSP_DROPPED         -2
++#define NETIF_RSP_ERROR           -1
++#define NETIF_RSP_OKAY             0
++/* No response: used for auxiliary requests (e.g., netif_tx_extra). */
++#define NETIF_RSP_NULL             1
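
An illustrative sketch (an assumption, not part of the patch) of
queueing a single-slot transmit request on the front ring;
RING_GET_REQUEST() and RING_PUSH_REQUESTS() are accessor macros defined
later in ring.h.

    static void netfront_queue_tx(netif_tx_front_ring_t *ring,
                                  grant_ref_t gref, uint16_t offset,
                                  uint16_t size, uint16_t id)
    {
        netif_tx_request_t *req =
            RING_GET_REQUEST(ring, ring->req_prod_pvt++);

        req->gref   = gref;    /* granted buffer page */
        req->offset = offset;  /* packet start within that page */
        req->flags  = 0;       /* single slot: no NETTXF_more_data */
        req->id     = id;      /* echoed back in netif_tx_response */
        req->size   = size;    /* total packet size in bytes */

        /* A later RING_PUSH_REQUESTS() makes the request visible. */
    }
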
++
++#endif
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/io/pciif.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/io/pciif.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,101 @@
++/*
++ * PCI Backend/Frontend Common Data Structures & Macros
++ *
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
 + * deal in the Software without restriction, including without limitation the
@@ -100424,97 +139613,77 @@
 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
++ *
++ *   Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
 + */
++#ifndef __XEN_PCI_COMMON_H__
++#define __XEN_PCI_COMMON_H__
 +
-+#ifndef _IOREQ_H_
-+#define _IOREQ_H_
++/* Be sure to bump this number if you change this file */
++#define XEN_PCI_MAGIC "7"
 +
-+#define IOREQ_READ      1
-+#define IOREQ_WRITE     0
++/* xen_pci_sharedinfo flags */
++#define _XEN_PCIF_active     (0)
++#define XEN_PCIF_active      (1<<_XEN_PCIF_active)
 +
-+#define STATE_IOREQ_NONE        0
-+#define STATE_IOREQ_READY       1
-+#define STATE_IOREQ_INPROCESS   2
-+#define STATE_IORESP_READY      3
++/* xen_pci_op commands */
++#define XEN_PCI_OP_conf_read    (0)
++#define XEN_PCI_OP_conf_write   (1)
++#define XEN_PCI_OP_enable_msi   (2)
++#define XEN_PCI_OP_disable_msi  (3)
++#define XEN_PCI_OP_enable_msix  (4)
++#define XEN_PCI_OP_disable_msix (5)
 +
-+#define IOREQ_TYPE_PIO          0 /* pio */
-+#define IOREQ_TYPE_COPY         1 /* mmio ops */
-+#define IOREQ_TYPE_AND          2
-+#define IOREQ_TYPE_OR           3
-+#define IOREQ_TYPE_XOR          4
-+#define IOREQ_TYPE_XCHG         5
-+#define IOREQ_TYPE_ADD          6
-+#define IOREQ_TYPE_TIMEOFFSET   7
-+#define IOREQ_TYPE_INVALIDATE   8 /* mapcache */
-+#define IOREQ_TYPE_SUB          9
++/* xen_pci_op error numbers */
++#define XEN_PCI_ERR_success          (0)
++#define XEN_PCI_ERR_dev_not_found   (-1)
++#define XEN_PCI_ERR_invalid_offset  (-2)
++#define XEN_PCI_ERR_access_denied   (-3)
++#define XEN_PCI_ERR_not_implemented (-4)
++/* XEN_PCI_ERR_op_failed - backend failed to complete the operation */
++#define XEN_PCI_ERR_op_failed       (-5)
 +
 +/*
-+ * VMExit dispatcher should cooperate with instruction decoder to
-+ * prepare this structure and notify service OS and DM by sending
-+ * virq
++ * It should be (PAGE_SIZE - sizeof(struct xen_pci_op)) / sizeof(struct xen_msix_entry).
++ * Should not exceed 128.
 + */
-+struct ioreq {
-+    uint64_t addr;          /*  physical address            */
-+    uint64_t size;          /*  size in bytes               */
-+    uint64_t count;         /*  for rep prefixes            */
-+    uint64_t data;          /*  data (or paddr of data)     */
-+    uint8_t state:4;
-+    uint8_t data_is_ptr:1;  /*  if 1, data above is the guest paddr 
-+                             *   of the real data to use.   */
-+    uint8_t dir:1;          /*  1=read, 0=write             */
-+    uint8_t df:1;
-+    uint8_t type;           /* I/O type                     */
-+    uint8_t _pad0[6];
-+    uint64_t io_count;      /* How many IO done on a vcpu   */
-+};
-+typedef struct ioreq ioreq_t;
++#define SH_INFO_MAX_VEC     128
 +
-+struct vcpu_iodata {
-+    struct ioreq vp_ioreq;
-+    /* Event channel port, used for notifications to/from the device model. */
-+    uint32_t vp_eport;
-+    uint32_t _pad0;
++struct xen_msix_entry {
++    uint16_t vector;
++    uint16_t entry;
 +};
-+typedef struct vcpu_iodata vcpu_iodata_t;
++struct xen_pci_op {
++    /* IN: what action to perform: XEN_PCI_OP_* */
++    uint32_t cmd;
 +
-+struct shared_iopage {
-+    struct vcpu_iodata   vcpu_iodata[1];
-+};
-+typedef struct shared_iopage shared_iopage_t;
++    /* OUT: will contain an error number (if any) from errno.h */
++    int32_t err;
 +
-+#define IOREQ_BUFFER_SLOT_NUM     80
-+struct buffered_iopage {
-+    unsigned int    read_pointer;
-+    unsigned int    write_pointer;
-+    ioreq_t         ioreq[IOREQ_BUFFER_SLOT_NUM];
-+}; /* NB. Size of this structure must be no greater than one page. */
-+typedef struct buffered_iopage buffered_iopage_t;
++    /* IN: which device to touch */
++    uint32_t domain; /* PCI Domain/Segment */
++    uint32_t bus;
++    uint32_t devfn;
 +
-+#if defined(__ia64__)
-+struct pio_buffer {
-+    uint32_t page_offset;
-+    uint32_t pointer;
-+    uint32_t data_end;
-+    uint32_t buf_size;
-+    void *opaque;
-+};
++    /* IN: which configuration registers to touch */
++    int32_t offset;
++    int32_t size;
 +
-+#define PIO_BUFFER_IDE_PRIMARY   0 /* I/O port = 0x1F0 */
-+#define PIO_BUFFER_IDE_SECONDARY 1 /* I/O port = 0x170 */
-+#define PIO_BUFFER_ENTRY_NUM     2
-+struct buffered_piopage {
-+    struct pio_buffer pio[PIO_BUFFER_ENTRY_NUM];
-+    uint8_t buffer[1];
++    /* IN/OUT: Contains the result after a READ or the value to WRITE */
++    uint32_t value;
++    /* IN: contains extra info for this operation */
++    uint32_t info;
++    /* IN: parameters for MSI-X */
++    struct xen_msix_entry msix_entries[SH_INFO_MAX_VEC];
 +};
-+#endif /* defined(__ia64__) */
 +
-+#if defined(__i386__) || defined(__x86_64__)
-+#define ACPI_PM1A_EVT_BLK_ADDRESS           0x0000000000001f40
-+#define ACPI_PM1A_CNT_BLK_ADDRESS           (ACPI_PM1A_EVT_BLK_ADDRESS + 0x04)
-+#define ACPI_PM_TMR_BLK_ADDRESS             (ACPI_PM1A_EVT_BLK_ADDRESS + 0x08)
-+#endif /* defined(__i386__) || defined(__x86_64__) */
++struct xen_pci_sharedinfo {
++    /* flags - XEN_PCIF_* */
++    uint32_t flags;
++    struct xen_pci_op op;
++};
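
An illustrative sketch (an assumption, not part of the patch) of a
frontend filling in a configuration-space read of a device's 16-bit
vendor ID:

    static void pcifront_prepare_vendor_read(struct xen_pci_op *op,
                                             uint32_t bus, uint32_t devfn)
    {
        op->cmd    = XEN_PCI_OP_conf_read;
        op->domain = 0;     /* PCI segment */
        op->bus    = bus;
        op->devfn  = devfn;
        op->offset = 0x00;  /* vendor ID lives at config offset 0 */
        op->size   = 2;     /* 16-bit access */
        op->value  = 0;     /* result is returned here on success */
    }
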
 +
-+#endif /* _IOREQ_H_ */
++#endif /* __XEN_PCI_COMMON_H__ */
 +
 +/*
 + * Local variables:
@@ -100525,11 +139694,13 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/hvm/params.h tmp-linux-2.6-xen.patch/include/xen/interface/hvm/params.h
---- pristine-linux-2.6.18.2/include/xen/interface/hvm/params.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/hvm/params.h	2007-09-30 18:06:19.000000000 +0200
-@@ -0,0 +1,60 @@
-+/*
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/io/protocols.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/io/protocols.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,40 @@
++/******************************************************************************
++ * protocols.h
++ * 
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
 + * deal in the Software without restriction, including without limitation the
@@ -100549,58 +139720,32 @@
 + * DEALINGS IN THE SOFTWARE.
 + */
 +
-+#ifndef __XEN_PUBLIC_HVM_PARAMS_H__
-+#define __XEN_PUBLIC_HVM_PARAMS_H__
-+
-+#include "hvm_op.h"
-+
-+/*
-+ * Parameter space for HVMOP_{set,get}_param.
-+ */
-+
-+/*
-+ * How should CPU0 event-channel notifications be delivered?
-+ * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt).
-+ * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows:
-+ *                  Domain = val[47:32], Bus  = val[31:16],
-+ *                  DevFn  = val[15: 8], IntX = val[ 1: 0]
-+ * If val == 0 then CPU0 event-channel notifications are not delivered.
-+ */
-+#define HVM_PARAM_CALLBACK_IRQ 0
-+
-+/*
-+ * These are not used by Xen. They are here for convenience of HVM-guest
-+ * xenbus implementations.
-+ */
-+#define HVM_PARAM_STORE_PFN    1
-+#define HVM_PARAM_STORE_EVTCHN 2
-+
-+#define HVM_PARAM_PAE_ENABLED  4
-+
-+#define HVM_PARAM_IOREQ_PFN    5
++#ifndef __XEN_PROTOCOLS_H__
++#define __XEN_PROTOCOLS_H__
 +
-+#define HVM_PARAM_BUFIOREQ_PFN 6
++#define XEN_IO_PROTO_ABI_X86_32     "x86_32-abi"
++#define XEN_IO_PROTO_ABI_X86_64     "x86_64-abi"
++#define XEN_IO_PROTO_ABI_IA64       "ia64-abi"
 +
-+#ifdef __ia64__
-+#define HVM_PARAM_NVRAM_FD     7
-+#define HVM_NR_PARAMS          8
++#if defined(__i386__)
++# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32
++#elif defined(__x86_64__)
++# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64
++#elif defined(__ia64__)
++# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64
 +#else
-+#define HVM_NR_PARAMS          7
++# error arch fixup needed here
 +#endif
 +
-+#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/hvm/save.h tmp-linux-2.6-xen.patch/include/xen/interface/hvm/save.h
---- pristine-linux-2.6.18.2/include/xen/interface/hvm/save.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/hvm/save.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,462 @@
-+/* 
-+ * hvm/save.h
-+ *
-+ * Structure definitions for HVM state that is held by Xen and must
-+ * be saved along with the domain's memory and device-model state.
-+ *
++#endif
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/io/ring.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/io/ring.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,307 @@
++/******************************************************************************
++ * ring.h
 + * 
-+ * Copyright (c) 2007 XenSource Ltd.
++ * Shared producer-consumer ring macros.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
@@ -100619,448 +139764,299 @@
 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
-+ */
-+
-+#ifndef __XEN_PUBLIC_HVM_SAVE_H__
-+#define __XEN_PUBLIC_HVM_SAVE_H__
-+
-+/*
-+ * Structures in this header *must* have the same layout in 32bit 
-+ * and 64bit environments: this means that all fields must be explicitly 
-+ * sized types and aligned to their sizes, and the structs must be 
-+ * a multiple of eight bytes long.
 + *
-+ * Only the state necessary for saving and restoring (i.e. fields 
-+ * that are analogous to actual hardware state) should go in this file. 
-+ * Internal mechanisms should be kept in Xen-private headers.
-+ */
-+
-+/* 
-+ * Each entry is preceded by a descriptor giving its type and length
-+ */
-+struct hvm_save_descriptor {
-+    uint16_t typecode;          /* Used to demux the various types below */
-+    uint16_t instance;          /* Further demux within a type */
-+    uint32_t length;            /* In bytes, *not* including this descriptor */
-+};
-+
-+
-+/* 
-+ * Each entry has a datatype associated with it: for example, the CPU state 
-+ * is saved as a HVM_SAVE_TYPE(CPU), which has HVM_SAVE_LENGTH(CPU), 
-+ * and is identified by a descriptor with typecode HVM_SAVE_CODE(CPU).
-+ * DECLARE_HVM_SAVE_TYPE binds these things together with some type-system
-+ * ugliness.
-+ */
-+
-+#define DECLARE_HVM_SAVE_TYPE(_x, _code, _type)                   \
-+  struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; }
-+
-+#define HVM_SAVE_TYPE(_x) typeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->t)
-+#define HVM_SAVE_LENGTH(_x) (sizeof (HVM_SAVE_TYPE(_x)))
-+#define HVM_SAVE_CODE(_x) (sizeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->c))
-+
-+
-+/* 
-+ * Save/restore header: general info about the save file. 
-+ */
-+
-+#define HVM_FILE_MAGIC   0x54381286
-+#define HVM_FILE_VERSION 0x00000001
-+
-+struct hvm_save_header {
-+    uint32_t magic;             /* Must be HVM_FILE_MAGIC */
-+    uint32_t version;           /* File format version */
-+    uint64_t changeset;         /* Version of Xen that saved this file */
-+    uint32_t cpuid;             /* CPUID[0x01][%eax] on the saving machine */
-+    uint32_t pad0;
-+};
-+
-+DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header);
-+
-+
-+/*
-+ * Processor
-+ */
-+
-+struct hvm_hw_cpu {
-+    uint8_t  fpu_regs[512];
-+
-+    uint64_t rax;
-+    uint64_t rbx;
-+    uint64_t rcx;
-+    uint64_t rdx;
-+    uint64_t rbp;
-+    uint64_t rsi;
-+    uint64_t rdi;
-+    uint64_t rsp;
-+    uint64_t r8;
-+    uint64_t r9;
-+    uint64_t r10;
-+    uint64_t r11;
-+    uint64_t r12;
-+    uint64_t r13;
-+    uint64_t r14;
-+    uint64_t r15;
-+
-+    uint64_t rip;
-+    uint64_t rflags;
-+
-+    uint64_t cr0;
-+    uint64_t cr2;
-+    uint64_t cr3;
-+    uint64_t cr4;
-+
-+    uint64_t dr0;
-+    uint64_t dr1;
-+    uint64_t dr2;
-+    uint64_t dr3;
-+    uint64_t dr6;
-+    uint64_t dr7;    
-+
-+    uint32_t cs_sel;
-+    uint32_t ds_sel;
-+    uint32_t es_sel;
-+    uint32_t fs_sel;
-+    uint32_t gs_sel;
-+    uint32_t ss_sel;
-+    uint32_t tr_sel;
-+    uint32_t ldtr_sel;
-+
-+    uint32_t cs_limit;
-+    uint32_t ds_limit;
-+    uint32_t es_limit;
-+    uint32_t fs_limit;
-+    uint32_t gs_limit;
-+    uint32_t ss_limit;
-+    uint32_t tr_limit;
-+    uint32_t ldtr_limit;
-+    uint32_t idtr_limit;
-+    uint32_t gdtr_limit;
-+
-+    uint64_t cs_base;
-+    uint64_t ds_base;
-+    uint64_t es_base;
-+    uint64_t fs_base;
-+    uint64_t gs_base;
-+    uint64_t ss_base;
-+    uint64_t tr_base;
-+    uint64_t ldtr_base;
-+    uint64_t idtr_base;
-+    uint64_t gdtr_base;
-+
-+    uint32_t cs_arbytes;
-+    uint32_t ds_arbytes;
-+    uint32_t es_arbytes;
-+    uint32_t fs_arbytes;
-+    uint32_t gs_arbytes;
-+    uint32_t ss_arbytes;
-+    uint32_t tr_arbytes;
-+    uint32_t ldtr_arbytes;
-+
-+    uint32_t sysenter_cs;
-+    uint32_t padding0;
-+
-+    uint64_t sysenter_esp;
-+    uint64_t sysenter_eip;
-+
-+    /* msr for em64t */
-+    uint64_t shadow_gs;
-+
-+    /* msr content saved/restored. */
-+    uint64_t msr_flags;
-+    uint64_t msr_lstar;
-+    uint64_t msr_star;
-+    uint64_t msr_cstar;
-+    uint64_t msr_syscall_mask;
-+    uint64_t msr_efer;
-+
-+    /* guest's idea of what rdtsc() would return */
-+    uint64_t tsc;
-+
-+    /* pending event, if any */
-+    union {
-+        uint32_t pending_event;
-+        struct {
-+            uint8_t  pending_vector:8;
-+            uint8_t  pending_type:3;
-+            uint8_t  pending_error_valid:1;
-+            uint32_t pending_reserved:19;
-+            uint8_t  pending_valid:1;
-+        };
-+    };
-+    /* error code for pending event */
-+    uint32_t error_code;
-+};
-+
-+DECLARE_HVM_SAVE_TYPE(CPU, 2, struct hvm_hw_cpu);
-+
-+
-+/*
-+ * PIC
++ * Tim Deegan and Andrew Warfield November 2004.
 + */
 +
-+struct hvm_hw_vpic {
-+    /* IR line bitmasks. */
-+    uint8_t irr;
-+    uint8_t imr;
-+    uint8_t isr;
-+
-+    /* Line IRx maps to IRQ irq_base+x */
-+    uint8_t irq_base;
-+
-+    /*
-+     * Where are we in ICW2-4 initialisation (0 means no init in progress)?
-+     * Bits 0-1 (=x): Next write at A=1 sets ICW(x+1).
-+     * Bit 2: ICW1.IC4  (1 == ICW4 included in init sequence)
-+     * Bit 3: ICW1.SNGL (0 == ICW3 included in init sequence)
-+     */
-+    uint8_t init_state:4;
-+
-+    /* IR line with highest priority. */
-+    uint8_t priority_add:4;
-+
-+    /* Reads from A=0 obtain ISR or IRR? */
-+    uint8_t readsel_isr:1;
-+
-+    /* Reads perform a polling read? */
-+    uint8_t poll:1;
-+
-+    /* Automatically clear IRQs from the ISR during INTA? */
-+    uint8_t auto_eoi:1;
-+
-+    /* Automatically rotate IRQ priorities during AEOI? */
-+    uint8_t rotate_on_auto_eoi:1;
-+
-+    /* Exclude slave inputs when considering in-service IRQs? */
-+    uint8_t special_fully_nested_mode:1;
-+
-+    /* Special mask mode excludes masked IRs from AEOI and priority checks. */
-+    uint8_t special_mask_mode:1;
-+
-+    /* Is this a master PIC or slave PIC? (NB. This is not programmable.) */
-+    uint8_t is_master:1;
++#ifndef __XEN_PUBLIC_IO_RING_H__
++#define __XEN_PUBLIC_IO_RING_H__
 +
-+    /* Edge/trigger selection. */
-+    uint8_t elcr;
++#include "../xen-compat.h"
 +
-+    /* Virtual INT output. */
-+    uint8_t int_output;
-+};
++#if __XEN_INTERFACE_VERSION__ < 0x00030208
++#define xen_mb()  mb()
++#define xen_rmb() rmb()
++#define xen_wmb() wmb()
++#endif
 +
-+DECLARE_HVM_SAVE_TYPE(PIC, 3, struct hvm_hw_vpic);
++typedef unsigned int RING_IDX;
 +
++/* Round a 32-bit unsigned constant down to the nearest power of two. */
++#define __RD2(_x)  (((_x) & 0x00000002) ? 0x2                  : ((_x) & 0x1))
++#define __RD4(_x)  (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2    : __RD2(_x))
++#define __RD8(_x)  (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4    : __RD4(_x))
++#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8    : __RD8(_x))
++#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))
 +
 +/*
-+ * IO-APIC
++ * Calculate size of a shared ring, given the total available space for the
++ * ring and indexes (_sz), and the name tag of the request/response structure.
++ * A ring contains as many entries as will fit, rounded down to the nearest 
++ * power of two (so we can mask with (size-1) to loop around).
 + */
-+
-+#ifdef __ia64__
-+#define VIOAPIC_IS_IOSAPIC 1
-+#define VIOAPIC_NUM_PINS  24
-+#else
-+#define VIOAPIC_NUM_PINS  48 /* 16 ISA IRQs, 32 non-legacy PCI IRQS. */
-+#endif
-+
-+struct hvm_hw_vioapic {
-+    uint64_t base_address;
-+    uint32_t ioregsel;
-+    uint32_t id;
-+    union vioapic_redir_entry
-+    {
-+        uint64_t bits;
-+        struct {
-+            uint8_t vector;
-+            uint8_t delivery_mode:3;
-+            uint8_t dest_mode:1;
-+            uint8_t delivery_status:1;
-+            uint8_t polarity:1;
-+            uint8_t remote_irr:1;
-+            uint8_t trig_mode:1;
-+            uint8_t mask:1;
-+            uint8_t reserve:7;
-+#if !VIOAPIC_IS_IOSAPIC
-+            uint8_t reserved[4];
-+            uint8_t dest_id;
-+#else
-+            uint8_t reserved[3];
-+            uint16_t dest_id;
-+#endif
-+        } fields;
-+    } redirtbl[VIOAPIC_NUM_PINS];
-+};
-+
-+DECLARE_HVM_SAVE_TYPE(IOAPIC, 4, struct hvm_hw_vioapic);
-+
++#define __RING_SIZE(_s, _sz) \
++    (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
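
A worked example of the size calculation (illustrative, assuming 4-byte
RING_IDX): the ring[] array of the shared ring declared below starts 64
bytes into the page (four index fields plus 48 pad bytes), so with a
4096-byte page and 64-byte entries:

    /* __RING_SIZE(s, 4096) = __RD32((4096 - 64) / 64)
     *                      = __RD32(63) = 32 entries */
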
 +
 +/*
-+ * LAPIC
++ * Macros to make the correct C datatypes for a new kind of ring.
++ * 
++ * To make a new ring datatype, you need to have two message structures,
++ * let's say request_t, and response_t already defined.
++ *
++ * In a header where you want the ring datatype declared, you then do:
++ *
++ *     DEFINE_RING_TYPES(mytag, request_t, response_t);
++ *
++ * These expand out to give you a set of types, as you can see below.
++ * The most important of these are:
++ * 
++ *     mytag_sring_t      - The shared ring.
++ *     mytag_front_ring_t - The 'front' half of the ring.
++ *     mytag_back_ring_t  - The 'back' half of the ring.
++ *
++ * To initialize a ring in your code you need to know the location and size
++ * of the shared memory area (PAGE_SIZE, for instance). To initialize
++ * the front half:
++ *
++ *     mytag_front_ring_t front_ring;
++ *     SHARED_RING_INIT((mytag_sring_t *)shared_page);
++ *     FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
++ *
++ * Initializing the back follows similarly (note that only the front
++ * initializes the shared ring):
++ *
++ *     mytag_back_ring_t back_ring;
++ *     BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
 + */
 +
-+struct hvm_hw_lapic {
-+    uint64_t             apic_base_msr;
-+    uint32_t             disabled; /* VLAPIC_xx_DISABLED */
-+    uint32_t             timer_divisor;
-+};
-+
-+DECLARE_HVM_SAVE_TYPE(LAPIC, 5, struct hvm_hw_lapic);
-+
-+struct hvm_hw_lapic_regs {
-+    /* A 4k page of register state */
-+    uint8_t  data[0x400];
-+};
-+
-+DECLARE_HVM_SAVE_TYPE(LAPIC_REGS, 6, struct hvm_hw_lapic_regs);
-+
++#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t)                     \
++                                                                        \
++/* Shared ring entry */                                                 \
++union __name##_sring_entry {                                            \
++    __req_t req;                                                        \
++    __rsp_t rsp;                                                        \
++};                                                                      \
++                                                                        \
++/* Shared ring page */                                                  \
++struct __name##_sring {                                                 \
++    RING_IDX req_prod, req_event;                                       \
++    RING_IDX rsp_prod, rsp_event;                                       \
++    uint8_t  pad[48];                                                   \
++    union __name##_sring_entry ring[1]; /* variable-length */           \
++};                                                                      \
++                                                                        \
++/* "Front" end's private variables */                                   \
++struct __name##_front_ring {                                            \
++    RING_IDX req_prod_pvt;                                              \
++    RING_IDX rsp_cons;                                                  \
++    unsigned int nr_ents;                                               \
++    struct __name##_sring *sring;                                       \
++};                                                                      \
++                                                                        \
++/* "Back" end's private variables */                                    \
++struct __name##_back_ring {                                             \
++    RING_IDX rsp_prod_pvt;                                              \
++    RING_IDX req_cons;                                                  \
++    unsigned int nr_ents;                                               \
++    struct __name##_sring *sring;                                       \
++};                                                                      \
++                                                                        \
++/* Syntactic sugar */                                                   \
++typedef struct __name##_sring __name##_sring_t;                         \
++typedef struct __name##_front_ring __name##_front_ring_t;               \
++typedef struct __name##_back_ring __name##_back_ring_t
 +
 +/*
-+ * IRQs
++ * Macros for manipulating rings.
++ * 
++ * FRONT_RING_whatever works on the "front end" of a ring: here 
++ * requests are pushed on to the ring and responses taken off it.
++ * 
++ * BACK_RING_whatever works on the "back end" of a ring: here 
++ * requests are taken off the ring and responses put on.
++ * 
++ * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL. 
++ * This is OK in 1-for-1 request-response situations where the 
++ * requestor (front end) never has more than RING_SIZE()-1
++ * outstanding requests.
 + */
 +
-+struct hvm_hw_pci_irqs {
-+    /*
-+     * Virtual interrupt wires for a single PCI bus.
-+     * Indexed by: device*4 + INTx#.
-+     */
-+    union {
-+        DECLARE_BITMAP(i, 32*4);
-+        uint64_t pad[2];
-+    };
-+};
-+
-+DECLARE_HVM_SAVE_TYPE(PCI_IRQ, 7, struct hvm_hw_pci_irqs);
-+
-+struct hvm_hw_isa_irqs {
-+    /*
-+     * Virtual interrupt wires for ISA devices.
-+     * Indexed by ISA IRQ (assumes no ISA-device IRQ sharing).
-+     */
-+    union {
-+        DECLARE_BITMAP(i, 16);
-+        uint64_t pad[1];
-+    };
-+};
-+
-+DECLARE_HVM_SAVE_TYPE(ISA_IRQ, 8, struct hvm_hw_isa_irqs);
-+
-+struct hvm_hw_pci_link {
-+    /*
-+     * PCI-ISA interrupt router.
-+     * Each PCI <device:INTx#> is 'wire-ORed' into one of four links using
-+     * the traditional 'barber's pole' mapping ((device + INTx#) & 3).
-+     * The router provides a programmable mapping from each link to a GSI.
-+     */
-+    uint8_t route[4];
-+    uint8_t pad0[4];
-+};
-+
-+DECLARE_HVM_SAVE_TYPE(PCI_LINK, 9, struct hvm_hw_pci_link);
++/* Initialising empty rings */
++#define SHARED_RING_INIT(_s) do {                                       \
++    (_s)->req_prod  = (_s)->rsp_prod  = 0;                              \
++    (_s)->req_event = (_s)->rsp_event = 1;                              \
++    (void)memset((_s)->pad, 0, sizeof((_s)->pad));                      \
++} while(0)
 +
-+/* 
-+ *  PIT
-+ */
++#define FRONT_RING_INIT(_r, _s, __size) do {                            \
++    (_r)->req_prod_pvt = 0;                                             \
++    (_r)->rsp_cons = 0;                                                 \
++    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
++    (_r)->sring = (_s);                                                 \
++} while (0)
 +
-+struct hvm_hw_pit {
-+    struct hvm_hw_pit_channel {
-+        uint32_t count; /* can be 65536 */
-+        uint16_t latched_count;
-+        uint8_t count_latched;
-+        uint8_t status_latched;
-+        uint8_t status;
-+        uint8_t read_state;
-+        uint8_t write_state;
-+        uint8_t write_latch;
-+        uint8_t rw_mode;
-+        uint8_t mode;
-+        uint8_t bcd; /* not supported */
-+        uint8_t gate; /* timer start */
-+    } channels[3];  /* 3 x 16 bytes */
-+    uint32_t speaker_data_on;
-+    uint32_t pad0;
-+};
++#define BACK_RING_INIT(_r, _s, __size) do {                             \
++    (_r)->rsp_prod_pvt = 0;                                             \
++    (_r)->req_cons = 0;                                                 \
++    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
++    (_r)->sring = (_s);                                                 \
++} while (0)
 +
-+DECLARE_HVM_SAVE_TYPE(PIT, 10, struct hvm_hw_pit);
++/* Initialize to existing shared indexes -- for recovery */
++#define FRONT_RING_ATTACH(_r, _s, __size) do {                          \
++    (_r)->sring = (_s);                                                 \
++    (_r)->req_prod_pvt = (_s)->req_prod;                                \
++    (_r)->rsp_cons = (_s)->rsp_prod;                                    \
++    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
++} while (0)
 +
++#define BACK_RING_ATTACH(_r, _s, __size) do {                           \
++    (_r)->sring = (_s);                                                 \
++    (_r)->rsp_prod_pvt = (_s)->rsp_prod;                                \
++    (_r)->req_cons = (_s)->req_prod;                                    \
++    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
++} while (0)
 +
-+/* 
-+ * RTC
-+ */ 
++/* How big is this ring? */
++#define RING_SIZE(_r)                                                   \
++    ((_r)->nr_ents)
 +
-+#define RTC_CMOS_SIZE 14
-+struct hvm_hw_rtc {
-+    /* CMOS bytes */
-+    uint8_t cmos_data[RTC_CMOS_SIZE];
-+    /* Index register for 2-part operations */
-+    uint8_t cmos_index;
-+    uint8_t pad0;
-+};
++/* Number of free requests (for use on front side only). */
++#define RING_FREE_REQUESTS(_r)                                          \
++    (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
 +
-+DECLARE_HVM_SAVE_TYPE(RTC, 11, struct hvm_hw_rtc);
++/* Test if there is an empty slot available on the front ring.
++ * (This is only meaningful from the front.)
++ */
++#define RING_FULL(_r)                                                   \
++    (RING_FREE_REQUESTS(_r) == 0)
 +
++/* Test if there are outstanding messages to be processed on a ring. */
++#define RING_HAS_UNCONSUMED_RESPONSES(_r)                               \
++    ((_r)->sring->rsp_prod - (_r)->rsp_cons)
 +
-+/*
-+ * HPET
-+ */
++#ifdef __GNUC__
++#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({                             \
++    unsigned int req = (_r)->sring->req_prod - (_r)->req_cons;          \
++    unsigned int rsp = RING_SIZE(_r) -                                  \
++        ((_r)->req_cons - (_r)->rsp_prod_pvt);                          \
++    req < rsp ? req : rsp;                                              \
++})
++#else
++/* Same as above, but without the nice GCC ({ ... }) syntax. */
++#define RING_HAS_UNCONSUMED_REQUESTS(_r)                                \
++    ((((_r)->sring->req_prod - (_r)->req_cons) <                        \
++      (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ?        \
++     ((_r)->sring->req_prod - (_r)->req_cons) :                         \
++     (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt)))
++#endif
 +
-+#define HPET_TIMER_NUM     3    /* 3 timers supported now */
-+struct hvm_hw_hpet {
-+    /* Memory-mapped, software visible registers */
-+    uint64_t capability;        /* capabilities */
-+    uint64_t res0;              /* reserved */
-+    uint64_t config;            /* configuration */
-+    uint64_t res1;              /* reserved */
-+    uint64_t isr;               /* interrupt status reg */
-+    uint64_t res2[25];          /* reserved */
-+    uint64_t mc64;              /* main counter */
-+    uint64_t res3;              /* reserved */
-+    struct {                    /* timers */
-+        uint64_t config;        /* configuration/cap */
-+        uint64_t cmp;           /* comparator */
-+        uint64_t fsb;           /* FSB route, not supported now */
-+        uint64_t res4;          /* reserved */
-+    } timers[HPET_TIMER_NUM];
-+    uint64_t res5[4*(24-HPET_TIMER_NUM)];  /* reserved, up to 0x3ff */
++/* Direct access to individual ring elements, by index. */
++#define RING_GET_REQUEST(_r, _idx)                                      \
++    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
 +
-+    /* Hidden register state */
-+    uint64_t period[HPET_TIMER_NUM]; /* Last value written to comparator */
-+};
++#define RING_GET_RESPONSE(_r, _idx)                                     \
++    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
 +
-+DECLARE_HVM_SAVE_TYPE(HPET, 12, struct hvm_hw_hpet);
++/* Loop termination condition: Would the specified index overflow the ring? */
++#define RING_REQUEST_CONS_OVERFLOW(_r, _cons)                           \
++    (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
++
++#define RING_PUSH_REQUESTS(_r) do {                                     \
++    xen_wmb(); /* back sees requests /before/ updated producer index */ \
++    (_r)->sring->req_prod = (_r)->req_prod_pvt;                         \
++} while (0)
 +
++#define RING_PUSH_RESPONSES(_r) do {                                    \
++    xen_wmb(); /* front sees resps /before/ updated producer index */   \
++    (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt;                         \
++} while (0)
 +
 +/*
-+ * PM timer
++ * Notification hold-off (req_event and rsp_event):
++ * 
++ * When queueing requests or responses on a shared ring, it may not always be
++ * necessary to notify the remote end. For example, if requests are in flight
++ * in a backend, the front may be able to queue further requests without
++ * notifying the back (if the back checks for new requests when it queues
++ * responses).
++ * 
++ * When enqueuing requests or responses:
++ * 
++ *  Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
++ *  is a boolean return value. True indicates that the receiver requires an
++ *  asynchronous notification.
++ * 
++ * After dequeuing requests or responses (before sleeping the connection):
++ * 
++ *  Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
++ *  The second argument is a boolean return value. True indicates that there
++ *  are pending messages on the ring (i.e., the connection should not be put
++ *  to sleep).
++ * 
++ *  These macros will set the req_event/rsp_event field to trigger a
++ *  notification on the very next message that is enqueued. If you want to
++ *  create batches of work (i.e., only receive a notification after several
++ *  messages have been enqueued) then you will need to create a customised
++ *  version of the FINAL_CHECK macro in your own code, which sets the event
++ *  field appropriately.
 + */
 +
-+struct hvm_hw_pmtimer {
-+    uint32_t tmr_val;   /* PM_TMR_BLK.TMR_VAL: 32bit free-running counter */
-+    uint16_t pm1a_sts;  /* PM1a_EVT_BLK.PM1a_STS: status register */
-+    uint16_t pm1a_en;   /* PM1a_EVT_BLK.PM1a_EN: enable register */
-+};
-+
-+DECLARE_HVM_SAVE_TYPE(PMTIMER, 13, struct hvm_hw_pmtimer);
++#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do {           \
++    RING_IDX __old = (_r)->sring->req_prod;                             \
++    RING_IDX __new = (_r)->req_prod_pvt;                                \
++    xen_wmb(); /* back sees requests /before/ updated producer index */ \
++    (_r)->sring->req_prod = __new;                                      \
++    xen_mb(); /* back sees new requests /before/ we check req_event */  \
++    (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) <           \
++                 (RING_IDX)(__new - __old));                            \
++} while (0)
 +
-+/* 
-+ * Largest type-code in use
-+ */
-+#define HVM_SAVE_CODE_MAX 13
++#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do {          \
++    RING_IDX __old = (_r)->sring->rsp_prod;                             \
++    RING_IDX __new = (_r)->rsp_prod_pvt;                                \
++    xen_wmb(); /* front sees resps /before/ updated producer index */   \
++    (_r)->sring->rsp_prod = __new;                                      \
++    xen_mb(); /* front sees new resps /before/ we check rsp_event */    \
++    (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) <           \
++                 (RING_IDX)(__new - __old));                            \
++} while (0)
 +
++#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do {             \
++    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                   \
++    if (_work_to_do) break;                                             \
++    (_r)->sring->req_event = (_r)->req_cons + 1;                        \
++    xen_mb();                                                           \
++    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                   \
++} while (0)
 +
-+/* 
-+ * The series of save records is terminated by a zero-type, zero-length
-+ * descriptor.
-+ */
++#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do {            \
++    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                  \
++    if (_work_to_do) break;                                             \
++    (_r)->sring->rsp_event = (_r)->rsp_cons + 1;                        \
++    xen_mb();                                                           \
++    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                  \
++} while (0)
 +
-+struct hvm_save_end {};
-+DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end);
++#endif /* __XEN_PUBLIC_IO_RING_H__ */
 +
-+#endif /* __XEN_PUBLIC_HVM_SAVE_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/hvm/vmx_assist.h tmp-linux-2.6-xen.patch/include/xen/interface/hvm/vmx_assist.h
---- pristine-linux-2.6.18.2/include/xen/interface/hvm/vmx_assist.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/hvm/vmx_assist.h	2007-09-30 18:06:19.000000000 +0200
-@@ -0,0 +1,122 @@
 +/*
-+ * vmx_assist.h: Context definitions for the VMXASSIST world switch.
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
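
As a usage illustration (a minimal sketch, not part of the patch): assuming a
DEFINE_RING_TYPES(demo, struct demo_request, struct demo_response)
instantiation, a shared page already set up with SHARED_RING_INIT() and
FRONT_RING_INIT()/BACK_RING_INIT(), and hypothetical kick_remote() and
handle_request() helpers, the two ends might drive the macros like this:

    /* Front end: queue one request, notifying only when required. */
    static void demo_front_produce(demo_front_ring_t *front,
                                   const struct demo_request *src)
    {
        int notify;

        if (RING_FULL(front))
            return;                        /* no free slot; caller retries */

        *RING_GET_REQUEST(front, front->req_prod_pvt) = *src;
        front->req_prod_pvt++;

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(front, notify);
        if (notify)
            kick_remote();                 /* hypothetical event-channel kick */
    }

    /* Back end: drain requests, re-arming req_event before sleeping. */
    static void demo_back_consume(demo_back_ring_t *back)
    {
        int more;

        do {
            while (RING_HAS_UNCONSUMED_REQUESTS(back)) {
                struct demo_request req =
                    *RING_GET_REQUEST(back, back->req_cons);
                back->req_cons++;
                handle_request(&req);      /* hypothetical work function */
            }
            RING_FINAL_CHECK_FOR_REQUESTS(back, more);
        } while (more);
    }

Note how neither side notifies unconditionally: the front end kicks only when
the CHECK_NOTIFY macro reports that req_event was crossed, and the back end
sleeps only after FINAL_CHECK has re-checked the ring with req_event re-armed.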
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/io/tpmif.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/io/tpmif.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,77 @@
++/******************************************************************************
++ * tpmif.h
++ *
++ * TPM I/O interface for Xen guest OSes.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
@@ -101080,97 +140076,50 @@
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
 + *
-+ * Leendert van Doorn, leendert at watson.ibm.com
-+ * Copyright (c) 2005, International Business Machines Corporation.
++ * Copyright (c) 2005, IBM Corporation
++ *
++ * Author: Stefan Berger, stefanb at us.ibm.com
++ * Grant table support: Mahadevan Gomathisankaran
++ *
++ * This code has been derived from tools/libxc/xen/io/netif.h
++ *
++ * Copyright (c) 2003-2004, Keir Fraser
 + */
 +
-+#ifndef _VMX_ASSIST_H_
-+#define _VMX_ASSIST_H_
-+
-+#define VMXASSIST_BASE         0xD0000
-+#define VMXASSIST_MAGIC        0x17101966
-+#define VMXASSIST_MAGIC_OFFSET (VMXASSIST_BASE+8)
-+
-+#define VMXASSIST_NEW_CONTEXT (VMXASSIST_BASE + 12)
-+#define VMXASSIST_OLD_CONTEXT (VMXASSIST_NEW_CONTEXT + 4)
-+
-+#ifndef __ASSEMBLY__
++#ifndef __XEN_PUBLIC_IO_TPMIF_H__
++#define __XEN_PUBLIC_IO_TPMIF_H__
 +
-+#define NR_EXCEPTION_HANDLER    32
-+#define NR_INTERRUPT_HANDLERS   16
-+#define NR_TRAPS        (NR_EXCEPTION_HANDLER+NR_INTERRUPT_HANDLERS)
++#include "../grant_table.h"
 +
-+union vmcs_arbytes {
-+    struct arbyte_fields {
-+        unsigned int seg_type : 4,
-+            s         : 1,
-+            dpl       : 2,
-+            p         : 1,
-+            reserved0 : 4,
-+            avl       : 1,
-+            reserved1 : 1,
-+            default_ops_size: 1,
-+            g         : 1,
-+            null_bit  : 1,
-+            reserved2 : 15;
-+    } fields;
-+    unsigned int bytes;
++struct tpmif_tx_request {
++    unsigned long addr;   /* Machine address of packet.   */
++    grant_ref_t ref;      /* grant table access reference */
++    uint16_t unused;
++    uint16_t size;        /* Packet size in bytes.        */
 +};
++typedef struct tpmif_tx_request tpmif_tx_request_t;
 +
 +/*
-+ * World switch state
++ * The TPMIF_TX_RING_SIZE defines the number of pages the
++ * front-end and backend can exchange (= size of array).
 + */
-+struct vmx_assist_context {
-+    uint32_t  eip;        /* execution pointer */
-+    uint32_t  esp;        /* stack pointer */
-+    uint32_t  eflags;     /* flags register */
-+    uint32_t  cr0;
-+    uint32_t  cr3;        /* page table directory */
-+    uint32_t  cr4;
-+    uint32_t  idtr_limit; /* idt */
-+    uint32_t  idtr_base;
-+    uint32_t  gdtr_limit; /* gdt */
-+    uint32_t  gdtr_base;
-+    uint32_t  cs_sel;     /* cs selector */
-+    uint32_t  cs_limit;
-+    uint32_t  cs_base;
-+    union vmcs_arbytes cs_arbytes;
-+    uint32_t  ds_sel;     /* ds selector */
-+    uint32_t  ds_limit;
-+    uint32_t  ds_base;
-+    union vmcs_arbytes ds_arbytes;
-+    uint32_t  es_sel;     /* es selector */
-+    uint32_t  es_limit;
-+    uint32_t  es_base;
-+    union vmcs_arbytes es_arbytes;
-+    uint32_t  ss_sel;     /* ss selector */
-+    uint32_t  ss_limit;
-+    uint32_t  ss_base;
-+    union vmcs_arbytes ss_arbytes;
-+    uint32_t  fs_sel;     /* fs selector */
-+    uint32_t  fs_limit;
-+    uint32_t  fs_base;
-+    union vmcs_arbytes fs_arbytes;
-+    uint32_t  gs_sel;     /* gs selector */
-+    uint32_t  gs_limit;
-+    uint32_t  gs_base;
-+    union vmcs_arbytes gs_arbytes;
-+    uint32_t  tr_sel;     /* task selector */
-+    uint32_t  tr_limit;
-+    uint32_t  tr_base;
-+    union vmcs_arbytes tr_arbytes;
-+    uint32_t  ldtr_sel;   /* ldtr selector */
-+    uint32_t  ldtr_limit;
-+    uint32_t  ldtr_base;
-+    union vmcs_arbytes ldtr_arbytes;
++typedef uint32_t TPMIF_RING_IDX;
 +
-+    unsigned char rm_irqbase[2];
++#define TPMIF_TX_RING_SIZE 1
++
++/* This structure must fit in a memory page. */
++
++struct tpmif_ring {
++    struct tpmif_tx_request req;
 +};
-+typedef struct vmx_assist_context vmx_assist_context_t;
++typedef struct tpmif_ring tpmif_ring_t;
 +
-+#endif /* __ASSEMBLY__ */
++struct tpmif_tx_interface {
++    struct tpmif_ring ring[TPMIF_TX_RING_SIZE];
++};
++typedef struct tpmif_tx_interface tpmif_tx_interface_t;
 +
-+#endif /* _VMX_ASSIST_H_ */
++#endif
 +
 +/*
 + * Local variables:
@@ -101181,14 +140130,14 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/io/blkif.h tmp-linux-2.6-xen.patch/include/xen/interface/io/blkif.h
---- pristine-linux-2.6.18.2/include/xen/interface/io/blkif.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/io/blkif.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,128 @@
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/io/vscsiif.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/io/vscsiif.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,105 @@
 +/******************************************************************************
-+ * blkif.h
++ * vscsiif.h
 + * 
-+ * Unified block-device I/O interface for Xen guest OSes.
++ * Based on the blkif.h code.
 + * 
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
@@ -101208,102 +140157,79 @@
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
 + *
-+ * Copyright (c) 2003-2004, Keir Fraser
++ * Copyright(c) FUJITSU Limited 2008.
 + */
 +
-+#ifndef __XEN_PUBLIC_IO_BLKIF_H__
-+#define __XEN_PUBLIC_IO_BLKIF_H__
++#ifndef __XEN__PUBLIC_IO_SCSI_H__
++#define __XEN__PUBLIC_IO_SCSI_H__
 +
 +#include "ring.h"
 +#include "../grant_table.h"
 +
-+/*
-+ * Front->back notifications: When enqueuing a new request, sending a
-+ * notification can be made conditional on req_event (i.e., the generic
-+ * hold-off mechanism provided by the ring macros). Backends must set
-+ * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
-+ * 
-+ * Back->front notifications: When enqueuing a new response, sending a
-+ * notification can be made conditional on rsp_event (i.e., the generic
-+ * hold-off mechanism provided by the ring macros). Frontends must set
-+ * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
-+ */
++/* command between backend and frontend */
++#define VSCSIIF_ACT_SCSI_CDB         1    /* SCSI CDB command */
++#define VSCSIIF_ACT_SCSI_ABORT       2    /* SCSI Device(Lun) Abort*/
++#define VSCSIIF_ACT_SCSI_RESET       3    /* SCSI Device(Lun) Reset*/
 +
-+#ifndef blkif_vdev_t
-+#define blkif_vdev_t   uint16_t
-+#endif
-+#define blkif_sector_t uint64_t
 +
-+/*
-+ * REQUEST CODES.
-+ */
-+#define BLKIF_OP_READ              0
-+#define BLKIF_OP_WRITE             1
-+/*
-+ * Recognised only if "feature-barrier" is present in backend xenbus info.
-+ * The "feature_barrier" node contains a boolean indicating whether barrier
-+ * requests are likely to succeed or fail. Either way, a barrier request
-+ * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by
-+ * the underlying block-device hardware. The boolean simply indicates whether
-+ * or not it is worthwhile for the frontend to attempt barrier requests.
-+ * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not*
-+ * create the "feature-barrier" node!
-+ */
-+#define BLKIF_OP_WRITE_BARRIER     2
++#define VSCSIIF_BACK_MAX_PENDING_REQS    128
 +
 +/*
 + * Maximum scatter/gather segments per request.
-+ * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE.
-+ * NB. This could be 12 if the ring indexes weren't stored in the same page.
++ *
++ * Considering the balance between allocating at least 16 "vscsiif_request"
++ * structures on one page (4096 bytes) and the number of scatter/gather
++ * segments needed, 26 was chosen as the magic number.
 + */
-+#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
++#define VSCSIIF_SG_TABLESIZE             26
 +
-+struct blkif_request_segment {
-+    grant_ref_t gref;        /* reference to I/O buffer frame        */
-+    /* @first_sect: first sector in frame to transfer (inclusive).   */
-+    /* @last_sect: last sector in frame to transfer (inclusive).     */
-+    uint8_t     first_sect, last_sect;
-+};
++/*
++ * Based on Linux kernel 2.6.18.
++ */
++#define VSCSIIF_MAX_COMMAND_SIZE         16
++#define VSCSIIF_SENSE_BUFFERSIZE         96
 +
-+struct blkif_request {
-+    uint8_t        operation;    /* BLKIF_OP_???                         */
-+    uint8_t        nr_segments;  /* number of segments                   */
-+    blkif_vdev_t   handle;       /* only for read/write requests         */
-+    uint64_t       id;           /* private guest value, echoed in resp  */
-+    blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
-+    struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+};
-+typedef struct blkif_request blkif_request_t;
 +
-+struct blkif_response {
-+    uint64_t        id;              /* copied from request */
-+    uint8_t         operation;       /* copied from request */
-+    int16_t         status;          /* BLKIF_RSP_???       */
-+};
-+typedef struct blkif_response blkif_response_t;
++struct vscsiif_request {
++    uint16_t rqid;          /* private guest value, echoed in resp  */
++    uint8_t act;            /* command between backend and frontend */
++    uint8_t cmd_len;
 +
-+/*
-+ * STATUS RETURN CODES.
-+ */
-+ /* Operation not supported (only happens on barrier writes). */
-+#define BLKIF_RSP_EOPNOTSUPP  -2
-+ /* Operation failed for some unspecified reason (-EIO). */
-+#define BLKIF_RSP_ERROR       -1
-+ /* Operation completed successfully. */
-+#define BLKIF_RSP_OKAY         0
++    uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE];
++    uint16_t timeout_per_command;     /* The backend issues the command
++                                         with twice this timeout value. */
++    uint16_t channel, id, lun;
++    uint16_t padding;
++    uint8_t sc_data_direction;        /* for DMA_TO_DEVICE(1)
++                                         DMA_FROM_DEVICE(2)
++                                         DMA_NONE(3) requests  */
++    uint8_t nr_segments;              /* Number of pieces of scatter-gather */
 +
-+/*
-+ * Generate blkif ring structures and types.
-+ */
++    struct scsiif_request_segment {
++        grant_ref_t gref;
++        uint16_t offset;
++        uint16_t length;
++    } seg[VSCSIIF_SG_TABLESIZE];
++    uint32_t reserved[3];
++};
++typedef struct vscsiif_request vscsiif_request_t;
 +
-+DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);
++struct vscsiif_response {
++    uint16_t rqid;
++    uint8_t padding;
++    uint8_t sense_len;
++    uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
++    int32_t rslt;
++    uint32_t residual_len;     /* request bufflen minus the length
++                                  returned by the physical device */
++    uint32_t reserved[36];
++};
++typedef struct vscsiif_response vscsiif_response_t;
 +
-+#define VDISK_CDROM        0x1
-+#define VDISK_REMOVABLE    0x2
-+#define VDISK_READONLY     0x4
++DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response);
 +
-+#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
 +
++#endif  /*__XEN__PUBLIC_IO_SCSI_H__*/
 +/*
 + * Local variables:
 + * mode: C
@@ -101313,15 +140239,15 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/io/console.h tmp-linux-2.6-xen.patch/include/xen/interface/io/console.h
---- pristine-linux-2.6.18.2/include/xen/interface/io/console.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/io/console.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,51 @@
-+/******************************************************************************
-+ * console.h
-+ * 
-+ * Console I/O interface for Xen guest OSes.
-+ * 
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/io/xenbus.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/io/xenbus.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,80 @@
++/*****************************************************************************
++ * xenbus.h
++ *
++ * Xenbus protocol details.
++ *
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
 + * deal in the Software without restriction, including without limitation the
@@ -101340,24 +140266,53 @@
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
 + *
-+ * Copyright (c) 2005, Keir Fraser
++ * Copyright (C) 2005 XenSource Ltd.
 + */
 +
-+#ifndef __XEN_PUBLIC_IO_CONSOLE_H__
-+#define __XEN_PUBLIC_IO_CONSOLE_H__
++#ifndef _XEN_PUBLIC_IO_XENBUS_H
++#define _XEN_PUBLIC_IO_XENBUS_H
 +
-+typedef uint32_t XENCONS_RING_IDX;
++/*
++ * The state of either end of the Xenbus, i.e. the current communication
++ * status of initialisation across the bus.  States here imply nothing about
++ * the state of the connection between the driver and the kernel's device
++ * layers.
++ */
++enum xenbus_state {
++    XenbusStateUnknown       = 0,
 +
-+#define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1))
++    XenbusStateInitialising  = 1,
 +
-+struct xencons_interface {
-+    char in[1024];
-+    char out[2048];
-+    XENCONS_RING_IDX in_cons, in_prod;
-+    XENCONS_RING_IDX out_cons, out_prod;
++    /*
++     * InitWait: Finished early initialisation but waiting for information
++     * from the peer or hotplug scripts.
++     */
++    XenbusStateInitWait      = 2,
++
++    /*
++     * Initialised: Waiting for a connection from the peer.
++     */
++    XenbusStateInitialised   = 3,
++
++    XenbusStateConnected     = 4,
++
++    /*
++     * Closing: The device is being closed due to an error or an unplug event.
++     */
++    XenbusStateClosing       = 5,
++
++    XenbusStateClosed        = 6,
++
++    /*
++     * Reconfiguring: The device is being reconfigured.
++     */
++    XenbusStateReconfiguring = 7,
++
++    XenbusStateReconfigured  = 8
 +};
++typedef enum xenbus_state XenbusState;
 +
-+#endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */
++#endif /* _XEN_PUBLIC_IO_XENBUS_H */
 +
 +/*
 + * Local variables:
@@ -101368,12 +140323,13 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/io/fbif.h tmp-linux-2.6-xen.patch/include/xen/interface/io/fbif.h
---- pristine-linux-2.6.18.2/include/xen/interface/io/fbif.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/io/fbif.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,138 @@
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/io/xs_wire.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/io/xs_wire.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,130 @@
 +/*
-+ * fbif.h -- Xen virtual frame buffer device
++ * Details of the "wire" protocol between Xen Store Daemon and client
++ * library or guest kernel.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
@@ -101393,113 +140349,104 @@
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
 + *
-+ * Copyright (C) 2005 Anthony Liguori <aliguori at us.ibm.com>
-+ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru at redhat.com>
++ * Copyright (C) 2005 Rusty Russell IBM Corporation
 + */
 +
-+#ifndef __XEN_PUBLIC_IO_FBIF_H__
-+#define __XEN_PUBLIC_IO_FBIF_H__
-+
-+/* Out events (frontend -> backend) */
++#ifndef _XS_WIRE_H
++#define _XS_WIRE_H
 +
-+/*
-+ * Out events may be sent only when requested by backend, and receipt
-+ * of an unknown out event is an error.
-+ */
++enum xsd_sockmsg_type
++{
++    XS_DEBUG,
++    XS_DIRECTORY,
++    XS_READ,
++    XS_GET_PERMS,
++    XS_WATCH,
++    XS_UNWATCH,
++    XS_TRANSACTION_START,
++    XS_TRANSACTION_END,
++    XS_INTRODUCE,
++    XS_RELEASE,
++    XS_GET_DOMAIN_PATH,
++    XS_WRITE,
++    XS_MKDIR,
++    XS_RM,
++    XS_SET_PERMS,
++    XS_WATCH_EVENT,
++    XS_ERROR,
++    XS_IS_DOMAIN_INTRODUCED,
++    XS_RESUME,
++    XS_SET_TARGET
++};
 +
-+/* Event type 1 currently not used */
-+/*
-+ * Framebuffer update notification event
-+ * Capable frontend sets feature-update in xenstore.
-+ * Backend requests it by setting request-update in xenstore.
-+ */
-+#define XENFB_TYPE_UPDATE 2
++#define XS_WRITE_NONE "NONE"
++#define XS_WRITE_CREATE "CREATE"
++#define XS_WRITE_CREATE_EXCL "CREATE|EXCL"
 +
-+struct xenfb_update
++/* We hand errors as strings, for portability. */
++struct xsd_errors
 +{
-+    uint8_t type;    /* XENFB_TYPE_UPDATE */
-+    int32_t x;      /* source x */
-+    int32_t y;      /* source y */
-+    int32_t width;  /* rect width */
-+    int32_t height; /* rect height */
++    int errnum;
++    const char *errstring;
++};
++#define XSD_ERROR(x) { x, #x }
++/* LINTED: static unused */
++static struct xsd_errors xsd_errors[]
++#if defined(__GNUC__)
++__attribute__((unused))
++#endif
++    = {
++    XSD_ERROR(EINVAL),
++    XSD_ERROR(EACCES),
++    XSD_ERROR(EEXIST),
++    XSD_ERROR(EISDIR),
++    XSD_ERROR(ENOENT),
++    XSD_ERROR(ENOMEM),
++    XSD_ERROR(ENOSPC),
++    XSD_ERROR(EIO),
++    XSD_ERROR(ENOTEMPTY),
++    XSD_ERROR(ENOSYS),
++    XSD_ERROR(EROFS),
++    XSD_ERROR(EBUSY),
++    XSD_ERROR(EAGAIN),
++    XSD_ERROR(EISCONN)
 +};
 +
-+#define XENFB_OUT_EVENT_SIZE 40
-+
-+union xenfb_out_event
++struct xsd_sockmsg
 +{
-+    uint8_t type;
-+    struct xenfb_update update;
-+    char pad[XENFB_OUT_EVENT_SIZE];
-+};
-+
-+/* In events (backend -> frontend) */
-+
-+/*
-+ * Frontends should ignore unknown in events.
-+ * No in events currently defined.
-+ */
-+
-+#define XENFB_IN_EVENT_SIZE 40
++    uint32_t type;  /* XS_??? */
++    uint32_t req_id;/* Request identifier, echoed in daemon's response.  */
++    uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */
++    uint32_t len;   /* Length of data following this. */
 +
-+union xenfb_in_event
-+{
-+    uint8_t type;
-+    char pad[XENFB_IN_EVENT_SIZE];
++    /* Generally followed by nul-terminated string(s). */
 +};
 +
-+/* shared page */
-+
-+#define XENFB_IN_RING_SIZE 1024
-+#define XENFB_IN_RING_LEN (XENFB_IN_RING_SIZE / XENFB_IN_EVENT_SIZE)
-+#define XENFB_IN_RING_OFFS 1024
-+#define XENFB_IN_RING(page) \
-+    ((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS))
-+#define XENFB_IN_RING_REF(page, idx) \
-+    (XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN])
-+
-+#define XENFB_OUT_RING_SIZE 2048
-+#define XENFB_OUT_RING_LEN (XENFB_OUT_RING_SIZE / XENFB_OUT_EVENT_SIZE)
-+#define XENFB_OUT_RING_OFFS (XENFB_IN_RING_OFFS + XENFB_IN_RING_SIZE)
-+#define XENFB_OUT_RING(page) \
-+    ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS))
-+#define XENFB_OUT_RING_REF(page, idx) \
-+    (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN])
-+
-+struct xenfb_page
++enum xs_watch_type
 +{
-+    uint32_t in_cons, in_prod;
-+    uint32_t out_cons, out_prod;
-+
-+    int32_t width;          /* the width of the framebuffer (in pixels) */
-+    int32_t height;         /* the height of the framebuffer (in pixels) */
-+    uint32_t line_length;   /* the length of a row of pixels (in bytes) */
-+    uint32_t mem_length;    /* the length of the framebuffer (in bytes) */
-+    uint8_t depth;          /* the depth of a pixel (in bits) */
++    XS_WATCH_PATH = 0,
++    XS_WATCH_TOKEN
++};
 +
-+    /*
-+     * Framebuffer page directory
-+     *
-+     * Each directory page holds PAGE_SIZE / sizeof(*pd)
-+     * framebuffer pages, and can thus map up to PAGE_SIZE *
-+     * PAGE_SIZE / sizeof(*pd) bytes.  With PAGE_SIZE == 4096 and
-+     * sizeof(unsigned long) == 4, that's 4 Megs.  Two directory
-+     * pages should be enough for a while.
-+     */
-+    unsigned long pd[2];
++/* Inter-domain shared memory communications. */
++#define XENSTORE_RING_SIZE 1024
++typedef uint32_t XENSTORE_RING_IDX;
++#define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1))
++struct xenstore_domain_interface {
++    char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */
++    char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */
++    XENSTORE_RING_IDX req_cons, req_prod;
++    XENSTORE_RING_IDX rsp_cons, rsp_prod;
 +};
 +
-+/*
-+ * Wart: xenkbd needs to know resolution.  Put it here until a better
-+ * solution is found, but don't leak it to the backend.
-+ */
-+#ifdef __KERNEL__
-+#define XENFB_WIDTH 800
-+#define XENFB_HEIGHT 600
-+#define XENFB_DEPTH 32
-+#endif
++/* Violating this is very bad.  See docs/misc/xenstore.txt. */
++#define XENSTORE_PAYLOAD_MAX 4096
 +
-+#endif
++/* Violating these just gets you an error back */
++#define XENSTORE_ABS_PATH_MAX 3072
++#define XENSTORE_REL_PATH_MAX 2048
++
++#endif /* _XS_WIRE_H */
 +
 +/*
 + * Local variables:
@@ -101510,13 +140457,13 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/io/kbdif.h tmp-linux-2.6-xen.patch/include/xen/interface/io/kbdif.h
---- pristine-linux-2.6.18.2/include/xen/interface/io/kbdif.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/io/kbdif.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,130 @@
-+/*
-+ * kbdif.h -- Xen virtual keyboard/mouse
-+ *
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/kexec.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/kexec.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,189 @@
++/******************************************************************************
++ * kexec.h - Public portion
++ * 
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
 + * deal in the Software without restriction, including without limitation the
@@ -101534,106 +140481,165 @@
 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
++ * 
++ * Xen port written by:
++ * - Simon 'Horms' Horman <horms at verge.net.au>
++ * - Magnus Damm <magnus at valinux.co.jp>
++ */
++
++#ifndef _XEN_PUBLIC_KEXEC_H
++#define _XEN_PUBLIC_KEXEC_H
++
++
++/* This file describes the Kexec / Kdump hypercall interface for Xen.
 + *
-+ * Copyright (C) 2005 Anthony Liguori <aliguori at us.ibm.com>
-+ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru at redhat.com>
++ * Kexec under vanilla Linux allows a user to reboot the physical machine 
++ * into a new user-specified kernel. The Xen port extends this idea
++ * to allow rebooting of the machine from dom0. When kexec for dom0
++ * is used to reboot,  both the hypervisor and the domains get replaced
++ * with some other kernel. It is possible to kexec between vanilla
++ * Linux and Xen and back again. Xen to Xen works well too.
++ *
++ * The hypercall interface for kexec can be divided into three main
++ * types of hypercall operations:
++ *
++ * 1) Range information:
++ *    This is used by the dom0 kernel to ask the hypervisor about various 
++ *    address information. This information is needed to allow kexec-tools 
++ *    to fill in the ELF headers for /proc/vmcore properly.
++ *
++ * 2) Load and unload of images:
++ *    There are no big surprises here, the kexec binary from kexec-tools
++ *    runs in userspace in dom0. The tool loads/unloads data into the
++ *    dom0 kernel such as new kernel, initramfs and hypervisor. When
++ *    loaded the dom0 kernel performs a load hypercall operation, and
++ *    before releasing all page references the dom0 kernel calls unload.
++ *
++ * 3) Kexec operation:
++ *    This is used to start a previously loaded kernel.
 + */
 +
-+#ifndef __XEN_PUBLIC_IO_KBDIF_H__
-+#define __XEN_PUBLIC_IO_KBDIF_H__
++#include "xen.h"
 +
-+/* In events (backend -> frontend) */
++#if defined(__i386__) || defined(__x86_64__)
++#define KEXEC_XEN_NO_PAGES 17
++#endif
 +
 +/*
-+ * Frontends should ignore unknown in events.
++ * Prototype for this hypercall is:
++ *  int kexec_op(int cmd, void *args)
++ * @cmd  == KEXEC_CMD_... 
++ *          KEXEC operation to perform
++ * @args == Operation-specific extra arguments (NULL if none).
 + */
 +
-+/* Pointer movement event */
-+#define XENKBD_TYPE_MOTION  1
-+/* Event type 2 currently not used */
-+/* Key event (includes pointer buttons) */
-+#define XENKBD_TYPE_KEY     3
 +/*
-+ * Pointer position event
-+ * Capable backend sets feature-abs-pointer in xenstore.
-+ * Frontend requests it instead of XENKBD_TYPE_MOTION by setting
-+ * request-abs-update in xenstore.
++ * Kexec supports two types of operation:
++ * - kexec into a regular kernel, very similar to a standard reboot
++ *   - KEXEC_TYPE_DEFAULT is used to specify this type
++ * - kexec into a special "crash kernel", aka kexec-on-panic
++ *   - KEXEC_TYPE_CRASH is used to specify this type
++ *   - parts of our system may be broken at kexec-on-panic time
++ *     - the code should be kept as simple and self-contained as possible
 + */
-+#define XENKBD_TYPE_POS     4
-+
-+struct xenkbd_motion
-+{
-+    uint8_t type;        /* XENKBD_TYPE_MOTION */
-+    int32_t rel_x;       /* relative X motion */
-+    int32_t rel_y;       /* relative Y motion */
-+};
-+
-+struct xenkbd_key
-+{
-+    uint8_t type;         /* XENKBD_TYPE_KEY */
-+    uint8_t pressed;      /* 1 if pressed; 0 otherwise */
-+    uint32_t keycode;     /* KEY_* from linux/input.h */
-+};
-+
-+struct xenkbd_position
-+{
-+    uint8_t type;        /* XENKBD_TYPE_POS */
-+    int32_t abs_x;       /* absolute X position (in FB pixels) */
-+    int32_t abs_y;       /* absolute Y position (in FB pixels) */
-+};
 +
-+#define XENKBD_IN_EVENT_SIZE 40
++#define KEXEC_TYPE_DEFAULT 0
++#define KEXEC_TYPE_CRASH   1
 +
-+union xenkbd_in_event
-+{
-+    uint8_t type;
-+    struct xenkbd_motion motion;
-+    struct xenkbd_key key;
-+    struct xenkbd_position pos;
-+    char pad[XENKBD_IN_EVENT_SIZE];
-+};
 +
-+/* Out events (frontend -> backend) */
++/* The kexec implementation for Xen allows the user to load two
++ * types of kernels, KEXEC_TYPE_DEFAULT and KEXEC_TYPE_CRASH.
++ * All data needed for a kexec reboot is kept in one xen_kexec_image_t
++ * per "instance". The data mainly consists of machine address lists to pages
++ * together with destination addresses. The data in xen_kexec_image_t
++ * is passed to the "code page" which is one page of code that performs
++ * the final relocations before jumping to the new kernel.
++ */
++ 
++typedef struct xen_kexec_image {
++#if defined(__i386__) || defined(__x86_64__)
++    unsigned long page_list[KEXEC_XEN_NO_PAGES];
++#endif
++#if defined(__ia64__)
++    unsigned long reboot_code_buffer;
++#endif
++    unsigned long indirection_page;
++    unsigned long start_address;
++} xen_kexec_image_t;
 +
 +/*
-+ * Out events may be sent only when requested by backend, and receipt
-+ * of an unknown out event is an error.
-+ * No out events currently defined.
++ * Perform kexec having previously loaded a kexec or kdump kernel
++ * as appropriate.
++ * type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in]
 + */
++#define KEXEC_CMD_kexec                 0
++typedef struct xen_kexec_exec {
++    int type;
++} xen_kexec_exec_t;
 +
-+#define XENKBD_OUT_EVENT_SIZE 40
-+
-+union xenkbd_out_event
-+{
-+    uint8_t type;
-+    char pad[XENKBD_OUT_EVENT_SIZE];
-+};
-+
-+/* shared page */
++/*
++ * Load/Unload kernel image for kexec or kdump.
++ * type  == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in]
++ * image == relocation information for kexec (ignored for unload) [in]
++ */
++#define KEXEC_CMD_kexec_load            1
++#define KEXEC_CMD_kexec_unload          2
++typedef struct xen_kexec_load {
++    int type;
++    xen_kexec_image_t image;
++} xen_kexec_load_t;
 +
-+#define XENKBD_IN_RING_SIZE 2048
-+#define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE)
-+#define XENKBD_IN_RING_OFFS 1024
-+#define XENKBD_IN_RING(page) \
-+    ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS))
-+#define XENKBD_IN_RING_REF(page, idx) \
-+    (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN])
++#define KEXEC_RANGE_MA_CRASH      0 /* machine address and size of crash area */
++#define KEXEC_RANGE_MA_XEN        1 /* machine address and size of Xen itself */
++#define KEXEC_RANGE_MA_CPU        2 /* machine address and size of a CPU note */
++#define KEXEC_RANGE_MA_XENHEAP    3 /* machine address and size of xenheap
++                                     * Note that although this is adjacent
++                                     * to Xen, it exists in a separate EFI
++                                     * region on ia64, and thus needs to be
++                                     * inserted into iomem_machine separately */
++#define KEXEC_RANGE_MA_BOOT_PARAM 4 /* machine address and size of
++                                     * the ia64_boot_param */
++#define KEXEC_RANGE_MA_EFI_MEMMAP 5 /* machine address and size
++                                     * of the EFI Memory Map */
++#define KEXEC_RANGE_MA_VMCOREINFO 6 /* machine address and size of vmcoreinfo */
 +
-+#define XENKBD_OUT_RING_SIZE 1024
-+#define XENKBD_OUT_RING_LEN (XENKBD_OUT_RING_SIZE / XENKBD_OUT_EVENT_SIZE)
-+#define XENKBD_OUT_RING_OFFS (XENKBD_IN_RING_OFFS + XENKBD_IN_RING_SIZE)
-+#define XENKBD_OUT_RING(page) \
-+    ((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS))
-+#define XENKBD_OUT_RING_REF(page, idx) \
-+    (XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN])
++/*
++ * Find the address and size of certain memory areas
++ * range == KEXEC_RANGE_... [in]
++ * nr    == physical CPU number (starting from 0) if KEXEC_RANGE_MA_CPU [in]
++ * size  == number of bytes reserved in window [out]
++ * start == address of the first byte in the window [out]
++ */
++#define KEXEC_CMD_kexec_get_range       3
++typedef struct xen_kexec_range {
++    int range;
++    int nr;
++    unsigned long size;
++    unsigned long start;
++} xen_kexec_range_t;
 +
-+struct xenkbd_page
-+{
-+    uint32_t in_cons, in_prod;
-+    uint32_t out_cons, out_prod;
-+};
++/* vmcoreinfo stuff */
++#define VMCOREINFO_BYTES           (4096)
++#define VMCOREINFO_NOTE_NAME       "VMCOREINFO_XEN"
++void arch_crash_save_vmcoreinfo(void);
++void vmcoreinfo_append_str(const char *fmt, ...)
++       __attribute__ ((format (printf, 1, 2)));
++#define VMCOREINFO_PAGESIZE(value) \
++       vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
++#define VMCOREINFO_SYMBOL(name) \
++       vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
++#define VMCOREINFO_SYMBOL_ALIAS(alias, name) \
++       vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #alias, (unsigned long)&name)
++#define VMCOREINFO_STRUCT_SIZE(name) \
++       vmcoreinfo_append_str("SIZE(%s)=%zu\n", #name, sizeof(struct name))
++#define VMCOREINFO_OFFSET(name, field) \
++       vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
++                             (unsigned long)offsetof(struct name, field))
++#define VMCOREINFO_OFFSET_ALIAS(name, field, alias) \
++       vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #alias, \
++                             (unsigned long)offsetof(struct name, field))
 +
-+#endif
++#endif /* _XEN_PUBLIC_KEXEC_H */
 +
 +/*
 + * Local variables:
@@ -101644,14 +140650,12 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/io/netif.h tmp-linux-2.6-xen.patch/include/xen/interface/io/netif.h
---- pristine-linux-2.6.18.2/include/xen/interface/io/netif.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/io/netif.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,184 @@
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/libelf.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/libelf.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,265 @@
 +/******************************************************************************
-+ * netif.h
-+ * 
-+ * Unified network-device I/O interface for Xen guest OSes.
++ * libelf.h
 + * 
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
@@ -101670,175 +140674,260 @@
 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2003-2004, Keir Fraser
 + */
 +
-+#ifndef __XEN_PUBLIC_IO_NETIF_H__
-+#define __XEN_PUBLIC_IO_NETIF_H__
++#ifndef __XC_LIBELF__
++#define __XC_LIBELF__ 1
 +
-+#include "ring.h"
-+#include "../grant_table.h"
++#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
++#define XEN_ELF_LITTLE_ENDIAN
++#else
++#error define architectural endianness
++#endif
 +
-+/*
-+ * Notifications after enqueuing any type of message should be conditional on
-+ * the appropriate req_event or rsp_event field in the shared ring.
-+ * If the client sends notification for rx requests then it should specify
-+ * feature 'feature-rx-notify' via xenbus. Otherwise the backend will assume
-+ * that it cannot safely queue packets (as it may not be kicked to send them).
-+ */
++#undef ELFSIZE
++#include "elfnote.h"
++#include "elfstructs.h"
++#include "features.h"
 +
-+/*
-+ * This is the 'wire' format for packets:
-+ *  Request 1: netif_tx_request -- NETTXF_* (any flags)
-+ * [Request 2: netif_tx_extra]  (only if request 1 has NETTXF_extra_info)
-+ * [Request 3: netif_tx_extra]  (only if request 2 has XEN_NETIF_EXTRA_MORE)
-+ *  Request 4: netif_tx_request -- NETTXF_more_data
-+ *  Request 5: netif_tx_request -- NETTXF_more_data
-+ *  ...
-+ *  Request N: netif_tx_request -- 0
-+ */
++/* ------------------------------------------------------------------------ */
 +
-+/* Protocol checksum field is blank in the packet (hardware offload)? */
-+#define _NETTXF_csum_blank     (0)
-+#define  NETTXF_csum_blank     (1U<<_NETTXF_csum_blank)
++typedef union {
++    Elf32_Ehdr e32;
++    Elf64_Ehdr e64;
++} elf_ehdr;
 +
-+/* Packet data has been validated against protocol checksum. */
-+#define _NETTXF_data_validated (1)
-+#define  NETTXF_data_validated (1U<<_NETTXF_data_validated)
++typedef union {
++    Elf32_Phdr e32;
++    Elf64_Phdr e64;
++} elf_phdr;
 +
-+/* Packet continues in the next request descriptor. */
-+#define _NETTXF_more_data      (2)
-+#define  NETTXF_more_data      (1U<<_NETTXF_more_data)
++typedef union {
++    Elf32_Shdr e32;
++    Elf64_Shdr e64;
++} elf_shdr;
 +
-+/* Packet to be followed by extra descriptor(s). */
-+#define _NETTXF_extra_info     (3)
-+#define  NETTXF_extra_info     (1U<<_NETTXF_extra_info)
++typedef union {
++    Elf32_Sym e32;
++    Elf64_Sym e64;
++} elf_sym;
 +
-+struct netif_tx_request {
-+    grant_ref_t gref;      /* Reference to buffer page */
-+    uint16_t offset;       /* Offset within buffer page */
-+    uint16_t flags;        /* NETTXF_* */
-+    uint16_t id;           /* Echoed in response message. */
-+    uint16_t size;         /* Packet size in bytes.       */
++typedef union {
++    Elf32_Rel e32;
++    Elf64_Rel e64;
++} elf_rel;
++
++typedef union {
++    Elf32_Rela e32;
++    Elf64_Rela e64;
++} elf_rela;
++
++typedef union {
++    Elf32_Note e32;
++    Elf64_Note e64;
++} elf_note;
++
++struct elf_binary {
++    /* elf binary */
++    const char *image;
++    size_t size;
++    char class;
++    char data;
++
++    const elf_ehdr *ehdr;
++    const char *sec_strtab;
++    const elf_shdr *sym_tab;
++    const char *sym_strtab;
++
++    /* loaded to */
++    char *dest;
++    uint64_t pstart;
++    uint64_t pend;
++    uint64_t reloc_offset;
++
++    uint64_t bsd_symtab_pstart;
++    uint64_t bsd_symtab_pend;
++
++#ifndef __XEN__
++    /* misc */
++    FILE *log;
++#endif
++    int verbose;
 +};
-+typedef struct netif_tx_request netif_tx_request_t;
 +
-+/* Types of netif_extra_info descriptors. */
-+#define XEN_NETIF_EXTRA_TYPE_NONE  (0)  /* Never used - invalid */
-+#define XEN_NETIF_EXTRA_TYPE_GSO   (1)  /* u.gso */
-+#define XEN_NETIF_EXTRA_TYPE_MAX   (2)
++/* ------------------------------------------------------------------------ */
++/* accessing elf header fields                                              */
 +
-+/* netif_extra_info flags. */
-+#define _XEN_NETIF_EXTRA_FLAG_MORE (0)
-+#define XEN_NETIF_EXTRA_FLAG_MORE  (1U<<_XEN_NETIF_EXTRA_FLAG_MORE)
++#ifdef XEN_ELF_BIG_ENDIAN
++# define NATIVE_ELFDATA ELFDATA2MSB
++#else
++# define NATIVE_ELFDATA ELFDATA2LSB
++#endif
 +
-+/* GSO types - only TCPv4 currently supported. */
-+#define XEN_NETIF_GSO_TYPE_TCPV4        (1)
++#define elf_32bit(elf) (ELFCLASS32 == (elf)->class)
++#define elf_64bit(elf) (ELFCLASS64 == (elf)->class)
++#define elf_msb(elf)   (ELFDATA2MSB == (elf)->data)
++#define elf_lsb(elf)   (ELFDATA2LSB == (elf)->data)
++#define elf_swap(elf)  (NATIVE_ELFDATA != (elf)->data)
 +
-+/*
-+ * This structure needs to fit within both netif_tx_request and
-+ * netif_rx_response for compatibility.
-+ */
-+struct netif_extra_info {
-+    uint8_t type;  /* XEN_NETIF_EXTRA_TYPE_* */
-+    uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */
++#define elf_uval(elf, str, elem)                                        \
++    ((ELFCLASS64 == (elf)->class)                                       \
++     ? elf_access_unsigned((elf), (str),                                \
++                           offsetof(typeof(*(str)),e64.elem),           \
++                           sizeof((str)->e64.elem))                     \
++     : elf_access_unsigned((elf), (str),                                \
++                           offsetof(typeof(*(str)),e32.elem),           \
++                           sizeof((str)->e32.elem)))
++
++#define elf_sval(elf, str, elem)                                        \
++    ((ELFCLASS64 == (elf)->class)                                       \
++     ? elf_access_signed((elf), (str),                                  \
++                         offsetof(typeof(*(str)),e64.elem),             \
++                         sizeof((str)->e64.elem))                       \
++     : elf_access_signed((elf), (str),                                  \
++                         offsetof(typeof(*(str)),e32.elem),             \
++                         sizeof((str)->e32.elem)))
++
++#define elf_size(elf, str)                              \
++    ((ELFCLASS64 == (elf)->class)                       \
++     ? sizeof((str)->e64) : sizeof((str)->e32))
 +
-+    union {
-+        struct {
-+            /*
-+             * Maximum payload size of each segment. For example, for TCP this
-+             * is just the path MSS.
-+             */
-+            uint16_t size;
++uint64_t elf_access_unsigned(struct elf_binary *elf, const void *ptr,
++                             uint64_t offset, size_t size);
++int64_t elf_access_signed(struct elf_binary *elf, const void *ptr,
++                          uint64_t offset, size_t size);
 +
-+            /*
-+             * GSO type. This determines the protocol of the packet and any
-+             * extra features required to segment the packet properly.
-+             */
-+            uint8_t type; /* XEN_NETIF_GSO_TYPE_* */
++uint64_t elf_round_up(struct elf_binary *elf, uint64_t addr);
 +
-+            /* Future expansion. */
-+            uint8_t pad;
++/* ------------------------------------------------------------------------ */
++/* xc_libelf_tools.c                                                        */
 +
-+            /*
-+             * GSO features. This specifies any extra GSO features required
-+             * to process this packet, such as ECN support for TCPv4.
-+             */
-+            uint16_t features; /* XEN_NETIF_GSO_FEAT_* */
-+        } gso;
++int elf_shdr_count(struct elf_binary *elf);
++int elf_phdr_count(struct elf_binary *elf);
 +
-+        uint16_t pad[3];
-+    } u;
-+};
++const elf_shdr *elf_shdr_by_name(struct elf_binary *elf, const char *name);
++const elf_shdr *elf_shdr_by_index(struct elf_binary *elf, int index);
++const elf_phdr *elf_phdr_by_index(struct elf_binary *elf, int index);
 +
-+struct netif_tx_response {
-+    uint16_t id;
-+    int16_t  status;       /* NETIF_RSP_* */
-+};
-+typedef struct netif_tx_response netif_tx_response_t;
++const char *elf_section_name(struct elf_binary *elf, const elf_shdr * shdr);
++const void *elf_section_start(struct elf_binary *elf, const elf_shdr * shdr);
++const void *elf_section_end(struct elf_binary *elf, const elf_shdr * shdr);
 +
-+struct netif_rx_request {
-+    uint16_t    id;        /* Echoed in response message.        */
-+    grant_ref_t gref;      /* Reference to incoming granted frame */
-+};
-+typedef struct netif_rx_request netif_rx_request_t;
++const void *elf_segment_start(struct elf_binary *elf, const elf_phdr * phdr);
++const void *elf_segment_end(struct elf_binary *elf, const elf_phdr * phdr);
 +
-+/* Packet data has been validated against protocol checksum. */
-+#define _NETRXF_data_validated (0)
-+#define  NETRXF_data_validated (1U<<_NETRXF_data_validated)
++const elf_sym *elf_sym_by_name(struct elf_binary *elf, const char *symbol);
++const elf_sym *elf_sym_by_index(struct elf_binary *elf, int index);
 +
-+/* Protocol checksum field is blank in the packet (hardware offload)? */
-+#define _NETRXF_csum_blank     (1)
-+#define  NETRXF_csum_blank     (1U<<_NETRXF_csum_blank)
++const char *elf_note_name(struct elf_binary *elf, const elf_note * note);
++const void *elf_note_desc(struct elf_binary *elf, const elf_note * note);
++uint64_t elf_note_numeric(struct elf_binary *elf, const elf_note * note);
++const elf_note *elf_note_next(struct elf_binary *elf, const elf_note * note);
 +
-+/* Packet continues in the next request descriptor. */
-+#define _NETRXF_more_data      (2)
-+#define  NETRXF_more_data      (1U<<_NETRXF_more_data)
++int elf_is_elfbinary(const void *image);
++int elf_phdr_is_loadable(struct elf_binary *elf, const elf_phdr * phdr);
 +
-+/* Packet to be followed by extra descriptor(s). */
-+#define _NETRXF_extra_info     (3)
-+#define  NETRXF_extra_info     (1U<<_NETRXF_extra_info)
++/* ------------------------------------------------------------------------ */
++/* xc_libelf_loader.c                                                       */
++
++int elf_init(struct elf_binary *elf, const char *image, size_t size);
++#ifdef __XEN__
++void elf_set_verbose(struct elf_binary *elf);
++#else
++void elf_set_logfile(struct elf_binary *elf, FILE * log, int verbose);
++#endif
++
++void elf_parse_binary(struct elf_binary *elf);
++void elf_load_binary(struct elf_binary *elf);
++
++void *elf_get_ptr(struct elf_binary *elf, unsigned long addr);
++uint64_t elf_lookup_addr(struct elf_binary *elf, const char *symbol);
++
++void elf_parse_bsdsyms(struct elf_binary *elf, uint64_t pstart); /* private */
++
++/* ------------------------------------------------------------------------ */
++/* xc_libelf_relocate.c                                                     */
++
++int elf_reloc(struct elf_binary *elf);
++
++/* ------------------------------------------------------------------------ */
++/* xc_libelf_dominfo.c                                                      */
++
++#define UNSET_ADDR          ((uint64_t)-1)
 +
-+struct netif_rx_response {
-+    uint16_t id;
-+    uint16_t offset;       /* Offset in page of start of received packet  */
-+    uint16_t flags;        /* NETRXF_* */
-+    int16_t  status;       /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */
++enum xen_elfnote_type {
++    XEN_ENT_NONE = 0,
++    XEN_ENT_LONG = 1,
++    XEN_ENT_STR  = 2
 +};
-+typedef struct netif_rx_response netif_rx_response_t;
 +
-+/*
-+ * Generate netif ring structures and types.
-+ */
++struct xen_elfnote {
++    enum xen_elfnote_type type;
++    const char *name;
++    union {
++        const char *str;
++        uint64_t num;
++    } data;
++};
 +
-+DEFINE_RING_TYPES(netif_tx, struct netif_tx_request, struct netif_tx_response);
-+DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response);
++struct elf_dom_parms {
++    /* raw */
++    const char *guest_info;
++    const void *elf_note_start;
++    const void *elf_note_end;
++    struct xen_elfnote elf_notes[XEN_ELFNOTE_MAX + 1];
++  
++    /* parsed */
++    char guest_os[16];
++    char guest_ver[16];
++    char xen_ver[16];
++    char loader[16];
++    int pae;
++    int bsd_symtab;
++    uint64_t virt_base;
++    uint64_t virt_entry;
++    uint64_t virt_hypercall;
++    uint64_t virt_hv_start_low;
++    uint64_t elf_paddr_offset;
++    uint32_t f_supported[XENFEAT_NR_SUBMAPS];
++    uint32_t f_required[XENFEAT_NR_SUBMAPS];
 +
-+#define NETIF_RSP_DROPPED         -2
-+#define NETIF_RSP_ERROR           -1
-+#define NETIF_RSP_OKAY             0
-+/* No response: used for auxiliary requests (e.g., netif_tx_extra). */
-+#define NETIF_RSP_NULL             1
++    /* calculated */
++    uint64_t virt_offset;
++    uint64_t virt_kstart;
++    uint64_t virt_kend;
++};
 +
-+#endif
++static inline void elf_xen_feature_set(int nr, uint32_t * addr)
++{
++    addr[nr >> 5] |= 1 << (nr & 31);
++}
++static inline int elf_xen_feature_get(int nr, uint32_t * addr)
++{
++    return !!(addr[nr >> 5] & (1 << (nr & 31)));
++}
 +
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/io/pciif.h tmp-linux-2.6-xen.patch/include/xen/interface/io/pciif.h
---- pristine-linux-2.6.18.2/include/xen/interface/io/pciif.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/io/pciif.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,83 @@
-+/*
-+ * PCI Backend/Frontend Common Data Structures & Macros
-+ *
++int elf_xen_parse_features(const char *features,
++                           uint32_t *supported,
++                           uint32_t *required);
++int elf_xen_parse_note(struct elf_binary *elf,
++                       struct elf_dom_parms *parms,
++                       const elf_note *note);
++int elf_xen_parse_guest_info(struct elf_binary *elf,
++                             struct elf_dom_parms *parms);
++int elf_xen_parse(struct elf_binary *elf,
++                  struct elf_dom_parms *parms);
++
++#endif /* __XC_LIBELF__ */
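
Taken together, the entry points above give a loader everything it needs to extract the Xen-specific metadata from a guest kernel image. A minimal sketch of the intended call order, assuming libelf's elf_init() initialiser from earlier in this header and a caller-supplied image/image_size:

    struct elf_binary elf;
    struct elf_dom_parms parms;

    memset(&parms, 0, sizeof(parms));
    if (elf_init(&elf, image, image_size) != 0)   /* image, image_size: caller-supplied */
        return -1;                                /* not a usable ELF image */
    if (elf_xen_parse(&elf, &parms) != 0)
        return -1;                                /* missing/invalid Xen notes */
    /* parms.virt_base, parms.virt_entry etc. are now valid
       (UNSET_ADDR where the image did not specify a value). */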
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/memory.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/memory.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,312 @@
++/******************************************************************************
++ * memory.h
++ * 
++ * Memory reservation and information.
++ * 
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
 + * deal in the Software without restriction, including without limitation the
@@ -101857,386 +140946,367 @@
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
 + *
-+ *   Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
++ * Copyright (c) 2005, Keir Fraser <keir at xensource.com>
 + */
-+#ifndef __XEN_PCI_COMMON_H__
-+#define __XEN_PCI_COMMON_H__
-+
-+/* Be sure to bump this number if you change this file */
-+#define XEN_PCI_MAGIC "7"
-+
-+/* xen_pci_sharedinfo flags */
-+#define _XEN_PCIF_active     (0)
-+#define XEN_PCIF_active      (1<<_XEN_PCI_active)
 +
-+/* xen_pci_op commands */
-+#define XEN_PCI_OP_conf_read    (0)
-+#define XEN_PCI_OP_conf_write   (1)
++#ifndef __XEN_PUBLIC_MEMORY_H__
++#define __XEN_PUBLIC_MEMORY_H__
 +
-+/* xen_pci_op error numbers */
-+#define XEN_PCI_ERR_success          (0)
-+#define XEN_PCI_ERR_dev_not_found   (-1)
-+#define XEN_PCI_ERR_invalid_offset  (-2)
-+#define XEN_PCI_ERR_access_denied   (-3)
-+#define XEN_PCI_ERR_not_implemented (-4)
-+/* XEN_PCI_ERR_op_failed - backend failed to complete the operation */
-+#define XEN_PCI_ERR_op_failed       (-5)
++/*
++ * Increase or decrease the specified domain's memory reservation. Returns the
++ * number of extents successfully allocated or freed.
++ * arg == addr of struct xen_memory_reservation.
++ */
++#define XENMEM_increase_reservation 0
++#define XENMEM_decrease_reservation 1
++#define XENMEM_populate_physmap     6
 +
-+struct xen_pci_op {
-+    /* IN: what action to perform: XEN_PCI_OP_* */
-+    uint32_t cmd;
++#if __XEN_INTERFACE_VERSION__ >= 0x00030209
++/*
++ * Maximum # bits addressable by the user of the allocated region (e.g., I/O 
++ * devices often have a 32-bit limitation even in 64-bit systems). If zero 
++ * then the user has no addressing restriction. This field is not used by 
++ * XENMEM_decrease_reservation.
++ */
++#define XENMEMF_address_bits(x)     (x)
++#define XENMEMF_get_address_bits(x) ((x) & 0xffu)
++/* NUMA node to allocate from. */
++#define XENMEMF_node(x)     (((x) + 1) << 8)
++#define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu)
++#endif
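
The two encodings compose with a bitwise OR; for instance, requesting 32-bit-addressable memory from NUMA node 0 would build the mem_flags value (consumed by struct xen_memory_reservation, just below) roughly as:

    unsigned int mem_flags = XENMEMF_address_bits(32) | XENMEMF_node(0);
    /* XENMEMF_get_address_bits(mem_flags) == 32 and
     * XENMEMF_get_node(mem_flags) == 0; the node is stored off by one
     * so that a zero field means "no node preference". */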
 +
-+    /* OUT: will contain an error number (if any) from errno.h */
-+    int32_t err;
++struct xen_memory_reservation {
 +
-+    /* IN: which device to touch */
-+    uint32_t domain; /* PCI Domain/Segment */
-+    uint32_t bus;
-+    uint32_t devfn;
++    /*
++     * XENMEM_increase_reservation:
++     *   OUT: MFN (*not* GMFN) bases of extents that were allocated
++     * XENMEM_decrease_reservation:
++     *   IN:  GMFN bases of extents to free
++     * XENMEM_populate_physmap:
++     *   IN:  GPFN bases of extents to populate with memory
++     *   OUT: GMFN bases of extents that were allocated
++     *   (NB. This command also updates the mach_to_phys translation table)
++     */
++    XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
 +
-+    /* IN: which configuration registers to touch */
-+    int32_t offset;
-+    int32_t size;
++    /* Number of extents, and size/alignment of each (2^extent_order pages). */
++    xen_ulong_t    nr_extents;
++    unsigned int   extent_order;
 +
-+    /* IN/OUT: Contains the result after a READ or the value to WRITE */
-+    uint32_t value;
-+};
++#if __XEN_INTERFACE_VERSION__ >= 0x00030209
++    /* XENMEMF flags. */
++    unsigned int   mem_flags;
++#else
++    unsigned int   address_bits;
++#endif
 +
-+struct xen_pci_sharedinfo {
-+    /* flags - XEN_PCIF_* */
-+    uint32_t flags;
-+    struct xen_pci_op op;
++    /*
++     * Domain whose reservation is being changed.
++     * Unprivileged domains can specify only DOMID_SELF.
++     */
++    domid_t        domid;
 +};
-+
-+#endif /* __XEN_PCI_COMMON_H__ */
++typedef struct xen_memory_reservation xen_memory_reservation_t;
++DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t);
 +
 +/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
++ * An atomic exchange of memory pages. If the return code is zero then
++ * @out.extent_start provides GMFNs of the newly-allocated memory.
++ * Returns zero on complete success, otherwise a negative error code.
++ * On complete success, @nr_exchanged == @in.nr_extents.
++ * On partial success, @nr_exchanged indicates how much work was done.
 + */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/io/protocols.h tmp-linux-2.6-xen.patch/include/xen/interface/io/protocols.h
---- pristine-linux-2.6.18.2/include/xen/interface/io/protocols.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/io/protocols.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,21 @@
-+#ifndef __XEN_PROTOCOLS_H__
-+#define __XEN_PROTOCOLS_H__
++#define XENMEM_exchange             11
++struct xen_memory_exchange {
++    /*
++     * [IN] Details of memory extents to be exchanged (GMFN bases).
++     * Note that @in.address_bits is ignored and unused.
++     */
++    struct xen_memory_reservation in;
 +
-+#define XEN_IO_PROTO_ABI_X86_32     "x86_32-abi"
-+#define XEN_IO_PROTO_ABI_X86_64     "x86_64-abi"
-+#define XEN_IO_PROTO_ABI_IA64       "ia64-abi"
-+#define XEN_IO_PROTO_ABI_POWERPC64  "powerpc64-abi"
++    /*
++     * [IN/OUT] Details of new memory extents.
++     * We require that:
++     *  1. @in.domid == @out.domid
++     *  2. @in.nr_extents  << @in.extent_order == 
++     *     @out.nr_extents << @out.extent_order
++     *  3. @in.extent_start and @out.extent_start lists must not overlap
++     *  4. @out.extent_start lists GPFN bases to be populated
++     *  5. @out.extent_start is overwritten with allocated GMFN bases
++     */
++    struct xen_memory_reservation out;
 +
-+#if defined(__i386__)
-+# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32
-+#elif defined(__x86_64__)
-+# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64
-+#elif defined(__ia64__)
-+# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64
-+#elif defined(__powerpc64__)
-+# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_POWERPC64
-+#else
-+# error arch fixup needed here
-+#endif
++    /*
++     * [OUT] Number of input extents that were successfully exchanged:
++     *  1. The first @nr_exchanged input extents were successfully
++     *     deallocated.
++     *  2. The corresponding first entries in the output extent list correctly
++     *     indicate the GMFNs that were successfully exchanged.
++     *  3. All other input and output extents are untouched.
++ *  4. If not all input extents are exchanged then the return code of this
++     *     command will be non-zero.
++     *  5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER!
++     */
++    xen_ulong_t nr_exchanged;
++};
++typedef struct xen_memory_exchange xen_memory_exchange_t;
++DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t);
 +
-+#endif
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/io/ring.h tmp-linux-2.6-xen.patch/include/xen/interface/io/ring.h
---- pristine-linux-2.6.18.2/include/xen/interface/io/ring.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/io/ring.h	2007-09-30 18:06:19.000000000 +0200
-@@ -0,0 +1,299 @@
-+/******************************************************************************
-+ * ring.h
-+ * 
-+ * Shared producer-consumer ring macros.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Tim Deegan and Andrew Warfield November 2004.
++/*
++ * Returns the maximum machine frame number of mapped RAM in this system.
++ * This command always succeeds (it never returns an error code).
++ * arg == NULL.
 + */
++#define XENMEM_maximum_ram_page     2
 +
-+#ifndef __XEN_PUBLIC_IO_RING_H__
-+#define __XEN_PUBLIC_IO_RING_H__
-+
-+typedef unsigned int RING_IDX;
-+
-+/* Round a 32-bit unsigned constant down to the nearest power of two. */
-+#define __RD2(_x)  (((_x) & 0x00000002) ? 0x2                  : ((_x) & 0x1))
-+#define __RD4(_x)  (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2    : __RD2(_x))
-+#define __RD8(_x)  (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4    : __RD4(_x))
-+#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8    : __RD8(_x))
-+#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))
++/*
++ * Returns the current or maximum memory reservation, in pages, of the
++ * specified domain (may be DOMID_SELF). Returns -ve errcode on failure.
++ * arg == addr of domid_t.
++ */
++#define XENMEM_current_reservation  3
++#define XENMEM_maximum_reservation  4
 +
 +/*
-+ * Calculate size of a shared ring, given the total available space for the
-+ * ring and indexes (_sz), and the name tag of the request/response structure.
-+ * A ring contains as many entries as will fit, rounded down to the nearest 
-+ * power of two (so we can mask with (size-1) to loop around).
++ * Returns the maximum GPFN in use by the guest, or -ve errcode on failure.
 + */
-+#define __RING_SIZE(_s, _sz) \
-+    (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
++#define XENMEM_maximum_gpfn         14
 +
 +/*
-+ * Macros to make the correct C datatypes for a new kind of ring.
-+ * 
-+ * To make a new ring datatype, you need to have two message structures,
-+ * let's say request_t, and response_t already defined.
-+ *
-+ * In a header where you want the ring datatype declared, you then do:
-+ *
-+ *     DEFINE_RING_TYPES(mytag, request_t, response_t);
-+ *
-+ * These expand out to give you a set of types, as you can see below.
-+ * The most important of these are:
-+ * 
-+ *     mytag_sring_t      - The shared ring.
-+ *     mytag_front_ring_t - The 'front' half of the ring.
-+ *     mytag_back_ring_t  - The 'back' half of the ring.
-+ *
-+ * To initialize a ring in your code you need to know the location and size
-+ * of the shared memory area (PAGE_SIZE, for instance). To initialise
-+ * the front half:
-+ *
-+ *     mytag_front_ring_t front_ring;
-+ *     SHARED_RING_INIT((mytag_sring_t *)shared_page);
-+ *     FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
-+ *
-+ * Initializing the back follows similarly (note that only the front
-+ * initializes the shared ring):
-+ *
-+ *     mytag_back_ring_t back_ring;
-+ *     BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
++ * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
++ * mapping table. Architectures which do not have an m2p table do not implement
++ * this command.
++ * arg == addr of xen_machphys_mfn_list_t.
 + */
++#define XENMEM_machphys_mfn_list    5
++struct xen_machphys_mfn_list {
++    /*
++     * Size of the 'extent_start' array. Fewer entries will be filled if the
++     * machphys table is smaller than max_extents * 2MB.
++     */
++    unsigned int max_extents;
 +
-+#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t)                     \
-+                                                                        \
-+/* Shared ring entry */                                                 \
-+union __name##_sring_entry {                                            \
-+    __req_t req;                                                        \
-+    __rsp_t rsp;                                                        \
-+};                                                                      \
-+                                                                        \
-+/* Shared ring page */                                                  \
-+struct __name##_sring {                                                 \
-+    RING_IDX req_prod, req_event;                                       \
-+    RING_IDX rsp_prod, rsp_event;                                       \
-+    uint8_t  pad[48];                                                   \
-+    union __name##_sring_entry ring[1]; /* variable-length */           \
-+};                                                                      \
-+                                                                        \
-+/* "Front" end's private variables */                                   \
-+struct __name##_front_ring {                                            \
-+    RING_IDX req_prod_pvt;                                              \
-+    RING_IDX rsp_cons;                                                  \
-+    unsigned int nr_ents;                                               \
-+    struct __name##_sring *sring;                                       \
-+};                                                                      \
-+                                                                        \
-+/* "Back" end's private variables */                                    \
-+struct __name##_back_ring {                                             \
-+    RING_IDX rsp_prod_pvt;                                              \
-+    RING_IDX req_cons;                                                  \
-+    unsigned int nr_ents;                                               \
-+    struct __name##_sring *sring;                                       \
-+};                                                                      \
-+                                                                        \
-+/* Syntactic sugar */                                                   \
-+typedef struct __name##_sring __name##_sring_t;                         \
-+typedef struct __name##_front_ring __name##_front_ring_t;               \
-+typedef struct __name##_back_ring __name##_back_ring_t
++    /*
++     * Pointer to buffer to fill with list of extent starts. If there are
++     * any large discontiguities in the machine address space, 2MB gaps in
++     * the machphys table will be represented by an MFN base of zero.
++     */
++    XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
++
++    /*
++     * Number of extents written to the above array. This will be smaller
++     * than 'max_extents' if the machphys table is smaller than
++     * max_extents * 2MB.
++     */
++    unsigned int nr_extents;
++};
++typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t;
++DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t);
 +
 +/*
-+ * Macros for manipulating rings.
-+ * 
-+ * FRONT_RING_whatever works on the "front end" of a ring: here 
-+ * requests are pushed on to the ring and responses taken off it.
-+ * 
-+ * BACK_RING_whatever works on the "back end" of a ring: here 
-+ * requests are taken off the ring and responses put on.
-+ * 
-+ * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL. 
-+ * This is OK in 1-for-1 request-response situations where the 
-+ * requestor (front end) never has more than RING_SIZE()-1
-+ * outstanding requests.
++ * Returns the location in virtual address space of the machine_to_phys
++ * mapping table. Architectures which do not have an m2p table, or which do not
++ * map it by default into guest address space, do not implement this command.
++ * arg == addr of xen_machphys_mapping_t.
 + */
++#define XENMEM_machphys_mapping     12
++struct xen_machphys_mapping {
++    xen_ulong_t v_start, v_end; /* Start and end virtual addresses.   */
++    xen_ulong_t max_mfn;        /* Maximum MFN that can be looked up. */
++};
++typedef struct xen_machphys_mapping xen_machphys_mapping_t;
++DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t);
 +
-+/* Initialising empty rings */
-+#define SHARED_RING_INIT(_s) do {                                       \
-+    (_s)->req_prod  = (_s)->rsp_prod  = 0;                              \
-+    (_s)->req_event = (_s)->rsp_event = 1;                              \
-+    (void)memset((_s)->pad, 0, sizeof((_s)->pad));                      \
-+} while(0)
-+
-+#define FRONT_RING_INIT(_r, _s, __size) do {                            \
-+    (_r)->req_prod_pvt = 0;                                             \
-+    (_r)->rsp_cons = 0;                                                 \
-+    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
-+    (_r)->sring = (_s);                                                 \
-+} while (0)
++/*
++ * Sets the GPFN at which a particular page appears in the specified guest's
++ * pseudophysical address space.
++ * arg == addr of xen_add_to_physmap_t.
++ */
++#define XENMEM_add_to_physmap      7
++struct xen_add_to_physmap {
++    /* Which domain to change the mapping for. */
++    domid_t domid;
 +
-+#define BACK_RING_INIT(_r, _s, __size) do {                             \
-+    (_r)->rsp_prod_pvt = 0;                                             \
-+    (_r)->req_cons = 0;                                                 \
-+    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
-+    (_r)->sring = (_s);                                                 \
-+} while (0)
++    /* Source mapping space. */
++#define XENMAPSPACE_shared_info 0 /* shared info page */
++#define XENMAPSPACE_grant_table 1 /* grant table page */
++#define XENMAPSPACE_mfn         2 /* usual MFN */
++    unsigned int space;
 +
-+/* Initialize to existing shared indexes -- for recovery */
-+#define FRONT_RING_ATTACH(_r, _s, __size) do {                          \
-+    (_r)->sring = (_s);                                                 \
-+    (_r)->req_prod_pvt = (_s)->req_prod;                                \
-+    (_r)->rsp_cons = (_s)->rsp_prod;                                    \
-+    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
-+} while (0)
++    /* Index into source mapping space. */
++    xen_ulong_t idx;
 +
-+#define BACK_RING_ATTACH(_r, _s, __size) do {                           \
-+    (_r)->sring = (_s);                                                 \
-+    (_r)->rsp_prod_pvt = (_s)->rsp_prod;                                \
-+    (_r)->req_cons = (_s)->req_prod;                                    \
-+    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
-+} while (0)
++    /* GPFN where the source mapping page should appear. */
++    xen_pfn_t     gpfn;
++};
++typedef struct xen_add_to_physmap xen_add_to_physmap_t;
++DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
 +
-+/* How big is this ring? */
-+#define RING_SIZE(_r)                                                   \
-+    ((_r)->nr_ents)
++/*
++ * Unmaps the page appearing at a particular GPFN from the specified guest's
++ * pseudophysical address space.
++ * arg == addr of xen_remove_from_physmap_t.
++ */
++#define XENMEM_remove_from_physmap      15
++struct xen_remove_from_physmap {
++    /* Which domain to change the mapping for. */
++    domid_t domid;
 +
-+/* Number of free requests (for use on front side only). */
-+#define RING_FREE_REQUESTS(_r)                                          \
-+    (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
++    /* GPFN of the current mapping of the page. */
++    xen_pfn_t     gpfn;
++};
++typedef struct xen_remove_from_physmap xen_remove_from_physmap_t;
++DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t);
 +
-+/* Test if there is an empty slot available on the front ring.
-+ * (This is only meaningful from the front. )
++/*
++ * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error
++ * code on failure. This call only works for auto-translated guests.
 + */
-+#define RING_FULL(_r)                                                   \
-+    (RING_FREE_REQUESTS(_r) == 0)
++#define XENMEM_translate_gpfn_list  8
++struct xen_translate_gpfn_list {
++    /* Which domain to translate for? */
++    domid_t domid;
 +
-+/* Test if there are outstanding messages to be processed on a ring. */
-+#define RING_HAS_UNCONSUMED_RESPONSES(_r)                               \
-+    ((_r)->sring->rsp_prod - (_r)->rsp_cons)
++    /* Length of list. */
++    xen_ulong_t nr_gpfns;
 +
-+#ifdef __GNUC__
-+#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({                             \
-+    unsigned int req = (_r)->sring->req_prod - (_r)->req_cons;          \
-+    unsigned int rsp = RING_SIZE(_r) -                                  \
-+        ((_r)->req_cons - (_r)->rsp_prod_pvt);                          \
-+    req < rsp ? req : rsp;                                              \
-+})
-+#else
-+/* Same as above, but without the nice GCC ({ ... }) syntax. */
-+#define RING_HAS_UNCONSUMED_REQUESTS(_r)                                \
-+    ((((_r)->sring->req_prod - (_r)->req_cons) <                        \
-+      (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ?        \
-+     ((_r)->sring->req_prod - (_r)->req_cons) :                         \
-+     (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt)))
-+#endif
++    /* List of GPFNs to translate. */
++    XEN_GUEST_HANDLE(xen_pfn_t) gpfn_list;
 +
-+/* Direct access to individual ring elements, by index. */
-+#define RING_GET_REQUEST(_r, _idx)                                      \
-+    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
++    /*
++     * Output list to contain MFN translations. May be the same as the input
++     * list (in which case each input GPFN is overwritten with the output MFN).
++     */
++    XEN_GUEST_HANDLE(xen_pfn_t) mfn_list;
++};
++typedef struct xen_translate_gpfn_list xen_translate_gpfn_list_t;
++DEFINE_XEN_GUEST_HANDLE(xen_translate_gpfn_list_t);
++
++/*
++ * Returns the pseudo-physical memory map as it was when the domain
++ * was started (specified by XENMEM_set_memory_map).
++ * arg == addr of xen_memory_map_t.
++ */
++#define XENMEM_memory_map           9
++struct xen_memory_map {
++    /*
++     * On call the number of entries which can be stored in buffer. On
++     * return the number of entries which have been stored in
++     * buffer.
++     */
++    unsigned int nr_entries;
 +
-+#define RING_GET_RESPONSE(_r, _idx)                                     \
-+    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
++    /*
++     * Entries in the buffer are in the same format as returned by the
++     * BIOS INT 0x15 EAX=0xE820 call.
++     */
++    XEN_GUEST_HANDLE(void) buffer;
++};
++typedef struct xen_memory_map xen_memory_map_t;
++DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t);
 +
-+/* Loop termination condition: Would the specified index overflow the ring? */
-+#define RING_REQUEST_CONS_OVERFLOW(_r, _cons)                           \
-+    (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
++/*
++ * Returns the real physical memory map. Passes the same structure as
++ * XENMEM_memory_map.
++ * arg == addr of xen_memory_map_t.
++ */
++#define XENMEM_machine_memory_map   10
 +
-+#define RING_PUSH_REQUESTS(_r) do {                                     \
-+    wmb(); /* back sees requests /before/ updated producer index */     \
-+    (_r)->sring->req_prod = (_r)->req_prod_pvt;                         \
-+} while (0)
++/*
++ * Set the pseudo-physical memory map of a domain, as returned by
++ * XENMEM_memory_map.
++ * arg == addr of xen_foreign_memory_map_t.
++ */
++#define XENMEM_set_memory_map       13
++struct xen_foreign_memory_map {
++    domid_t domid;
++    struct xen_memory_map map;
++};
++typedef struct xen_foreign_memory_map xen_foreign_memory_map_t;
++DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t);
 +
-+#define RING_PUSH_RESPONSES(_r) do {                                    \
-+    wmb(); /* front sees responses /before/ updated producer index */   \
-+    (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt;                         \
-+} while (0)
++#endif /* __XEN_PUBLIC_MEMORY_H__ */
 +
 +/*
-+ * Notification hold-off (req_event and rsp_event):
-+ * 
-+ * When queueing requests or responses on a shared ring, it may not always be
-+ * necessary to notify the remote end. For example, if requests are in flight
-+ * in a backend, the front may be able to queue further requests without
-+ * notifying the back (if the back checks for new requests when it queues
-+ * responses).
-+ * 
-+ * When enqueuing requests or responses:
-+ * 
-+ *  Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
-+ *  is a boolean return value. True indicates that the receiver requires an
-+ *  asynchronous notification.
-+ * 
-+ * After dequeuing requests or responses (before sleeping the connection):
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/nmi.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/nmi.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,78 @@
++/******************************************************************************
++ * nmi.h
 + * 
-+ *  Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
-+ *  The second argument is a boolean return value. True indicates that there
-+ *  are pending messages on the ring (i.e., the connection should not be put
-+ *  to sleep).
++ * NMI callback registration and reason codes.
 + * 
-+ *  These macros will set the req_event/rsp_event field to trigger a
-+ *  notification on the very next message that is enqueued. If you want to
-+ *  create batches of work (i.e., only receive a notification after several
-+ *  messages have been enqueued) then you will need to create a customised
-+ *  version of the FINAL_CHECK macro in your own code, which sets the event
-+ *  field appropriately.
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2005, Keir Fraser <keir at xensource.com>
 + */
 +
-+#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do {           \
-+    RING_IDX __old = (_r)->sring->req_prod;                             \
-+    RING_IDX __new = (_r)->req_prod_pvt;                                \
-+    wmb(); /* back sees requests /before/ updated producer index */     \
-+    (_r)->sring->req_prod = __new;                                      \
-+    mb(); /* back sees new requests /before/ we check req_event */      \
-+    (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) <           \
-+                 (RING_IDX)(__new - __old));                            \
-+} while (0)
++#ifndef __XEN_PUBLIC_NMI_H__
++#define __XEN_PUBLIC_NMI_H__
 +
-+#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do {          \
-+    RING_IDX __old = (_r)->sring->rsp_prod;                             \
-+    RING_IDX __new = (_r)->rsp_prod_pvt;                                \
-+    wmb(); /* front sees responses /before/ updated producer index */   \
-+    (_r)->sring->rsp_prod = __new;                                      \
-+    mb(); /* front sees new responses /before/ we check rsp_event */    \
-+    (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) <           \
-+                 (RING_IDX)(__new - __old));                            \
-+} while (0)
++/*
++ * NMI reason codes:
++ * Currently these are x86-specific, stored in arch_shared_info.nmi_reason.
++ */
++ /* I/O-check error reported via ISA port 0x61, bit 6. */
++#define _XEN_NMIREASON_io_error     0
++#define XEN_NMIREASON_io_error      (1UL << _XEN_NMIREASON_io_error)
++ /* Parity error reported via ISA port 0x61, bit 7. */
++#define _XEN_NMIREASON_parity_error 1
++#define XEN_NMIREASON_parity_error  (1UL << _XEN_NMIREASON_parity_error)
++ /* Unknown hardware-generated NMI. */
++#define _XEN_NMIREASON_unknown      2
++#define XEN_NMIREASON_unknown       (1UL << _XEN_NMIREASON_unknown)
 +
-+#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do {             \
-+    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                   \
-+    if (_work_to_do) break;                                             \
-+    (_r)->sring->req_event = (_r)->req_cons + 1;                        \
-+    mb();                                                               \
-+    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                   \
-+} while (0)
++/*
++ * long nmi_op(unsigned int cmd, void *arg)
++ * NB. All ops return zero on success, else a negative error code.
++ */
 +
-+#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do {            \
-+    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                  \
-+    if (_work_to_do) break;                                             \
-+    (_r)->sring->rsp_event = (_r)->rsp_cons + 1;                        \
-+    mb();                                                               \
-+    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                  \
-+} while (0)
++/*
++ * Register NMI callback for this (calling) VCPU. Currently this only makes
++ * sense for domain 0, vcpu 0. All other callers receive EINVAL.
++ * arg == pointer to xennmi_callback structure.
++ */
++#define XENNMI_register_callback   0
++struct xennmi_callback {
++    unsigned long handler_address;
++    unsigned long pad;
++};
++typedef struct xennmi_callback xennmi_callback_t;
++DEFINE_XEN_GUEST_HANDLE(xennmi_callback_t);
 +
-+#endif /* __XEN_PUBLIC_IO_RING_H__ */
++/*
++ * Deregister NMI callback for this (calling) VCPU.
++ * arg == NULL.
++ */
++#define XENNMI_unregister_callback 1
++
++#endif /* __XEN_PUBLIC_NMI_H__ */
 +
 +/*
 + * Local variables:
@@ -102247,15 +141317,11 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/io/tpmif.h tmp-linux-2.6-xen.patch/include/xen/interface/io/tpmif.h
---- pristine-linux-2.6.18.2/include/xen/interface/io/tpmif.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/io/tpmif.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,77 @@
-+/******************************************************************************
-+ * tpmif.h
-+ *
-+ * TPM I/O interface for Xen guest OSes.
-+ *
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/physdev.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/physdev.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,219 @@
++/*
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
 + * deal in the Software without restriction, including without limitation the
@@ -102273,51 +141339,197 @@
 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2005, IBM Corporation
-+ *
-+ * Author: Stefan Berger, stefanb at us.ibm.com
-+ * Grant table support: Mahadevan Gomathisankaran
-+ *
-+ * This code has been derived from tools/libxc/xen/io/netif.h
-+ *
-+ * Copyright (c) 2003-2004, Keir Fraser
 + */
 +
-+#ifndef __XEN_PUBLIC_IO_TPMIF_H__
-+#define __XEN_PUBLIC_IO_TPMIF_H__
++#ifndef __XEN_PUBLIC_PHYSDEV_H__
++#define __XEN_PUBLIC_PHYSDEV_H__
 +
-+#include "../grant_table.h"
++/*
++ * Prototype for this hypercall is:
++ *  int physdev_op(int cmd, void *args)
++ * @cmd  == PHYSDEVOP_??? (physdev operation).
++ * @args == Operation-specific extra arguments (NULL if none).
++ */
 +
-+struct tpmif_tx_request {
-+    unsigned long addr;   /* Machine address of packet.   */
-+    grant_ref_t ref;      /* grant table access reference */
-+    uint16_t unused;
-+    uint16_t size;        /* Packet size in bytes.        */
++/*
++ * Notify end-of-interrupt (EOI) for the specified IRQ.
++ * @arg == pointer to physdev_eoi structure.
++ */
++#define PHYSDEVOP_eoi                   12
++struct physdev_eoi {
++    /* IN */
++    uint32_t irq;
 +};
-+typedef struct tpmif_tx_request tpmif_tx_request_t;
++typedef struct physdev_eoi physdev_eoi_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t);
 +
 +/*
-+ * The TPMIF_TX_RING_SIZE defines the number of pages the
-+ * front-end and backend can exchange (= size of array).
++ * Query the status of an IRQ line.
++ * @arg == pointer to physdev_irq_status_query structure.
 + */
-+typedef uint32_t TPMIF_RING_IDX;
++#define PHYSDEVOP_irq_status_query       5
++struct physdev_irq_status_query {
++    /* IN */
++    uint32_t irq;
++    /* OUT */
++    uint32_t flags; /* XENIRQSTAT_* */
++};
++typedef struct physdev_irq_status_query physdev_irq_status_query_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_irq_status_query_t);
 +
-+#define TPMIF_TX_RING_SIZE 1
++/* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */
++#define _XENIRQSTAT_needs_eoi   (0)
++#define  XENIRQSTAT_needs_eoi   (1U<<_XENIRQSTAT_needs_eoi)
 +
-+/* This structure must fit in a memory page. */
++/* IRQ shared by multiple guests? */
++#define _XENIRQSTAT_shared      (1)
++#define  XENIRQSTAT_shared      (1U<<_XENIRQSTAT_shared)
 +
-+struct tpmif_ring {
-+    struct tpmif_tx_request req;
++/*
++ * Set the current VCPU's I/O privilege level.
++ * @arg == pointer to physdev_set_iopl structure.
++ */
++#define PHYSDEVOP_set_iopl               6
++struct physdev_set_iopl {
++    /* IN */
++    uint32_t iopl;
 +};
-+typedef struct tpmif_ring tpmif_ring_t;
++typedef struct physdev_set_iopl physdev_set_iopl_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t);
 +
-+struct tpmif_tx_interface {
-+    struct tpmif_ring ring[TPMIF_TX_RING_SIZE];
++/*
++ * Set the current VCPU's I/O-port permissions bitmap.
++ * @arg == pointer to physdev_set_iobitmap structure.
++ */
++#define PHYSDEVOP_set_iobitmap           7
++struct physdev_set_iobitmap {
++    /* IN */
++#if __XEN_INTERFACE_VERSION__ >= 0x00030205
++    XEN_GUEST_HANDLE(uint8) bitmap;
++#else
++    uint8_t *bitmap;
++#endif
++    uint32_t nr_ports;
 +};
-+typedef struct tpmif_tx_interface tpmif_tx_interface_t;
++typedef struct physdev_set_iobitmap physdev_set_iobitmap_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_set_iobitmap_t);
 +
-+#endif
++/*
++ * Read or write an IO-APIC register.
++ * @arg == pointer to physdev_apic structure.
++ */
++#define PHYSDEVOP_apic_read              8
++#define PHYSDEVOP_apic_write             9
++struct physdev_apic {
++    /* IN */
++    unsigned long apic_physbase;
++    uint32_t reg;
++    /* IN or OUT */
++    uint32_t value;
++};
++typedef struct physdev_apic physdev_apic_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_apic_t);
++
++/*
++ * Allocate or free a physical upcall vector for the specified IRQ line.
++ * @arg == pointer to physdev_irq structure.
++ */
++#define PHYSDEVOP_alloc_irq_vector      10
++#define PHYSDEVOP_free_irq_vector       11
++struct physdev_irq {
++    /* IN */
++    uint32_t irq;
++    /* IN or OUT */
++    uint32_t vector;
++};
++typedef struct physdev_irq physdev_irq_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_irq_t);
++ 
++#define MAP_PIRQ_TYPE_MSI               0x0
++#define MAP_PIRQ_TYPE_GSI               0x1
++#define MAP_PIRQ_TYPE_UNKNOWN           0x2
++
++#define PHYSDEVOP_map_pirq               13
++struct physdev_map_pirq {
++    domid_t domid;
++    /* IN */
++    int type;
++    /* IN */
++    int index;
++    /* IN or OUT */
++    int pirq;
++    /* IN */
++    int bus;
++    /* IN */
++    int devfn;
++    /* IN */
++    int entry_nr;
++    /* IN */
++    uint64_t table_base;
++};
++typedef struct physdev_map_pirq physdev_map_pirq_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_map_pirq_t);
++
++#define PHYSDEVOP_unmap_pirq             14
++struct physdev_unmap_pirq {
++    domid_t domid;
++    /* IN */
++    int pirq;
++};
++
++typedef struct physdev_unmap_pirq physdev_unmap_pirq_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_unmap_pirq_t);
++
++#define PHYSDEVOP_manage_pci_add         15
++#define PHYSDEVOP_manage_pci_remove      16
++struct physdev_manage_pci {
++    /* IN */
++    uint8_t bus;
++    uint8_t devfn;
++}; 
++
++typedef struct physdev_manage_pci physdev_manage_pci_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_t);
++
++/*
++ * Argument to physdev_op_compat() hypercall. Superseded by the new physdev_op()
++ * hypercall since 0x00030202.
++ */
++struct physdev_op {
++    uint32_t cmd;
++    union {
++        struct physdev_irq_status_query      irq_status_query;
++        struct physdev_set_iopl              set_iopl;
++        struct physdev_set_iobitmap          set_iobitmap;
++        struct physdev_apic                  apic_op;
++        struct physdev_irq                   irq_op;
++    } u;
++};
++typedef struct physdev_op physdev_op_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_op_t);
++
++/*
++ * Notify that some PIRQ-bound event channels have been unmasked.
++ * ** This command is obsolete since interface version 0x00030202 and is **
++ * ** unsupported by newer versions of Xen.                              **
++ */
++#define PHYSDEVOP_IRQ_UNMASK_NOTIFY      4
++
++/*
++ * These all-capitals physdev operation names are superseded by the new names
++ * (defined above) since interface version 0x00030202.
++ */
++#define PHYSDEVOP_IRQ_STATUS_QUERY       PHYSDEVOP_irq_status_query
++#define PHYSDEVOP_SET_IOPL               PHYSDEVOP_set_iopl
++#define PHYSDEVOP_SET_IOBITMAP           PHYSDEVOP_set_iobitmap
++#define PHYSDEVOP_APIC_READ              PHYSDEVOP_apic_read
++#define PHYSDEVOP_APIC_WRITE             PHYSDEVOP_apic_write
++#define PHYSDEVOP_ASSIGN_VECTOR          PHYSDEVOP_alloc_irq_vector
++#define PHYSDEVOP_FREE_VECTOR            PHYSDEVOP_free_irq_vector
++#define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi
++#define PHYSDEVOP_IRQ_SHARED             XENIRQSTAT_shared
++
++#endif /* __XEN_PUBLIC_PHYSDEV_H__ */
 +
 +/*
 + * Local variables:
@@ -102328,15 +141540,15 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/io/xenbus.h tmp-linux-2.6-xen.patch/include/xen/interface/io/xenbus.h
---- pristine-linux-2.6.18.2/include/xen/interface/io/xenbus.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/io/xenbus.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,73 @@
-+/*****************************************************************************
-+ * xenbus.h
-+ *
-+ * Xenbus protocol details.
-+ *
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/platform.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/platform.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,346 @@
++/******************************************************************************
++ * platform.h
++ * 
++ * Hardware platform operations. Intended for use by domain-0 kernel.
++ * 
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
 + * deal in the Software without restriction, including without limitation the
@@ -102355,46 +141567,319 @@
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
 + *
-+ * Copyright (C) 2005 XenSource Ltd.
++ * Copyright (c) 2002-2006, K Fraser
 + */
 +
-+#ifndef _XEN_PUBLIC_IO_XENBUS_H
-+#define _XEN_PUBLIC_IO_XENBUS_H
++#ifndef __XEN_PUBLIC_PLATFORM_H__
++#define __XEN_PUBLIC_PLATFORM_H__
++
++#include "xen.h"
++
++#define XENPF_INTERFACE_VERSION 0x03000001
 +
 +/*
-+ * The state of either end of the Xenbus, i.e. the current communication
-+ * status of initialisation across the bus.  States here imply nothing about
-+ * the state of the connection between the driver and the kernel's device
-+ * layers.
++ * Set clock such that it would read <secs,nsecs> after 00:00:00 UTC,
++ * 1 January, 1970 if the current system time was <system_time>.
 + */
-+enum xenbus_state {
-+    XenbusStateUnknown       = 0,
++#define XENPF_settime             17
++struct xenpf_settime {
++    /* IN variables. */
++    uint32_t secs;
++    uint32_t nsecs;
++    uint64_t system_time;
++};
++typedef struct xenpf_settime xenpf_settime_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_settime_t);
 +
-+    XenbusStateInitialising  = 1,
++/*
++ * Request memory range (@mfn, @mfn+@nr_mfns-1) to have type @type.
++ * On x86, @type is an architecture-defined MTRR memory type.
++ * On success, returns the MTRR that was used (@reg) and a handle that can
++ * be passed to XENPF_DEL_MEMTYPE to accurately tear down the new setting.
++ * (x86-specific).
++ */
++#define XENPF_add_memtype         31
++struct xenpf_add_memtype {
++    /* IN variables. */
++    xen_pfn_t mfn;
++    uint64_t nr_mfns;
++    uint32_t type;
++    /* OUT variables. */
++    uint32_t handle;
++    uint32_t reg;
++};
++typedef struct xenpf_add_memtype xenpf_add_memtype_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_add_memtype_t);
 +
-+    /*
-+     * InitWait: Finished early initialisation but waiting for information
-+     * from the peer or hotplug scripts.
-+     */
-+    XenbusStateInitWait      = 2,
++/*
++ * Tear down an existing memory-range type. If @handle is remembered then it
++ * should be passed in to accurately tear down the correct setting (in case
++ * of overlapping memory regions with differing types). If it is not known
++ * then @handle should be set to zero. In all cases @reg must be set.
++ * (x86-specific).
++ */
++#define XENPF_del_memtype         32
++struct xenpf_del_memtype {
++    /* IN variables. */
++    uint32_t handle;
++    uint32_t reg;
++};
++typedef struct xenpf_del_memtype xenpf_del_memtype_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_del_memtype_t);
 +
-+    /*
-+     * Initialised: Waiting for a connection from the peer.
-+     */
-+    XenbusStateInitialised   = 3,
++/* Read current type of an MTRR (x86-specific). */
++#define XENPF_read_memtype        33
++struct xenpf_read_memtype {
++    /* IN variables. */
++    uint32_t reg;
++    /* OUT variables. */
++    xen_pfn_t mfn;
++    uint64_t nr_mfns;
++    uint32_t type;
++};
++typedef struct xenpf_read_memtype xenpf_read_memtype_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_read_memtype_t);
++
++#define XENPF_microcode_update    35
++struct xenpf_microcode_update {
++    /* IN variables. */
++    XEN_GUEST_HANDLE(const_void) data;/* Pointer to microcode data */
++    uint32_t length;                  /* Length of microcode data. */
++};
++typedef struct xenpf_microcode_update xenpf_microcode_update_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_microcode_update_t);
++
++#define XENPF_platform_quirk      39
++#define QUIRK_NOIRQBALANCING      1 /* Do not restrict IO-APIC RTE targets */
++#define QUIRK_IOAPIC_BAD_REGSEL   2 /* IO-APIC REGSEL forgets its value    */
++#define QUIRK_IOAPIC_GOOD_REGSEL  3 /* IO-APIC REGSEL behaves properly     */
++struct xenpf_platform_quirk {
++    /* IN variables. */
++    uint32_t quirk_id;
++};
++typedef struct xenpf_platform_quirk xenpf_platform_quirk_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_platform_quirk_t);
++
++#define XENPF_firmware_info       50
++#define XEN_FW_DISK_INFO          1 /* from int 13 AH=08/41/48 */
++#define XEN_FW_DISK_MBR_SIGNATURE 2 /* from MBR offset 0x1b8 */
++#define XEN_FW_VBEDDC_INFO        3 /* from int 10 AX=4f15 */
++struct xenpf_firmware_info {
++    /* IN variables. */
++    uint32_t type;
++    uint32_t index;
++    /* OUT variables. */
++    union {
++        struct {
++            /* Int13, Fn48: Check Extensions Present. */
++            uint8_t device;                   /* %dl: bios device number */
++            uint8_t version;                  /* %ah: major version      */
++            uint16_t interface_support;       /* %cx: support bitmap     */
++            /* Int13, Fn08: Legacy Get Device Parameters. */
++            uint16_t legacy_max_cylinder;     /* %cl[7:6]:%ch: max cyl # */
++            uint8_t legacy_max_head;          /* %dh: max head #         */
++            uint8_t legacy_sectors_per_track; /* %cl[5:0]: max sector #  */
++            /* Int13, Fn41: Get Device Parameters (as filled into %ds:%esi). */
++            /* NB. First uint16_t of buffer must be set to buffer size.      */
++            XEN_GUEST_HANDLE(void) edd_params;
++        } disk_info; /* XEN_FW_DISK_INFO */
++        struct {
++            uint8_t device;                   /* bios device number  */
++            uint32_t mbr_signature;           /* offset 0x1b8 in mbr */
++        } disk_mbr_signature; /* XEN_FW_DISK_MBR_SIGNATURE */
++        struct {
++            /* Int10, AX=4F15: Get EDID info. */
++            uint8_t capabilities;
++            uint8_t edid_transfer_time;
++            /* must refer to 128-byte buffer */
++            XEN_GUEST_HANDLE(uint8) edid;
++        } vbeddc_info; /* XEN_FW_VBEDDC_INFO */
++    } u;
++};
++typedef struct xenpf_firmware_info xenpf_firmware_info_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_firmware_info_t);
++
++#define XENPF_enter_acpi_sleep    51
++struct xenpf_enter_acpi_sleep {
++    /* IN variables */
++    uint16_t pm1a_cnt_val;      /* PM1a control value. */
++    uint16_t pm1b_cnt_val;      /* PM1b control value. */
++    uint32_t sleep_state;       /* Which state to enter (Sn). */
++    uint32_t flags;             /* Must be zero. */
++};
++typedef struct xenpf_enter_acpi_sleep xenpf_enter_acpi_sleep_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_enter_acpi_sleep_t);
++
++#define XENPF_change_freq         52
++struct xenpf_change_freq {
++    /* IN variables */
++    uint32_t flags; /* Must be zero. */
++    uint32_t cpu;   /* Physical cpu. */
++    uint64_t freq;  /* New frequency (Hz). */
++};
++typedef struct xenpf_change_freq xenpf_change_freq_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_change_freq_t);
++
++/*
++ * Get idle times (nanoseconds since boot) for physical CPUs specified in the
++ * @cpumap_bitmap with range [0..@cpumap_nr_cpus-1]. The @idletime array is
++ * indexed by CPU number; only entries with the corresponding @cpumap_bitmap
++ * bit set are written to. On return, @cpumap_bitmap is modified so that any
++ * non-existent CPUs are cleared. Such CPUs have their @idletime array entry
++ * cleared.
++ */
++#define XENPF_getidletime         53
++struct xenpf_getidletime {
++    /* IN/OUT variables */
++    /* IN: CPUs to interrogate; OUT: subset of IN which are present */
++    XEN_GUEST_HANDLE(uint8) cpumap_bitmap;
++    /* IN variables */
++    /* Size of cpumap bitmap. */
++    uint32_t cpumap_nr_cpus;
++    /* Must be indexable for every cpu in cpumap_bitmap. */
++    XEN_GUEST_HANDLE(uint64) idletime;
++    /* OUT variables */
++    /* System time when the idletime snapshots were taken. */
++    uint64_t now;
++};
++typedef struct xenpf_getidletime xenpf_getidletime_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_getidletime_t);
 +
-+    XenbusStateConnected     = 4,
++#define XENPF_set_processor_pminfo      54
 +
-+    /*
-+     * Closing: The device is being closed due to an error or an unplug event.
-+     */
-+    XenbusStateClosing       = 5,
++/* ability bits */
++#define XEN_PROCESSOR_PM_CX	1
++#define XEN_PROCESSOR_PM_PX	2
++#define XEN_PROCESSOR_PM_TX	4
++
++/* cmd type */
++#define XEN_PM_CX   0
++#define XEN_PM_PX   1
++#define XEN_PM_TX   2
++
++/* Px sub info type */
++#define XEN_PX_PCT   1
++#define XEN_PX_PSS   2
++#define XEN_PX_PPC   4
++#define XEN_PX_PSD   8
++
++struct xen_power_register {
++    uint32_t     space_id;
++    uint32_t     bit_width;
++    uint32_t     bit_offset;
++    uint32_t     access_size;
++    uint64_t     address;
++};
++
++struct xen_processor_csd {
++    uint32_t    domain;      /* domain number of one dependent group */
++    uint32_t    coord_type;  /* coordination type */
++    uint32_t    num;         /* number of processors in same domain */
++};
++typedef struct xen_processor_csd xen_processor_csd_t;
++DEFINE_XEN_GUEST_HANDLE(xen_processor_csd_t);
++
++struct xen_processor_cx {
++    struct xen_power_register  reg; /* GAS for Cx trigger register */
++    uint8_t     type;     /* cstate value, c0: 0, c1: 1, ... */
++    uint32_t    latency;  /* worst latency (ms) to enter/exit this cstate */
++    uint32_t    power;    /* average power consumption(mW) */
++    uint32_t    dpcnt;    /* number of dependency entries */
++    XEN_GUEST_HANDLE(xen_processor_csd_t) dp; /* NULL if no dependency */
++};
++typedef struct xen_processor_cx xen_processor_cx_t;
++DEFINE_XEN_GUEST_HANDLE(xen_processor_cx_t);
++
++struct xen_processor_flags {
++    uint32_t bm_control:1;
++    uint32_t bm_check:1;
++    uint32_t has_cst:1;
++    uint32_t power_setup_done:1;
++    uint32_t bm_rld_set:1;
++};
++
++struct xen_processor_power {
++    uint32_t count;  /* number of C state entries in array below */
++    struct xen_processor_flags flags;  /* global flags of this processor */
++    XEN_GUEST_HANDLE(xen_processor_cx_t) states; /* supported c states */
++};
++
++struct xen_pct_register {
++    uint8_t  descriptor;
++    uint16_t length;
++    uint8_t  space_id;
++    uint8_t  bit_width;
++    uint8_t  bit_offset;
++    uint8_t  reserved;
++    uint64_t address;
++};
++
++struct xen_processor_px {
++    uint64_t core_frequency; /* megahertz */
++    uint64_t power;      /* milliWatts */
++    uint64_t transition_latency; /* microseconds */
++    uint64_t bus_master_latency; /* microseconds */
++    uint64_t control;        /* control value */
++    uint64_t status;     /* success indicator */
++};
++typedef struct xen_processor_px xen_processor_px_t;
++DEFINE_XEN_GUEST_HANDLE(xen_processor_px_t);
++
++struct xen_psd_package {
++    uint64_t num_entries;
++    uint64_t revision;
++    uint64_t domain;
++    uint64_t coord_type;
++    uint64_t num_processors;
++};
++
++struct xen_processor_performance {
++    uint32_t flags;     /* flag for Px sub info type */
++    uint32_t ppc;       /* Platform limitation on freq usage */
++    struct xen_pct_register control_register;
++    struct xen_pct_register status_register;
++    uint32_t state_count;     /* total available performance states */
++    XEN_GUEST_HANDLE(xen_processor_px_t) states;
++    struct xen_psd_package domain_info;
++    uint32_t shared_type;     /* coordination type of this processor */
++};
++typedef struct xen_processor_performance xen_processor_performance_t;
++DEFINE_XEN_GUEST_HANDLE(xen_processor_performance_t);
 +
-+    XenbusStateClosed        = 6
++struct xenpf_set_processor_pminfo {
++    /* IN variables */
++    uint32_t id;    /* ACPI CPU ID */
++    uint32_t type;  /* {XEN_PM_CX, XEN_PM_PX} */
++    union {
++        struct xen_processor_power          power;/* Cx: _CST/_CSD */
++        struct xen_processor_performance    perf; /* Px: _PPC/_PCT/_PSS/_PSD */
++    };
 +};
-+typedef enum xenbus_state XenbusState;
++typedef struct xenpf_set_processor_pminfo xenpf_set_processor_pminfo_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_set_processor_pminfo_t);
 +
-+#endif /* _XEN_PUBLIC_IO_XENBUS_H */
++struct xen_platform_op {
++    uint32_t cmd;
++    uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
++    union {
++        struct xenpf_settime           settime;
++        struct xenpf_add_memtype       add_memtype;
++        struct xenpf_del_memtype       del_memtype;
++        struct xenpf_read_memtype      read_memtype;
++        struct xenpf_microcode_update  microcode;
++        struct xenpf_platform_quirk    platform_quirk;
++        struct xenpf_firmware_info     firmware_info;
++        struct xenpf_enter_acpi_sleep  enter_acpi_sleep;
++        struct xenpf_change_freq       change_freq;
++        struct xenpf_getidletime       getidletime;
++        struct xenpf_set_processor_pminfo set_pminfo;
++        uint8_t                        pad[128];
++    } u;
++};
++typedef struct xen_platform_op xen_platform_op_t;
++DEFINE_XEN_GUEST_HANDLE(xen_platform_op_t);
++
++#endif /* __XEN_PUBLIC_PLATFORM_H__ */
 +
 +/*
 + * Local variables:
@@ -102405,14 +141890,15 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/io/xs_wire.h tmp-linux-2.6-xen.patch/include/xen/interface/io/xs_wire.h
---- pristine-linux-2.6.18.2/include/xen/interface/io/xs_wire.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/io/xs_wire.h	2007-09-30 18:06:19.000000000 +0200
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/sched.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/sched.h	Wed Sep 10 10:54:08 2008 +0100
 @@ -0,0 +1,121 @@
-+/*
-+ * Details of the "wire" protocol between Xen Store Daemon and client
-+ * library or guest kernel.
-+ *
++/******************************************************************************
++ * sched.h
++ * 
++ * Scheduler state interactions
++ * 
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
 + * deal in the Software without restriction, including without limitation the
@@ -102431,236 +141917,94 @@
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
 + *
-+ * Copyright (C) 2005 Rusty Russell IBM Corporation
++ * Copyright (c) 2005, Keir Fraser <keir at xensource.com>
 + */
 +
-+#ifndef _XS_WIRE_H
-+#define _XS_WIRE_H
-+
-+enum xsd_sockmsg_type
-+{
-+    XS_DEBUG,
-+    XS_DIRECTORY,
-+    XS_READ,
-+    XS_GET_PERMS,
-+    XS_WATCH,
-+    XS_UNWATCH,
-+    XS_TRANSACTION_START,
-+    XS_TRANSACTION_END,
-+    XS_INTRODUCE,
-+    XS_RELEASE,
-+    XS_GET_DOMAIN_PATH,
-+    XS_WRITE,
-+    XS_MKDIR,
-+    XS_RM,
-+    XS_SET_PERMS,
-+    XS_WATCH_EVENT,
-+    XS_ERROR,
-+    XS_IS_DOMAIN_INTRODUCED,
-+    XS_RESUME
-+};
-+
-+#define XS_WRITE_NONE "NONE"
-+#define XS_WRITE_CREATE "CREATE"
-+#define XS_WRITE_CREATE_EXCL "CREATE|EXCL"
-+
-+/* We hand errors as strings, for portability. */
-+struct xsd_errors
-+{
-+    int errnum;
-+    const char *errstring;
-+};
-+#define XSD_ERROR(x) { x, #x }
-+static struct xsd_errors xsd_errors[]
-+#if defined(__GNUC__)
-+__attribute__((unused))
-+#endif
-+    = {
-+    XSD_ERROR(EINVAL),
-+    XSD_ERROR(EACCES),
-+    XSD_ERROR(EEXIST),
-+    XSD_ERROR(EISDIR),
-+    XSD_ERROR(ENOENT),
-+    XSD_ERROR(ENOMEM),
-+    XSD_ERROR(ENOSPC),
-+    XSD_ERROR(EIO),
-+    XSD_ERROR(ENOTEMPTY),
-+    XSD_ERROR(ENOSYS),
-+    XSD_ERROR(EROFS),
-+    XSD_ERROR(EBUSY),
-+    XSD_ERROR(EAGAIN),
-+    XSD_ERROR(EISCONN)
-+};
-+
-+struct xsd_sockmsg
-+{
-+    uint32_t type;  /* XS_??? */
-+    uint32_t req_id;/* Request identifier, echoed in daemon's response.  */
-+    uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */
-+    uint32_t len;   /* Length of data following this. */
-+
-+    /* Generally followed by nul-terminated string(s). */
-+};
-+
-+enum xs_watch_type
-+{
-+    XS_WATCH_PATH = 0,
-+    XS_WATCH_TOKEN
-+};
-+
-+/* Inter-domain shared memory communications. */
-+#define XENSTORE_RING_SIZE 1024
-+typedef uint32_t XENSTORE_RING_IDX;
-+#define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1))
-+struct xenstore_domain_interface {
-+    char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */
-+    char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */
-+    XENSTORE_RING_IDX req_cons, req_prod;
-+    XENSTORE_RING_IDX rsp_cons, rsp_prod;
-+};
++#ifndef __XEN_PUBLIC_SCHED_H__
++#define __XEN_PUBLIC_SCHED_H__
 +
-+#endif /* _XS_WIRE_H */
++#include "event_channel.h"
 +
 +/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/kexec.h tmp-linux-2.6-xen.patch/include/xen/interface/kexec.h
---- pristine-linux-2.6.18.2/include/xen/interface/kexec.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/kexec.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,137 @@
-+/******************************************************************************
-+ * kexec.h - Public portion
++ * The prototype for this hypercall is:
++ *  long sched_op(int cmd, void *arg)
++ * @cmd == SCHEDOP_??? (scheduler operation).
++ * @arg == Operation-specific extra argument(s), as described below.
 + * 
-+ * Xen port written by:
-+ * - Simon 'Horms' Horman <horms at verge.net.au>
-+ * - Magnus Damm <magnus at valinux.co.jp>
-+ */
-+
-+#ifndef _XEN_PUBLIC_KEXEC_H
-+#define _XEN_PUBLIC_KEXEC_H
-+
-+
-+/* This file describes the Kexec / Kdump hypercall interface for Xen.
-+ *
-+ * Kexec under vanilla Linux allows a user to reboot the physical machine 
-+ * into a new user-specified kernel. The Xen port extends this idea
-+ * to allow rebooting of the machine from dom0. When kexec for dom0
-+ * is used to reboot,  both the hypervisor and the domains get replaced
-+ * with some other kernel. It is possible to kexec between vanilla
-+ * Linux and Xen and back again. Xen to Xen works well too.
-+ *
-+ * The hypercall interface for kexec can be divided into three main
-+ * types of hypercall operations:
-+ *
-+ * 1) Range information:
-+ *    This is used by the dom0 kernel to ask the hypervisor about various 
-+ *    address information. This information is needed to allow kexec-tools 
-+ *    to fill in the ELF headers for /proc/vmcore properly.
-+ *
-+ * 2) Load and unload of images:
-+ *    There are no big surprises here, the kexec binary from kexec-tools
-+ *    runs in userspace in dom0. The tool loads/unloads data into the
-+ *    dom0 kernel such as new kernel, initramfs and hypervisor. When
-+ *    loaded the dom0 kernel performs a load hypercall operation, and
-+ *    before releasing all page references the dom0 kernel calls unload.
-+ *
-+ * 3) Kexec operation:
-+ *    This is used to start a previously loaded kernel.
++ * Versions of Xen prior to 3.0.2 provided only the following legacy version
++ * of this hypercall, supporting only the commands yield, block and shutdown:
++ *  long sched_op(int cmd, unsigned long arg)
++ * @cmd == SCHEDOP_??? (scheduler operation).
++ * @arg == 0               (SCHEDOP_yield and SCHEDOP_block)
++ *      == SHUTDOWN_* code (SCHEDOP_shutdown)
++ * This legacy version is available to new guests as sched_op_compat().
 + */
 +
-+#include "xen.h"
-+
-+#if defined(__i386__) || defined(__x86_64__)
-+#define KEXEC_XEN_NO_PAGES 17
-+#endif
-+
 +/*
-+ * Prototype for this hypercall is:
-+ *  int kexec_op(int cmd, void *args)
-+ * @cmd  == KEXEC_CMD_... 
-+ *          KEXEC operation to perform
-+ * @args == Operation-specific extra arguments (NULL if none).
++ * Voluntarily yield the CPU.
++ * @arg == NULL.
 + */
++#define SCHEDOP_yield       0
 +
 +/*
-+ * Kexec supports two types of operation:
-+ * - kexec into a regular kernel, very similar to a standard reboot
-+ *   - KEXEC_TYPE_DEFAULT is used to specify this type
-+ * - kexec into a special "crash kernel", aka kexec-on-panic
-+ *   - KEXEC_TYPE_CRASH is used to specify this type
-+ *   - parts of our system may be broken at kexec-on-panic time
-+ *     - the code should be kept as simple and self-contained as possible
++ * Block execution of this VCPU until an event is received for processing.
++ * If called with event upcalls masked, this operation will atomically
++ * reenable event delivery and check for pending events before blocking the
++ * VCPU. This avoids a "wakeup waiting" race.
++ * @arg == NULL.
 + */
++#define SCHEDOP_block       1
 +
-+#define KEXEC_TYPE_DEFAULT 0
-+#define KEXEC_TYPE_CRASH   1
-+
-+
-+/* The kexec implementation for Xen allows the user to load two
-+ * types of kernels, KEXEC_TYPE_DEFAULT and KEXEC_TYPE_CRASH.
-+ * All data needed for a kexec reboot is kept in one xen_kexec_image_t
-+ * per "instance". The data mainly consists of machine address lists to pages
-+ * together with destination addresses. The data in xen_kexec_image_t
-+ * is passed to the "code page" which is one page of code that performs
-+ * the final relocations before jumping to the new kernel.
++/*
++ * Halt execution of this domain (all VCPUs) and notify the system controller.
++ * @arg == pointer to sched_shutdown structure.
 + */
-+ 
-+typedef struct xen_kexec_image {
-+#if defined(__i386__) || defined(__x86_64__)
-+    unsigned long page_list[KEXEC_XEN_NO_PAGES];
-+#endif
-+    unsigned long indirection_page;
-+    unsigned long start_address;
-+} xen_kexec_image_t;
++#define SCHEDOP_shutdown    2
++struct sched_shutdown {
++    unsigned int reason; /* SHUTDOWN_* */
++};
++typedef struct sched_shutdown sched_shutdown_t;
++DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t);
 +
 +/*
-+ * Perform kexec having previously loaded a kexec or kdump kernel
-+ * as appropriate.
-+ * type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in]
++ * Poll a set of event-channel ports. Return when one or more are pending. An
++ * optional timeout may be specified.
++ * @arg == pointer to sched_poll structure.
 + */
-+#define KEXEC_CMD_kexec                 0
-+typedef struct xen_kexec_exec {
-+    int type;
-+} xen_kexec_exec_t;
++#define SCHEDOP_poll        3
++struct sched_poll {
++    XEN_GUEST_HANDLE(evtchn_port_t) ports;
++    unsigned int nr_ports;
++    uint64_t timeout;
++};
++typedef struct sched_poll sched_poll_t;
++DEFINE_XEN_GUEST_HANDLE(sched_poll_t);
 +
 +/*
-+ * Load/Unload kernel image for kexec or kdump.
-+ * type  == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in]
-+ * image == relocation information for kexec (ignored for unload) [in]
++ * Declare a shutdown for another domain. The main use of this function is
++ * in interpreting shutdown requests and reasons for fully-virtualized
++ * domains.  A para-virtualized domain may use SCHEDOP_shutdown directly.
++ * @arg == pointer to sched_remote_shutdown structure.
 + */
-+#define KEXEC_CMD_kexec_load            1
-+#define KEXEC_CMD_kexec_unload          2
-+typedef struct xen_kexec_load {
-+    int type;
-+    xen_kexec_image_t image;
-+} xen_kexec_load_t;
-+
-+#define KEXEC_RANGE_MA_CRASH 0   /* machine address and size of crash area */
-+#define KEXEC_RANGE_MA_XEN   1   /* machine address and size of Xen itself */
-+#define KEXEC_RANGE_MA_CPU   2   /* machine address and size of a CPU note */
++#define SCHEDOP_remote_shutdown        4
++struct sched_remote_shutdown {
++    domid_t domain_id;         /* Remote domain ID */
++    unsigned int reason;       /* SHUTDOWN_xxx reason */
++};
++typedef struct sched_remote_shutdown sched_remote_shutdown_t;
++DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t);
 +
 +/*
-+ * Find the address and size of certain memory areas
-+ * range == KEXEC_RANGE_... [in]
-+ * nr    == physical CPU number (starting from 0) if KEXEC_RANGE_MA_CPU [in]
-+ * size  == number of bytes reserved in window [out]
-+ * start == address of the first byte in the window [out]
++ * Reason codes for SCHEDOP_shutdown. These may be interpreted by control
++ * software to determine the appropriate action. For the most part, Xen does
++ * not care about the shutdown code.
 + */
-+#define KEXEC_CMD_kexec_get_range       3
-+typedef struct xen_kexec_range {
-+    int range;
-+    int nr;
-+    unsigned long size;
-+    unsigned long start;
-+} xen_kexec_range_t;
++#define SHUTDOWN_poweroff   0  /* Domain exited normally. Clean up and kill. */
++#define SHUTDOWN_reboot     1  /* Clean up, kill, and then restart.          */
++#define SHUTDOWN_suspend    2  /* Clean up, save suspend info, kill.         */
++#define SHUTDOWN_crash      3  /* Tell controller we've crashed.             */
 +
-+#endif /* _XEN_PUBLIC_KEXEC_H */
++#endif /* __XEN_PUBLIC_SCHED_H__ */
 +
 +/*
 + * Local variables:
@@ -102671,259 +142015,489 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
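As a usage sketch for the sched.h interface above (illustrative only, not part of the patch): a paravirtualised guest requests a clean reboot with SCHEDOP_shutdown. This assumes the HYPERVISOR_sched_op() wrapper that the Xen hypercall headers supply for the documented `long sched_op(int cmd, void *arg)` prototype.

    #include <xen/interface/sched.h>

    /* Ask Xen to halt all of this domain's VCPUs and restart it. */
    static void reboot_domain_example(void)
    {
        struct sched_shutdown arg = { .reason = SHUTDOWN_reboot };

        /* Assumed wrapper for the sched_op hypercall documented above. */
        HYPERVISOR_sched_op(SCHEDOP_shutdown, &arg);
    }
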
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/libelf.h tmp-linux-2.6-xen.patch/include/xen/interface/libelf.h
---- pristine-linux-2.6.18.2/include/xen/interface/libelf.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/libelf.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,241 @@
-+#ifndef __XC_LIBELF__
-+#define __XC_LIBELF__ 1
-+
-+#if defined(__i386__) || defined(__x86_64) || defined(__ia64__)
-+#define XEN_ELF_LITTLE_ENDIAN
-+#elif defined(__powerpc__)
-+#define XEN_ELF_BIG_ENDIAN
-+#else
-+#error define architectural endianness
-+#endif
-+
-+#undef ELFSIZE
-+#include "elfnote.h"
-+#include "elfstructs.h"
-+#include "features.h"
-+
-+/* ------------------------------------------------------------------------ */
-+
-+typedef union {
-+    Elf32_Ehdr e32;
-+    Elf64_Ehdr e64;
-+} elf_ehdr;
-+
-+typedef union {
-+    Elf32_Phdr e32;
-+    Elf64_Phdr e64;
-+} elf_phdr;
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/sysctl.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/sysctl.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,295 @@
++/******************************************************************************
++ * sysctl.h
++ * 
++ * System management operations. For use by node control stack.
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2002-2006, K Fraser
++ */
 +
-+typedef union {
-+    Elf32_Shdr e32;
-+    Elf64_Shdr e64;
-+} elf_shdr;
++#ifndef __XEN_PUBLIC_SYSCTL_H__
++#define __XEN_PUBLIC_SYSCTL_H__
 +
-+typedef union {
-+    Elf32_Sym e32;
-+    Elf64_Sym e64;
-+} elf_sym;
++#if !defined(__XEN__) && !defined(__XEN_TOOLS__)
++#error "sysctl operations are intended for use by node control tools only"
++#endif
 +
-+typedef union {
-+    Elf32_Rel e32;
-+    Elf64_Rel e64;
-+} elf_rel;
++#include "xen.h"
++#include "domctl.h"
 +
-+typedef union {
-+    Elf32_Rela e32;
-+    Elf64_Rela e64;
-+} elf_rela;
++#define XEN_SYSCTL_INTERFACE_VERSION 0x00000006
 +
-+typedef union {
-+    Elf32_Note e32;
-+    Elf64_Note e64;
-+} elf_note;
++/*
++ * Read console content from Xen buffer ring.
++ */
++#define XEN_SYSCTL_readconsole       1
++struct xen_sysctl_readconsole {
++    /* IN: Non-zero -> clear after reading. */
++    uint8_t clear;
++    /* IN: Non-zero -> start index specified by @index field. */
++    uint8_t incremental;
++    uint8_t pad0, pad1;
++    /*
++     * IN:  Start index for consuming from ring buffer (if @incremental);
++     * OUT: End index after consuming from ring buffer.
++     */
++    uint32_t index; 
++    /* IN: Virtual address to write console data. */
++    XEN_GUEST_HANDLE_64(char) buffer;
++    /* IN: Size of buffer; OUT: Bytes written to buffer. */
++    uint32_t count;
++};
++typedef struct xen_sysctl_readconsole xen_sysctl_readconsole_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_readconsole_t);
 +
-+struct elf_binary {
-+    /* elf binary */
-+    const char *image;
-+    size_t size;
-+    char class;
-+    char data;
++/* Get trace buffers machine base address */
++#define XEN_SYSCTL_tbuf_op           2
++struct xen_sysctl_tbuf_op {
++    /* IN variables */
++#define XEN_SYSCTL_TBUFOP_get_info     0
++#define XEN_SYSCTL_TBUFOP_set_cpu_mask 1
++#define XEN_SYSCTL_TBUFOP_set_evt_mask 2
++#define XEN_SYSCTL_TBUFOP_set_size     3
++#define XEN_SYSCTL_TBUFOP_enable       4
++#define XEN_SYSCTL_TBUFOP_disable      5
++    uint32_t cmd;
++    /* IN/OUT variables */
++    struct xenctl_cpumap cpu_mask;
++    uint32_t             evt_mask;
++    /* OUT variables */
++    uint64_aligned_t buffer_mfn;
++    uint32_t size;
++};
++typedef struct xen_sysctl_tbuf_op xen_sysctl_tbuf_op_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t);
 +
-+    const elf_ehdr *ehdr;
-+    const char *sec_strtab;
-+    const elf_shdr *sym_tab;
-+    const char *sym_strtab;
++/*
++ * Get physical information about the host machine
++ */
++#define XEN_SYSCTL_physinfo          3
++ /* (x86) The platform supports HVM guests. */
++#define _XEN_SYSCTL_PHYSCAP_hvm          0
++#define XEN_SYSCTL_PHYSCAP_hvm           (1u<<_XEN_SYSCTL_PHYSCAP_hvm)
++ /* (x86) The platform supports HVM-guest direct access to I/O devices. */
++#define _XEN_SYSCTL_PHYSCAP_hvm_directio 1
++#define XEN_SYSCTL_PHYSCAP_hvm_directio  (1u<<_XEN_SYSCTL_PHYSCAP_hvm_directio)
++struct xen_sysctl_physinfo {
++    uint32_t threads_per_core;
++    uint32_t cores_per_socket;
++    uint32_t nr_cpus;
++    uint32_t nr_nodes;
++    uint32_t cpu_khz;
++    uint64_aligned_t total_pages;
++    uint64_aligned_t free_pages;
++    uint64_aligned_t scrub_pages;
++    uint32_t hw_cap[8];
 +
-+    /* loaded to */
-+    char *dest;
-+    uint64_t pstart;
-+    uint64_t pend;
-+    uint64_t reloc_offset;
++    /*
++     * IN: maximum addressable entry in the caller-provided cpu_to_node array.
++     * OUT: largest cpu identifier in the system.
++     * If OUT is greater than IN then the cpu_to_node array is truncated!
++     */
++    uint32_t max_cpu_id;
++    /*
++     * If not NULL, this array is filled with the node identifier for each cpu.
++     * If a cpu has no node information (e.g., cpu not present) then the
++     * sentinel value ~0u is written.
++     * The size of this array is specified by the caller in @max_cpu_id.
++     * If the actual @max_cpu_id is smaller than the array then the trailing
++     * elements of the array will not be written by the sysctl.
++     */
++    XEN_GUEST_HANDLE_64(uint32) cpu_to_node;
 +
-+#ifndef __XEN__
-+    /* misc */
-+    FILE *log;
-+#endif
-+    int verbose;
++    /* XEN_SYSCTL_PHYSCAP_??? */
++    uint32_t capabilities;
 +};
++typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t);
 +
-+/* ------------------------------------------------------------------------ */
-+/* accessing elf header fields                                              */
-+
-+#ifdef XEN_ELF_BIG_ENDIAN
-+# define NATIVE_ELFDATA ELFDATA2MSB
-+#else
-+# define NATIVE_ELFDATA ELFDATA2LSB
-+#endif
-+
-+#define elf_32bit(elf) (ELFCLASS32 == (elf)->class)
-+#define elf_64bit(elf) (ELFCLASS64 == (elf)->class)
-+#define elf_msb(elf)   (ELFDATA2MSB == (elf)->data)
-+#define elf_lsb(elf)   (ELFDATA2LSB == (elf)->data)
-+#define elf_swap(elf)  (NATIVE_ELFDATA != (elf)->data)
++/*
++ * Get the ID of the current scheduler.
++ */
++#define XEN_SYSCTL_sched_id          4
++struct xen_sysctl_sched_id {
++    /* OUT variable */
++    uint32_t sched_id;
++};
++typedef struct xen_sysctl_sched_id xen_sysctl_sched_id_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_id_t);
 +
-+#define elf_uval(elf, str, elem)			\
-+	((ELFCLASS64 == (elf)->class)			\
-+	? elf_access_unsigned((elf), (str),		\
-+		offsetof(typeof(*(str)),e64.elem),	\
-+		sizeof((str)->e64.elem))		\
-+	: elf_access_unsigned((elf), (str),		\
-+		offsetof(typeof(*(str)),e32.elem),	\
-+		sizeof((str)->e32.elem)))
-+
-+#define elf_sval(elf, str, elem)			\
-+	((ELFCLASS64 == (elf)->class)			\
-+	? elf_access_signed((elf), (str),		\
-+		offsetof(typeof(*(str)),e64.elem),	\
-+		sizeof((str)->e64.elem))		\
-+	: elf_access_signed((elf), (str),		\
-+		offsetof(typeof(*(str)),e32.elem),	\
-+		sizeof((str)->e32.elem)))
-+
-+#define elf_size(elf, str)		\
-+	((ELFCLASS64 == (elf)->class)	\
-+	? sizeof((str)->e64)		\
-+	: sizeof((str)->e32))
++/* Interface for controlling Xen software performance counters. */
++#define XEN_SYSCTL_perfc_op          5
++/* Sub-operations: */
++#define XEN_SYSCTL_PERFCOP_reset 1   /* Reset all counters to zero. */
++#define XEN_SYSCTL_PERFCOP_query 2   /* Get perfctr information. */
++struct xen_sysctl_perfc_desc {
++    char         name[80];             /* name of perf counter */
++    uint32_t     nr_vals;              /* number of values for this counter */
++};
++typedef struct xen_sysctl_perfc_desc xen_sysctl_perfc_desc_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_desc_t);
++typedef uint32_t xen_sysctl_perfc_val_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_val_t);
 +
-+uint64_t elf_access_unsigned(struct elf_binary *elf, const void *ptr,
-+			     uint64_t offset, size_t size);
-+int64_t elf_access_signed(struct elf_binary *elf, const void *ptr,
-+			  uint64_t offset, size_t size);
++struct xen_sysctl_perfc_op {
++    /* IN variables. */
++    uint32_t       cmd;                /*  XEN_SYSCTL_PERFCOP_??? */
++    /* OUT variables. */
++    uint32_t       nr_counters;       /* number of counter descriptions */
++    uint32_t       nr_vals;           /* number of values */
++    /* counter information (or NULL) */
++    XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc;
++    /* counter values (or NULL) */
++    XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val;
++};
++typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t);
 +
-+uint64_t elf_round_up(struct elf_binary *elf, uint64_t addr);
++#define XEN_SYSCTL_getdomaininfolist 6
++struct xen_sysctl_getdomaininfolist {
++    /* IN variables. */
++    domid_t               first_domain;
++    uint32_t              max_domains;
++    XEN_GUEST_HANDLE_64(xen_domctl_getdomaininfo_t) buffer;
++    /* OUT variables. */
++    uint32_t              num_domains;
++};
++typedef struct xen_sysctl_getdomaininfolist xen_sysctl_getdomaininfolist_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist_t);
 +
-+/* ------------------------------------------------------------------------ */
-+/* xc_libelf_tools.c                                                        */
++/* Inject debug keys into Xen. */
++#define XEN_SYSCTL_debug_keys        7
++struct xen_sysctl_debug_keys {
++    /* IN variables. */
++    XEN_GUEST_HANDLE_64(char) keys;
++    uint32_t nr_keys;
++};
++typedef struct xen_sysctl_debug_keys xen_sysctl_debug_keys_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_debug_keys_t);
 +
-+int elf_shdr_count(struct elf_binary *elf);
-+int elf_phdr_count(struct elf_binary *elf);
++/* Get physical CPU information. */
++#define XEN_SYSCTL_getcpuinfo        8
++struct xen_sysctl_cpuinfo {
++    uint64_aligned_t idletime;
++};
++typedef struct xen_sysctl_cpuinfo xen_sysctl_cpuinfo_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpuinfo_t); 
++struct xen_sysctl_getcpuinfo {
++    /* IN variables. */
++    uint32_t max_cpus;
++    XEN_GUEST_HANDLE_64(xen_sysctl_cpuinfo_t) info;
++    /* OUT variables. */
++    uint32_t nr_cpus;
++}; 
++typedef struct xen_sysctl_getcpuinfo xen_sysctl_getcpuinfo_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getcpuinfo_t); 
 +
-+const elf_shdr *elf_shdr_by_name(struct elf_binary *elf, const char *name);
-+const elf_shdr *elf_shdr_by_index(struct elf_binary *elf, int index);
-+const elf_phdr *elf_phdr_by_index(struct elf_binary *elf, int index);
++#define XEN_SYSCTL_availheap         9
++struct xen_sysctl_availheap {
++    /* IN variables. */
++    uint32_t min_bitwidth;  /* Smallest address width (zero if don't care). */
++    uint32_t max_bitwidth;  /* Largest address width (zero if don't care). */
++    int32_t  node;          /* NUMA node of interest (-1 for all nodes). */
++    /* OUT variables. */
++    uint64_aligned_t avail_bytes;/* Bytes available in the specified region. */
++};
++typedef struct xen_sysctl_availheap xen_sysctl_availheap_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_availheap_t);
 +
-+const char *elf_section_name(struct elf_binary *elf, const elf_shdr * shdr);
-+const void *elf_section_start(struct elf_binary *elf, const elf_shdr * shdr);
-+const void *elf_section_end(struct elf_binary *elf, const elf_shdr * shdr);
++#define XEN_SYSCTL_get_pmstat        10
++struct pm_px_val {
++    uint64_aligned_t freq;        /* Px core frequency */
++    uint64_aligned_t residency;   /* Px residency time */
++    uint64_aligned_t count;       /* Px transition count */
++};
++typedef struct pm_px_val pm_px_val_t;
++DEFINE_XEN_GUEST_HANDLE(pm_px_val_t);
++
++struct pm_px_stat {
++    uint8_t total;        /* total Px states */
++    uint8_t usable;       /* usable Px states */
++    uint8_t last;         /* last Px state */
++    uint8_t cur;          /* current Px state */
++    XEN_GUEST_HANDLE_64(uint64) trans_pt;   /* Px transition table */
++    XEN_GUEST_HANDLE_64(pm_px_val_t) pt;
++};
++typedef struct pm_px_stat pm_px_stat_t;
++DEFINE_XEN_GUEST_HANDLE(pm_px_stat_t);
++
++struct pm_cx_stat {
++    uint32_t nr;    /* entry nr in triggers & residencies, including C0 */
++    uint32_t last;  /* last Cx state */
++    uint64_aligned_t idle_time;                 /* idle time from boot */
++    XEN_GUEST_HANDLE_64(uint64) triggers;    /* Cx trigger counts */
++    XEN_GUEST_HANDLE_64(uint64) residencies; /* Cx residencies */
++};
++
++struct xen_sysctl_get_pmstat {
++#define PMSTAT_CATEGORY_MASK 0xf0
++#define PMSTAT_PX            0x10
++#define PMSTAT_CX            0x20
++#define PMSTAT_get_max_px    (PMSTAT_PX | 0x1)
++#define PMSTAT_get_pxstat    (PMSTAT_PX | 0x2)
++#define PMSTAT_reset_pxstat  (PMSTAT_PX | 0x3)
++#define PMSTAT_get_max_cx    (PMSTAT_CX | 0x1)
++#define PMSTAT_get_cxstat    (PMSTAT_CX | 0x2)
++#define PMSTAT_reset_cxstat  (PMSTAT_CX | 0x3)
++    uint32_t type;
++    uint32_t cpuid;
++    union {
++        struct pm_px_stat getpx;
++        struct pm_cx_stat getcx;
++        /* other struct for tx, etc */
++    } u;
++};
++typedef struct xen_sysctl_get_pmstat xen_sysctl_get_pmstat_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_get_pmstat_t);
 +
-+const void *elf_segment_start(struct elf_binary *elf, const elf_phdr * phdr);
-+const void *elf_segment_end(struct elf_binary *elf, const elf_phdr * phdr);
++struct xen_sysctl {
++    uint32_t cmd;
++    uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
++    union {
++        struct xen_sysctl_readconsole       readconsole;
++        struct xen_sysctl_tbuf_op           tbuf_op;
++        struct xen_sysctl_physinfo          physinfo;
++        struct xen_sysctl_sched_id          sched_id;
++        struct xen_sysctl_perfc_op          perfc_op;
++        struct xen_sysctl_getdomaininfolist getdomaininfolist;
++        struct xen_sysctl_debug_keys        debug_keys;
++        struct xen_sysctl_getcpuinfo        getcpuinfo;
++        struct xen_sysctl_availheap         availheap;
++        struct xen_sysctl_get_pmstat        get_pmstat;
++        uint8_t                             pad[128];
++    } u;
++};
++typedef struct xen_sysctl xen_sysctl_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_t);
 +
-+const elf_sym *elf_sym_by_name(struct elf_binary *elf, const char *symbol);
-+const elf_sym *elf_sym_by_index(struct elf_binary *elf, int index);
++#endif /* __XEN_PUBLIC_SYSCTL_H__ */
 +
-+const char *elf_note_name(struct elf_binary *elf, const elf_note * note);
-+const void *elf_note_desc(struct elf_binary *elf, const elf_note * note);
-+uint64_t elf_note_numeric(struct elf_binary *elf, const elf_note * note);
-+const elf_note *elf_note_next(struct elf_binary *elf, const elf_note * note);
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
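A hypothetical control-stack sketch of the sysctl interface above, querying host topology with XEN_SYSCTL_physinfo; do_sysctl() stands in for the privcmd/libxc transport that actually issues the hypercall and is not defined here.

    #define __XEN_TOOLS__            /* sysctl is control-stack only */
    #include <string.h>
    #include <xen/interface/sysctl.h>

    int do_sysctl(struct xen_sysctl *op);   /* hypothetical transport */

    static int get_physinfo_example(struct xen_sysctl_physinfo *info)
    {
        struct xen_sysctl op;

        memset(&op, 0, sizeof(op));          /* cpu_to_node left NULL */
        op.cmd = XEN_SYSCTL_physinfo;
        op.interface_version = XEN_SYSCTL_INTERFACE_VERSION;

        if (do_sysctl(&op) != 0)
            return -1;

        *info = op.u.physinfo;               /* OUT fields filled by Xen */
        return 0;
    }
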
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/trace.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/trace.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,172 @@
++/******************************************************************************
++ * include/public/trace.h
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Mark Williamson, (C) 2004 Intel Research Cambridge
++ * Copyright (C) 2005 Bin Ren
++ */
 +
-+int elf_is_elfbinary(const void *image);
-+int elf_phdr_is_loadable(struct elf_binary *elf, const elf_phdr * phdr);
++#ifndef __XEN_PUBLIC_TRACE_H__
++#define __XEN_PUBLIC_TRACE_H__
 +
-+/* ------------------------------------------------------------------------ */
-+/* xc_libelf_loader.c                                                       */
++#define TRACE_EXTRA_MAX    7
++#define TRACE_EXTRA_SHIFT 28
 +
-+int elf_init(struct elf_binary *elf, const char *image, size_t size);
-+#ifdef __XEN__
-+void elf_set_verbose(struct elf_binary *elf);
-+#else
-+void elf_set_logfile(struct elf_binary *elf, FILE * log, int verbose);
-+#endif
++/* Trace classes */
++#define TRC_CLS_SHIFT 16
++#define TRC_GEN      0x0001f000    /* General trace            */
++#define TRC_SCHED    0x0002f000    /* Xen Scheduler trace      */
++#define TRC_DOM0OP   0x0004f000    /* Xen DOM0 operation trace */
++#define TRC_HVM      0x0008f000    /* Xen HVM trace            */
++#define TRC_MEM      0x0010f000    /* Xen memory trace         */
++#define TRC_PV       0x0020f000    /* Xen PV traces            */
++#define TRC_ALL      0x0ffff000
++#define TRC_HD_TO_EVENT(x) ((x)&0x0fffffff)
++#define TRC_HD_CYCLE_FLAG (1UL<<31)
++#define TRC_HD_INCLUDES_CYCLE_COUNT(x) ( !!( (x) & TRC_HD_CYCLE_FLAG ) )
++#define TRC_HD_EXTRA(x)    (((x)>>TRACE_EXTRA_SHIFT)&TRACE_EXTRA_MAX)
 +
-+void elf_parse_binary(struct elf_binary *elf);
-+void elf_load_binary(struct elf_binary *elf);
++/* Trace subclasses */
++#define TRC_SUBCLS_SHIFT 12
 +
-+void *elf_get_ptr(struct elf_binary *elf, unsigned long addr);
-+uint64_t elf_lookup_addr(struct elf_binary *elf, const char *symbol);
++/* trace subclasses for SVM */
++#define TRC_HVM_ENTRYEXIT 0x00081000   /* VMENTRY and #VMEXIT       */
++#define TRC_HVM_HANDLER   0x00082000   /* various HVM handlers      */
 +
-+/* ------------------------------------------------------------------------ */
-+/* xc_libelf_relocate.c                                                     */
++/* Trace events per class */
++#define TRC_LOST_RECORDS        (TRC_GEN + 1)
++#define TRC_TRACE_WRAP_BUFFER  (TRC_GEN + 2)
++#define TRC_TRACE_CPU_CHANGE    (TRC_GEN + 3)
 +
-+int elf_reloc(struct elf_binary *elf);
++#define TRC_SCHED_DOM_ADD       (TRC_SCHED +  1)
++#define TRC_SCHED_DOM_REM       (TRC_SCHED +  2)
++#define TRC_SCHED_SLEEP         (TRC_SCHED +  3)
++#define TRC_SCHED_WAKE          (TRC_SCHED +  4)
++#define TRC_SCHED_YIELD         (TRC_SCHED +  5)
++#define TRC_SCHED_BLOCK         (TRC_SCHED +  6)
++#define TRC_SCHED_SHUTDOWN      (TRC_SCHED +  7)
++#define TRC_SCHED_CTL           (TRC_SCHED +  8)
++#define TRC_SCHED_ADJDOM        (TRC_SCHED +  9)
++#define TRC_SCHED_SWITCH        (TRC_SCHED + 10)
++#define TRC_SCHED_S_TIMER_FN    (TRC_SCHED + 11)
++#define TRC_SCHED_T_TIMER_FN    (TRC_SCHED + 12)
++#define TRC_SCHED_DOM_TIMER_FN  (TRC_SCHED + 13)
++#define TRC_SCHED_SWITCH_INFPREV (TRC_SCHED + 14)
++#define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED + 15)
 +
-+/* ------------------------------------------------------------------------ */
-+/* xc_libelf_dominfo.c                                                      */
++#define TRC_MEM_PAGE_GRANT_MAP      (TRC_MEM + 1)
++#define TRC_MEM_PAGE_GRANT_UNMAP    (TRC_MEM + 2)
++#define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3)
 +
-+#define UNSET_ADDR          ((uint64_t)-1)
++#define TRC_PV_HYPERCALL             (TRC_PV +  1)
++#define TRC_PV_TRAP                  (TRC_PV +  3)
++#define TRC_PV_PAGE_FAULT            (TRC_PV +  4)
++#define TRC_PV_FORCED_INVALID_OP     (TRC_PV +  5)
++#define TRC_PV_EMULATE_PRIVOP        (TRC_PV +  6)
++#define TRC_PV_EMULATE_4GB           (TRC_PV +  7)
++#define TRC_PV_MATH_STATE_RESTORE    (TRC_PV +  8)
++#define TRC_PV_PAGING_FIXUP          (TRC_PV +  9)
++#define TRC_PV_GDT_LDT_MAPPING_FAULT (TRC_PV + 10)
++#define TRC_PV_PTWR_EMULATION        (TRC_PV + 11)
++#define TRC_PV_PTWR_EMULATION_PAE    (TRC_PV + 12)
++  /* Indicates that addresses in trace record are 64 bits */
++#define TRC_64_FLAG               (0x100) 
 +
-+enum xen_elfnote_type {
-+    XEN_ENT_NONE = 0,
-+    XEN_ENT_LONG = 1,
-+    XEN_ENT_STR  = 2
-+};
++/* trace events per subclass */
++#define TRC_HVM_VMENTRY         (TRC_HVM_ENTRYEXIT + 0x01)
++#define TRC_HVM_VMEXIT          (TRC_HVM_ENTRYEXIT + 0x02)
++#define TRC_HVM_VMEXIT64        (TRC_HVM_ENTRYEXIT + TRC_64_FLAG + 0x02)
++#define TRC_HVM_PF_XEN          (TRC_HVM_HANDLER + 0x01)
++#define TRC_HVM_PF_XEN64        (TRC_HVM_HANDLER + TRC_64_FLAG + 0x01)
++#define TRC_HVM_PF_INJECT       (TRC_HVM_HANDLER + 0x02)
++#define TRC_HVM_PF_INJECT64     (TRC_HVM_HANDLER + TRC_64_FLAG + 0x02)
++#define TRC_HVM_INJ_EXC         (TRC_HVM_HANDLER + 0x03)
++#define TRC_HVM_INJ_VIRQ        (TRC_HVM_HANDLER + 0x04)
++#define TRC_HVM_REINJ_VIRQ      (TRC_HVM_HANDLER + 0x05)
++#define TRC_HVM_IO_READ         (TRC_HVM_HANDLER + 0x06)
++#define TRC_HVM_IO_WRITE        (TRC_HVM_HANDLER + 0x07)
++#define TRC_HVM_CR_READ         (TRC_HVM_HANDLER + 0x08)
++#define TRC_HVM_CR_READ64       (TRC_HVM_HANDLER + TRC_64_FLAG + 0x08)
++#define TRC_HVM_CR_WRITE        (TRC_HVM_HANDLER + 0x09)
++#define TRC_HVM_CR_WRITE64      (TRC_HVM_HANDLER + TRC_64_FLAG + 0x09)
++#define TRC_HVM_DR_READ         (TRC_HVM_HANDLER + 0x0A)
++#define TRC_HVM_DR_WRITE        (TRC_HVM_HANDLER + 0x0B)
++#define TRC_HVM_MSR_READ        (TRC_HVM_HANDLER + 0x0C)
++#define TRC_HVM_MSR_WRITE       (TRC_HVM_HANDLER + 0x0D)
++#define TRC_HVM_CPUID           (TRC_HVM_HANDLER + 0x0E)
++#define TRC_HVM_INTR            (TRC_HVM_HANDLER + 0x0F)
++#define TRC_HVM_NMI             (TRC_HVM_HANDLER + 0x10)
++#define TRC_HVM_SMI             (TRC_HVM_HANDLER + 0x11)
++#define TRC_HVM_VMMCALL         (TRC_HVM_HANDLER + 0x12)
++#define TRC_HVM_HLT             (TRC_HVM_HANDLER + 0x13)
++#define TRC_HVM_INVLPG          (TRC_HVM_HANDLER + 0x14)
++#define TRC_HVM_INVLPG64        (TRC_HVM_HANDLER + TRC_64_FLAG + 0x14)
++#define TRC_HVM_MCE             (TRC_HVM_HANDLER + 0x15)
++#define TRC_HVM_IO_ASSIST       (TRC_HVM_HANDLER + 0x16)
++#define TRC_HVM_MMIO_ASSIST     (TRC_HVM_HANDLER + 0x17)
++#define TRC_HVM_CLTS            (TRC_HVM_HANDLER + 0x18)
++#define TRC_HVM_LMSW            (TRC_HVM_HANDLER + 0x19)
++#define TRC_HVM_LMSW64          (TRC_HVM_HANDLER + TRC_64_FLAG + 0x19)
 +
-+struct xen_elfnote {
-+    enum xen_elfnote_type type;
-+    const char *name;
++/* This structure represents a single trace buffer record. */
++struct t_rec {
++    uint32_t event:28;
++    uint32_t extra_u32:3;         /* # entries in trailing extra_u32[] array */
++    uint32_t cycles_included:1;   /* u.cycles or u.no_cycles? */
 +    union {
-+	const char *str;
-+	uint64_t num;
-+    } data;
++        struct {
++            uint32_t cycles_lo, cycles_hi; /* cycle counter timestamp */
++            uint32_t extra_u32[7];         /* event data items */
++        } cycles;
++        struct {
++            uint32_t extra_u32[7];         /* event data items */
++        } nocycles;
++    } u;
 +};
 +
-+struct elf_dom_parms {
-+    /* raw */
-+    const char *guest_info;
-+    const void *elf_note_start;
-+    const void *elf_note_end;
-+    struct xen_elfnote elf_notes[XEN_ELFNOTE_MAX + 1];
-+  
-+    /* parsed */
-+    char guest_os[16];
-+    char guest_ver[16];
-+    char xen_ver[16];
-+    char loader[16];
-+    int pae;
-+    int bsd_symtab;
-+    uint64_t virt_base;
-+    uint64_t virt_entry;
-+    uint64_t virt_hypercall;
-+    uint64_t virt_hv_start_low;
-+    uint64_t elf_paddr_offset;
-+    uint32_t f_supported[XENFEAT_NR_SUBMAPS];
-+    uint32_t f_required[XENFEAT_NR_SUBMAPS];
-+
-+    /* calculated */
-+    uint64_t virt_offset;
-+    uint64_t virt_kstart;
-+    uint64_t virt_kend;
++/*
++ * This structure contains the metadata for a single trace buffer.  The head
++ * field indexes into an array of struct t_rec's.
++ */
++struct t_buf {
++    /* Assume the data buffer size is X.  X is generally not a power of 2.
++     * CONS and PROD are incremented modulo (2*X):
++     *     0 <= cons < 2*X
++     *     0 <= prod < 2*X
++     * This is done because addition modulo X breaks at 2^32 when X is not a
++     * power of 2:
++     *     (((2^32 - 1) % X) + 1) % X != (2^32) % X
++     */
++    uint32_t cons;   /* Offset of next item to be consumed by control tools. */
++    uint32_t prod;   /* Offset of next item to be produced by Xen.           */
++    /*  Records follow immediately after the meta-data header.    */
 +};
 +
-+static inline void elf_xen_feature_set(int nr, uint32_t * addr)
-+{
-+    addr[nr >> 5] |= 1 << (nr & 31);
-+}
-+static inline int elf_xen_feature_get(int nr, uint32_t * addr)
-+{
-+    return !!(addr[nr >> 5] & (1 << (nr & 31)));
-+}
-+
-+int elf_xen_parse_features(const char *features,
-+			   uint32_t *supported,
-+			   uint32_t *required);
-+int elf_xen_parse_note(struct elf_binary *elf,
-+		       struct elf_dom_parms *parms,
-+		       const elf_note *note);
-+int elf_xen_parse_guest_info(struct elf_binary *elf,
-+			     struct elf_dom_parms *parms);
-+int elf_xen_parse(struct elf_binary *elf,
-+		  struct elf_dom_parms *parms);
++#endif /* __XEN_PUBLIC_TRACE_H__ */
 +
-+#endif /* __XC_LIBELF__ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/memory.h tmp-linux-2.6-xen.patch/include/xen/interface/memory.h
---- pristine-linux-2.6.18.2/include/xen/interface/memory.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/memory.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,281 @@
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
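To make the t_buf ring convention above concrete: cons and prod run modulo 2*X so the ring may hold a record count X that is not a power of two. A consumer sketch, simplified to fixed-size records (real trace records are variable-length) and assuming the buffer was mapped via XEN_SYSCTL_tbuf_op:

    #include <xen/interface/trace.h>

    static void consume_trace_records(struct t_buf *buf, struct t_rec *recs,
                                      uint32_t nr_recs /* X above */)
    {
        uint32_t cons = buf->cons;

        while (cons != buf->prod) {
            /* Offsets live in [0, 2*X); reduce mod X only to dereference. */
            struct t_rec *r = &recs[cons % nr_recs];

            /* ... process r->event and r->u.cycles / r->u.nocycles ... */
            (void)r;

            cons = (cons + 1) % (2 * nr_recs);
        }
        buf->cons = cons;    /* publish the new consumer offset */
    }
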
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/vcpu.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/vcpu.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,213 @@
 +/******************************************************************************
-+ * memory.h
++ * vcpu.h
 + * 
-+ * Memory reservation and information.
++ * VCPU initialisation, query, and hotplug.
 + * 
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
@@ -102946,251 +142520,183 @@
 + * Copyright (c) 2005, Keir Fraser <keir at xensource.com>
 + */
 +
-+#ifndef __XEN_PUBLIC_MEMORY_H__
-+#define __XEN_PUBLIC_MEMORY_H__
++#ifndef __XEN_PUBLIC_VCPU_H__
++#define __XEN_PUBLIC_VCPU_H__
 +
 +/*
-+ * Increase or decrease the specified domain's memory reservation. Returns the
-+ * number of extents successfully allocated or freed.
-+ * arg == addr of struct xen_memory_reservation.
++ * Prototype for this hypercall is:
++ *  int vcpu_op(int cmd, int vcpuid, void *extra_args)
++ * @cmd        == VCPUOP_??? (VCPU operation).
++ * @vcpuid     == VCPU to operate on.
++ * @extra_args == Operation-specific extra arguments (NULL if none).
 + */
-+#define XENMEM_increase_reservation 0
-+#define XENMEM_decrease_reservation 1
-+#define XENMEM_populate_physmap     6
-+struct xen_memory_reservation {
-+
-+    /*
-+     * XENMEM_increase_reservation:
-+     *   OUT: MFN (*not* GMFN) bases of extents that were allocated
-+     * XENMEM_decrease_reservation:
-+     *   IN:  GMFN bases of extents to free
-+     * XENMEM_populate_physmap:
-+     *   IN:  GPFN bases of extents to populate with memory
-+     *   OUT: GMFN bases of extents that were allocated
-+     *   (NB. This command also updates the mach_to_phys translation table)
-+     */
-+    XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
-+
-+    /* Number of extents, and size/alignment of each (2^extent_order pages). */
-+    xen_ulong_t    nr_extents;
-+    unsigned int   extent_order;
-+
-+    /*
-+     * Maximum # bits addressable by the user of the allocated region (e.g., 
-+     * I/O devices often have a 32-bit limitation even in 64-bit systems). If 
-+     * zero then the user has no addressing restriction.
-+     * This field is not used by XENMEM_decrease_reservation.
-+     */
-+    unsigned int   address_bits;
-+
-+    /*
-+     * Domain whose reservation is being changed.
-+     * Unprivileged domains can specify only DOMID_SELF.
-+     */
-+    domid_t        domid;
-+};
-+typedef struct xen_memory_reservation xen_memory_reservation_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t);
 +
 +/*
-+ * An atomic exchange of memory pages. If return code is zero then
-+ * @out.extent_list provides GMFNs of the newly-allocated memory.
-+ * Returns zero on complete success, otherwise a negative error code.
-+ * On complete success then always @nr_exchanged == @in.nr_extents.
-+ * On partial success @nr_exchanged indicates how much work was done.
++ * Initialise a VCPU. Each VCPU can be initialised only once. A 
++ * newly-initialised VCPU will not run until it is brought up by VCPUOP_up.
++ * 
++ * @extra_arg == pointer to vcpu_guest_context structure containing initial
++ *               state for the VCPU.
 + */
-+#define XENMEM_exchange             11
-+struct xen_memory_exchange {
-+    /*
-+     * [IN] Details of memory extents to be exchanged (GMFN bases).
-+     * Note that @in.address_bits is ignored and unused.
-+     */
-+    struct xen_memory_reservation in;
-+
-+    /*
-+     * [IN/OUT] Details of new memory extents.
-+     * We require that:
-+     *  1. @in.domid == @out.domid
-+     *  2. @in.nr_extents  << @in.extent_order == 
-+     *     @out.nr_extents << @out.extent_order
-+     *  3. @in.extent_start and @out.extent_start lists must not overlap
-+     *  4. @out.extent_start lists GPFN bases to be populated
-+     *  5. @out.extent_start is overwritten with allocated GMFN bases
-+     */
-+    struct xen_memory_reservation out;
-+
-+    /*
-+     * [OUT] Number of input extents that were successfully exchanged:
-+     *  1. The first @nr_exchanged input extents were successfully
-+     *     deallocated.
-+     *  2. The corresponding first entries in the output extent list correctly
-+     *     indicate the GMFNs that were successfully exchanged.
-+     *  3. All other input and output extents are untouched.
-+     *  4. If not all input exents are exchanged then the return code of this
-+     *     command will be non-zero.
-+     *  5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER!
-+     */
-+    xen_ulong_t nr_exchanged;
-+};
-+typedef struct xen_memory_exchange xen_memory_exchange_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t);
++#define VCPUOP_initialise            0
 +
 +/*
-+ * Returns the maximum machine frame number of mapped RAM in this system.
-+ * This command always succeeds (it never returns an error code).
-+ * arg == NULL.
++ * Bring up a VCPU. This makes the VCPU runnable. This operation will fail
++ * if the VCPU has not been initialised (VCPUOP_initialise).
 + */
-+#define XENMEM_maximum_ram_page     2
++#define VCPUOP_up                    1
 +
 +/*
-+ * Returns the current or maximum memory reservation, in pages, of the
-+ * specified domain (may be DOMID_SELF). Returns -ve errcode on failure.
-+ * arg == addr of domid_t.
++ * Bring down a VCPU (i.e., make it non-runnable).
++ * There are a few caveats that callers should observe:
++ *  1. This operation may return, and VCPUOP_is_up may return false, before the
++ *     VCPU stops running (i.e., the command is asynchronous). It is a good
++ *     idea to ensure that the VCPU has entered a non-critical loop before
++ *     bringing it down. Alternatively, this operation is guaranteed
++ *     synchronous if invoked by the VCPU itself.
++ *  2. After a VCPU is initialised, there is currently no way to drop all its
++ *     references to domain memory. Even a VCPU that is down still holds
++ *     memory references via its pagetable base pointer and GDT. It is good
++ *     practice to move a VCPU onto an 'idle' or default page table, LDT and
++ *     GDT before bringing it down.
 + */
-+#define XENMEM_current_reservation  3
-+#define XENMEM_maximum_reservation  4
++#define VCPUOP_down                  2
 +
-+/*
-+ * Returns the maximum GPFN in use by the guest, or -ve errcode on failure.
-+ */
-+#define XENMEM_maximum_gpfn         14
++/* Returns 1 if the given VCPU is up. */
++#define VCPUOP_is_up                 3
 +
 +/*
-+ * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
-+ * mapping table. Architectures which do not have a m2p table do not implement
-+ * this command.
-+ * arg == addr of xen_machphys_mfn_list_t.
++ * Return information about the state and running time of a VCPU.
++ * @extra_arg == pointer to vcpu_runstate_info structure.
 + */
-+#define XENMEM_machphys_mfn_list    5
-+struct xen_machphys_mfn_list {
++#define VCPUOP_get_runstate_info     4
++struct vcpu_runstate_info {
++    /* VCPU's current state (RUNSTATE_*). */
++    int      state;
++    /* When was current state entered (system time, ns)? */
++    uint64_t state_entry_time;
 +    /*
-+     * Size of the 'extent_start' array. Fewer entries will be filled if the
-+     * machphys table is smaller than max_extents * 2MB.
++     * Time spent in each RUNSTATE_* (ns). The sum of these times is
++     * guaranteed not to drift from system time.
 +     */
-+    unsigned int max_extents;
++    uint64_t time[4];
++};
++typedef struct vcpu_runstate_info vcpu_runstate_info_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_t);
 +
-+    /*
-+     * Pointer to buffer to fill with list of extent starts. If there are
-+     * any large discontiguities in the machine address space, 2MB gaps in
-+     * the machphys table will be represented by an MFN base of zero.
-+     */
-+    XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
++/* VCPU is currently running on a physical CPU. */
++#define RUNSTATE_running  0
 +
-+    /*
-+     * Number of extents written to the above array. This will be smaller
-+     * than 'max_extents' if the machphys table is smaller than max_e * 2MB.
-+     */
-+    unsigned int nr_extents;
-+};
-+typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t);
++/* VCPU is runnable, but not currently scheduled on any physical CPU. */
++#define RUNSTATE_runnable 1
 +
-+/*
-+ * Returns the location in virtual address space of the machine_to_phys
-+ * mapping table. Architectures which do not have a m2p table, or which do not
-+ * map it by default into guest address space, do not implement this command.
-+ * arg == addr of xen_machphys_mapping_t.
-+ */
-+#define XENMEM_machphys_mapping     12
-+struct xen_machphys_mapping {
-+    xen_ulong_t v_start, v_end; /* Start and end virtual addresses.   */
-+    xen_ulong_t max_mfn;        /* Maximum MFN that can be looked up. */
-+};
-+typedef struct xen_machphys_mapping xen_machphys_mapping_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t);
++/* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */
++#define RUNSTATE_blocked  2
 +
 +/*
-+ * Sets the GPFN at which a particular page appears in the specified guest's
-+ * pseudophysical address space.
-+ * arg == addr of xen_add_to_physmap_t.
++ * VCPU is not runnable, but it is not blocked.
++ * This is a 'catch all' state for things like hotplug and pauses by the
++ * system administrator (or for critical sections in the hypervisor).
++ * RUNSTATE_blocked dominates this state (it is the preferred state).
 + */
-+#define XENMEM_add_to_physmap      7
-+struct xen_add_to_physmap {
-+    /* Which domain to change the mapping for. */
-+    domid_t domid;
-+
-+    /* Source mapping space. */
-+#define XENMAPSPACE_shared_info 0 /* shared info page */
-+#define XENMAPSPACE_grant_table 1 /* grant table page */
-+    unsigned int space;
-+
-+    /* Index into source mapping space. */
-+    xen_ulong_t idx;
-+
-+    /* GPFN where the source mapping page should appear. */
-+    xen_pfn_t     gpfn;
-+};
-+typedef struct xen_add_to_physmap xen_add_to_physmap_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
++#define RUNSTATE_offline  3
 +
 +/*
-+ * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error
-+ * code on failure. This call only works for auto-translated guests.
++ * Register a shared memory area from which the guest may obtain its own
++ * runstate information without needing to execute a hypercall.
++ * Notes:
++ *  1. The registered address may be a virtual address, a physical address,
++ *     or a guest handle, depending on the platform. On x86 systems a
++ *     virtual address or a guest handle should be registered.
++ *  2. Only one shared area may be registered per VCPU. The shared area is
++ *     updated by the hypervisor each time the VCPU is scheduled. Thus
++ *     runstate.state will always be RUNSTATE_running and
++ *     runstate.state_entry_time will indicate the system time at which the
++ *     VCPU was last scheduled to run.
++ * @extra_arg == pointer to vcpu_register_runstate_memory_area structure.
 + */
-+#define XENMEM_translate_gpfn_list  8
-+struct xen_translate_gpfn_list {
-+    /* Which domain to translate for? */
-+    domid_t domid;
-+
-+    /* Length of list. */
-+    xen_ulong_t nr_gpfns;
-+
-+    /* List of GPFNs to translate. */
-+    XEN_GUEST_HANDLE(xen_pfn_t) gpfn_list;
-+
-+    /*
-+     * Output list to contain MFN translations. May be the same as the input
-+     * list (in which case each input GPFN is overwritten with the output MFN).
-+     */
-+    XEN_GUEST_HANDLE(xen_pfn_t) mfn_list;
++#define VCPUOP_register_runstate_memory_area 5
++struct vcpu_register_runstate_memory_area {
++    union {
++        XEN_GUEST_HANDLE(vcpu_runstate_info_t) h;
++        struct vcpu_runstate_info *v;
++        uint64_t p;
++    } addr;
 +};
-+typedef struct xen_translate_gpfn_list xen_translate_gpfn_list_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_translate_gpfn_list_t);
++typedef struct vcpu_register_runstate_memory_area vcpu_register_runstate_memory_area_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_register_runstate_memory_area_t);
 +
 +/*
-+ * Returns the pseudo-physical memory map as it was when the domain
-+ * was started (specified by XENMEM_set_memory_map).
-+ * arg == addr of xen_memory_map_t.
-+ */
-+#define XENMEM_memory_map           9
-+struct xen_memory_map {
-+    /*
-+     * On call the number of entries which can be stored in buffer. On
-+     * return the number of entries which have been stored in
-+     * buffer.
-+     */
-+    unsigned int nr_entries;
-+
-+    /*
-+     * Entries in the buffer are in the same format as returned by the
-+     * BIOS INT 0x15 EAX=0xE820 call.
-+     */
-+    XEN_GUEST_HANDLE(void) buffer;
++ * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer
++ * which can be set via these commands. Periods smaller than one millisecond
++ * may not be supported.
++ */
++#define VCPUOP_set_periodic_timer    6 /* arg == vcpu_set_periodic_timer_t */
++#define VCPUOP_stop_periodic_timer   7 /* arg == NULL */
++struct vcpu_set_periodic_timer {
++    uint64_t period_ns;
 +};
-+typedef struct xen_memory_map xen_memory_map_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t);
++typedef struct vcpu_set_periodic_timer vcpu_set_periodic_timer_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_set_periodic_timer_t);
 +
 +/*
-+ * Returns the real physical memory map. Passes the same structure as
-+ * XENMEM_memory_map.
-+ * arg == addr of xen_memory_map_t.
++ * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot
++ * timer which can be set via these commands.
 + */
-+#define XENMEM_machine_memory_map   10
++#define VCPUOP_set_singleshot_timer  8 /* arg == vcpu_set_singleshot_timer_t */
++#define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */
++struct vcpu_set_singleshot_timer {
++    uint64_t timeout_abs_ns;   /* Absolute system time value in nanoseconds. */
++    uint32_t flags;            /* VCPU_SSHOTTMR_??? */
++};
++typedef struct vcpu_set_singleshot_timer vcpu_set_singleshot_timer_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_set_singleshot_timer_t);
 +
-+/*
-+ * Set the pseudo-physical memory map of a domain, as returned by
-+ * XENMEM_memory_map.
-+ * arg == addr of xen_foreign_memory_map_t.
++/* Flags to VCPUOP_set_singleshot_timer. */
++ /* Require the timeout to be in the future (return -ETIME if it's passed). */
++#define _VCPU_SSHOTTMR_future (0)
++#define VCPU_SSHOTTMR_future  (1U << _VCPU_SSHOTTMR_future)
++
++/* 
++ * Register a memory location in the guest address space for the
++ * vcpu_info structure.  This allows the guest to place the vcpu_info
++ * structure in a convenient place, such as in a per-cpu data area.
++ * The pointer need not be page aligned, but the structure must not
++ * cross a page boundary.
++ *
++ * This may be called only once per vcpu.
 + */
-+#define XENMEM_set_memory_map       13
-+struct xen_foreign_memory_map {
-+    domid_t domid;
-+    struct xen_memory_map map;
++#define VCPUOP_register_vcpu_info   10  /* arg == vcpu_register_vcpu_info_t */
++struct vcpu_register_vcpu_info {
++    uint64_t mfn;    /* mfn of page to place vcpu_info */
++    uint32_t offset; /* offset within page */
++    uint32_t rsvd;   /* unused */
 +};
-+typedef struct xen_foreign_memory_map xen_foreign_memory_map_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t);
++typedef struct vcpu_register_vcpu_info vcpu_register_vcpu_info_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t);
 +
-+#endif /* __XEN_PUBLIC_MEMORY_H__ */
++/* Send an NMI to the specified VCPU. @extra_arg == NULL. */
++#define VCPUOP_send_nmi             11
++
++/* 
++ * Get the physical ID information for a pinned vcpu's underlying physical
++ * processor.  The physical ID information is architecture-specific.
++ * On x86: id[31:0]=apic_id, id[63:32]=acpi_id, and all values 0xff and
++ *         greater are reserved.
++ * This command returns -EINVAL if it is not a valid operation for this VCPU.
++ */
++#define VCPUOP_get_physid           12 /* arg == vcpu_get_physid_t */
++struct vcpu_get_physid {
++    uint64_t phys_id;
++};
++typedef struct vcpu_get_physid vcpu_get_physid_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_get_physid_t);
++#define xen_vcpu_physid_to_x86_apicid(physid) \
++    ((((uint32_t)(physid)) >= 0xff) ? 0xff : ((uint8_t)(physid)))
++#define xen_vcpu_physid_to_x86_acpiid(physid) \
++    ((((uint32_t)((physid)>>32)) >= 0xff) ? 0xff : ((uint8_t)((physid)>>32)))
++
++#endif /* __XEN_PUBLIC_VCPU_H__ */
 +
 +/*
 + * Local variables:
@@ -103201,14 +142707,14 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
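An illustrative use of the vcpu.h timer interface above, assuming the HYPERVISOR_vcpu_op() wrapper for the documented `int vcpu_op(int cmd, int vcpuid, void *extra_args)` prototype; now_ns() is a hypothetical nanosecond system-time source.

    #include <xen/interface/vcpu.h>

    extern uint64_t now_ns(void);   /* hypothetical system-time source */

    static int arm_oneshot_example(int vcpuid)
    {
        struct vcpu_set_singleshot_timer t = {
            .timeout_abs_ns = now_ns() + 1000000ULL, /* fire in 1 ms */
            .flags          = VCPU_SSHOTTMR_future,  /* -ETIME if in the past */
        };

        return HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, vcpuid, &t);
    }
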
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/nmi.h tmp-linux-2.6-xen.patch/include/xen/interface/nmi.h
---- pristine-linux-2.6.18.2/include/xen/interface/nmi.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/nmi.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,78 @@
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/version.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/version.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,91 @@
 +/******************************************************************************
-+ * nmi.h
++ * version.h
 + * 
-+ * NMI callback registration and reason codes.
++ * Xen version, type, and compile information.
 + * 
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
@@ -103228,51 +142734,64 @@
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
 + *
++ * Copyright (c) 2005, Nguyen Anh Quynh <aquynh at gmail.com>
 + * Copyright (c) 2005, Keir Fraser <keir at xensource.com>
 + */
 +
-+#ifndef __XEN_PUBLIC_NMI_H__
-+#define __XEN_PUBLIC_NMI_H__
++#ifndef __XEN_PUBLIC_VERSION_H__
++#define __XEN_PUBLIC_VERSION_H__
 +
-+/*
-+ * NMI reason codes:
-+ * Currently these are x86-specific, stored in arch_shared_info.nmi_reason.
-+ */
-+ /* I/O-check error reported via ISA port 0x61, bit 6. */
-+#define _XEN_NMIREASON_io_error     0
-+#define XEN_NMIREASON_io_error      (1UL << _XEN_NMIREASON_io_error)
-+ /* Parity error reported via ISA port 0x61, bit 7. */
-+#define _XEN_NMIREASON_parity_error 1
-+#define XEN_NMIREASON_parity_error  (1UL << _XEN_NMIREASON_parity_error)
-+ /* Unknown hardware-generated NMI. */
-+#define _XEN_NMIREASON_unknown      2
-+#define XEN_NMIREASON_unknown       (1UL << _XEN_NMIREASON_unknown)
++/* NB. All ops return zero on success, except XENVER_{version,pagesize} */
 +
-+/*
-+ * long nmi_op(unsigned int cmd, void *arg)
-+ * NB. All ops return zero on success, else a negative error code.
-+ */
++/* arg == NULL; returns major:minor (16:16). */
++#define XENVER_version      0
 +
-+/*
-+ * Register NMI callback for this (calling) VCPU. Currently this only makes
-+ * sense for domain 0, vcpu 0. All other callers will be returned EINVAL.
-+ * arg == pointer to xennmi_callback structure.
-+ */
-+#define XENNMI_register_callback   0
-+struct xennmi_callback {
-+    unsigned long handler_address;
-+    unsigned long pad;
++/* arg == xen_extraversion_t. */
++#define XENVER_extraversion 1
++typedef char xen_extraversion_t[16];
++#define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t))
++
++/* arg == xen_compile_info_t. */
++#define XENVER_compile_info 2
++struct xen_compile_info {
++    char compiler[64];
++    char compile_by[16];
++    char compile_domain[32];
++    char compile_date[32];
 +};
-+typedef struct xennmi_callback xennmi_callback_t;
-+DEFINE_XEN_GUEST_HANDLE(xennmi_callback_t);
++typedef struct xen_compile_info xen_compile_info_t;
 +
-+/*
-+ * Deregister NMI callback for this (calling) VCPU.
-+ * arg == NULL.
-+ */
-+#define XENNMI_unregister_callback 1
++#define XENVER_capabilities 3
++typedef char xen_capabilities_info_t[1024];
++#define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t))
 +
-+#endif /* __XEN_PUBLIC_NMI_H__ */
++#define XENVER_changeset 4
++typedef char xen_changeset_info_t[64];
++#define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t))
++
++#define XENVER_platform_parameters 5
++struct xen_platform_parameters {
++    unsigned long virt_start;
++};
++typedef struct xen_platform_parameters xen_platform_parameters_t;
++
++#define XENVER_get_features 6
++struct xen_feature_info {
++    unsigned int submap_idx;    /* IN: which 32-bit submap to return */
++    uint32_t     submap;        /* OUT: 32-bit submap */
++};
++typedef struct xen_feature_info xen_feature_info_t;
++
++/* Declares the features reported by XENVER_get_features. */
++#include "features.h"
++
++/* arg == NULL; returns host memory page size. */
++#define XENVER_pagesize 7
++
++/* arg == xen_domain_handle_t. */
++#define XENVER_guest_handle 8
++
++#endif /* __XEN_PUBLIC_VERSION_H__ */
 +
 +/*
 + * Local variables:
@@ -103283,11 +142802,15 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
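For illustration, the version ops above are typically reached through the guest's
hypercall glue; a minimal sketch, assuming the conventional
HYPERVISOR_xen_version(cmd, arg) wrapper and printk() (neither is part of this hunk):

    #include <xen/interface/version.h>

    static void print_xen_version(void)
    {
        xen_extraversion_t extra;
        /* XENVER_version returns major:minor packed 16:16, not an error. */
        int ver = HYPERVISOR_xen_version(XENVER_version, NULL);

        HYPERVISOR_xen_version(XENVER_extraversion, extra);
        printk("Running on Xen %d.%d%.*s\n", ver >> 16, ver & 0xffff,
               (int)XEN_EXTRAVERSION_LEN, extra);
    }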
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/physdev.h tmp-linux-2.6-xen.patch/include/xen/interface/physdev.h
---- pristine-linux-2.6.18.2/include/xen/interface/physdev.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/physdev.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,169 @@
-+/*
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/xen-compat.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/xen-compat.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,44 @@
++/******************************************************************************
++ * xen-compat.h
++ * 
++ * Guest OS interface to Xen.  Compatibility layer.
++ * 
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
 + * deal in the Software without restriction, including without limitation the
@@ -103305,147 +142828,661 @@
 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2006, Christian Limpach
 + */
 +
-+#ifndef __XEN_PUBLIC_PHYSDEV_H__
-+#define __XEN_PUBLIC_PHYSDEV_H__
++#ifndef __XEN_PUBLIC_XEN_COMPAT_H__
++#define __XEN_PUBLIC_XEN_COMPAT_H__
++
++#define __XEN_LATEST_INTERFACE_VERSION__ 0x00030209
++
++#if defined(__XEN__) || defined(__XEN_TOOLS__)
++/* Xen is built with matching headers and implements the latest interface. */
++#define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__
++#elif !defined(__XEN_INTERFACE_VERSION__)
++/* Guests which do not specify a version get the legacy interface. */
++#define __XEN_INTERFACE_VERSION__ 0x00000000
++#endif
++
++#if __XEN_INTERFACE_VERSION__ > __XEN_LATEST_INTERFACE_VERSION__
++#error "These header files do not support the requested interface version."
++#endif
++
++#endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */
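A guest that wants a newer interface than the legacy default pins the version
before any public header is pulled in; a sketch (the value 0x00030205 is just an
example no larger than the latest version above):

    /* Usually set on the compiler command line:
     *   -D__XEN_INTERFACE_VERSION__=0x00030205
     * or defined ahead of the first include: */
    #define __XEN_INTERFACE_VERSION__ 0x00030205
    #include <xen/interface/xen.h>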
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/xen.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/xen.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,639 @@
++/******************************************************************************
++ * xen.h
++ * 
++ * Guest OS interface to Xen.
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2004, K A Fraser
++ */
++
++#ifndef __XEN_PUBLIC_XEN_H__
++#define __XEN_PUBLIC_XEN_H__
++
++#include "xen-compat.h"
++
++#if defined(__i386__) || defined(__x86_64__)
++#include "arch-x86/xen.h"
++#elif defined(__ia64__)
++#include "arch-ia64.h"
++#else
++#error "Unsupported architecture"
++#endif
++
++#ifndef __ASSEMBLY__
++/* Guest handles for primitive C types. */
++DEFINE_XEN_GUEST_HANDLE(char);
++__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
++DEFINE_XEN_GUEST_HANDLE(int);
++__DEFINE_XEN_GUEST_HANDLE(uint,  unsigned int);
++DEFINE_XEN_GUEST_HANDLE(long);
++__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
++DEFINE_XEN_GUEST_HANDLE(void);
++
++DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
++#endif
 +
 +/*
-+ * Prototype for this hypercall is:
-+ *  int physdev_op(int cmd, void *args)
-+ * @cmd  == PHYSDEVOP_??? (physdev operation).
-+ * @args == Operation-specific extra arguments (NULL if none).
++ * HYPERCALLS
++ */
++
++#define __HYPERVISOR_set_trap_table        0
++#define __HYPERVISOR_mmu_update            1
++#define __HYPERVISOR_set_gdt               2
++#define __HYPERVISOR_stack_switch          3
++#define __HYPERVISOR_set_callbacks         4
++#define __HYPERVISOR_fpu_taskswitch        5
++#define __HYPERVISOR_sched_op_compat       6 /* compat since 0x00030101 */
++#define __HYPERVISOR_platform_op           7
++#define __HYPERVISOR_set_debugreg          8
++#define __HYPERVISOR_get_debugreg          9
++#define __HYPERVISOR_update_descriptor    10
++#define __HYPERVISOR_memory_op            12
++#define __HYPERVISOR_multicall            13
++#define __HYPERVISOR_update_va_mapping    14
++#define __HYPERVISOR_set_timer_op         15
++#define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */
++#define __HYPERVISOR_xen_version          17
++#define __HYPERVISOR_console_io           18
++#define __HYPERVISOR_physdev_op_compat    19 /* compat since 0x00030202 */
++#define __HYPERVISOR_grant_table_op       20
++#define __HYPERVISOR_vm_assist            21
++#define __HYPERVISOR_update_va_mapping_otherdomain 22
++#define __HYPERVISOR_iret                 23 /* x86 only */
++#define __HYPERVISOR_vcpu_op              24
++#define __HYPERVISOR_set_segment_base     25 /* x86/64 only */
++#define __HYPERVISOR_mmuext_op            26
++#define __HYPERVISOR_xsm_op               27
++#define __HYPERVISOR_nmi_op               28
++#define __HYPERVISOR_sched_op             29
++#define __HYPERVISOR_callback_op          30
++#define __HYPERVISOR_xenoprof_op          31
++#define __HYPERVISOR_event_channel_op     32
++#define __HYPERVISOR_physdev_op           33
++#define __HYPERVISOR_hvm_op               34
++#define __HYPERVISOR_sysctl               35
++#define __HYPERVISOR_domctl               36
++#define __HYPERVISOR_kexec_op             37
++
++/* Architecture-specific hypercall definitions. */
++#define __HYPERVISOR_arch_0               48
++#define __HYPERVISOR_arch_1               49
++#define __HYPERVISOR_arch_2               50
++#define __HYPERVISOR_arch_3               51
++#define __HYPERVISOR_arch_4               52
++#define __HYPERVISOR_arch_5               53
++#define __HYPERVISOR_arch_6               54
++#define __HYPERVISOR_arch_7               55
++
++/*
++ * HYPERCALL COMPATIBILITY.
++ */
++
++/* New sched_op hypercall introduced in 0x00030101. */
++#if __XEN_INTERFACE_VERSION__ < 0x00030101
++#undef __HYPERVISOR_sched_op
++#define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat
++#endif
++
++/* New event-channel and physdev hypercalls introduced in 0x00030202. */
++#if __XEN_INTERFACE_VERSION__ < 0x00030202
++#undef __HYPERVISOR_event_channel_op
++#define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat
++#undef __HYPERVISOR_physdev_op
++#define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat
++#endif
++
++/* New platform_op hypercall introduced in 0x00030204. */
++#if __XEN_INTERFACE_VERSION__ < 0x00030204
++#define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op
++#endif
++
++/* 
++ * VIRTUAL INTERRUPTS
++ * 
++ * Virtual interrupts that a guest OS may receive from Xen.
++ * 
++ * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a
++ * global VIRQ. The former can be bound once per VCPU and cannot be re-bound.
++ * The latter can be allocated only once per guest: they must initially be
++ * allocated to VCPU0 but can subsequently be re-bound.
 + */
++#define VIRQ_TIMER      0  /* V. Timebase update, and/or requested timeout.  */
++#define VIRQ_DEBUG      1  /* V. Request guest to dump debug info.           */
++#define VIRQ_CONSOLE    2  /* G. (DOM0) Bytes received on emergency console. */
++#define VIRQ_DOM_EXC    3  /* G. (DOM0) Exceptional event for some domain.   */
++#define VIRQ_TBUF       4  /* G. (DOM0) Trace buffer has records available.  */
++#define VIRQ_DEBUGGER   6  /* G. (DOM0) A domain has paused for debugging.   */
++#define VIRQ_XENOPROF   7  /* V. XenOprofile interrupt: new sample available */
++#define VIRQ_CON_RING   8  /* G. (DOM0) Bytes received on console            */
++
++/* Architecture-specific VIRQ definitions. */
++#define VIRQ_ARCH_0    16
++#define VIRQ_ARCH_1    17
++#define VIRQ_ARCH_2    18
++#define VIRQ_ARCH_3    19
++#define VIRQ_ARCH_4    20
++#define VIRQ_ARCH_5    21
++#define VIRQ_ARCH_6    22
++#define VIRQ_ARCH_7    23
++
++#define NR_VIRQS       24
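As a usage sketch, a per-VCPU VIRQ such as VIRQ_TIMER is delivered by binding it
to an event channel; this assumes the EVTCHNOP_bind_virq operation and struct
evtchn_bind_virq from event_channel.h (added elsewhere in this patch) plus a
hypothetical setup_irq_for_port() helper:

    struct evtchn_bind_virq bind = {
        .virq = VIRQ_TIMER,   /* 'V.': bound once per VCPU, not re-bindable */
        .vcpu = 0,
    };

    if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind) == 0)
        setup_irq_for_port(bind.port);   /* hypothetical guest helper */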
 +
 +/*
-+ * Notify end-of-interrupt (EOI) for the specified IRQ.
-+ * @arg == pointer to physdev_eoi structure.
++ * MMU-UPDATE REQUESTS
++ * 
++ * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs.
++ * A foreigndom (FD) can be specified (or DOMID_SELF for none).
++ * Where the FD has some effect, it is described below.
++ * ptr[1:0] specifies the appropriate MMU_* command.
++ * 
++ * ptr[1:0] == MMU_NORMAL_PT_UPDATE:
++ * Updates an entry in a page table. If updating an L1 table, and the new
++ * table entry is valid/present, the mapped frame must belong to the FD, if
++ * an FD has been specified. If attempting to map an I/O page then the
++ * caller assumes the privilege of the FD.
++ * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller.
++ * FD == DOMID_XEN: Map restricted areas of Xen's heap space.
++ * ptr[:2]  -- Machine address of the page-table entry to modify.
++ * val      -- Value to write.
++ * 
++ * ptr[1:0] == MMU_MACHPHYS_UPDATE:
++ * Updates an entry in the machine->pseudo-physical mapping table.
++ * ptr[:2]  -- Machine address within the frame whose mapping to modify.
++ *             The frame must belong to the FD, if one is specified.
++ * val      -- Value to write into the mapping entry.
++ * 
++ * ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD:
++ * As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed
++ * with those in @val.
++ */
++#define MMU_NORMAL_PT_UPDATE      0 /* checked '*ptr = val'. ptr is MA.      */
++#define MMU_MACHPHYS_UPDATE       1 /* ptr = MA of frame to modify entry for */
++#define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */
++
++/*
++ * MMU EXTENDED OPERATIONS
++ * 
++ * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
++ * A foreigndom (FD) can be specified (or DOMID_SELF for none).
++ * Where the FD has some effect, it is described below.
++ * 
++ * cmd: MMUEXT_(UN)PIN_*_TABLE
++ * mfn: Machine frame number to be (un)pinned as a p.t. page.
++ *      The frame must belong to the FD, if one is specified.
++ * 
++ * cmd: MMUEXT_NEW_BASEPTR
++ * mfn: Machine frame number of new page-table base to install in MMU.
++ * 
++ * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only]
++ * mfn: Machine frame number of new page-table base to install in MMU
++ *      when in user space.
++ * 
++ * cmd: MMUEXT_TLB_FLUSH_LOCAL
++ * No additional arguments. Flushes local TLB.
++ * 
++ * cmd: MMUEXT_INVLPG_LOCAL
++ * linear_addr: Linear address to be flushed from the local TLB.
++ * 
++ * cmd: MMUEXT_TLB_FLUSH_MULTI
++ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
++ * 
++ * cmd: MMUEXT_INVLPG_MULTI
++ * linear_addr: Linear address to be flushed.
++ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
++ * 
++ * cmd: MMUEXT_TLB_FLUSH_ALL
++ * No additional arguments. Flushes all VCPUs' TLBs.
++ * 
++ * cmd: MMUEXT_INVLPG_ALL
++ * linear_addr: Linear address to be flushed from all VCPUs' TLBs.
++ * 
++ * cmd: MMUEXT_FLUSH_CACHE
++ * No additional arguments. Writes back and flushes cache contents.
++ * 
++ * cmd: MMUEXT_SET_LDT
++ * linear_addr: Linear address of LDT base (NB. must be page-aligned).
++ * nr_ents: Number of entries in LDT.
 + */
-+#define PHYSDEVOP_eoi                   12
-+struct physdev_eoi {
-+    /* IN */
-+    uint32_t irq;
++#define MMUEXT_PIN_L1_TABLE      0
++#define MMUEXT_PIN_L2_TABLE      1
++#define MMUEXT_PIN_L3_TABLE      2
++#define MMUEXT_PIN_L4_TABLE      3
++#define MMUEXT_UNPIN_TABLE       4
++#define MMUEXT_NEW_BASEPTR       5
++#define MMUEXT_TLB_FLUSH_LOCAL   6
++#define MMUEXT_INVLPG_LOCAL      7
++#define MMUEXT_TLB_FLUSH_MULTI   8
++#define MMUEXT_INVLPG_MULTI      9
++#define MMUEXT_TLB_FLUSH_ALL    10
++#define MMUEXT_INVLPG_ALL       11
++#define MMUEXT_FLUSH_CACHE      12
++#define MMUEXT_SET_LDT          13
++#define MMUEXT_NEW_USER_BASEPTR 15
++
++#ifndef __ASSEMBLY__
++struct mmuext_op {
++    unsigned int cmd;
++    union {
++        /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
++        xen_pfn_t     mfn;
++        /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
++        unsigned long linear_addr;
++    } arg1;
++    union {
++        /* SET_LDT */
++        unsigned int nr_ents;
++        /* TLB_FLUSH_MULTI, INVLPG_MULTI */
++#if __XEN_INTERFACE_VERSION__ >= 0x00030205
++        XEN_GUEST_HANDLE(void) vcpumask;
++#else
++        void *vcpumask;
++#endif
++    } arg2;
 +};
-+typedef struct physdev_eoi physdev_eoi_t;
-+DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t);
++typedef struct mmuext_op mmuext_op_t;
++DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
++#endif
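For example, pinning a frame as an L1 page table goes through arg1.mfn; a sketch
assuming the conventional HYPERVISOR_mmuext_op(ops, count, done, domid) wrapper
and a hypothetical mfn:

    struct mmuext_op op = {
        .cmd      = MMUEXT_PIN_L1_TABLE,
        .arg1.mfn = mfn,          /* machine frame to pin as a p.t. page */
    };

    if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
        BUG();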
++
++/* These are passed as 'flags' to update_va_mapping. They can be ORed. */
++/* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap.   */
++/* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer.         */
++#define UVMF_NONE               (0UL<<0) /* No flushing at all.   */
++#define UVMF_TLB_FLUSH          (1UL<<0) /* Flush entire TLB(s).  */
++#define UVMF_INVLPG             (2UL<<0) /* Flush only one entry. */
++#define UVMF_FLUSHTYPE_MASK     (3UL<<0)
++#define UVMF_MULTI              (0UL<<2) /* Flush subset of TLBs. */
++#define UVMF_LOCAL              (0UL<<2) /* Flush local TLB.      */
++#define UVMF_ALL                (1UL<<2) /* Flush all TLBs.       */
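Combined as described, a typical single-entry update with a local flush is the
following one-liner (a sketch; HYPERVISOR_update_va_mapping is the usual
wrapper, vaddr and new_pte are hypothetical):

    /* UVMF_LOCAL is zero, so OR-ing it in is purely documentary. */
    HYPERVISOR_update_va_mapping(vaddr, new_pte, UVMF_INVLPG | UVMF_LOCAL);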
++
++/*
++ * Commands to HYPERVISOR_console_io().
++ */
++#define CONSOLEIO_write         0
++#define CONSOLEIO_read          1
++
++/*
++ * Commands to HYPERVISOR_vm_assist().
++ */
++#define VMASST_CMD_enable                0
++#define VMASST_CMD_disable               1
++
++/* x86/32 guests: simulate full 4GB segment limits. */
++#define VMASST_TYPE_4gb_segments         0
++
++/* x86/32 guests: trap (vector 15) whenever above vmassist is used. */
++#define VMASST_TYPE_4gb_segments_notify  1
++
++/*
++ * x86 guests: support writes to bottom-level PTEs.
++ * NB1. Page-directory entries cannot be written.
++ * NB2. Guest must continue to remove all writable mappings of PTEs.
++ */
++#define VMASST_TYPE_writable_pagetables  2
++
++/* x86/PAE guests: support PDPTs above 4GB. */
++#define VMASST_TYPE_pae_extended_cr3     3
++
++#define MAX_VMASST_TYPE                  3
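A guest switches these assists on with the vm_assist hypercall, e.g. (a sketch,
assuming the conventional HYPERVISOR_vm_assist(cmd, type) wrapper):

    HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);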
++
++#ifndef __ASSEMBLY__
++
++typedef uint16_t domid_t;
++
++/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
++#define DOMID_FIRST_RESERVED (0x7FF0U)
++
++/* DOMID_SELF is used in certain contexts to refer to oneself. */
++#define DOMID_SELF (0x7FF0U)
++
++/*
++ * DOMID_IO is used to restrict page-table updates to mapping I/O memory.
++ * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO
++ * is useful to ensure that no mappings to the OS's own heap are accidentally
++ * installed. (e.g., in Linux this could cause havoc as reference counts
++ * aren't adjusted on the I/O-mapping code path).
++ * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can
++ * be specified by any calling domain.
++ */
++#define DOMID_IO   (0x7FF1U)
++
++/*
++ * DOMID_XEN is used to allow privileged domains to map restricted parts of
++ * Xen's heap space (e.g., the machine_to_phys table).
++ * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if
++ * the caller is privileged.
++ */
++#define DOMID_XEN  (0x7FF2U)
 +
 +/*
-+ * Query the status of an IRQ line.
-+ * @arg == pointer to physdev_irq_status_query structure.
++ * Send an array of these to HYPERVISOR_mmu_update().
++ * NB. The fields are natural pointer/address size for this architecture.
 + */
-+#define PHYSDEVOP_irq_status_query       5
-+struct physdev_irq_status_query {
-+    /* IN */
-+    uint32_t irq;
-+    /* OUT */
-+    uint32_t flags; /* XENIRQSTAT_* */
++struct mmu_update {
++    uint64_t ptr;       /* Machine address of PTE. */
++    uint64_t val;       /* New contents of PTE.    */
 +};
-+typedef struct physdev_irq_status_query physdev_irq_status_query_t;
-+DEFINE_XEN_GUEST_HANDLE(physdev_irq_status_query_t);
-+
-+/* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */
-+#define _XENIRQSTAT_needs_eoi   (0)
-+#define  XENIRQSTAT_needs_eoi   (1U<<_XENIRQSTAT_needs_eoi)
-+
-+/* IRQ shared by multiple guests? */
-+#define _XENIRQSTAT_shared      (1)
-+#define  XENIRQSTAT_shared      (1U<<_XENIRQSTAT_shared)
++typedef struct mmu_update mmu_update_t;
++DEFINE_XEN_GUEST_HANDLE(mmu_update_t);
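Tying this back to the MMU-UPDATE REQUESTS comment above, a single batched PTE
write looks roughly like the following sketch; HYPERVISOR_mmu_update(reqs,
count, done, domid) is the conventional wrapper, and pte_machine_addr and
new_pte_val are hypothetical:

    struct mmu_update req = {
        /* The MMU_* command lives in the low two bits of the pointer. */
        .ptr = pte_machine_addr | MMU_NORMAL_PT_UPDATE,
        .val = new_pte_val,
    };

    if (HYPERVISOR_mmu_update(&req, 1, NULL, DOMID_SELF) < 0)
        BUG();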
 +
 +/*
-+ * Set the current VCPU's I/O privilege level.
-+ * @arg == pointer to physdev_set_iopl structure.
++ * Send an array of these to HYPERVISOR_multicall().
++ * NB. The fields are natural register size for this architecture.
 + */
-+#define PHYSDEVOP_set_iopl               6
-+struct physdev_set_iopl {
-+    /* IN */
-+    uint32_t iopl;
++struct multicall_entry {
++    unsigned long op, result;
++    unsigned long args[6];
 +};
-+typedef struct physdev_set_iopl physdev_set_iopl_t;
-+DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t);
++typedef struct multicall_entry multicall_entry_t;
++DEFINE_XEN_GUEST_HANDLE(multicall_entry_t);
 +
 +/*
-+ * Set the current VCPU's I/O-port permissions bitmap.
-+ * @arg == pointer to physdev_set_iobitmap structure.
++ * Event channel endpoints per domain:
++ *  1024 if a long is 32 bits; 4096 if a long is 64 bits.
 + */
-+#define PHYSDEVOP_set_iobitmap           7
-+struct physdev_set_iobitmap {
-+    /* IN */
-+    XEN_GUEST_HANDLE_00030205(uint8_t) bitmap;
-+    uint32_t nr_ports;
-+};
-+typedef struct physdev_set_iobitmap physdev_set_iobitmap_t;
-+DEFINE_XEN_GUEST_HANDLE(physdev_set_iobitmap_t);
++#define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64)
++
++struct vcpu_time_info {
++    /*
++     * Updates to the following values are preceded and followed by an
++     * increment of 'version'. The guest can therefore detect updates by
++     * looking for changes to 'version'. If the least-significant bit of
++     * the version number is set then an update is in progress and the guest
++     * must wait to read a consistent set of values.
++     * The correct way to interact with the version number is similar to
++     * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry.
++     */
++    uint32_t version;
++    uint32_t pad0;
++    uint64_t tsc_timestamp;   /* TSC at last update of time vals.  */
++    uint64_t system_time;     /* Time, in nanosecs, since boot.    */
++    /*
++     * Current system time:
++     *   system_time +
++     *   ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32)
++     * CPU frequency (Hz):
++     *   ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
++     */
++    uint32_t tsc_to_system_mul;
++    int8_t   tsc_shift;
++    int8_t   pad1[3];
++}; /* 32 bytes */
++typedef struct vcpu_time_info vcpu_time_info_t;
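The version protocol and the formula in the comment combine into a reader loop
like the sketch below; rdtsc() and rmb() are assumed primitives, and the plain
64-bit multiply is a simplification (real implementations scale in 96 bits to
avoid overflow on large deltas):

    static uint64_t read_system_time(const volatile struct vcpu_time_info *t)
    {
        uint32_t ver, mul;
        uint64_t base, delta;
        int8_t shift;

        do {
            ver = t->version;
            rmb();                        /* cf. read_seqbegin() */
            base  = t->system_time;
            delta = rdtsc() - t->tsc_timestamp;
            mul   = t->tsc_to_system_mul;
            shift = t->tsc_shift;
            rmb();                        /* cf. read_seqretry() */
        } while ((ver & 1) || ver != t->version);

        delta = (shift >= 0) ? (delta << shift) : (delta >> -shift);
        return base + ((delta * mul) >> 32);   /* ns since boot */
    }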
++
++struct vcpu_info {
++    /*
++     * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
++     * a pending notification for a particular VCPU. It is then cleared 
++     * by the guest OS /before/ checking for pending work, thus avoiding
++     * a set-and-check race. Note that the mask is only accessed by Xen
++     * on the CPU that is currently hosting the VCPU. This means that the
++     * pending and mask flags can be updated by the guest without special
++     * synchronisation (i.e., no need for the x86 LOCK prefix).
++     * This may seem suboptimal because if the pending flag is set by
++     * a different CPU then an IPI may be scheduled even when the mask
++     * is set. However, note:
++     *  1. The task of 'interrupt holdoff' is covered by the per-event-
++     *     channel mask bits. A 'noisy' event that is continually being
++     *     triggered can be masked at source at this very precise
++     *     granularity.
++     *  2. The main purpose of the per-VCPU mask is therefore to restrict
++     *     reentrant execution: whether for concurrency control, or to
++     *     prevent unbounded stack usage. Whatever the purpose, we expect
++     *     that the mask will be asserted only for short periods at a time,
++     *     and so the likelihood of a 'spurious' IPI is suitably small.
++     * The mask is read before making an event upcall to the guest: a
++     * non-zero mask therefore guarantees that the VCPU will not receive
++     * an upcall activation. The mask is cleared when the VCPU requests
++     * to block: this avoids wakeup-waiting races.
++     */
++    uint8_t evtchn_upcall_pending;
++    uint8_t evtchn_upcall_mask;
++    unsigned long evtchn_pending_sel;
++    struct arch_vcpu_info arch;
++    struct vcpu_time_info time;
++}; /* 64 bytes (x86) */
++#ifndef __XEN__
++typedef struct vcpu_info vcpu_info_t;
++#endif
 +
 +/*
-+ * Read or write an IO-APIC register.
-+ * @arg == pointer to physdev_apic structure.
++ * Xen/kernel shared data -- pointer provided in start_info.
++ *
++ * This structure is defined to be both smaller than a page, and the
++ * only data on the shared page, but may vary in actual size even within
++ * compatible Xen versions; guests should not rely on the size
++ * of this structure remaining constant.
 + */
-+#define PHYSDEVOP_apic_read              8
-+#define PHYSDEVOP_apic_write             9
-+struct physdev_apic {
-+    /* IN */
-+    unsigned long apic_physbase;
-+    uint32_t reg;
-+    /* IN or OUT */
-+    uint32_t value;
++struct shared_info {
++    struct vcpu_info vcpu_info[MAX_VIRT_CPUS];
++
++    /*
++     * A domain can create "event channels" on which it can send and receive
++     * asynchronous event notifications. There are three classes of event that
++     * are delivered by this mechanism:
++     *  1. Bi-directional inter- and intra-domain connections. Domains must
++     *     arrange out-of-band to set up a connection (usually by allocating
++     *     an unbound 'listener' port and advertising that via a storage service
++     *     such as xenstore).
++     *  2. Physical interrupts. A domain with suitable hardware-access
++     *     privileges can bind an event-channel port to a physical interrupt
++     *     source.
++     *  3. Virtual interrupts ('events'). A domain can bind an event-channel
++     *     port to a virtual interrupt source, such as the virtual-timer
++     *     device or the emergency console.
++     * 
++     * Event channels are addressed by a "port index". Each channel is
++     * associated with two bits of information:
++     *  1. PENDING -- notifies the domain that there is a pending notification
++     *     to be processed. This bit is cleared by the guest.
++     *  2. MASK -- if this bit is clear then a 0->1 transition of PENDING
++     *     will cause an asynchronous upcall to be scheduled. This bit is only
++     *     updated by the guest. It is read-only within Xen. If a channel
++     *     becomes pending while the channel is masked then the 'edge' is lost
++     *     (i.e., when the channel is unmasked, the guest must manually handle
++     *     pending notifications as no upcall will be scheduled by Xen).
++     * 
++     * To expedite scanning of pending notifications, any 0->1 pending
++     * transition on an unmasked channel causes a corresponding bit in a
++     * per-vcpu selector word to be set. Each bit in the selector covers a
++     * 'C long' in the PENDING bitfield array.
++     */
++    unsigned long evtchn_pending[sizeof(unsigned long) * 8];
++    unsigned long evtchn_mask[sizeof(unsigned long) * 8];
++
++    /*
++     * Wallclock time: updated only by control software. Guests should base
++     * their gettimeofday() syscall on this wallclock-base value.
++     */
++    uint32_t wc_version;      /* Version counter: see vcpu_time_info_t. */
++    uint32_t wc_sec;          /* Secs  00:00:00 UTC, Jan 1, 1970.  */
++    uint32_t wc_nsec;         /* Nsecs 00:00:00 UTC, Jan 1, 1970.  */
++
++    struct arch_shared_info arch;
++
 +};
-+typedef struct physdev_apic physdev_apic_t;
-+DEFINE_XEN_GUEST_HANDLE(physdev_apic_t);
++#ifndef __XEN__
++typedef struct shared_info shared_info_t;
++#endif
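The two-level pending scan described above works out to roughly this sketch;
xchg(), __ffs(), clear_bit() and the per-port handle_port() are assumed guest
primitives:

    static void scan_pending_events(struct shared_info *s, struct vcpu_info *v)
    {
        /* Atomically grab and clear the per-VCPU selector word. */
        unsigned long sel = xchg(&v->evtchn_pending_sel, 0);

        while (sel) {
            unsigned int i = __ffs(sel);
            unsigned long pend;

            sel &= ~(1UL << i);
            pend = s->evtchn_pending[i] & ~s->evtchn_mask[i];
            while (pend) {
                unsigned int j = __ffs(pend);

                pend &= ~(1UL << j);
                clear_bit(j, &s->evtchn_pending[i]); /* guest clears PENDING */
                handle_port(i * sizeof(unsigned long) * 8 + j);
            }
        }
    }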
 +
 +/*
-+ * Allocate or free a physical upcall vector for the specified IRQ line.
-+ * @arg == pointer to physdev_irq structure.
++ * Start-of-day memory layout:
++ *  1. The domain is started within a contiguous virtual-memory region.
++ *  2. The contiguous region ends on an aligned 4MB boundary.
++ *  3. This is the order of bootstrap elements in the initial virtual region:
++ *      a. relocated kernel image
++ *      b. initial ram disk              [mod_start, mod_len]
++ *      c. list of allocated page frames [mfn_list, nr_pages]
++ *      d. start_info_t structure        [register ESI (x86)]
++ *      e. bootstrap page tables         [pt_base, CR3 (x86)]
++ *      f. bootstrap stack               [register ESP (x86)]
++ *  4. Bootstrap elements are packed together, but each is 4kB-aligned.
++ *  5. The initial ram disk may be omitted.
++ *  6. The list of page frames forms a contiguous 'pseudo-physical' memory
++ *     layout for the domain. In particular, the bootstrap virtual-memory
++ *     region is a 1:1 mapping to the first section of the pseudo-physical map.
++ *  7. All bootstrap elements are mapped read-writable for the guest OS. The
++ *     only exception is the bootstrap page table, which is mapped read-only.
++ *  8. There is guaranteed to be at least 512kB padding after the final
++ *     bootstrap element. If necessary, the bootstrap virtual region is
++ *     extended by an extra 4MB to ensure this.
 + */
-+#define PHYSDEVOP_alloc_irq_vector      10
-+#define PHYSDEVOP_free_irq_vector       11
-+struct physdev_irq {
-+    /* IN */
-+    uint32_t irq;
-+    /* IN or OUT */
-+    uint32_t vector;
++
++#define MAX_GUEST_CMDLINE 1024
++struct start_info {
++    /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME.    */
++    char magic[32];             /* "xen-<version>-<platform>".            */
++    unsigned long nr_pages;     /* Total pages allocated to this domain.  */
++    unsigned long shared_info;  /* MACHINE address of shared info struct. */
++    uint32_t flags;             /* SIF_xxx flags.                         */
++    xen_pfn_t store_mfn;        /* MACHINE page number of shared page.    */
++    uint32_t store_evtchn;      /* Event channel for store communication. */
++    union {
++        struct {
++            xen_pfn_t mfn;      /* MACHINE page number of console page.   */
++            uint32_t  evtchn;   /* Event channel for console page.        */
++        } domU;
++        struct {
++            uint32_t info_off;  /* Offset of console_info struct.         */
++            uint32_t info_size; /* Size of console_info struct from start.*/
++        } dom0;
++    } console;
++    /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME).     */
++    unsigned long pt_base;      /* VIRTUAL address of page directory.     */
++    unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames.       */
++    unsigned long mfn_list;     /* VIRTUAL address of page-frame list.    */
++    unsigned long mod_start;    /* VIRTUAL address of pre-loaded module.  */
++    unsigned long mod_len;      /* Size (bytes) of pre-loaded module.     */
++    int8_t cmd_line[MAX_GUEST_CMDLINE];
 +};
-+typedef struct physdev_irq physdev_irq_t;
-+DEFINE_XEN_GUEST_HANDLE(physdev_irq_t);
++typedef struct start_info start_info_t;
++
++/* New console union for dom0 introduced in 0x00030203. */
++#if __XEN_INTERFACE_VERSION__ < 0x00030203
++#define console_mfn    console.domU.mfn
++#define console_evtchn console.domU.evtchn
++#endif
++
++/* These flags are passed in the 'flags' field of start_info_t. */
++#define SIF_PRIVILEGED    (1<<0)  /* Is the domain privileged? */
++#define SIF_INITDOMAIN    (1<<1)  /* Is this the initial control domain? */
++#define SIF_PM_MASK       (0xFF<<8) /* reserve 1 byte for xen-pm options */
++
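As a boot-time sketch, a guest usually validates start_info before touching
anything else; xen_start_info stands in for the pointer handed over in register
ESI, and strncmp()/panic() are assumed:

    if (strncmp(xen_start_info->magic, "xen-3", 5) != 0)
        panic("unsupported hypervisor magic: %.32s", xen_start_info->magic);

    if (xen_start_info->flags & SIF_INITDOMAIN)
        dom0_console_init(xen_start_info);  /* hypothetical dom0-only path */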
++typedef struct dom0_vga_console_info {
++    uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */
++#define XEN_VGATYPE_TEXT_MODE_3 0x03
++#define XEN_VGATYPE_VESA_LFB    0x23
 +
-+/*
-+ * Argument to physdev_op_compat() hypercall. Superceded by new physdev_op()
-+ * hypercall since 0x00030202.
-+ */
-+struct physdev_op {
-+    uint32_t cmd;
 +    union {
-+        struct physdev_irq_status_query      irq_status_query;
-+        struct physdev_set_iopl              set_iopl;
-+        struct physdev_set_iobitmap          set_iobitmap;
-+        struct physdev_apic                  apic_op;
-+        struct physdev_irq                   irq_op;
++        struct {
++            /* Font height, in pixels. */
++            uint16_t font_height;
++            /* Cursor location (column, row). */
++            uint16_t cursor_x, cursor_y;
++            /* Number of rows and columns (dimensions in characters). */
++            uint16_t rows, columns;
++        } text_mode_3;
++
++        struct {
++            /* Width and height, in pixels. */
++            uint16_t width, height;
++            /* Bytes per scan line. */
++            uint16_t bytes_per_line;
++            /* Bits per pixel. */
++            uint16_t bits_per_pixel;
++            /* LFB physical address, and size (in units of 64kB). */
++            uint32_t lfb_base;
++            uint32_t lfb_size;
++            /* RGB mask offsets and sizes, as defined by VBE 1.2+ */
++            uint8_t  red_pos, red_size;
++            uint8_t  green_pos, green_size;
++            uint8_t  blue_pos, blue_size;
++            uint8_t  rsvd_pos, rsvd_size;
++#if __XEN_INTERFACE_VERSION__ >= 0x00030206
++            /* VESA capabilities (offset 0xa, VESA command 0x4f00). */
++            uint32_t gbl_caps;
++            /* Mode attributes (offset 0x0, VESA command 0x4f01). */
++            uint16_t mode_attrs;
++#endif
++        } vesa_lfb;
 +    } u;
-+};
-+typedef struct physdev_op physdev_op_t;
-+DEFINE_XEN_GUEST_HANDLE(physdev_op_t);
++} dom0_vga_console_info_t;
++#define xen_vga_console_info dom0_vga_console_info
++#define xen_vga_console_info_t dom0_vga_console_info_t
 +
-+/*
-+ * Notify that some PIRQ-bound event channels have been unmasked.
-+ * ** This command is obsolete since interface version 0x00030202 and is **
-+ * ** unsupported by newer versions of Xen.                              **
-+ */
-+#define PHYSDEVOP_IRQ_UNMASK_NOTIFY      4
++typedef uint8_t xen_domain_handle_t[16];
 +
-+/*
-+ * These all-capitals physdev operation names are superceded by the new names
-+ * (defined above) since interface version 0x00030202.
-+ */
-+#define PHYSDEVOP_IRQ_STATUS_QUERY       PHYSDEVOP_irq_status_query
-+#define PHYSDEVOP_SET_IOPL               PHYSDEVOP_set_iopl
-+#define PHYSDEVOP_SET_IOBITMAP           PHYSDEVOP_set_iobitmap
-+#define PHYSDEVOP_APIC_READ              PHYSDEVOP_apic_read
-+#define PHYSDEVOP_APIC_WRITE             PHYSDEVOP_apic_write
-+#define PHYSDEVOP_ASSIGN_VECTOR          PHYSDEVOP_alloc_irq_vector
-+#define PHYSDEVOP_FREE_VECTOR            PHYSDEVOP_free_irq_vector
-+#define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi
-+#define PHYSDEVOP_IRQ_SHARED             XENIRQSTAT_shared
++/* Turn a plain number into a C unsigned long constant. */
++#define __mk_unsigned_long(x) x ## UL
++#define mk_unsigned_long(x) __mk_unsigned_long(x)
 +
-+#endif /* __XEN_PUBLIC_PHYSDEV_H__ */
++__DEFINE_XEN_GUEST_HANDLE(uint8,  uint8_t);
++__DEFINE_XEN_GUEST_HANDLE(uint16, uint16_t);
++__DEFINE_XEN_GUEST_HANDLE(uint32, uint32_t);
++__DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t);
++
++#else /* __ASSEMBLY__ */
++
++/* In assembly code we cannot use C numeric constant suffixes. */
++#define mk_unsigned_long(x) x
++
++#endif /* !__ASSEMBLY__ */
++
++/* Default definitions for macros used by domctl/sysctl. */
++#if defined(__XEN__) || defined(__XEN_TOOLS__)
++#ifndef uint64_aligned_t
++#define uint64_aligned_t uint64_t
++#endif
++#ifndef XEN_GUEST_HANDLE_64
++#define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name)
++#endif
++#endif
++
++#endif /* __XEN_PUBLIC_XEN_H__ */
 +
 +/*
 + * Local variables:
@@ -103456,15 +143493,11 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/platform.h tmp-linux-2.6-xen.patch/include/xen/interface/platform.h
---- pristine-linux-2.6.18.2/include/xen/interface/platform.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/platform.h	2007-09-30 18:06:19.000000000 +0200
-@@ -0,0 +1,183 @@
-+/******************************************************************************
-+ * platform.h
-+ * 
-+ * Hardware platform operations. Intended for use by domain-0 kernel.
-+ * 
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/xencomm.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/xencomm.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,41 @@
++/*
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
 + * deal in the Software without restriction, including without limitation the
@@ -103483,174 +143516,37 @@
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
 + *
-+ * Copyright (c) 2002-2006, K Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_PLATFORM_H__
-+#define __XEN_PUBLIC_PLATFORM_H__
-+
-+#include "xen.h"
-+
-+#define XENPF_INTERFACE_VERSION 0x03000001
-+
-+/*
-+ * Set clock such that it would read <secs,nsecs> after 00:00:00 UTC,
-+ * 1 January, 1970 if the current system time was <system_time>.
-+ */
-+#define XENPF_settime             17
-+struct xenpf_settime {
-+    /* IN variables. */
-+    uint32_t secs;
-+    uint32_t nsecs;
-+    uint64_t system_time;
-+};
-+typedef struct xenpf_settime xenpf_settime_t;
-+DEFINE_XEN_GUEST_HANDLE(xenpf_settime_t);
-+
-+/*
-+ * Request memory range (@mfn, @mfn+ at nr_mfns-1) to have type @type.
-+ * On x86, @type is an architecture-defined MTRR memory type.
-+ * On success, returns the MTRR that was used (@reg) and a handle that can
-+ * be passed to XENPF_DEL_MEMTYPE to accurately tear down the new setting.
-+ * (x86-specific).
-+ */
-+#define XENPF_add_memtype         31
-+struct xenpf_add_memtype {
-+    /* IN variables. */
-+    xen_pfn_t mfn;
-+    uint64_t nr_mfns;
-+    uint32_t type;
-+    /* OUT variables. */
-+    uint32_t handle;
-+    uint32_t reg;
-+};
-+typedef struct xenpf_add_memtype xenpf_add_memtype_t;
-+DEFINE_XEN_GUEST_HANDLE(xenpf_add_memtype_t);
-+
-+/*
-+ * Tear down an existing memory-range type. If @handle is remembered then it
-+ * should be passed in to accurately tear down the correct setting (in case
-+ * of overlapping memory regions with differing types). If it is not known
-+ * then @handle should be set to zero. In all cases @reg must be set.
-+ * (x86-specific).
++ * Copyright (C) IBM Corp. 2006
 + */
-+#define XENPF_del_memtype         32
-+struct xenpf_del_memtype {
-+    /* IN variables. */
-+    uint32_t handle;
-+    uint32_t reg;
-+};
-+typedef struct xenpf_del_memtype xenpf_del_memtype_t;
-+DEFINE_XEN_GUEST_HANDLE(xenpf_del_memtype_t);
-+
-+/* Read current type of an MTRR (x86-specific). */
-+#define XENPF_read_memtype        33
-+struct xenpf_read_memtype {
-+    /* IN variables. */
-+    uint32_t reg;
-+    /* OUT variables. */
-+    xen_pfn_t mfn;
-+    uint64_t nr_mfns;
-+    uint32_t type;
-+};
-+typedef struct xenpf_read_memtype xenpf_read_memtype_t;
-+DEFINE_XEN_GUEST_HANDLE(xenpf_read_memtype_t);
-+
-+#define XENPF_microcode_update    35
-+struct xenpf_microcode_update {
-+    /* IN variables. */
-+    XEN_GUEST_HANDLE(void) data;      /* Pointer to microcode data */
-+    uint32_t length;                  /* Length of microcode data. */
-+};
-+typedef struct xenpf_microcode_update xenpf_microcode_update_t;
-+DEFINE_XEN_GUEST_HANDLE(xenpf_microcode_update_t);
-+
-+#define XENPF_platform_quirk      39
-+#define QUIRK_NOIRQBALANCING      1 /* Do not restrict IO-APIC RTE targets */
-+#define QUIRK_IOAPIC_BAD_REGSEL   2 /* IO-APIC REGSEL forgets its value    */
-+#define QUIRK_IOAPIC_GOOD_REGSEL  3 /* IO-APIC REGSEL behaves properly     */
-+struct xenpf_platform_quirk {
-+    /* IN variables. */
-+    uint32_t quirk_id;
-+};
-+typedef struct xenpf_platform_quirk xenpf_platform_quirk_t;
-+DEFINE_XEN_GUEST_HANDLE(xenpf_platform_quirk_t);
-+
-+#define XENPF_firmware_info       50
-+#define XEN_FW_DISK_INFO          1 /* from int 13 AH=08/41/48 */
-+#define XEN_FW_DISK_MBR_SIGNATURE 2 /* from MBR offset 0x1b8 */
-+#define XEN_FW_VBEDDC_INFO        3 /* from int 10 AX=4f15 */
-+struct xenpf_firmware_info {
-+    /* IN variables. */
-+    uint32_t type;
-+    uint32_t index;
-+    /* OUT variables. */
-+    union {
-+        struct {
-+            /* Int13, Fn48: Check Extensions Present. */
-+            uint8_t device;                   /* %dl: bios device number */
-+            uint8_t version;                  /* %ah: major version      */
-+            uint16_t interface_support;       /* %cx: support bitmap     */
-+            /* Int13, Fn08: Legacy Get Device Parameters. */
-+            uint16_t legacy_max_cylinder;     /* %cl[7:6]:%ch: max cyl # */
-+            uint8_t legacy_max_head;          /* %dh: max head #         */
-+            uint8_t legacy_sectors_per_track; /* %cl[5:0]: max sector #  */
-+            /* Int13, Fn41: Get Device Parameters (as filled into %ds:%esi). */
-+            /* NB. First uint16_t of buffer must be set to buffer size.      */
-+            XEN_GUEST_HANDLE(void) edd_params;
-+        } disk_info; /* XEN_FW_DISK_INFO */
-+        struct {
-+            uint8_t device;                   /* bios device number  */
-+            uint32_t mbr_signature;           /* offset 0x1b8 in mbr */
-+        } disk_mbr_signature; /* XEN_FW_DISK_MBR_SIGNATURE */
-+        struct {
-+            /* Int10, AX=4F15: Get EDID info. */
-+            uint8_t capabilities;
-+            uint8_t edid_transfer_time;
-+            /* must refer to 128-byte buffer */
-+            XEN_GUEST_HANDLE(uint8_t) edid;
-+        } vbeddc_info; /* XEN_FW_VBEDDC_INFO */
-+    } u;
-+};
-+typedef struct xenpf_firmware_info xenpf_firmware_info_t;
-+DEFINE_XEN_GUEST_HANDLE(xenpf_firmware_info_t);
 +
-+struct xen_platform_op {
-+    uint32_t cmd;
-+    uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
-+    union {
-+        struct xenpf_settime           settime;
-+        struct xenpf_add_memtype       add_memtype;
-+        struct xenpf_del_memtype       del_memtype;
-+        struct xenpf_read_memtype      read_memtype;
-+        struct xenpf_microcode_update  microcode;
-+        struct xenpf_platform_quirk    platform_quirk;
-+        struct xenpf_firmware_info     firmware_info;
-+        uint8_t                        pad[128];
-+    } u;
-+};
-+typedef struct xen_platform_op xen_platform_op_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_platform_op_t);
-+
-+#endif /* __XEN_PUBLIC_PLATFORM_H__ */
++#ifndef _XEN_XENCOMM_H_
++#define _XEN_XENCOMM_H_
 +
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
++/* A xencomm descriptor is a scatter/gather list containing physical
++ * addresses corresponding to a virtually contiguous memory area. The
++ * hypervisor translates these physical addresses to machine addresses to copy
++ * to and from the virtually contiguous area.
 + */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/sched.h tmp-linux-2.6-xen.patch/include/xen/interface/sched.h
---- pristine-linux-2.6.18.2/include/xen/interface/sched.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/sched.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,121 @@
++
++#define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */
++#define XENCOMM_INVALID (~0UL)
++
++struct xencomm_desc {
++    uint32_t magic;
++    uint32_t nr_addrs; /* the number of entries in address[] */
++    uint64_t address[0];
++};
++
++#endif /* _XEN_XENCOMM_H_ */
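Filling in a descriptor for a virtually contiguous buffer then looks roughly
like this sketch; get_free_page(), virt_to_phys(), buf and nr_pages are the
guest's own (hypothetical here), and error handling is elided:

    struct xencomm_desc *desc = (struct xencomm_desc *)get_free_page();
    unsigned int i;

    desc->magic    = XENCOMM_MAGIC;
    desc->nr_addrs = nr_pages;
    for (i = 0; i < nr_pages; i++)
        desc->address[i] = virt_to_phys(buf + i * PAGE_SIZE);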
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/xenoprof.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/xenoprof.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,138 @@
 +/******************************************************************************
-+ * sched.h
++ * xenoprof.h
 + * 
-+ * Scheduler state interactions
++ * Interface for enabling system-wide profiling based on hardware performance
++ * counters
 + * 
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
@@ -103670,94 +143566,110 @@
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
 + *
-+ * Copyright (c) 2005, Keir Fraser <keir at xensource.com>
++ * Copyright (C) 2005 Hewlett-Packard Co.
++ * Written by Aravind Menon & Jose Renato Santos
 + */
 +
-+#ifndef __XEN_PUBLIC_SCHED_H__
-+#define __XEN_PUBLIC_SCHED_H__
++#ifndef __XEN_PUBLIC_XENOPROF_H__
++#define __XEN_PUBLIC_XENOPROF_H__
 +
-+#include "event_channel.h"
++#include "xen.h"
 +
 +/*
-+ * The prototype for this hypercall is:
-+ *  long sched_op(int cmd, void *arg)
-+ * @cmd == SCHEDOP_??? (scheduler operation).
-+ * @arg == Operation-specific extra argument(s), as described below.
-+ * 
-+ * Versions of Xen prior to 3.0.2 provided only the following legacy version
-+ * of this hypercall, supporting only the commands yield, block and shutdown:
-+ *  long sched_op(int cmd, unsigned long arg)
-+ * @cmd == SCHEDOP_??? (scheduler operation).
-+ * @arg == 0               (SCHEDOP_yield and SCHEDOP_block)
-+ *      == SHUTDOWN_* code (SCHEDOP_shutdown)
-+ * This legacy version is available to new guests as sched_op_compat().
++ * Commands to HYPERVISOR_xenoprof_op().
 + */
++#define XENOPROF_init                0
++#define XENOPROF_reset_active_list   1
++#define XENOPROF_reset_passive_list  2
++#define XENOPROF_set_active          3
++#define XENOPROF_set_passive         4
++#define XENOPROF_reserve_counters    5
++#define XENOPROF_counter             6
++#define XENOPROF_setup_events        7
++#define XENOPROF_enable_virq         8
++#define XENOPROF_start               9
++#define XENOPROF_stop               10
++#define XENOPROF_disable_virq       11
++#define XENOPROF_release_counters   12
++#define XENOPROF_shutdown           13
++#define XENOPROF_get_buffer         14
++#define XENOPROF_set_backtrace      15
++#define XENOPROF_last_op            15
 +
-+/*
-+ * Voluntarily yield the CPU.
-+ * @arg == NULL.
-+ */
-+#define SCHEDOP_yield       0
++#define MAX_OPROF_EVENTS    32
++#define MAX_OPROF_DOMAINS   25
++#define XENOPROF_CPU_TYPE_SIZE 64
 +
-+/*
-+ * Block execution of this VCPU until an event is received for processing.
-+ * If called with event upcalls masked, this operation will atomically
-+ * reenable event delivery and check for pending events before blocking the
-+ * VCPU. This avoids a "wakeup waiting" race.
-+ * @arg == NULL.
-+ */
-+#define SCHEDOP_block       1
++/* Xenoprof performance events (not Xen events) */
++struct event_log {
++    uint64_t eip;
++    uint8_t mode;
++    uint8_t event;
++};
 +
-+/*
-+ * Halt execution of this domain (all VCPUs) and notify the system controller.
-+ * @arg == pointer to sched_shutdown structure.
-+ */
-+#define SCHEDOP_shutdown    2
-+struct sched_shutdown {
-+    unsigned int reason; /* SHUTDOWN_* */
++/* PC value that indicates a special code */
++#define XENOPROF_ESCAPE_CODE ~0UL
++/* Transient events for the xenoprof->oprofile cpu buf */
++#define XENOPROF_TRACE_BEGIN 1
++
++/* Xenoprof buffer shared between Xen and domain - 1 per VCPU */
++struct xenoprof_buf {
++    uint32_t event_head;
++    uint32_t event_tail;
++    uint32_t event_size;
++    uint32_t vcpu_id;
++    uint64_t xen_samples;
++    uint64_t kernel_samples;
++    uint64_t user_samples;
++    uint64_t lost_samples;
++    struct event_log event_log[1];
 +};
-+typedef struct sched_shutdown sched_shutdown_t;
-+DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t);
++#ifndef __XEN__
++typedef struct xenoprof_buf xenoprof_buf_t;
++DEFINE_XEN_GUEST_HANDLE(xenoprof_buf_t);
++#endif
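Draining the shared ring follows the head/tail pair, as in this sketch;
record_sample() is a hypothetical consumer, and indices wrap modulo event_size:

    static void drain_xenoprof_buf(struct xenoprof_buf *buf)
    {
        uint32_t tail = buf->event_tail;

        while (tail != buf->event_head) {
            struct event_log *e = &buf->event_log[tail];

            record_sample(e->eip, e->mode, e->event);
            tail = (tail + 1) % buf->event_size;
        }
        buf->event_tail = tail;
    }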
 +
-+/*
-+ * Poll a set of event-channel ports. Return when one or more are pending. An
-+ * optional timeout may be specified.
-+ * @arg == pointer to sched_poll structure.
-+ */
-+#define SCHEDOP_poll        3
-+struct sched_poll {
-+    XEN_GUEST_HANDLE(evtchn_port_t) ports;
-+    unsigned int nr_ports;
-+    uint64_t timeout;
++struct xenoprof_init {
++    int32_t  num_events;
++    int32_t  is_primary;
++    char cpu_type[XENOPROF_CPU_TYPE_SIZE];
 +};
-+typedef struct sched_poll sched_poll_t;
-+DEFINE_XEN_GUEST_HANDLE(sched_poll_t);
++typedef struct xenoprof_init xenoprof_init_t;
++DEFINE_XEN_GUEST_HANDLE(xenoprof_init_t);
 +
-+/*
-+ * Declare a shutdown for another domain. The main use of this function is
-+ * in interpreting shutdown requests and reasons for fully-virtualized
-+ * domains.  A para-virtualized domain may use SCHEDOP_shutdown directly.
-+ * @arg == pointer to sched_remote_shutdown structure.
-+ */
-+#define SCHEDOP_remote_shutdown        4
-+struct sched_remote_shutdown {
-+    domid_t domain_id;         /* Remote domain ID */
-+    unsigned int reason;       /* SHUTDOWN_xxx reason */
++struct xenoprof_get_buffer {
++    int32_t  max_samples;
++    int32_t  nbuf;
++    int32_t  bufsize;
++    uint64_t buf_gmaddr;
 +};
-+typedef struct sched_remote_shutdown sched_remote_shutdown_t;
-+DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t);
++typedef struct xenoprof_get_buffer xenoprof_get_buffer_t;
++DEFINE_XEN_GUEST_HANDLE(xenoprof_get_buffer_t);
 +
-+/*
-+ * Reason codes for SCHEDOP_shutdown. These may be interpreted by control
-+ * software to determine the appropriate action. For the most part, Xen does
-+ * not care about the shutdown code.
-+ */
-+#define SHUTDOWN_poweroff   0  /* Domain exited normally. Clean up and kill. */
-+#define SHUTDOWN_reboot     1  /* Clean up, kill, and then restart.          */
-+#define SHUTDOWN_suspend    2  /* Clean up, save suspend info, kill.         */
-+#define SHUTDOWN_crash      3  /* Tell controller we've crashed.             */
++struct xenoprof_counter {
++    uint32_t ind;
++    uint64_t count;
++    uint32_t enabled;
++    uint32_t event;
++    uint32_t hypervisor;
++    uint32_t kernel;
++    uint32_t user;
++    uint64_t unit_mask;
++};
++typedef struct xenoprof_counter xenoprof_counter_t;
++DEFINE_XEN_GUEST_HANDLE(xenoprof_counter_t);
++
++typedef struct xenoprof_passive {
++    uint16_t domain_id;
++    int32_t  max_samples;
++    int32_t  nbuf;
++    int32_t  bufsize;
++    uint64_t buf_gmaddr;
++} xenoprof_passive_t;
++DEFINE_XEN_GUEST_HANDLE(xenoprof_passive_t);
 +
-+#endif /* __XEN_PUBLIC_SCHED_H__ */
++
++#endif /* __XEN_PUBLIC_XENOPROF_H__ */
 +
 +/*
 + * Local variables:
@@ -103768,15 +143680,13 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/sysctl.h tmp-linux-2.6-xen.patch/include/xen/interface/sysctl.h
---- pristine-linux-2.6.18.2/include/xen/interface/sysctl.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/sysctl.h	2007-09-30 18:06:19.000000000 +0200
-@@ -0,0 +1,198 @@
-+/******************************************************************************
-+ * sysctl.h
-+ * 
-+ * System management operations. For use by node control stack.
-+ * 
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/xsm/acm.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/xsm/acm.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,235 @@
++/*
++ * acm.h: Xen access control module interface definitions
++ *
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
 + * deal in the Software without restriction, including without limitation the
@@ -103795,295 +143705,210 @@
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
 + *
-+ * Copyright (c) 2002-2006, K Fraser
++ * Reiner Sailer <sailer at watson.ibm.com>
++ * Copyright (c) 2005, International Business Machines Corporation.
 + */
 +
-+#ifndef __XEN_PUBLIC_SYSCTL_H__
-+#define __XEN_PUBLIC_SYSCTL_H__
++#ifndef _XEN_PUBLIC_ACM_H
++#define _XEN_PUBLIC_ACM_H
 +
-+#if !defined(__XEN__) && !defined(__XEN_TOOLS__)
-+#error "sysctl operations are intended for use by node control tools only"
-+#endif
++#include "../xen.h"
 +
-+#include "xen.h"
-+#include "domctl.h"
++/* if ACM_DEBUG defined, all hooks should
++ * print a short trace message (comment it out
++ * when not in testing mode )
++ */
++/* #define ACM_DEBUG */
 +
-+#define XEN_SYSCTL_INTERFACE_VERSION 0x00000003
++#ifdef ACM_DEBUG
++#  define printkd(fmt, args...) printk(fmt,## args)
++#else
++#  define printkd(fmt, args...)
++#endif
 +
-+/*
-+ * Read console content from Xen buffer ring.
-+ */
-+#define XEN_SYSCTL_readconsole       1
-+struct xen_sysctl_readconsole {
-+    /* IN variables. */
-+    uint32_t clear;                /* Non-zero -> clear after reading. */
-+    XEN_GUEST_HANDLE_64(char) buffer; /* Buffer start */
-+    /* IN/OUT variables. */
-+    uint32_t count;            /* In: Buffer size;  Out: Used buffer size  */
-+};
-+typedef struct xen_sysctl_readconsole xen_sysctl_readconsole_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_readconsole_t);
++/* default ssid reference value if not supplied */
++#define ACM_DEFAULT_SSID  0x0
++#define ACM_DEFAULT_LOCAL_SSID  0x0
 +
-+/* Get trace buffers machine base address */
-+#define XEN_SYSCTL_tbuf_op           2
-+struct xen_sysctl_tbuf_op {
-+    /* IN variables */
-+#define XEN_SYSCTL_TBUFOP_get_info     0
-+#define XEN_SYSCTL_TBUFOP_set_cpu_mask 1
-+#define XEN_SYSCTL_TBUFOP_set_evt_mask 2
-+#define XEN_SYSCTL_TBUFOP_set_size     3
-+#define XEN_SYSCTL_TBUFOP_enable       4
-+#define XEN_SYSCTL_TBUFOP_disable      5
-+    uint32_t cmd;
-+    /* IN/OUT variables */
-+    struct xenctl_cpumap cpu_mask;
-+    uint32_t             evt_mask;
-+    /* OUT variables */
-+    uint64_aligned_t buffer_mfn;
-+    uint32_t size;
-+};
-+typedef struct xen_sysctl_tbuf_op xen_sysctl_tbuf_op_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t);
++/* Internal ACM ERROR types */
++#define ACM_OK     0
++#define ACM_UNDEF   -1
++#define ACM_INIT_SSID_ERROR  -2
++#define ACM_INIT_SOID_ERROR  -3
++#define ACM_ERROR          -4
 +
-+/*
-+ * Get physical information about the host machine
-+ */
-+#define XEN_SYSCTL_physinfo          3
-+struct xen_sysctl_physinfo {
-+    uint32_t threads_per_core;
-+    uint32_t cores_per_socket;
-+    uint32_t sockets_per_node;
-+    uint32_t nr_nodes;
-+    uint32_t cpu_khz;
-+    uint64_aligned_t total_pages;
-+    uint64_aligned_t free_pages;
-+    uint64_aligned_t scrub_pages;
-+    uint32_t hw_cap[8];
-+};
-+typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t);
++/* External ACCESS DECISIONS */
++#define ACM_ACCESS_PERMITTED        0
++#define ACM_ACCESS_DENIED           -111
++#define ACM_NULL_POINTER_ERROR      -200
 +
 +/*
-+ * Get the ID of the current scheduler.
-+ */
-+#define XEN_SYSCTL_sched_id          4
-+struct xen_sysctl_sched_id {
-+    /* OUT variable */
-+    uint32_t sched_id;
-+};
-+typedef struct xen_sysctl_sched_id xen_sysctl_sched_id_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_id_t);
++   Error codes reported when trying to test for a new policy
++   These error codes are reported in an array of tuples where
++   each error code is followed by a parameter describing the error
++   more closely, such as a domain id.
++*/
++#define ACM_EVTCHN_SHARING_VIOLATION       0x100
++#define ACM_GNTTAB_SHARING_VIOLATION       0x101
++#define ACM_DOMAIN_LOOKUP                  0x102
++#define ACM_CHWALL_CONFLICT                0x103
++#define ACM_SSIDREF_IN_USE                 0x104
 +
-+/* Interface for controlling Xen software performance counters. */
-+#define XEN_SYSCTL_perfc_op          5
-+/* Sub-operations: */
-+#define XEN_SYSCTL_PERFCOP_reset 1   /* Reset all counters to zero. */
-+#define XEN_SYSCTL_PERFCOP_query 2   /* Get perfctr information. */
-+struct xen_sysctl_perfc_desc {
-+    char         name[80];             /* name of perf counter */
-+    uint32_t     nr_vals;              /* number of values for this counter */
-+};
-+typedef struct xen_sysctl_perfc_desc xen_sysctl_perfc_desc_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_desc_t);
-+typedef uint32_t xen_sysctl_perfc_val_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_val_t);
 +
-+struct xen_sysctl_perfc_op {
-+    /* IN variables. */
-+    uint32_t       cmd;                /*  XEN_SYSCTL_PERFCOP_??? */
-+    /* OUT variables. */
-+    uint32_t       nr_counters;       /*  number of counters description  */
-+    uint32_t       nr_vals;           /*  number of values  */
-+    /* counter information (or NULL) */
-+    XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc;
-+    /* counter values (or NULL) */
-+    XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val;
-+};
-+typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t);
++/* primary policy in lower 4 bits */
++#define ACM_NULL_POLICY 0
++#define ACM_CHINESE_WALL_POLICY 1
++#define ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY 2
++#define ACM_POLICY_UNDEFINED 15
 +
-+#define XEN_SYSCTL_getdomaininfolist 6
-+struct xen_sysctl_getdomaininfolist {
-+    /* IN variables. */
-+    domid_t               first_domain;
-+    uint32_t              max_domains;
-+    XEN_GUEST_HANDLE_64(xen_domctl_getdomaininfo_t) buffer;
-+    /* OUT variables. */
-+    uint32_t              num_domains;
-+};
-+typedef struct xen_sysctl_getdomaininfolist xen_sysctl_getdomaininfolist_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist_t);
++/* combinations have secondary policy component in higher 4bit */
++#define ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY \
++    ((ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY << 4) | ACM_CHINESE_WALL_POLICY)
 +
-+/* Inject debug keys into Xen. */
-+#define XEN_SYSCTL_debug_keys        7
-+struct xen_sysctl_debug_keys {
-+    /* IN variables. */
-+    XEN_GUEST_HANDLE_64(char) keys;
-+    uint32_t nr_keys;
-+};
-+typedef struct xen_sysctl_debug_keys xen_sysctl_debug_keys_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_debug_keys_t);
++/* policy: */
++#define ACM_POLICY_NAME(X) \
++ ((X) == (ACM_NULL_POLICY)) ? "NULL" :                        \
++    ((X) == (ACM_CHINESE_WALL_POLICY)) ? "CHINESE WALL" :        \
++    ((X) == (ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "SIMPLE TYPE ENFORCEMENT" : \
++    ((X) == (ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "CHINESE WALL AND SIMPLE TYPE ENFORCEMENT" : \
++     "UNDEFINED"
 +
-+/* Get physical CPU information. */
-+#define XEN_SYSCTL_getcpuinfo        8
-+struct xen_sysctl_cpuinfo {
-+    uint64_t idletime;
-+};
-+typedef struct xen_sysctl_cpuinfo xen_sysctl_cpuinfo_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpuinfo_t); 
-+struct xen_sysctl_getcpuinfo {
-+    /* IN variables. */
-+    uint32_t max_cpus;
-+    XEN_GUEST_HANDLE_64(xen_sysctl_cpuinfo_t) info;
-+    /* OUT variables. */
-+    uint32_t nr_cpus;
-+}; 
-+typedef struct xen_sysctl_getcpuinfo xen_sysctl_getcpuinfo_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getcpuinfo_t); 
++/* the following policy versions must be increased
++ * whenever the interpretation of the related
++ * policy's data structure changes
++ */
++#define ACM_POLICY_VERSION 4
++#define ACM_CHWALL_VERSION 1
++#define ACM_STE_VERSION  1
 +
-+struct xen_sysctl {
-+    uint32_t cmd;
-+    uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
-+    union {
-+        struct xen_sysctl_readconsole       readconsole;
-+        struct xen_sysctl_tbuf_op           tbuf_op;
-+        struct xen_sysctl_physinfo          physinfo;
-+        struct xen_sysctl_sched_id          sched_id;
-+        struct xen_sysctl_perfc_op          perfc_op;
-+        struct xen_sysctl_getdomaininfolist getdomaininfolist;
-+        struct xen_sysctl_debug_keys        debug_keys;
-+        struct xen_sysctl_getcpuinfo        getcpuinfo;
-+        uint8_t                             pad[128];
-+    } u;
-+};
-+typedef struct xen_sysctl xen_sysctl_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_t);
++/* defines an ssid reference used by Xen */
++typedef uint32_t ssidref_t;
 +
-+#endif /* __XEN_PUBLIC_SYSCTL_H__ */
++/* hooks that are known to domains */
++#define ACMHOOK_none          0
++#define ACMHOOK_sharing       1
++#define ACMHOOK_authorization 2
++#define ACMHOOK_conflictset   3
 +
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/trace.h tmp-linux-2.6-xen.patch/include/xen/interface/trace.h
---- pristine-linux-2.6.18.2/include/xen/interface/trace.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/trace.h	2007-09-30 18:06:19.000000000 +0200
-@@ -0,0 +1,120 @@
-+/******************************************************************************
-+ * include/public/trace.h
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
++/* -------security policy relevant type definitions-------- */
++
++/* type identifier; compares to "equal" or "not equal" */
++typedef uint16_t domaintype_t;
++
++/* CHINESE WALL POLICY DATA STRUCTURES
 + *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
++ * current accumulated conflict type set:
++ * When a domain is started and has a type that is in
++ * a conflict set, the conflicting types are incremented in
++ * the aggregate set. When a domain is destroyed, the counts
++ * for the types that conflict with its type are decremented.
++ * If a domain has multiple types, this procedure works over
++ * all those types.
 + *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
++ * conflict_aggregate_set[i] holds the number of
++ *   running domains that have a conflict with type i.
 + *
-+ * Mark Williamson, (C) 2004 Intel Research Cambridge
-+ * Copyright (C) 2005 Bin Ren
++ * running_types[i] holds the number of running domains
++ *        that include type i in their ssidref-referenced type set
++ *
++ * conflict_sets[i][j] is "0" if type j has no conflict
++ *    with type i and is "1" otherwise.
 + */
++/* high-16 = version, low-16 = check magic */
++#define ACM_MAGIC  0x0001debc
 +
-+#ifndef __XEN_PUBLIC_TRACE_H__
-+#define __XEN_PUBLIC_TRACE_H__
++/* size of the SHA1 hash identifying the XML policy from which the
++   binary policy was created */
++#define ACM_SHA1_HASH_SIZE    20
 +
-+/* Trace classes */
-+#define TRC_CLS_SHIFT 16
-+#define TRC_GEN     0x0001f000    /* General trace            */
-+#define TRC_SCHED   0x0002f000    /* Xen Scheduler trace      */
-+#define TRC_DOM0OP  0x0004f000    /* Xen DOM0 operation trace */
-+#define TRC_HVM     0x0008f000    /* Xen HVM trace            */
-+#define TRC_MEM     0x0010f000    /* Xen memory trace         */
-+#define TRC_ALL     0xfffff000
++/* each offset below is given in bytes from the start
++ * of the struct it is part of */
 +
-+/* Trace subclasses */
-+#define TRC_SUBCLS_SHIFT 12
++/* V3 of the policy buffer added a version structure */
++struct acm_policy_version
++{
++    uint32_t major;
++    uint32_t minor;
++};
 +
-+/* trace subclasses for SVM */
-+#define TRC_HVM_ENTRYEXIT 0x00081000   /* VMENTRY and #VMEXIT       */
-+#define TRC_HVM_HANDLER   0x00082000   /* various HVM handlers      */
 +
-+/* Trace events per class */
-+#define TRC_LOST_RECORDS        (TRC_GEN + 1)
++/* Each buffer contains all policy information for the
++ * policy identified by its policy code.
++ *
++ * acm_policy_buffer, acm_chwall_policy_buffer,
++ * and acm_ste_policy_buffer need to stay 32-bit aligned
++ * because we also create binary policies with external
++ * tools that assume packed representations (e.g. the Java tool)
++ */
++struct acm_policy_buffer {
++    uint32_t magic;
++    uint32_t policy_version; /* ACM_POLICY_VERSION */
++    uint32_t len;
++    uint32_t policy_reference_offset;
++    uint32_t primary_policy_code;
++    uint32_t primary_buffer_offset;
++    uint32_t secondary_policy_code;
++    uint32_t secondary_buffer_offset;
++    struct acm_policy_version xml_pol_version; /* added in V3 */
++    uint8_t xml_policy_hash[ACM_SHA1_HASH_SIZE]; /* added in V4 */
++};
 +
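
A consumer of a binary policy would normally validate this header before following any of the offsets. A minimal sketch, assuming the struct and ACM_* constants above are in scope and that the buffer is already in host byte order (acm_policy_buffer_ok() is a hypothetical helper):

#include <stddef.h>
#include <string.h>

static int acm_policy_buffer_ok(const void *buf, size_t size)
{
    struct acm_policy_buffer hdr;

    if (size < sizeof(hdr))
        return 0;
    memcpy(&hdr, buf, sizeof(hdr));      /* avoids alignment surprises */
    if (hdr.magic != ACM_MAGIC || hdr.policy_version != ACM_POLICY_VERSION)
        return 0;
    /* len covers the whole blob; every offset must stay inside it */
    if (hdr.len > size ||
        hdr.policy_reference_offset > hdr.len ||
        hdr.primary_buffer_offset > hdr.len ||
        hdr.secondary_buffer_offset > hdr.len)
        return 0;
    return 1;
}
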
-+#define TRC_SCHED_DOM_ADD       (TRC_SCHED +  1)
-+#define TRC_SCHED_DOM_REM       (TRC_SCHED +  2)
-+#define TRC_SCHED_SLEEP         (TRC_SCHED +  3)
-+#define TRC_SCHED_WAKE          (TRC_SCHED +  4)
-+#define TRC_SCHED_YIELD         (TRC_SCHED +  5)
-+#define TRC_SCHED_BLOCK         (TRC_SCHED +  6)
-+#define TRC_SCHED_SHUTDOWN      (TRC_SCHED +  7)
-+#define TRC_SCHED_CTL           (TRC_SCHED +  8)
-+#define TRC_SCHED_ADJDOM        (TRC_SCHED +  9)
-+#define TRC_SCHED_SWITCH        (TRC_SCHED + 10)
-+#define TRC_SCHED_S_TIMER_FN    (TRC_SCHED + 11)
-+#define TRC_SCHED_T_TIMER_FN    (TRC_SCHED + 12)
-+#define TRC_SCHED_DOM_TIMER_FN  (TRC_SCHED + 13)
-+#define TRC_SCHED_SWITCH_INFPREV (TRC_SCHED + 14)
-+#define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED + 15)
 +
-+#define TRC_MEM_PAGE_GRANT_MAP      (TRC_MEM + 1)
-+#define TRC_MEM_PAGE_GRANT_UNMAP    (TRC_MEM + 2)
-+#define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3)
++struct acm_policy_reference_buffer {
++    uint32_t len;
++};
++
++struct acm_chwall_policy_buffer {
++    uint32_t policy_version; /* ACM_CHWALL_VERSION */
++    uint32_t policy_code;
++    uint32_t chwall_max_types;
++    uint32_t chwall_max_ssidrefs;
++    uint32_t chwall_max_conflictsets;
++    uint32_t chwall_ssid_offset;
++    uint32_t chwall_conflict_sets_offset;
++    uint32_t chwall_running_types_offset;
++    uint32_t chwall_conflict_aggregate_offset;
++};
++
++struct acm_ste_policy_buffer {
++    uint32_t policy_version; /* ACM_STE_VERSION */
++    uint32_t policy_code;
++    uint32_t ste_max_types;
++    uint32_t ste_max_ssidrefs;
++    uint32_t ste_ssid_offset;
++};
 +
-+/* trace events per subclass */
-+#define TRC_HVM_VMENTRY         (TRC_HVM_ENTRYEXIT + 0x01)
-+#define TRC_HVM_VMEXIT          (TRC_HVM_ENTRYEXIT + 0x02)
-+#define TRC_HVM_PF_XEN          (TRC_HVM_HANDLER + 0x01)
-+#define TRC_HVM_PF_INJECT       (TRC_HVM_HANDLER + 0x02)
-+#define TRC_HVM_INJ_EXC         (TRC_HVM_HANDLER + 0x03)
-+#define TRC_HVM_INJ_VIRQ        (TRC_HVM_HANDLER + 0x04)
-+#define TRC_HVM_REINJ_VIRQ      (TRC_HVM_HANDLER + 0x05)
-+#define TRC_HVM_IO_READ         (TRC_HVM_HANDLER + 0x06)
-+#define TRC_HVM_IO_WRITE        (TRC_HVM_HANDLER + 0x07)
-+#define TRC_HVM_CR_READ         (TRC_HVM_HANDLER + 0x08)
-+#define TRC_HVM_CR_WRITE        (TRC_HVM_HANDLER + 0x09)
-+#define TRC_HVM_DR_READ         (TRC_HVM_HANDLER + 0x0A)
-+#define TRC_HVM_DR_WRITE        (TRC_HVM_HANDLER + 0x0B)
-+#define TRC_HVM_MSR_READ        (TRC_HVM_HANDLER + 0x0C)
-+#define TRC_HVM_MSR_WRITE       (TRC_HVM_HANDLER + 0x0D)
-+#define TRC_HVM_CPUID           (TRC_HVM_HANDLER + 0x0E)
-+#define TRC_HVM_INTR            (TRC_HVM_HANDLER + 0x0F)
-+#define TRC_HVM_NMI             (TRC_HVM_HANDLER + 0x10)
-+#define TRC_HVM_SMI             (TRC_HVM_HANDLER + 0x11)
-+#define TRC_HVM_VMMCALL         (TRC_HVM_HANDLER + 0x12)
-+#define TRC_HVM_HLT             (TRC_HVM_HANDLER + 0x13)
-+#define TRC_HVM_INVLPG          (TRC_HVM_HANDLER + 0x14)
-+#define TRC_HVM_MCE             (TRC_HVM_HANDLER + 0x15)
++struct acm_stats_buffer {
++    uint32_t magic;
++    uint32_t len;
++    uint32_t primary_policy_code;
++    uint32_t primary_stats_offset;
++    uint32_t secondary_policy_code;
++    uint32_t secondary_stats_offset;
++};
 +
-+/* This structure represents a single trace buffer record. */
-+struct t_rec {
-+    uint64_t cycles;          /* cycle counter timestamp */
-+    uint32_t event;           /* event ID                */
-+    unsigned long data[5];    /* event data items        */
++struct acm_ste_stats_buffer {
++    uint32_t ec_eval_count;
++    uint32_t gt_eval_count;
++    uint32_t ec_denied_count;
++    uint32_t gt_denied_count;
++    uint32_t ec_cachehit_count;
++    uint32_t gt_cachehit_count;
 +};
 +
-+/*
-+ * This structure contains the metadata for a single trace buffer.  The head
-+ * field, indexes into an array of struct t_rec's.
-+ */
-+struct t_buf {
-+    uint32_t cons;      /* Next item to be consumed by control tools. */
-+    uint32_t prod;      /* Next item to be produced by Xen.           */
-+    /* 'nr_recs' records follow immediately after the meta-data header.    */
++struct acm_ssid_buffer {
++    uint32_t len;
++    ssidref_t ssidref;
++    uint32_t policy_reference_offset;
++    uint32_t primary_policy_code;
++    uint32_t primary_max_types;
++    uint32_t primary_types_offset;
++    uint32_t secondary_policy_code;
++    uint32_t secondary_max_types;
++    uint32_t secondary_types_offset;
 +};
 +
-+#endif /* __XEN_PUBLIC_TRACE_H__ */
++#endif
 +
 +/*
 + * Local variables:
@@ -104094,15 +143919,13 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
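
The Chinese Wall bookkeeping described in the header comment above boils down to a handful of counter updates on domain start and destroy. A standalone sketch of the idea, using hypothetical fixed-size arrays in place of the offset-addressed tables carried in the binary policy (the real implementation also walks the per-ssidref type sets):

#include <stdint.h>

#define NTYPES 8  /* hypothetical; the real bound is chwall_max_types */

static uint8_t  conflict_sets[NTYPES][NTYPES]; /* 1 = types i and j conflict */
static uint32_t running_types[NTYPES];
static uint32_t conflict_aggregate_set[NTYPES];

/* Admit a domain of type t unless a running domain conflicts with it. */
static int chwall_domain_start(unsigned int t)
{
    unsigned int i;

    if (conflict_aggregate_set[t])
        return -1;            /* blocked by a running conflicting type */
    running_types[t]++;
    for (i = 0; i < NTYPES; i++)
        if (conflict_sets[t][i])
            conflict_aggregate_set[i]++;
    return 0;
}

/* Undo the bookkeeping when the domain goes away. */
static void chwall_domain_destroy(unsigned int t)
{
    unsigned int i;

    running_types[t]--;
    for (i = 0; i < NTYPES; i++)
        if (conflict_sets[t][i])
            conflict_aggregate_set[i]--;
}
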
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/vcpu.h tmp-linux-2.6-xen.patch/include/xen/interface/vcpu.h
---- pristine-linux-2.6.18.2/include/xen/interface/vcpu.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/vcpu.h	2007-09-30 18:06:19.000000000 +0200
-@@ -0,0 +1,192 @@
-+/******************************************************************************
-+ * vcpu.h
-+ * 
-+ * VCPU initialisation, query, and hotplug.
-+ * 
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/xsm/acm_ops.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/xsm/acm_ops.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,159 @@
++/*
++ * acm_ops.h: Xen access control module hypervisor commands
++ *
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
 + * of this software and associated documentation files (the "Software"), to
 + * deal in the Software without restriction, including without limitation the
@@ -104121,165 +143944,134 @@
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
 + *
-+ * Copyright (c) 2005, Keir Fraser <keir at xensource.com>
++ * Reiner Sailer <sailer at watson.ibm.com>
++ * Copyright (c) 2005,2006 International Business Machines Corporation.
 + */
 +
-+#ifndef __XEN_PUBLIC_VCPU_H__
-+#define __XEN_PUBLIC_VCPU_H__
++#ifndef __XEN_PUBLIC_ACM_OPS_H__
++#define __XEN_PUBLIC_ACM_OPS_H__
 +
-+/*
-+ * Prototype for this hypercall is:
-+ *  int vcpu_op(int cmd, int vcpuid, void *extra_args)
-+ * @cmd        == VCPUOP_??? (VCPU operation).
-+ * @vcpuid     == VCPU to operate on.
-+ * @extra_args == Operation-specific extra arguments (NULL if none).
-+ */
++#include "../xen.h"
++#include "acm.h"
 +
 +/*
-+ * Initialise a VCPU. Each VCPU can be initialised only once. A 
-+ * newly-initialised VCPU will not run until it is brought up by VCPUOP_up.
-+ * 
-+ * @extra_arg == pointer to vcpu_guest_context structure containing initial
-+ *               state for the VCPU.
++ * Make sure you increment the interface version whenever you modify this file!
++ * This ensures that old versions of acm tools will stop working in a
++ * well-defined way (rather than crashing the machine, for instance).
 + */
-+#define VCPUOP_initialise            0
++#define ACM_INTERFACE_VERSION   0xAAAA000A
 +
-+/*
-+ * Bring up a VCPU. This makes the VCPU runnable. This operation will fail
-+ * if the VCPU has not been initialised (VCPUOP_initialise).
-+ */
-+#define VCPUOP_up                    1
++/************************************************************************/
 +
 +/*
-+ * Bring down a VCPU (i.e., make it non-runnable).
-+ * There are a few caveats that callers should observe:
-+ *  1. This operation may return, and VCPU_is_up may return false, before the
-+ *     VCPU stops running (i.e., the command is asynchronous). It is a good
-+ *     idea to ensure that the VCPU has entered a non-critical loop before
-+ *     bringing it down. Alternatively, this operation is guaranteed
-+ *     synchronous if invoked by the VCPU itself.
-+ *  2. After a VCPU is initialised, there is currently no way to drop all its
-+ *     references to domain memory. Even a VCPU that is down still holds
-+ *     memory references via its pagetable base pointer and GDT. It is good
-+ *     practise to move a VCPU onto an 'idle' or default page table, LDT and
-+ *     GDT before bringing it down.
++ * Prototype for this hypercall is:
++ *  int acm_op(int cmd, void *args)
++ * @cmd  == ACMOP_??? (access control module operation).
++ * @args == Operation-specific extra arguments (NULL if none).
 + */
-+#define VCPUOP_down                  2
 +
-+/* Returns 1 if the given VCPU is up. */
-+#define VCPUOP_is_up                 3
 +
-+/*
-+ * Return information about the state and running time of a VCPU.
-+ * @extra_arg == pointer to vcpu_runstate_info structure.
-+ */
-+#define VCPUOP_get_runstate_info     4
-+struct vcpu_runstate_info {
-+    /* VCPU's current state (RUNSTATE_*). */
-+    int      state;
-+    /* When was current state entered (system time, ns)? */
-+    uint64_t state_entry_time;
-+    /*
-+     * Time spent in each RUNSTATE_* (ns). The sum of these times is
-+     * guaranteed not to drift from system time.
-+     */
-+    uint64_t time[4];
++#define ACMOP_setpolicy         1
++struct acm_setpolicy {
++    /* IN */
++    XEN_GUEST_HANDLE_64(void) pushcache;
++    uint32_t pushcache_size;
 +};
-+typedef struct vcpu_runstate_info vcpu_runstate_info_t;
-+DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_t);
 +
-+/* VCPU is currently running on a physical CPU. */
-+#define RUNSTATE_running  0
 +
-+/* VCPU is runnable, but not currently scheduled on any physical CPU. */
-+#define RUNSTATE_runnable 1
++#define ACMOP_getpolicy         2
++struct acm_getpolicy {
++    /* IN */
++    XEN_GUEST_HANDLE_64(void) pullcache;
++    uint32_t pullcache_size;
++};
 +
-+/* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */
-+#define RUNSTATE_blocked  2
 +
-+/*
-+ * VCPU is not runnable, but it is not blocked.
-+ * This is a 'catch all' state for things like hotplug and pauses by the
-+ * system administrator (or for critical sections in the hypervisor).
-+ * RUNSTATE_blocked dominates this state (it is the preferred state).
-+ */
-+#define RUNSTATE_offline  3
++#define ACMOP_dumpstats         3
++struct acm_dumpstats {
++    /* IN */
++    XEN_GUEST_HANDLE_64(void) pullcache;
++    uint32_t pullcache_size;
++};
 +
-+/*
-+ * Register a shared memory area from which the guest may obtain its own
-+ * runstate information without needing to execute a hypercall.
-+ * Notes:
-+ *  1. The registered address may be virtual or physical or guest handle,
-+ *     depending on the platform. Virtual address or guest handle should be
-+ *     registered on x86 systems.
-+ *  2. Only one shared area may be registered per VCPU. The shared area is
-+ *     updated by the hypervisor each time the VCPU is scheduled. Thus
-+ *     runstate.state will always be RUNSTATE_running and
-+ *     runstate.state_entry_time will indicate the system time at which the
-+ *     VCPU was last scheduled to run.
-+ * @extra_arg == pointer to vcpu_register_runstate_memory_area structure.
-+ */
-+#define VCPUOP_register_runstate_memory_area 5
-+struct vcpu_register_runstate_memory_area {
++
++#define ACMOP_getssid           4
++#define ACM_GETBY_ssidref  1
++#define ACM_GETBY_domainid 2
++struct acm_getssid {
++    /* IN */
++    uint32_t get_ssid_by; /* ACM_GETBY_* */
 +    union {
-+        XEN_GUEST_HANDLE(vcpu_runstate_info_t) h;
-+        struct vcpu_runstate_info *v;
-+        uint64_t p;
-+    } addr;
++        domaintype_t domainid;
++        ssidref_t    ssidref;
++    } id;
++    XEN_GUEST_HANDLE_64(void) ssidbuf;
++    uint32_t ssidbuf_size;
 +};
-+typedef struct vcpu_register_runstate_memory_area vcpu_register_runstate_memory_area_t;
-+DEFINE_XEN_GUEST_HANDLE(vcpu_register_runstate_memory_area_t);
 +
-+/*
-+ * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer
-+ * which can be set via these commands. Periods smaller than one millisecond
-+ * may not be supported.
-+ */
-+#define VCPUOP_set_periodic_timer    6 /* arg == vcpu_set_periodic_timer_t */
-+#define VCPUOP_stop_periodic_timer   7 /* arg == NULL */
-+struct vcpu_set_periodic_timer {
-+    uint64_t period_ns;
++#define ACMOP_getdecision      5
++struct acm_getdecision {
++    /* IN */
++    uint32_t get_decision_by1; /* ACM_GETBY_* */
++    uint32_t get_decision_by2; /* ACM_GETBY_* */
++    union {
++        domaintype_t domainid;
++        ssidref_t    ssidref;
++    } id1;
++    union {
++        domaintype_t domainid;
++        ssidref_t    ssidref;
++    } id2;
++    uint32_t hook;
++    /* OUT */
++    uint32_t acm_decision;
 +};
-+typedef struct vcpu_set_periodic_timer vcpu_set_periodic_timer_t;
-+DEFINE_XEN_GUEST_HANDLE(vcpu_set_periodic_timer_t);
 +
-+/*
-+ * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot
-+ * timer which can be set via these commands.
-+ */
-+#define VCPUOP_set_singleshot_timer  8 /* arg == vcpu_set_singleshot_timer_t */
-+#define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */
-+struct vcpu_set_singleshot_timer {
-+    uint64_t timeout_abs_ns;   /* Absolute system time value in nanoseconds. */
-+    uint32_t flags;            /* VCPU_SSHOTTMR_??? */
++
++#define ACMOP_chgpolicy        6
++struct acm_change_policy {
++    /* IN */
++    XEN_GUEST_HANDLE_64(void) policy_pushcache;
++    uint32_t policy_pushcache_size;
++    XEN_GUEST_HANDLE_64(void) del_array;
++    uint32_t delarray_size;
++    XEN_GUEST_HANDLE_64(void) chg_array;
++    uint32_t chgarray_size;
++    /* OUT */
++    /* array with error code */
++    XEN_GUEST_HANDLE_64(void) err_array;
++    uint32_t errarray_size;
 +};
-+typedef struct vcpu_set_singleshot_timer vcpu_set_singleshot_timer_t;
-+DEFINE_XEN_GUEST_HANDLE(vcpu_set_singleshot_timer_t);
 +
-+/* Flags to VCPUOP_set_singleshot_timer. */
-+ /* Require the timeout to be in the future (return -ETIME if it's passed). */
-+#define _VCPU_SSHOTTMR_future (0)
-+#define VCPU_SSHOTTMR_future  (1U << _VCPU_SSHOTTMR_future)
++#define ACMOP_relabeldoms       7
++struct acm_relabel_doms {
++    /* IN */
++    XEN_GUEST_HANDLE_64(void) relabel_map;
++    uint32_t relabel_map_size;
++    /* OUT */
++    XEN_GUEST_HANDLE_64(void) err_array;
++    uint32_t errarray_size;
++};
 +
-+/* 
-+ * Register a memory location in the guest address space for the
-+ * vcpu_info structure.  This allows the guest to place the vcpu_info
-+ * structure in a convenient place, such as in a per-cpu data area.
-+ * The pointer need not be page aligned, but the structure must not
-+ * cross a page boundary.
-+ *
-+ * This may be called only once per vcpu.
-+ */
-+#define VCPUOP_register_vcpu_info   10  /* arg == struct vcpu_info */
-+struct vcpu_register_vcpu_info {
-+    uint64_t mfn;    /* mfn of page to place vcpu_info */
-+    uint32_t offset; /* offset within page */
-+    uint32_t rsvd;   /* unused */
++/* future interface to Xen */
++struct xen_acmctl {
++    uint32_t cmd;
++    uint32_t interface_version;
++    union {
++        struct acm_setpolicy     setpolicy;
++        struct acm_getpolicy     getpolicy;
++        struct acm_dumpstats     dumpstats;
++        struct acm_getssid       getssid;
++        struct acm_getdecision   getdecision;
++        struct acm_change_policy change_policy;
++        struct acm_relabel_doms  relabel_doms;
++    } u;
 +};
-+typedef struct vcpu_register_vcpu_info vcpu_register_vcpu_info_t;
-+DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t);
 +
-+#endif /* __XEN_PUBLIC_VCPU_H__ */
++typedef struct xen_acmctl xen_acmctl_t;
++DEFINE_XEN_GUEST_HANDLE(xen_acmctl_t);
++
++#endif /* __XEN_PUBLIC_ACM_OPS_H__ */
 +
 +/*
 + * Local variables:
@@ -104290,2036 +144082,2077 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
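
Every ACMOP_* command travels through struct xen_acmctl, selected by cmd and stamped with ACM_INTERFACE_VERSION. A hedged sketch of filling in an ACMOP_getssid request (prepare_getssid() is a hypothetical helper; set_xen_guest_handle() is the usual accessor for XEN_GUEST_HANDLE fields, memset() comes from linux/string.h in-kernel, and the hypercall dispatch itself is omitted):

static void prepare_getssid(struct xen_acmctl *ctl,
                            void *ssidbuf, uint32_t ssidbuf_size)
{
    memset(ctl, 0, sizeof(*ctl));
    ctl->cmd = ACMOP_getssid;
    ctl->interface_version = ACM_INTERFACE_VERSION;
    ctl->u.getssid.get_ssid_by = ACM_GETBY_domainid;
    ctl->u.getssid.id.domainid = 1;           /* look up domain 1 */
    set_xen_guest_handle(ctl->u.getssid.ssidbuf, ssidbuf);
    ctl->u.getssid.ssidbuf_size = ssidbuf_size;
}
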
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/version.h tmp-linux-2.6-xen.patch/include/xen/interface/version.h
---- pristine-linux-2.6.18.2/include/xen/interface/version.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/version.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,91 @@
-+/******************************************************************************
-+ * version.h
-+ * 
-+ * Xen version, type, and compile information.
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/interface/xsm/flask_op.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/interface/xsm/flask_op.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,45 @@
++/*
++ *  This file contains the flask_op hypercall commands and definitions.
 + *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
++ *  Author:  George Coker, <gscoker at alpha.ncsc.mil>
 + *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
++ *  This program is free software; you can redistribute it and/or modify
++ *  it under the terms of the GNU General Public License version 2,
++ *  as published by the Free Software Foundation.
++ */
++
++#ifndef __FLASK_OP_H__
++#define __FLASK_OP_H__
++
++#define FLASK_LOAD              1
++#define FLASK_GETENFORCE        2
++#define FLASK_SETENFORCE        3
++#define FLASK_CONTEXT_TO_SID    4
++#define FLASK_SID_TO_CONTEXT    5
++#define FLASK_ACCESS            6
++#define FLASK_CREATE            7
++#define FLASK_RELABEL           8
++#define FLASK_USER              9
++#define FLASK_POLICYVERS        10
++#define FLASK_GETBOOL           11
++#define FLASK_SETBOOL           12
++#define FLASK_COMMITBOOLS       13
++#define FLASK_MLS               14
++#define FLASK_DISABLE           15
++#define FLASK_GETAVC_THRESHOLD  16
++#define FLASK_SETAVC_THRESHOLD  17
++#define FLASK_AVC_HASHSTATS     18
++#define FLASK_AVC_CACHESTATS    19
++#define FLASK_MEMBER            20
++
++#define FLASK_LAST              FLASK_MEMBER
++
++typedef struct flask_op {
++    uint32_t  cmd;
++    uint32_t  size;
++    char      *buf;
++} flask_op_t;
++
++DEFINE_XEN_GUEST_HANDLE(flask_op_t);
++
++#endif
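
A flask_op request is just a command code plus an optional buffer. A minimal sketch for FLASK_GETENFORCE, which needs no buffer; the assumption here, based on the command's name, is that the enforcing state comes back in the hypercall's return value:

/* Sketch only: build a FLASK_GETENFORCE request.  How the structure is
 * handed to the flask_op hypercall is platform-specific and omitted. */
flask_op_t op = {
    .cmd  = FLASK_GETENFORCE,
    .size = 0,
    .buf  = NULL,
};
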
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/pcifront.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/pcifront.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,83 @@
++/*
++ * PCI Frontend - arch-dependent declarations
 + *
-+ * Copyright (c) 2005, Nguyen Anh Quynh <aquynh at gmail.com>
-+ * Copyright (c) 2005, Keir Fraser <keir at xensource.com>
++ *   Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
 + */
++#ifndef __XEN_ASM_PCIFRONT_H__
++#define __XEN_ASM_PCIFRONT_H__
 +
-+#ifndef __XEN_PUBLIC_VERSION_H__
-+#define __XEN_PUBLIC_VERSION_H__
++#include <linux/spinlock.h>
 +
-+/* NB. All ops return zero on success, except XENVER_{version,pagesize} */
++#ifdef __KERNEL__
 +
-+/* arg == NULL; returns major:minor (16:16). */
-+#define XENVER_version      0
++#ifndef __ia64__
 +
-+/* arg == xen_extraversion_t. */
-+#define XENVER_extraversion 1
-+typedef char xen_extraversion_t[16];
-+#define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t))
++struct pcifront_device;
++struct pci_bus;
 +
-+/* arg == xen_compile_info_t. */
-+#define XENVER_compile_info 2
-+struct xen_compile_info {
-+    char compiler[64];
-+    char compile_by[16];
-+    char compile_domain[32];
-+    char compile_date[32];
++struct pcifront_sd {
++	int domain;
++	struct pcifront_device *pdev;
 +};
-+typedef struct xen_compile_info xen_compile_info_t;
-+
-+#define XENVER_capabilities 3
-+typedef char xen_capabilities_info_t[1024];
-+#define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t))
 +
-+#define XENVER_changeset 4
-+typedef char xen_changeset_info_t[64];
-+#define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t))
++static inline struct pcifront_device *
++pcifront_get_pdev(struct pcifront_sd *sd)
++{
++	return sd->pdev;
++}
 +
-+#define XENVER_platform_parameters 5
-+struct xen_platform_parameters {
-+    unsigned long virt_start;
-+};
-+typedef struct xen_platform_parameters xen_platform_parameters_t;
++static inline void pcifront_init_sd(struct pcifront_sd *sd,
++				    unsigned int domain, unsigned int bus,
++				    struct pcifront_device *pdev)
++{
++	sd->domain = domain;
++	sd->pdev = pdev;
++}
 +
-+#define XENVER_get_features 6
-+struct xen_feature_info {
-+    unsigned int submap_idx;    /* IN: which 32-bit submap to return */
-+    uint32_t     submap;        /* OUT: 32-bit submap */
-+};
-+typedef struct xen_feature_info xen_feature_info_t;
++#if defined(CONFIG_PCI_DOMAINS)
++static inline int pci_domain_nr(struct pci_bus *bus)
++{
++	struct pcifront_sd *sd = bus->sysdata;
++	return sd->domain;
++}
++static inline int pci_proc_domain(struct pci_bus *bus)
++{
++	return pci_domain_nr(bus);
++}
++#endif /* CONFIG_PCI_DOMAINS */
 +
-+/* Declares the features reported by XENVER_get_features. */
-+#include "features.h"
++static inline void pcifront_setup_root_resources(struct pci_bus *bus,
++						 struct pcifront_sd *sd)
++{
++}
 +
-+/* arg == NULL; returns host memory page size. */
-+#define XENVER_pagesize 7
++#else /* __ia64__ */
 +
-+/* arg == xen_domain_handle_t. */
-+#define XENVER_guest_handle 8
++#include <linux/acpi.h>
++#include <asm/pci.h>
++#define pcifront_sd pci_controller
 +
-+#endif /* __XEN_PUBLIC_VERSION_H__ */
++extern void xen_add_resource(struct pci_controller *, unsigned int,
++			     unsigned int, struct acpi_resource *);
++extern void xen_pcibios_setup_root_windows(struct pci_bus *,
++					   struct pci_controller *);
 +
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/xencomm.h tmp-linux-2.6-xen.patch/include/xen/interface/xencomm.h
---- pristine-linux-2.6.18.2/include/xen/interface/xencomm.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/xencomm.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,41 @@
-+/*
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (C) IBM Corp. 2006
-+ */
++static inline struct pcifront_device *
++pcifront_get_pdev(struct pcifront_sd *sd)
++{
++	return (struct pcifront_device *)sd->platform_data;
++}
 +
-+#ifndef _XEN_XENCOMM_H_
-+#define _XEN_XENCOMM_H_
++static inline void pcifront_setup_root_resources(struct pci_bus *bus,
++						 struct pcifront_sd *sd)
++{
++	xen_pcibios_setup_root_windows(bus, sd);
++}
 +
-+/* A xencomm descriptor is a scatter/gather list containing physical
-+ * addresses corresponding to a virtually contiguous memory area. The
-+ * hypervisor translates these physical addresses to machine addresses to copy
-+ * to and from the virtually contiguous area.
-+ */
++#endif /* __ia64__ */
 +
-+#define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */
-+#define XENCOMM_INVALID (~0UL)
++extern struct rw_semaphore pci_bus_sem;
 +
-+struct xencomm_desc {
-+    uint32_t magic;
-+    uint32_t nr_addrs; /* the number of entries in address[] */
-+    uint64_t address[0];
-+};
++#endif /* __KERNEL__ */
 +
-+#endif /* _XEN_XENCOMM_H_ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/xen-compat.h tmp-linux-2.6-xen.patch/include/xen/interface/xen-compat.h
---- pristine-linux-2.6.18.2/include/xen/interface/xen-compat.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/xen-compat.h	2007-07-30 16:35:28.000000000 +0200
-@@ -0,0 +1,51 @@
++#endif /* __XEN_ASM_PCIFRONT_H__ */
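
The pci_domain_nr()/pci_proc_domain() helpers above work because the frontend stores its pcifront_sd in the bus's sysdata pointer. A kernel-context sketch of that wiring (pcifront_alloc_and_scan() and pcifront_bus_ops are hypothetical names standing in for the real frontend code):

#include <linux/pci.h>
#include <linux/slab.h>

extern struct pci_ops pcifront_bus_ops;  /* assumed: the frontend's ops */

/* Sketch only: allocate the per-bus sysdata, initialise it, and scan
 * the bus so that pci_domain_nr() works on the resulting pci_bus. */
static struct pci_bus *pcifront_alloc_and_scan(struct pcifront_device *pdev,
                                               unsigned int domain,
                                               unsigned int bus)
{
    struct pcifront_sd *sd = kzalloc(sizeof(*sd), GFP_KERNEL);

    if (!sd)
        return NULL;
    pcifront_init_sd(sd, domain, bus, pdev);
    return pci_scan_bus(bus, &pcifront_bus_ops, sd); /* sd becomes sysdata */
}
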
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/public/evtchn.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/public/evtchn.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,88 @@
 +/******************************************************************************
-+ * xen-compat.h
++ * evtchn.h
 + * 
-+ * Guest OS interface to Xen.  Compatibility layer.
++ * Interface to /dev/xen/evtchn.
++ * 
++ * Copyright (c) 2003-2005, K A Fraser
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
 + * 
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
 + * The above copyright notice and this permission notice shall be included in
 + * all copies or substantial portions of the Software.
-+ *
++ * 
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2006, Christian Limpach
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
 + */
 +
-+#ifndef __XEN_PUBLIC_XEN_COMPAT_H__
-+#define __XEN_PUBLIC_XEN_COMPAT_H__
++#ifndef __LINUX_PUBLIC_EVTCHN_H__
++#define __LINUX_PUBLIC_EVTCHN_H__
 +
-+#define __XEN_LATEST_INTERFACE_VERSION__ 0x00030205
++/*
++ * Bind a fresh port to VIRQ @virq.
++ * Return allocated port.
++ */
++#define IOCTL_EVTCHN_BIND_VIRQ				\
++	_IOC(_IOC_NONE, 'E', 0, sizeof(struct ioctl_evtchn_bind_virq))
++struct ioctl_evtchn_bind_virq {
++	unsigned int virq;
++};
 +
-+#if defined(__XEN__) || defined(__XEN_TOOLS__)
-+/* Xen is built with matching headers and implements the latest interface. */
-+#define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__
-+#elif !defined(__XEN_INTERFACE_VERSION__)
-+/* Guests which do not specify a version get the legacy interface. */
-+#define __XEN_INTERFACE_VERSION__ 0x00000000
-+#endif
++/*
++ * Bind a fresh port to remote <@remote_domain, @remote_port>.
++ * Return allocated port.
++ */
++#define IOCTL_EVTCHN_BIND_INTERDOMAIN			\
++	_IOC(_IOC_NONE, 'E', 1, sizeof(struct ioctl_evtchn_bind_interdomain))
++struct ioctl_evtchn_bind_interdomain {
++	unsigned int remote_domain, remote_port;
++};
 +
-+#if __XEN_INTERFACE_VERSION__ > __XEN_LATEST_INTERFACE_VERSION__
-+#error "These header files do not support the requested interface version."
-+#endif
++/*
++ * Allocate a fresh port for binding to @remote_domain.
++ * Return allocated port.
++ */
++#define IOCTL_EVTCHN_BIND_UNBOUND_PORT			\
++	_IOC(_IOC_NONE, 'E', 2, sizeof(struct ioctl_evtchn_bind_unbound_port))
++struct ioctl_evtchn_bind_unbound_port {
++	unsigned int remote_domain;
++};
 +
-+/* Fields defined as a Xen guest handle since 0x00030205. */
-+#if __XEN_INTERFACE_VERSION__ >= 0x00030205
-+#define XEN_GUEST_HANDLE_00030205(type) XEN_GUEST_HANDLE(type)
-+#else
-+#define XEN_GUEST_HANDLE_00030205(type) type *
-+#endif
++/*
++ * Unbind previously allocated @port.
++ */
++#define IOCTL_EVTCHN_UNBIND				\
++	_IOC(_IOC_NONE, 'E', 3, sizeof(struct ioctl_evtchn_unbind))
++struct ioctl_evtchn_unbind {
++	unsigned int port;
++};
 +
-+#endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/xen.h tmp-linux-2.6-xen.patch/include/xen/interface/xen.h
---- pristine-linux-2.6.18.2/include/xen/interface/xen.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/xen.h	2007-09-30 18:06:19.000000000 +0200
-@@ -0,0 +1,612 @@
++/*
++ * Send event to previously allocated @port.
++ */
++#define IOCTL_EVTCHN_NOTIFY				\
++	_IOC(_IOC_NONE, 'E', 4, sizeof(struct ioctl_evtchn_notify))
++struct ioctl_evtchn_notify {
++	unsigned int port;
++};
++
++/* Clear and reinitialise the event buffer. Clear error condition. */
++#define IOCTL_EVTCHN_RESET				\
++	_IOC(_IOC_NONE, 'E', 5, 0)
++
++#endif /* __LINUX_PUBLIC_EVTCHN_H__ */
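
The evtchn ioctls are issued against /dev/xen/evtchn. A small userspace sketch that binds a VIRQ and then unbinds it again; the VIRQ number 1 (VIRQ_DEBUG in xen/interface/xen.h) and the installed header path are assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <xen/public/evtchn.h>   /* the header added above */

int main(void)
{
    struct ioctl_evtchn_bind_virq bind = { .virq = 1 /* VIRQ_DEBUG */ };
    struct ioctl_evtchn_unbind unbind;
    int fd, port;

    fd = open("/dev/xen/evtchn", O_RDWR);
    if (fd < 0) { perror("open"); return 1; }

    port = ioctl(fd, IOCTL_EVTCHN_BIND_VIRQ, &bind); /* returns the new local port */
    if (port < 0) { perror("ioctl"); close(fd); return 1; }
    printf("virq %u bound to local port %d\n", bind.virq, port);

    /* read()s on fd would now report fired ports, one unsigned int each */
    unbind.port = port;
    ioctl(fd, IOCTL_EVTCHN_UNBIND, &unbind);
    close(fd);
    return 0;
}
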
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/public/gntdev.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/public/gntdev.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,119 @@
 +/******************************************************************************
-+ * xen.h
++ * gntdev.h
 + * 
-+ * Guest OS interface to Xen.
++ * Interface to /dev/xen/gntdev.
++ * 
++ * Copyright (c) 2007, D G Murray
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
 + * 
 + * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
 + * The above copyright notice and this permission notice shall be included in
 + * all copies or substantial portions of the Software.
-+ *
++ * 
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2004, K A Fraser
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
 + */
 +
-+#ifndef __XEN_PUBLIC_XEN_H__
-+#define __XEN_PUBLIC_XEN_H__
-+
-+#include "xen-compat.h"
++#ifndef __LINUX_PUBLIC_GNTDEV_H__
++#define __LINUX_PUBLIC_GNTDEV_H__
 +
-+#if defined(__i386__) || defined(__x86_64__)
-+#include "arch-x86/xen.h"
-+#elif defined(__ia64__)
-+#include "arch-ia64.h"
-+#elif defined(__powerpc__)
-+#include "arch-powerpc.h"
-+#else
-+#error "Unsupported architecture"
-+#endif
++struct ioctl_gntdev_grant_ref {
++	/* The domain ID of the grant to be mapped. */
++	uint32_t domid;
++	/* The grant reference of the grant to be mapped. */
++	uint32_t ref;
++};
 +
 +/*
-+ * HYPERCALLS
++ * Inserts the grant references into the mapping table of an instance
++ * of gntdev. N.B. This does not perform the mapping, which is deferred
++ * until mmap() is called with @index as the offset.
 + */
-+
-+#define __HYPERVISOR_set_trap_table        0
-+#define __HYPERVISOR_mmu_update            1
-+#define __HYPERVISOR_set_gdt               2
-+#define __HYPERVISOR_stack_switch          3
-+#define __HYPERVISOR_set_callbacks         4
-+#define __HYPERVISOR_fpu_taskswitch        5
-+#define __HYPERVISOR_sched_op_compat       6 /* compat since 0x00030101 */
-+#define __HYPERVISOR_platform_op           7
-+#define __HYPERVISOR_set_debugreg          8
-+#define __HYPERVISOR_get_debugreg          9
-+#define __HYPERVISOR_update_descriptor    10
-+#define __HYPERVISOR_memory_op            12
-+#define __HYPERVISOR_multicall            13
-+#define __HYPERVISOR_update_va_mapping    14
-+#define __HYPERVISOR_set_timer_op         15
-+#define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */
-+#define __HYPERVISOR_xen_version          17
-+#define __HYPERVISOR_console_io           18
-+#define __HYPERVISOR_physdev_op_compat    19 /* compat since 0x00030202 */
-+#define __HYPERVISOR_grant_table_op       20
-+#define __HYPERVISOR_vm_assist            21
-+#define __HYPERVISOR_update_va_mapping_otherdomain 22
-+#define __HYPERVISOR_iret                 23 /* x86 only */
-+#define __HYPERVISOR_vcpu_op              24
-+#define __HYPERVISOR_set_segment_base     25 /* x86/64 only */
-+#define __HYPERVISOR_mmuext_op            26
-+#define __HYPERVISOR_acm_op               27
-+#define __HYPERVISOR_nmi_op               28
-+#define __HYPERVISOR_sched_op             29
-+#define __HYPERVISOR_callback_op          30
-+#define __HYPERVISOR_xenoprof_op          31
-+#define __HYPERVISOR_event_channel_op     32
-+#define __HYPERVISOR_physdev_op           33
-+#define __HYPERVISOR_hvm_op               34
-+#define __HYPERVISOR_sysctl               35
-+#define __HYPERVISOR_domctl               36
-+#define __HYPERVISOR_kexec_op             37
-+
-+/* Architecture-specific hypercall definitions. */
-+#define __HYPERVISOR_arch_0               48
-+#define __HYPERVISOR_arch_1               49
-+#define __HYPERVISOR_arch_2               50
-+#define __HYPERVISOR_arch_3               51
-+#define __HYPERVISOR_arch_4               52
-+#define __HYPERVISOR_arch_5               53
-+#define __HYPERVISOR_arch_6               54
-+#define __HYPERVISOR_arch_7               55
++#define IOCTL_GNTDEV_MAP_GRANT_REF \
++_IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref))
++struct ioctl_gntdev_map_grant_ref {
++	/* IN parameters */
++	/* The number of grants to be mapped. */
++	uint32_t count;
++	uint32_t pad;
++	/* OUT parameters */
++	/* The offset to be used on a subsequent call to mmap(). */
++	uint64_t index;
++	/* Variable IN parameter. */
++	/* Array of grant references, of size @count. */
++	struct ioctl_gntdev_grant_ref refs[1];
++};
 +
 +/*
-+ * HYPERCALL COMPATIBILITY.
++ * Removes the grant references from the mapping table of an instance
++ * of gntdev. N.B. munmap() must be called on the relevant virtual address(es)
++ * before this ioctl is called, or an error will result.
 + */
++#define IOCTL_GNTDEV_UNMAP_GRANT_REF \
++_IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref))
++struct ioctl_gntdev_unmap_grant_ref {
++	/* IN parameters */
++	/* The offset that was returned by the corresponding map operation. */
++	uint64_t index;
++	/* The number of pages to be unmapped. */
++	uint32_t count;
++	uint32_t pad;
++};
 +
-+/* New sched_op hypercall introduced in 0x00030101. */
-+#if __XEN_INTERFACE_VERSION__ < 0x00030101
-+#undef __HYPERVISOR_sched_op
-+#define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat
-+#endif
-+
-+/* New event-channel and physdev hypercalls introduced in 0x00030202. */
-+#if __XEN_INTERFACE_VERSION__ < 0x00030202
-+#undef __HYPERVISOR_event_channel_op
-+#define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat
-+#undef __HYPERVISOR_physdev_op
-+#define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat
-+#endif
-+
-+/* New platform_op hypercall introduced in 0x00030204. */
-+#if __XEN_INTERFACE_VERSION__ < 0x00030204
-+#define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op
-+#endif
-+
-+/* 
-+ * VIRTUAL INTERRUPTS
-+ * 
-+ * Virtual interrupts that a guest OS may receive from Xen.
-+ * 
-+ * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a
-+ * global VIRQ. The former can be bound once per VCPU and cannot be re-bound.
-+ * The latter can be allocated only once per guest: they must initially be
-+ * allocated to VCPU0 but can subsequently be re-bound.
++/*
++ * Returns the offset in the driver's address space that corresponds
++ * to @vaddr. This can be used to perform a munmap(), followed by an
++ * UNMAP_GRANT_REF ioctl, where no state about the offset is retained by
++ * the caller. The number of pages that were allocated at the same time as
++ * @vaddr is returned in @count.
++ *
++ * N.B. Where more than one page has been mapped into a contiguous range, the
++ *      supplied @vaddr must correspond to the start of the range; otherwise
++ *      an error will result. It is only possible to munmap() the entire
++ *      contiguously-allocated range at once, and not any subrange thereof.
 + */
-+#define VIRQ_TIMER      0  /* V. Timebase update, and/or requested timeout.  */
-+#define VIRQ_DEBUG      1  /* V. Request guest to dump debug info.           */
-+#define VIRQ_CONSOLE    2  /* G. (DOM0) Bytes received on emergency console. */
-+#define VIRQ_DOM_EXC    3  /* G. (DOM0) Exceptional event for some domain.   */
-+#define VIRQ_TBUF       4  /* G. (DOM0) Trace buffer has records available.  */
-+#define VIRQ_DEBUGGER   6  /* G. (DOM0) A domain has paused for debugging.   */
-+#define VIRQ_XENOPROF   7  /* V. XenOprofile interrupt: new sample available */
-+#define VIRQ_CON_RING   8  /* G. (DOM0) Bytes received on console            */
-+
-+/* Architecture-specific VIRQ definitions. */
-+#define VIRQ_ARCH_0    16
-+#define VIRQ_ARCH_1    17
-+#define VIRQ_ARCH_2    18
-+#define VIRQ_ARCH_3    19
-+#define VIRQ_ARCH_4    20
-+#define VIRQ_ARCH_5    21
-+#define VIRQ_ARCH_6    22
-+#define VIRQ_ARCH_7    23
-+
-+#define NR_VIRQS       24
++#define IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR \
++_IOC(_IOC_NONE, 'G', 2, sizeof(struct ioctl_gntdev_get_offset_for_vaddr))
++struct ioctl_gntdev_get_offset_for_vaddr {
++	/* IN parameters */
++	/* The virtual address of the first mapped page in a range. */
++	uint64_t vaddr;
++	/* OUT parameters */
++	/* The offset that was used in the initial mmap() operation. */
++	uint64_t offset;
++	/* The number of pages mapped in the VM area that begins at @vaddr. */
++	uint32_t count;
++	uint32_t pad;
++};
 +
 +/*
-+ * MMU-UPDATE REQUESTS
-+ * 
-+ * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs.
-+ * A foreigndom (FD) can be specified (or DOMID_SELF for none).
-+ * Where the FD has some effect, it is described below.
-+ * ptr[1:0] specifies the appropriate MMU_* command.
-+ * 
-+ * ptr[1:0] == MMU_NORMAL_PT_UPDATE:
-+ * Updates an entry in a page table. If updating an L1 table, and the new
-+ * table entry is valid/present, the mapped frame must belong to the FD, if
-+ * an FD has been specified. If attempting to map an I/O page then the
-+ * caller assumes the privilege of the FD.
-+ * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller.
-+ * FD == DOMID_XEN: Map restricted areas of Xen's heap space.
-+ * ptr[:2]  -- Machine address of the page-table entry to modify.
-+ * val      -- Value to write.
-+ * 
-+ * ptr[1:0] == MMU_MACHPHYS_UPDATE:
-+ * Updates an entry in the machine->pseudo-physical mapping table.
-+ * ptr[:2]  -- Machine address within the frame whose mapping to modify.
-+ *             The frame must belong to the FD, if one is specified.
-+ * val      -- Value to write into the mapping entry.
++ * Sets the maximum number of grants that may be mapped at once by this gntdev
++ * instance.
++ *
++ * N.B. This must be called before any other ioctl is performed on the device.
 + */
-+#define MMU_NORMAL_PT_UPDATE     0 /* checked '*ptr = val'. ptr is MA.       */
-+#define MMU_MACHPHYS_UPDATE      1 /* ptr = MA of frame to modify entry for  */
++#define IOCTL_GNTDEV_SET_MAX_GRANTS \
++_IOC(_IOC_NONE, 'G', 3, sizeof(struct ioctl_gntdev_set_max_grants))
++struct ioctl_gntdev_set_max_grants {
++	/* IN parameter */
++	/* The maximum number of grants that may be mapped at once. */
++	uint32_t count;
++};
 +
-+/*
-+ * MMU EXTENDED OPERATIONS
-+ * 
-+ * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
-+ * A foreigndom (FD) can be specified (or DOMID_SELF for none).
-+ * Where the FD has some effect, it is described below.
-+ * 
-+ * cmd: MMUEXT_(UN)PIN_*_TABLE
-+ * mfn: Machine frame number to be (un)pinned as a p.t. page.
-+ *      The frame must belong to the FD, if one is specified.
-+ * 
-+ * cmd: MMUEXT_NEW_BASEPTR
-+ * mfn: Machine frame number of new page-table base to install in MMU.
-+ * 
-+ * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only]
-+ * mfn: Machine frame number of new page-table base to install in MMU
-+ *      when in user space.
-+ * 
-+ * cmd: MMUEXT_TLB_FLUSH_LOCAL
-+ * No additional arguments. Flushes local TLB.
-+ * 
-+ * cmd: MMUEXT_INVLPG_LOCAL
-+ * linear_addr: Linear address to be flushed from the local TLB.
++#endif /* __LINUX_PUBLIC_GNTDEV_H__ */
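
Mapping through gntdev is a two-step operation: IOCTL_GNTDEV_MAP_GRANT_REF registers the grant and hands back an index, and a subsequent mmap() with that index as the offset performs the actual mapping. A userspace sketch for a single 4 KiB grant (map_one_grant() is a hypothetical helper; the header path is an assumption):

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xen/public/gntdev.h>   /* the header added above */

/* Map one 4 KiB grant @ref from domain @domid; returns the mapping or
 * MAP_FAILED.  To tear down: munmap() first, then UNMAP_GRANT_REF. */
static void *map_one_grant(int fd, uint32_t domid, uint32_t ref)
{
    struct ioctl_gntdev_map_grant_ref map = {
        .count   = 1,
        .refs[0] = { .domid = domid, .ref = ref },
    };

    if (ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &map) < 0)
        return MAP_FAILED;
    /* the mapping itself is deferred to mmap(), keyed by map.index */
    return mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
                fd, map.index);
}
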
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/public/privcmd.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/public/privcmd.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,79 @@
++/******************************************************************************
++ * privcmd.h
 + * 
-+ * cmd: MMUEXT_TLB_FLUSH_MULTI
-+ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
++ * Interface to /proc/xen/privcmd.
 + * 
-+ * cmd: MMUEXT_INVLPG_MULTI
-+ * linear_addr: Linear address to be flushed.
-+ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
++ * Copyright (c) 2003-2005, K A Fraser
 + * 
-+ * cmd: MMUEXT_TLB_FLUSH_ALL
-+ * No additional arguments. Flushes all VCPUs' TLBs.
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
 + * 
-+ * cmd: MMUEXT_INVLPG_ALL
-+ * linear_addr: Linear address to be flushed from all VCPUs' TLBs.
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
 + * 
-+ * cmd: MMUEXT_FLUSH_CACHE
-+ * No additional arguments. Writes back and flushes cache contents.
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
 + * 
-+ * cmd: MMUEXT_SET_LDT
-+ * linear_addr: Linear address of LDT base (NB. must be page-aligned).
-+ * nr_ents: Number of entries in LDT.
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
 + */
-+#define MMUEXT_PIN_L1_TABLE      0
-+#define MMUEXT_PIN_L2_TABLE      1
-+#define MMUEXT_PIN_L3_TABLE      2
-+#define MMUEXT_PIN_L4_TABLE      3
-+#define MMUEXT_UNPIN_TABLE       4
-+#define MMUEXT_NEW_BASEPTR       5
-+#define MMUEXT_TLB_FLUSH_LOCAL   6
-+#define MMUEXT_INVLPG_LOCAL      7
-+#define MMUEXT_TLB_FLUSH_MULTI   8
-+#define MMUEXT_INVLPG_MULTI      9
-+#define MMUEXT_TLB_FLUSH_ALL    10
-+#define MMUEXT_INVLPG_ALL       11
-+#define MMUEXT_FLUSH_CACHE      12
-+#define MMUEXT_SET_LDT          13
-+#define MMUEXT_NEW_USER_BASEPTR 15
 +
-+#ifndef __ASSEMBLY__
-+struct mmuext_op {
-+    unsigned int cmd;
-+    union {
-+        /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
-+        xen_pfn_t     mfn;
-+        /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
-+        unsigned long linear_addr;
-+    } arg1;
-+    union {
-+        /* SET_LDT */
-+        unsigned int nr_ents;
-+        /* TLB_FLUSH_MULTI, INVLPG_MULTI */
-+        XEN_GUEST_HANDLE_00030205(void) vcpumask;
-+    } arg2;
-+};
-+typedef struct mmuext_op mmuext_op_t;
-+DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
-+#endif
++#ifndef __LINUX_PUBLIC_PRIVCMD_H__
++#define __LINUX_PUBLIC_PRIVCMD_H__
 +
-+/* These are passed as 'flags' to update_va_mapping. They can be ORed. */
-+/* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap.   */
-+/* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer.         */
-+#define UVMF_NONE               (0UL<<0) /* No flushing at all.   */
-+#define UVMF_TLB_FLUSH          (1UL<<0) /* Flush entire TLB(s).  */
-+#define UVMF_INVLPG             (2UL<<0) /* Flush only one entry. */
-+#define UVMF_FLUSHTYPE_MASK     (3UL<<0)
-+#define UVMF_MULTI              (0UL<<2) /* Flush subset of TLBs. */
-+#define UVMF_LOCAL              (0UL<<2) /* Flush local TLB.      */
-+#define UVMF_ALL                (1UL<<2) /* Flush all TLBs.       */
++#include <linux/types.h>
 +
-+/*
-+ * Commands to HYPERVISOR_console_io().
-+ */
-+#define CONSOLEIO_write         0
-+#define CONSOLEIO_read          1
++#ifndef __user
++#define __user
++#endif
 +
-+/*
-+ * Commands to HYPERVISOR_vm_assist().
-+ */
-+#define VMASST_CMD_enable                0
-+#define VMASST_CMD_disable               1
++typedef struct privcmd_hypercall
++{
++	__u64 op;
++	__u64 arg[5];
++} privcmd_hypercall_t;
 +
-+/* x86/32 guests: simulate full 4GB segment limits. */
-+#define VMASST_TYPE_4gb_segments         0
++typedef struct privcmd_mmap_entry {
++	__u64 va;
++	__u64 mfn;
++	__u64 npages;
++} privcmd_mmap_entry_t; 
 +
-+/* x86/32 guests: trap (vector 15) whenever above vmassist is used. */
-+#define VMASST_TYPE_4gb_segments_notify  1
++typedef struct privcmd_mmap {
++	int num;
++	domid_t dom; /* target domain */
++	privcmd_mmap_entry_t __user *entry;
++} privcmd_mmap_t; 
++
++typedef struct privcmd_mmapbatch {
++	int num;     /* number of pages to populate */
++	domid_t dom; /* target domain */
++	__u64 addr;  /* virtual address */
++	xen_pfn_t __user *arr; /* array of mfns - top nibble set on err */
++} privcmd_mmapbatch_t; 
 +
 +/*
-+ * x86 guests: support writes to bottom-level PTEs.
-+ * NB1. Page-directory entries cannot be written.
-+ * NB2. Guest must continue to remove all writable mappings of PTEs.
++ * @cmd: IOCTL_PRIVCMD_HYPERCALL
++ * @arg: &privcmd_hypercall_t
++ * Return: Value returned from execution of the specified hypercall.
 + */
-+#define VMASST_TYPE_writable_pagetables  2
++#define IOCTL_PRIVCMD_HYPERCALL					\
++	_IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t))
++#define IOCTL_PRIVCMD_MMAP					\
++	_IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmap_t))
++#define IOCTL_PRIVCMD_MMAPBATCH					\
++	_IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t))
 +
-+/* x86/PAE guests: support PDPTs above 4GB. */
-+#define VMASST_TYPE_pae_extended_cr3     3
++#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
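
IOCTL_PRIVCMD_HYPERCALL forwards an arbitrary hypercall and returns its result as the ioctl's return value. A userspace sketch that queries the hypervisor version; the hypercall and command numbers are repeated from xen/interface/xen.h and version.h elsewhere in this patch, and the header path is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <xen/public/privcmd.h>  /* the header added above */

#define __HYPERVISOR_xen_version 17  /* xen/interface/xen.h */
#define XENVER_version            0  /* xen/interface/version.h */

int main(void)
{
    privcmd_hypercall_t call = {
        .op  = __HYPERVISOR_xen_version,
        .arg = { XENVER_version, 0 /* XENVER_version takes no buffer */ },
    };
    int fd, ret;

    fd = open("/proc/xen/privcmd", O_RDWR);
    if (fd < 0) { perror("open"); return 1; }
    ret = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
    if (ret < 0) { perror("ioctl"); close(fd); return 1; }
    printf("Xen %d.%d\n", ret >> 16, ret & 0xffff); /* major:minor, 16:16 */
    close(fd);
    return 0;
}
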
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/xen_proc.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/xen_proc.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,12 @@
 +
-+#define MAX_VMASST_TYPE                  3
++#ifndef __ASM_XEN_PROC_H__
++#define __ASM_XEN_PROC_H__
 +
-+#ifndef __ASSEMBLY__
++#include <linux/proc_fs.h>
 +
-+typedef uint16_t domid_t;
++extern struct proc_dir_entry *create_xen_proc_entry(
++	const char *name, mode_t mode);
++extern void remove_xen_proc_entry(
++	const char *name);
++
++#endif /* __ASM_XEN_PROC_H__ */
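
A kernel-context sketch of publishing an entry under /proc/xen with the helper declared above; "example" and example_read_proc() are hypothetical names, and the read_proc hook is the classic 2.6.18-era procfs interface:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <xen/xen_proc.h>

static int example_read_proc(char *page, char **start, off_t off,
                             int count, int *eof, void *data)
{
    *eof = 1;
    return sprintf(page, "hello from /proc/xen\n");
}

static int __init example_init(void)
{
    struct proc_dir_entry *e = create_xen_proc_entry("example", 0444);

    if (!e)
        return -ENOMEM;
    e->read_proc = example_read_proc;
    return 0;
}
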
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/xenbus.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/xenbus.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,307 @@
++/******************************************************************************
++ * xenbus.h
++ *
++ * Talks to Xen Store to figure out what devices we have.
++ *
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ * Copyright (C) 2005 XenSource Ltd.
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
 +
-+/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
-+#define DOMID_FIRST_RESERVED (0x7FF0U)
++#ifndef _XEN_XENBUS_H
++#define _XEN_XENBUS_H
 +
-+/* DOMID_SELF is used in certain contexts to refer to oneself. */
-+#define DOMID_SELF (0x7FF0U)
++#include <linux/device.h>
++#include <linux/notifier.h>
++#include <linux/mutex.h>
++#include <linux/completion.h>
++#include <linux/init.h>
++#include <linux/err.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/grant_table.h>
++#include <xen/interface/io/xenbus.h>
++#include <xen/interface/io/xs_wire.h>
 +
-+/*
-+ * DOMID_IO is used to restrict page-table updates to mapping I/O memory.
-+ * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO
-+ * is useful to ensure that no mappings to the OS's own heap are accidentally
-+ * installed. (e.g., in Linux this could cause havoc as reference counts
-+ * aren't adjusted on the I/O-mapping code path).
-+ * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can
-+ * be specified by any calling domain.
-+ */
-+#define DOMID_IO   (0x7FF1U)
++/* Register callback to watch this node. */
++struct xenbus_watch
++{
++	struct list_head list;
 +
-+/*
-+ * DOMID_XEN is used to allow privileged domains to map restricted parts of
-+ * Xen's heap space (e.g., the machine_to_phys table).
-+ * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if
-+ * the caller is privileged.
-+ */
-+#define DOMID_XEN  (0x7FF2U)
++	/* Path being watched. */
++	const char *node;
 +
-+/*
-+ * Send an array of these to HYPERVISOR_mmu_update().
-+ * NB. The fields are natural pointer/address size for this architecture.
-+ */
-+struct mmu_update {
-+    uint64_t ptr;       /* Machine address of PTE. */
-+    uint64_t val;       /* New contents of PTE.    */
-+};
-+typedef struct mmu_update mmu_update_t;
-+DEFINE_XEN_GUEST_HANDLE(mmu_update_t);
++	/* Callback (executed in a process context with no locks held). */
++	void (*callback)(struct xenbus_watch *,
++			 const char **vec, unsigned int len);
 +
-+/*
-+ * Send an array of these to HYPERVISOR_multicall().
-+ * NB. The fields are natural register size for this architecture.
-+ */
-+struct multicall_entry {
-+    unsigned long op, result;
-+    unsigned long args[6];
++	/* See XBWF_ definitions below. */
++	unsigned long flags;
 +};
-+typedef struct multicall_entry multicall_entry_t;
-+DEFINE_XEN_GUEST_HANDLE(multicall_entry_t);
 +
 +/*
-+ * Event channel endpoints per domain:
-+ *  1024 if a long is 32 bits; 4096 if a long is 64 bits.
++ * Execute callback in its own kthread. Useful if the callback is
++ * long-running or heavily serialised, to avoid tying up the main xenwatch
++ * thread for a long period of time (or even unwittingly causing a deadlock).
 + */
-+#define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64)
++#define XBWF_new_thread	1
 +
-+struct vcpu_time_info {
-+    /*
-+     * Updates to the following values are preceded and followed by an
-+     * increment of 'version'. The guest can therefore detect updates by
-+     * looking for changes to 'version'. If the least-significant bit of
-+     * the version number is set then an update is in progress and the guest
-+     * must wait to read a consistent set of values.
-+     * The correct way to interact with the version number is similar to
-+     * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry.
-+     */
-+    uint32_t version;
-+    uint32_t pad0;
-+    uint64_t tsc_timestamp;   /* TSC at last update of time vals.  */
-+    uint64_t system_time;     /* Time, in nanosecs, since boot.    */
-+    /*
-+     * Current system time:
-+     *   system_time +
-+     *   ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32)
-+     * CPU frequency (Hz):
-+     *   ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
-+     */
-+    uint32_t tsc_to_system_mul;
-+    int8_t   tsc_shift;
-+    int8_t   pad1[3];
-+}; /* 32 bytes */
-+typedef struct vcpu_time_info vcpu_time_info_t;
++/* A xenbus device. */
++struct xenbus_device {
++	const char *devicetype;
++	const char *nodename;
++	const char *otherend;
++	int otherend_id;
++	struct xenbus_watch otherend_watch;
++	struct device dev;
++	enum xenbus_state state;
++	struct completion down;
++};
 +
-+struct vcpu_info {
-+    /*
-+     * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
-+     * a pending notification for a particular VCPU. It is then cleared 
-+     * by the guest OS /before/ checking for pending work, thus avoiding
-+     * a set-and-check race. Note that the mask is only accessed by Xen
-+     * on the CPU that is currently hosting the VCPU. This means that the
-+     * pending and mask flags can be updated by the guest without special
-+     * synchronisation (i.e., no need for the x86 LOCK prefix).
-+     * This may seem suboptimal because if the pending flag is set by
-+     * a different CPU then an IPI may be scheduled even when the mask
-+     * is set. However, note:
-+     *  1. The task of 'interrupt holdoff' is covered by the per-event-
-+     *     channel mask bits. A 'noisy' event that is continually being
-+     *     triggered can be masked at source at this very precise
-+     *     granularity.
-+     *  2. The main purpose of the per-VCPU mask is therefore to restrict
-+     *     reentrant execution: whether for concurrency control, or to
-+     *     prevent unbounded stack usage. Whatever the purpose, we expect
-+     *     that the mask will be asserted only for short periods at a time,
-+     *     and so the likelihood of a 'spurious' IPI is suitably small.
-+     * The mask is read before making an event upcall to the guest: a
-+     * non-zero mask therefore guarantees that the VCPU will not receive
-+     * an upcall activation. The mask is cleared when the VCPU requests
-+     * to block: this avoids wakeup-waiting races.
-+     */
-+    uint8_t evtchn_upcall_pending;
-+    uint8_t evtchn_upcall_mask;
-+    unsigned long evtchn_pending_sel;
-+    struct arch_vcpu_info arch;
-+    struct vcpu_time_info time;
-+}; /* 64 bytes (x86) */
-+#ifndef __XEN__
-+typedef struct vcpu_info vcpu_info_t;
-+#endif
++static inline struct xenbus_device *to_xenbus_device(struct device *dev)
++{
++	return container_of(dev, struct xenbus_device, dev);
++}
 +
-+/*
-+ * Xen/kernel shared data -- pointer provided in start_info.
-+ *
-+ * This structure is defined to be both smaller than a page, and the
-+ * only data on the shared page, but may vary in actual size even within
-+ * compatible Xen versions; guests should not rely on the size
-+ * of this structure remaining constant.
-+ */
-+struct shared_info {
-+    struct vcpu_info vcpu_info[MAX_VIRT_CPUS];
++struct xenbus_device_id
++{
++	/* .../device/<device_type>/<identifier> */
++	char devicetype[32]; 	/* General class of device. */
++};
 +
-+    /*
-+     * A domain can create "event channels" on which it can send and receive
-+     * asynchronous event notifications. There are three classes of event that
-+     * are delivered by this mechanism:
-+     *  1. Bi-directional inter- and intra-domain connections. Domains must
-+     *     arrange out-of-band to set up a connection (usually by allocating
-+     *     an unbound 'listener' port and avertising that via a storage service
-+     *     such as xenstore).
-+     *  2. Physical interrupts. A domain with suitable hardware-access
-+     *     privileges can bind an event-channel port to a physical interrupt
-+     *     source.
-+     *  3. Virtual interrupts ('events'). A domain can bind an event-channel
-+     *     port to a virtual interrupt source, such as the virtual-timer
-+     *     device or the emergency console.
-+     * 
-+     * Event channels are addressed by a "port index". Each channel is
-+     * associated with two bits of information:
-+     *  1. PENDING -- notifies the domain that there is a pending notification
-+     *     to be processed. This bit is cleared by the guest.
-+     *  2. MASK -- if this bit is clear then a 0->1 transition of PENDING
-+     *     will cause an asynchronous upcall to be scheduled. This bit is only
-+     *     updated by the guest. It is read-only within Xen. If a channel
-+     *     becomes pending while the channel is masked then the 'edge' is lost
-+     *     (i.e., when the channel is unmasked, the guest must manually handle
-+     *     pending notifications as no upcall will be scheduled by Xen).
-+     * 
-+     * To expedite scanning of pending notifications, any 0->1 pending
-+     * transition on an unmasked channel causes a corresponding bit in a
-+     * per-vcpu selector word to be set. Each bit in the selector covers a
-+     * 'C long' in the PENDING bitfield array.
-+     */
-+    unsigned long evtchn_pending[sizeof(unsigned long) * 8];
-+    unsigned long evtchn_mask[sizeof(unsigned long) * 8];
++/* A xenbus driver. */
++struct xenbus_driver {
++	char *name;
++	struct module *owner;
++	const struct xenbus_device_id *ids;
++	int (*probe)(struct xenbus_device *dev,
++		     const struct xenbus_device_id *id);
++	void (*otherend_changed)(struct xenbus_device *dev,
++				 enum xenbus_state backend_state);
++	int (*remove)(struct xenbus_device *dev);
++	int (*suspend)(struct xenbus_device *dev);
++	int (*suspend_cancel)(struct xenbus_device *dev);
++	int (*resume)(struct xenbus_device *dev);
++	int (*uevent)(struct xenbus_device *, char **, int, char *, int);
++	struct device_driver driver;
++	int (*read_otherend_details)(struct xenbus_device *dev);
++	int (*is_ready)(struct xenbus_device *dev);
++};
 +
-+    /*
-+     * Wallclock time: updated only by control software. Guests should base
-+     * their gettimeofday() syscall on this wallclock-base value.
-+     */
-+    uint32_t wc_version;      /* Version counter: see vcpu_time_info_t. */
-+    uint32_t wc_sec;          /* Secs  00:00:00 UTC, Jan 1, 1970.  */
-+    uint32_t wc_nsec;         /* Nsecs 00:00:00 UTC, Jan 1, 1970.  */
++static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)
++{
++	return container_of(drv, struct xenbus_driver, driver);
++}
 +
-+    struct arch_shared_info arch;
++int xenbus_register_frontend(struct xenbus_driver *drv);
++int xenbus_register_backend(struct xenbus_driver *drv);
++void xenbus_unregister_driver(struct xenbus_driver *drv);
 +
++struct xenbus_transaction
++{
++	u32 id;
 +};
-+#ifndef __XEN__
-+typedef struct shared_info shared_info_t;
-+#endif
-+
-+/*
-+ * Start-of-day memory layout:
-+ *  1. The domain is started within contiguous virtual-memory region.
-+ *  2. The contiguous region ends on an aligned 4MB boundary.
-+ *  3. This the order of bootstrap elements in the initial virtual region:
-+ *      a. relocated kernel image
-+ *      b. initial ram disk              [mod_start, mod_len]
-+ *      c. list of allocated page frames [mfn_list, nr_pages]
-+ *      d. start_info_t structure        [register ESI (x86)]
-+ *      e. bootstrap page tables         [pt_base, CR3 (x86)]
-+ *      f. bootstrap stack               [register ESP (x86)]
-+ *  4. Bootstrap elements are packed together, but each is 4kB-aligned.
-+ *  5. The initial ram disk may be omitted.
-+ *  6. The list of page frames forms a contiguous 'pseudo-physical' memory
-+ *     layout for the domain. In particular, the bootstrap virtual-memory
-+ *     region is a 1:1 mapping to the first section of the pseudo-physical map.
-+ *  7. All bootstrap elements are mapped read-writable for the guest OS. The
-+ *     only exception is the bootstrap page table, which is mapped read-only.
-+ *  8. There is guaranteed to be at least 512kB padding after the final
-+ *     bootstrap element. If necessary, the bootstrap virtual region is
-+ *     extended by an extra 4MB to ensure this.
-+ */
 +
-+#define MAX_GUEST_CMDLINE 1024
-+struct start_info {
-+    /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME.    */
-+    char magic[32];             /* "xen-<version>-<platform>".            */
-+    unsigned long nr_pages;     /* Total pages allocated to this domain.  */
-+    unsigned long shared_info;  /* MACHINE address of shared info struct. */
-+    uint32_t flags;             /* SIF_xxx flags.                         */
-+    xen_pfn_t store_mfn;        /* MACHINE page number of shared page.    */
-+    uint32_t store_evtchn;      /* Event channel for store communication. */
-+    union {
-+        struct {
-+            xen_pfn_t mfn;      /* MACHINE page number of console page.   */
-+            uint32_t  evtchn;   /* Event channel for console page.        */
-+        } domU;
-+        struct {
-+            uint32_t info_off;  /* Offset of console_info struct.         */
-+            uint32_t info_size; /* Size of console_info struct from start.*/
-+        } dom0;
-+    } console;
-+    /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME).     */
-+    unsigned long pt_base;      /* VIRTUAL address of page directory.     */
-+    unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames.       */
-+    unsigned long mfn_list;     /* VIRTUAL address of page-frame list.    */
-+    unsigned long mod_start;    /* VIRTUAL address of pre-loaded module.  */
-+    unsigned long mod_len;      /* Size (bytes) of pre-loaded module.     */
-+    int8_t cmd_line[MAX_GUEST_CMDLINE];
-+};
-+typedef struct start_info start_info_t;
++/* Nil transaction ID. */
++#define XBT_NIL ((struct xenbus_transaction) { 0 })
 +
-+/* New console union for dom0 introduced in 0x00030203. */
-+#if __XEN_INTERFACE_VERSION__ < 0x00030203
-+#define console_mfn    console.domU.mfn
-+#define console_evtchn console.domU.evtchn
-+#endif
++char **xenbus_directory(struct xenbus_transaction t,
++			const char *dir, const char *node, unsigned int *num);
++void *xenbus_read(struct xenbus_transaction t,
++		  const char *dir, const char *node, unsigned int *len);
++int xenbus_write(struct xenbus_transaction t,
++		 const char *dir, const char *node, const char *string);
++int xenbus_mkdir(struct xenbus_transaction t,
++		 const char *dir, const char *node);
++int xenbus_exists(struct xenbus_transaction t,
++		  const char *dir, const char *node);
++int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node);
++int xenbus_transaction_start(struct xenbus_transaction *t);
++int xenbus_transaction_end(struct xenbus_transaction t, int abort);
 +
-+/* These flags are passed in the 'flags' field of start_info_t. */
-+#define SIF_PRIVILEGED    (1<<0)  /* Is the domain privileged? */
-+#define SIF_INITDOMAIN    (1<<1)  /* Is this the initial control domain? */
++/* Single read and scanf: returns -errno or num scanned if > 0. */
++int xenbus_scanf(struct xenbus_transaction t,
++		 const char *dir, const char *node, const char *fmt, ...)
++	__attribute__((format(scanf, 4, 5)));
 +
-+typedef struct dom0_vga_console_info {
-+    uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */
-+#define XEN_VGATYPE_TEXT_MODE_3 0x03
-+#define XEN_VGATYPE_VESA_LFB    0x23
++/* Single printf and write: returns -errno or 0. */
++int xenbus_printf(struct xenbus_transaction t,
++		  const char *dir, const char *node, const char *fmt, ...)
++	__attribute__((format(printf, 4, 5)));
 +
-+    union {
-+        struct {
-+            /* Font height, in pixels. */
-+            uint16_t font_height;
-+            /* Cursor location (column, row). */
-+            uint16_t cursor_x, cursor_y;
-+            /* Number of rows and columns (dimensions in characters). */
-+            uint16_t rows, columns;
-+        } text_mode_3;
++/* Generic read function: NULL-terminated triples of name,
++ * sprintf-style type string, and pointer. Returns 0 or errno. */
++int xenbus_gather(struct xenbus_transaction t, const char *dir, ...);
 +
-+        struct {
-+            /* Width and height, in pixels. */
-+            uint16_t width, height;
-+            /* Bytes per scan line. */
-+            uint16_t bytes_per_line;
-+            /* Bits per pixel. */
-+            uint16_t bits_per_pixel;
-+            /* LFB physical address, and size (in units of 64kB). */
-+            uint32_t lfb_base;
-+            uint32_t lfb_size;
-+            /* RGB mask offsets and sizes, as defined by VBE 1.2+ */
-+            uint8_t  red_pos, red_size;
-+            uint8_t  green_pos, green_size;
-+            uint8_t  blue_pos, blue_size;
-+            uint8_t  rsvd_pos, rsvd_size;
-+        } vesa_lfb;
-+    } u;
-+} dom0_vga_console_info_t;
-+#define xen_vga_console_info dom0_vga_console_info
-+#define xen_vga_console_info_t dom0_vga_console_info_t
++/* notifier routines for when the xenstore comes up */
++int register_xenstore_notifier(struct notifier_block *nb);
++void unregister_xenstore_notifier(struct notifier_block *nb);
 +
-+typedef uint8_t xen_domain_handle_t[16];
++int register_xenbus_watch(struct xenbus_watch *watch);
++void unregister_xenbus_watch(struct xenbus_watch *watch);
++void xs_suspend(void);
++void xs_resume(void);
++void xs_suspend_cancel(void);
 +
-+/* Turn a plain number into a C unsigned long constant. */
-+#define __mk_unsigned_long(x) x ## UL
-+#define mk_unsigned_long(x) __mk_unsigned_long(x)
++/* Used by xenbus_dev to borrow kernel's store connection. */
++void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg);
 +
-+DEFINE_XEN_GUEST_HANDLE(uint8_t);
-+DEFINE_XEN_GUEST_HANDLE(uint16_t);
-+DEFINE_XEN_GUEST_HANDLE(uint32_t);
-+DEFINE_XEN_GUEST_HANDLE(uint64_t);
++/* Prepare for domain suspend: then resume or cancel the suspend. */
++void xenbus_suspend(void);
++void xenbus_resume(void);
++void xenbus_suspend_cancel(void);
 +
-+#else /* __ASSEMBLY__ */
++#define XENBUS_IS_ERR_READ(str) ({			\
++	if (!IS_ERR(str) && strlen(str) == 0) {		\
++		kfree(str);				\
++		str = ERR_PTR(-ERANGE);			\
++	}						\
++	IS_ERR(str);					\
++})
 +
-+/* In assembly code we cannot use C numeric constant suffixes. */
-+#define mk_unsigned_long(x) x
++#define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE)
 +
-+#endif /* !__ASSEMBLY__ */
 +
-+/* Default definitions for macros used by domctl/sysctl. */
-+#if defined(__XEN__) || defined(__XEN_TOOLS__)
-+#ifndef uint64_aligned_t
-+#define uint64_aligned_t uint64_t
-+#endif
-+#ifndef XEN_GUEST_HANDLE_64
-+#define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name)
-+#endif
-+#endif
++/**
++ * Register a watch on the given path, using the given xenbus_watch structure
++ * for storage, and the given callback function as the callback.  Return 0 on
++ * success, or -errno on error.  On success, the given path will be saved as
++ * watch->node, and remains the caller's to free.  On error, watch->node will
++ * be NULL, the device will switch to XenbusStateClosing, and the error will
++ * be saved in the store.
++ */
++int xenbus_watch_path(struct xenbus_device *dev, const char *path,
++		      struct xenbus_watch *watch,
++		      void (*callback)(struct xenbus_watch *,
++				       const char **, unsigned int));
 +
-+#endif /* __XEN_PUBLIC_XEN_H__ */
 +
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
++/**
++ * Register a watch on the given path/path2, using the given xenbus_watch
++ * structure for storage, and the given callback function as the callback.
++ * Return 0 on success, or -errno on error.  On success, the watched path
++ * (path/path2) will be saved as watch->node, and becomes the caller's to
++ * kfree().  On error, watch->node will be NULL, so the caller has nothing to
++ * free, the device will switch to XenbusStateClosing, and the error will be
++ * saved in the store.
 + */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/interface/xenoprof.h tmp-linux-2.6-xen.patch/include/xen/interface/xenoprof.h
---- pristine-linux-2.6.18.2/include/xen/interface/xenoprof.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/interface/xenoprof.h	2007-09-30 18:06:19.000000000 +0200
-@@ -0,0 +1,138 @@
-+/******************************************************************************
-+ * xenoprof.h
-+ * 
-+ * Interface for enabling system wide profiling based on hardware performance
-+ * counters
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (C) 2005 Hewlett-Packard Co.
-+ * Written by Aravind Menon & Jose Renato Santos
++int xenbus_watch_path2(struct xenbus_device *dev, const char *path,
++		       const char *path2, struct xenbus_watch *watch,
++		       void (*callback)(struct xenbus_watch *,
++					const char **, unsigned int));
++
++
++/**
++ * Advertise in the store a change of the given driver to the given new_state.
++ * Return 0 on success, or -errno on error.  On error, the device will switch
++ * to XenbusStateClosing, and the error will be saved in the store.
 + */
++int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state);
 +
-+#ifndef __XEN_PUBLIC_XENOPROF_H__
-+#define __XEN_PUBLIC_XENOPROF_H__
 +
-+#include "xen.h"
++/**
++ * Grant access to the given ring_mfn to the peer of the given device.  Return
++ * 0 on success, or -errno on error.  On error, the device will switch to
++ * XenbusStateClosing, and the error will be saved in the store.
++ */
++int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn);
 +
-+/*
-+ * Commands to HYPERVISOR_xenoprof_op().
++
++/**
++ * Map a page of memory into this domain from another domain's grant table.
++ * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
++ * page to that address, and sets *vaddr to that address.
++ * xenbus_map_ring does not allocate the virtual address space (you must do
++ * this yourself!). It only maps in the page to the specified address.
++ * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
++ * or -ENOMEM on error. If an error is returned, the device will switch to
++ * XenbusStateClosing and the error message will be saved in XenStore.
 + */
-+#define XENOPROF_init                0
-+#define XENOPROF_reset_active_list   1
-+#define XENOPROF_reset_passive_list  2
-+#define XENOPROF_set_active          3
-+#define XENOPROF_set_passive         4
-+#define XENOPROF_reserve_counters    5
-+#define XENOPROF_counter             6
-+#define XENOPROF_setup_events        7
-+#define XENOPROF_enable_virq         8
-+#define XENOPROF_start               9
-+#define XENOPROF_stop               10
-+#define XENOPROF_disable_virq       11
-+#define XENOPROF_release_counters   12
-+#define XENOPROF_shutdown           13
-+#define XENOPROF_get_buffer         14
-+#define XENOPROF_set_backtrace      15
-+#define XENOPROF_last_op            15
++struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev,
++					 int gnt_ref);
++int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
++			   grant_handle_t *handle, void *vaddr);
 +
-+#define MAX_OPROF_EVENTS    32
-+#define MAX_OPROF_DOMAINS   25
-+#define XENOPROF_CPU_TYPE_SIZE 64
 +
-+/* Xenoprof performance events (not Xen events) */
-+struct event_log {
-+    uint64_t eip;
-+    uint8_t mode;
-+    uint8_t event;
-+};
++/**
++ * Unmap a page of memory in this domain that was imported from another domain.
++ * Use xenbus_unmap_ring_vfree if you mapped in your memory with
++ * xenbus_map_ring_valloc (it will free the virtual address space).
++ * Returns 0 on success and returns GNTST_* on error
++ * (see xen/include/interface/grant_table.h).
++ */
++int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *);
++int xenbus_unmap_ring(struct xenbus_device *dev,
++		      grant_handle_t handle, void *vaddr);
++
 +
-+/* PC value that indicates a special code */
-+#define XENOPROF_ESCAPE_CODE ~0UL
-+/* Transient events for the xenoprof->oprofile cpu buf */
-+#define XENOPROF_TRACE_BEGIN 1
++/**
++ * Allocate an event channel for the given xenbus_device, assigning the newly
++ * created local port to *port.  Return 0 on success, or -errno on error.  On
++ * error, the device will switch to XenbusStateClosing, and the error will be
++ * saved in the store.
++ */
++int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port);
 +
-+/* Xenoprof buffer shared between Xen and domain - 1 per VCPU */
-+struct xenoprof_buf {
-+    uint32_t event_head;
-+    uint32_t event_tail;
-+    uint32_t event_size;
-+    uint32_t vcpu_id;
-+    uint64_t xen_samples;
-+    uint64_t kernel_samples;
-+    uint64_t user_samples;
-+    uint64_t lost_samples;
-+    struct event_log event_log[1];
-+};
-+#ifndef __XEN__
-+typedef struct xenoprof_buf xenoprof_buf_t;
-+DEFINE_XEN_GUEST_HANDLE(xenoprof_buf_t);
-+#endif
 +
-+struct xenoprof_init {
-+    int32_t  num_events;
-+    int32_t  is_primary;
-+    char cpu_type[XENOPROF_CPU_TYPE_SIZE];
-+};
-+typedef struct xenoprof_init xenoprof_init_t;
-+DEFINE_XEN_GUEST_HANDLE(xenoprof_init_t);
++/**
++ * Free an existing event channel. Returns 0 on success or -errno on error.
++ */
++int xenbus_free_evtchn(struct xenbus_device *dev, int port);
 +
-+struct xenoprof_get_buffer {
-+    int32_t  max_samples;
-+    int32_t  nbuf;
-+    int32_t  bufsize;
-+    uint64_t buf_gmaddr;
-+};
-+typedef struct xenoprof_get_buffer xenoprof_get_buffer_t;
-+DEFINE_XEN_GUEST_HANDLE(xenoprof_get_buffer_t);
 +
-+struct xenoprof_counter {
-+    uint32_t ind;
-+    uint64_t count;
-+    uint32_t enabled;
-+    uint32_t event;
-+    uint32_t hypervisor;
-+    uint32_t kernel;
-+    uint32_t user;
-+    uint64_t unit_mask;
-+};
-+typedef struct xenoprof_counter xenoprof_counter_t;
-+DEFINE_XEN_GUEST_HANDLE(xenoprof_counter_t);
++/**
++ * Return the state of the driver rooted at the given store path, or
++ * XenbusStateUnknown if no state can be read.
++ */
++enum xenbus_state xenbus_read_driver_state(const char *path);
 +
-+typedef struct xenoprof_passive {
-+    uint16_t domain_id;
-+    int32_t  max_samples;
-+    int32_t  nbuf;
-+    int32_t  bufsize;
-+    uint64_t buf_gmaddr;
-+} xenoprof_passive_t;
-+DEFINE_XEN_GUEST_HANDLE(xenoprof_passive_t);
 +
++/***
++ * Report the given negative errno into the store, along with the given
++ * formatted message.
++ */
++void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt,
++		      ...);
 +
-+#endif /* __XEN_PUBLIC_XENOPROF_H__ */
 +
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
++/***
++ * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
++ * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
++ * closedown of this driver and its peer.
 + */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/pcifront.h tmp-linux-2.6-xen.patch/include/xen/pcifront.h
---- pristine-linux-2.6.18.2/include/xen/pcifront.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/pcifront.h	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,83 @@
++void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt,
++		      ...);
++
++int xenbus_dev_init(void);
++
++const char *xenbus_strstate(enum xenbus_state state);
++int xenbus_dev_is_online(struct xenbus_device *dev);
++int xenbus_frontend_closed(struct xenbus_device *dev);
++
++int xenbus_for_each_backend(void *arg, int (*fn)(struct device *, void *));
++int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *));
++
++#endif /* _XEN_XENBUS_H */
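Taken together, the interface above is enough for a skeletal frontend. A
hedged sketch follows; the device type "demo" and all demofront_* names are
invented for the example:

#include <linux/module.h>
#include <xen/xenbus.h>

static int demofront_probe(struct xenbus_device *dev,
			   const struct xenbus_device_id *id)
{
	unsigned int ring_ref;
	int err;

	/* Read a key the backend/toolstack wrote under our node. */
	err = xenbus_scanf(XBT_NIL, dev->nodename, "ring-ref",
			   "%u", &ring_ref);
	if (err != 1)
		return err < 0 ? err : -EINVAL;

	return xenbus_switch_state(dev, XenbusStateInitialised);
}

static void demofront_otherend_changed(struct xenbus_device *dev,
				       enum xenbus_state backend_state)
{
	/* Follow the backend down when it starts closing. */
	if (backend_state == XenbusStateClosing)
		xenbus_frontend_closed(dev);
}

static const struct xenbus_device_id demofront_ids[] = {
	{ "demo" },
	{ "" }
};

static struct xenbus_driver demofront_driver = {
	.name             = "demofront",
	.owner            = THIS_MODULE,
	.ids              = demofront_ids,
	.probe            = demofront_probe,
	.otherend_changed = demofront_otherend_changed,
};

static int __init demofront_init(void)
{
	return xenbus_register_frontend(&demofront_driver);
}
module_init(demofront_init);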
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/xencomm.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/xencomm.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,77 @@
 +/*
-+ * PCI Frontend - arch-dependendent declarations
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ * 
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 + *
-+ *   Author: Ryan Wilson <hap9 at epoch.ncsc.mil>
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Hollis Blanchard <hollisb at us.ibm.com>
++ *          Jerone Young <jyoung5 at us.ibm.com>
 + */
-+#ifndef __XEN_ASM_PCIFRONT_H__
-+#define __XEN_ASM_PCIFRONT_H__
 +
-+#include <linux/spinlock.h>
++#ifndef _LINUX_XENCOMM_H_
++#define _LINUX_XENCOMM_H_
 +
-+#ifdef __KERNEL__
++#include <xen/interface/xencomm.h>
 +
-+#ifndef __ia64__
++#define XENCOMM_MINI_ADDRS 3
++struct xencomm_mini {
++	struct xencomm_desc _desc;
++	uint64_t address[XENCOMM_MINI_ADDRS];
++};
 +
-+struct pcifront_device;
-+struct pci_bus;
++/* To avoid an additional virt-to-phys conversion, an opaque structure is
++   presented.  */
++struct xencomm_handle;
 +
-+struct pcifront_sd {
-+	int domain;
-+	struct pcifront_device *pdev;
-+};
++extern void xencomm_free(struct xencomm_handle *desc);
++extern struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes);
++extern struct xencomm_handle *__xencomm_map_no_alloc(void *ptr,
++			unsigned long bytes,  struct xencomm_mini *xc_area);
 +
-+static inline struct pcifront_device *
-+pcifront_get_pdev(struct pcifront_sd *sd)
-+{
-+	return sd->pdev;
-+}
++#if 0
++#define XENCOMM_MINI_ALIGNED(xc_desc, n)				\
++	struct xencomm_mini xc_desc ## _base[(n)]			\
++	__attribute__((__aligned__(sizeof(struct xencomm_mini))));	\
++	struct xencomm_mini* xc_desc = &xc_desc ## _base[0];
++#else
++/*
++ * gcc bug workaround:
++ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=16660
++ * gcc doesn't properly handle a stack variable with
++ * __attribute__((__align__(sizeof(struct xencomm_mini))))
++ */
++#define XENCOMM_MINI_ALIGNED(xc_desc, n)				\
++	unsigned char xc_desc ## _base[((n) + 1 ) *			\
++				       sizeof(struct xencomm_mini)];	\
++	struct xencomm_mini *xc_desc = (struct xencomm_mini*)		\
++		((unsigned long)xc_desc ## _base +			\
++		 (sizeof(struct xencomm_mini) -				\
++		  ((unsigned long)xc_desc ## _base) %			\
++		  sizeof(struct xencomm_mini)));
++#endif
++#define xencomm_map_no_alloc(ptr, bytes)			\
++	({XENCOMM_MINI_ALIGNED(xc_desc, 1);			\
++		__xencomm_map_no_alloc(ptr, bytes, xc_desc);})
 +
-+static inline void pcifront_init_sd(struct pcifront_sd *sd,
-+				    unsigned int domain, unsigned int bus,
-+				    struct pcifront_device *pdev)
-+{
-+	sd->domain = domain;
-+	sd->pdev = pdev;
-+}
++/* provided by architecture code: */
++extern unsigned long xencomm_vtop(unsigned long vaddr);
 +
-+#if defined(CONFIG_PCI_DOMAINS)
-+static inline int pci_domain_nr(struct pci_bus *bus)
-+{
-+	struct pcifront_sd *sd = bus->sysdata;
-+	return sd->domain;
-+}
-+static inline int pci_proc_domain(struct pci_bus *bus)
++static inline void *xencomm_pa(void *ptr)
 +{
-+	return pci_domain_nr(bus);
++	return (void *)xencomm_vtop((unsigned long)ptr);
 +}
-+#endif /* CONFIG_PCI_DOMAINS */
 +
-+static inline void pcifront_setup_root_resources(struct pci_bus *bus,
-+						 struct pcifront_sd *sd)
-+{
-+}
++#define xen_guest_handle(hnd)  ((hnd).p)
 +
-+#else /* __ia64__ */
++#endif /* _LINUX_XENCOMM_H_ */
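The fallback XENCOMM_MINI_ALIGNED above over-allocates by one element and
rounds the base pointer up by hand. Open-coded for a single descriptor, the
computation looks like this (buffer names invented for the example):

unsigned char base[2 * sizeof(struct xencomm_mini)];
struct xencomm_mini *xc = (struct xencomm_mini *)
	((unsigned long)base +
	 (sizeof(struct xencomm_mini) -
	  (unsigned long)base % sizeof(struct xencomm_mini)));

/* When base happens to be aligned already, the adjustment still advances by
 * a full sizeof(struct xencomm_mini); that is why the macro reserves
 * ((n) + 1) elements rather than (n). */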
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/xencons.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/xencons.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,17 @@
++#ifndef __ASM_XENCONS_H__
++#define __ASM_XENCONS_H__
 +
-+#include <linux/acpi.h>
-+#include <asm/pci.h>
-+#define pcifront_sd pci_controller
++struct dom0_vga_console_info;
++void dom0_init_screen_info(const struct dom0_vga_console_info *, size_t);
 +
-+extern void xen_add_resource(struct pci_controller *, unsigned int,
-+			     unsigned int, struct acpi_resource *);
-+extern void xen_pcibios_setup_root_windows(struct pci_bus *,
-+					   struct pci_controller *);
++void xencons_force_flush(void);
++void xencons_resume(void);
 +
-+static inline struct pcifront_device *
-+pcifront_get_pdev(struct pcifront_sd *sd)
-+{
-+	return (struct pcifront_device *)sd->platform_data;
-+}
++/* Interrupt work hooks. Receive data, or kick data out. */
++void xencons_rx(char *buf, unsigned len, struct pt_regs *regs);
++void xencons_tx(void);
 +
-+static inline void pcifront_setup_root_resources(struct pci_bus *bus,
-+						 struct pcifront_sd *sd)
-+{
-+	xen_pcibios_setup_root_windows(bus, sd);
-+}
++int xencons_ring_init(void);
++int xencons_ring_send(const char *data, unsigned len);
 +
-+#endif /* __ia64__ */
++#endif /* __ASM_XENCONS_H__ */
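A sketch of a caller built on xencons_ring_send(), which, judging by its
signature, may accept fewer bytes than offered when the ring is nearly full;
the wrapper name is invented for the example:

static void demo_console_write(const char *s, unsigned int len)
{
	while (len) {
		int sent = xencons_ring_send(s, len);

		if (sent <= 0)
			break;		/* ring full: drop the remainder */
		s += sent;
		len -= sent;
	}
}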
+diff -r d894e36cfc30 -r 0aa021803deb include/xen/xenoprof.h
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/include/xen/xenoprof.h	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,42 @@
++/******************************************************************************
++ * xen/xenoprof.h
++ *
++ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
++ *                    VA Linux Systems Japan K.K.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ *
++ */
 +
-+extern struct rw_semaphore pci_bus_sem;
++#ifndef __XEN_XENOPROF_H__
++#define __XEN_XENOPROF_H__
++#ifdef CONFIG_XEN
 +
-+#endif /* __KERNEL__ */
++#include <asm/xenoprof.h>
 +
-+#endif /* __XEN_ASM_PCIFRONT_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/public/evtchn.h tmp-linux-2.6-xen.patch/include/xen/public/evtchn.h
---- pristine-linux-2.6.18.2/include/xen/public/evtchn.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/public/evtchn.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,88 @@
-+/******************************************************************************
-+ * evtchn.h
-+ * 
-+ * Interface to /dev/xen/evtchn.
-+ * 
-+ * Copyright (c) 2003-2005, K A Fraser
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
++struct oprofile_operations;
++int xenoprofile_init(struct oprofile_operations * ops);
++void xenoprofile_exit(void);
 +
-+#ifndef __LINUX_PUBLIC_EVTCHN_H__
-+#define __LINUX_PUBLIC_EVTCHN_H__
++struct xenoprof_shared_buffer {
++	char					*buffer;
++	struct xenoprof_arch_shared_buffer	arch;
++};
++#else
++#define xenoprofile_init(ops)	(-ENOSYS)
++#define xenoprofile_exit()	do { } while (0)
++
++#endif /* CONFIG_XEN */
++#endif /* __XEN_XENOPROF_H__ */
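The !CONFIG_XEN stubs keep callers unconditional; a hypothetical
oprofile_arch_init() that prefers the Xen backend and falls back natively
(the fallback function is invented for the example):

int oprofile_arch_init(struct oprofile_operations *ops)
{
	int err = xenoprofile_init(ops);  /* expands to -ENOSYS without Xen */

	if (err)
		err = native_nmi_init(ops);  /* assumed native fallback */
	return err;
}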
+diff -r d894e36cfc30 -r 0aa021803deb kernel/Kconfig.preempt
+--- a/kernel/Kconfig.preempt	Tue Sep 09 11:37:38 2008 +0200
++++ b/kernel/Kconfig.preempt	Wed Sep 10 10:54:08 2008 +0100
+@@ -35,6 +35,7 @@
+ 
+ config PREEMPT
+ 	bool "Preemptible Kernel (Low-Latency Desktop)"
++	depends on !XEN
+ 	help
+ 	  This option reduces the latency of the kernel by making
+ 	  all kernel code (that is not executing in a critical section)
+diff -r d894e36cfc30 -r 0aa021803deb kernel/cpu.c
+--- a/kernel/cpu.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/kernel/cpu.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -48,7 +48,10 @@
+ 
+ void unlock_cpu_hotplug(void)
+ {
++/* cpufreq lock-takers fixed in mainline; shut up until dom0 kernel catches up */
++#ifdef CONFIG_XEN
+ 	WARN_ON(recursive != current);
++#endif
+ 	if (recursive_depth) {
+ 		recursive_depth--;
+ 		return;
+diff -r d894e36cfc30 -r 0aa021803deb kernel/fork.c
+--- a/kernel/fork.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/kernel/fork.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -276,6 +276,9 @@
+ 		if (retval)
+ 			goto out;
+ 	}
++#ifdef arch_dup_mmap
++	arch_dup_mmap(mm, oldmm);
++#endif
+ 	retval = 0;
+ out:
+ 	up_write(&mm->mmap_sem);
+diff -r d894e36cfc30 -r 0aa021803deb kernel/irq/spurious.c
+--- a/kernel/irq/spurious.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/kernel/irq/spurious.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -139,7 +139,8 @@
+ 		    irqreturn_t action_ret, struct pt_regs *regs)
+ {
+ 	if (unlikely(action_ret != IRQ_HANDLED)) {
+-		desc->irqs_unhandled++;
++		if (!irq_ignore_unhandled(irq))
++			desc->irqs_unhandled++;
+ 		if (unlikely(action_ret != IRQ_NONE))
+ 			report_bad_irq(irq, desc, action_ret);
+ 	}
+diff -r d894e36cfc30 -r 0aa021803deb kernel/kexec.c
+--- a/kernel/kexec.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/kernel/kexec.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -330,13 +330,26 @@
+ 	return 0;
+ }
+ 
+-static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
++static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order, unsigned long limit)
+ {
+ 	struct page *pages;
+ 
+ 	pages = alloc_pages(gfp_mask, order);
+ 	if (pages) {
+ 		unsigned int count, i;
++#ifdef CONFIG_XEN
++		int address_bits;
++
++		if (limit == ~0UL)
++			address_bits = BITS_PER_LONG;
++		else
++			address_bits = long_log2(limit);
++
++		if (xen_limit_pages_to_max_mfn(pages, order, address_bits) < 0) {
++			__free_pages(pages, order);
++			return NULL;
++		}
++#endif
+ 		pages->mapping = NULL;
+ 		set_page_private(pages, order);
+ 		count = 1 << order;
+@@ -355,6 +368,9 @@
+ 	count = 1 << order;
+ 	for (i = 0; i < count; i++)
+ 		ClearPageReserved(page + i);
++#ifdef CONFIG_XEN
++	xen_destroy_contiguous_region((unsigned long)page_address(page), order);
++#endif
+ 	__free_pages(page, order);
+ }
+ 
+@@ -400,10 +416,10 @@
+ 	do {
+ 		unsigned long pfn, epfn, addr, eaddr;
+ 
+-		pages = kimage_alloc_pages(GFP_KERNEL, order);
++		pages = kimage_alloc_pages(GFP_KERNEL, order, KEXEC_CONTROL_MEMORY_LIMIT);
+ 		if (!pages)
+ 			break;
+-		pfn   = page_to_pfn(pages);
++		pfn   = kexec_page_to_pfn(pages);
+ 		epfn  = pfn + count;
+ 		addr  = pfn << PAGE_SHIFT;
+ 		eaddr = epfn << PAGE_SHIFT;
+@@ -437,6 +453,7 @@
+ 	return pages;
+ }
+ 
++#ifndef CONFIG_XEN
+ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
+ 						      unsigned int order)
+ {
+@@ -490,7 +507,7 @@
+ 		}
+ 		/* If I don't overlap any segments I have found my hole! */
+ 		if (i == image->nr_segments) {
+-			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
++			pages = kexec_pfn_to_page(hole_start >> PAGE_SHIFT);
+ 			break;
+ 		}
+ 	}
+@@ -517,6 +534,13 @@
+ 
+ 	return pages;
+ }
++#else /* !CONFIG_XEN */
++struct page *kimage_alloc_control_pages(struct kimage *image,
++					 unsigned int order)
++{
++	return kimage_alloc_normal_control_pages(image, order);
++}
++#endif
+ 
+ static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
+ {
+@@ -532,7 +556,7 @@
+ 			return -ENOMEM;
+ 
+ 		ind_page = page_address(page);
+-		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
++		*image->entry = kexec_virt_to_phys(ind_page) | IND_INDIRECTION;
+ 		image->entry = ind_page;
+ 		image->last_entry = ind_page +
+ 				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
+@@ -593,13 +617,13 @@
+ #define for_each_kimage_entry(image, ptr, entry) \
+ 	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
+ 		ptr = (entry & IND_INDIRECTION)? \
+-			phys_to_virt((entry & PAGE_MASK)): ptr +1)
++			kexec_phys_to_virt((entry & PAGE_MASK)): ptr +1)
+ 
+ static void kimage_free_entry(kimage_entry_t entry)
+ {
+ 	struct page *page;
+ 
+-	page = pfn_to_page(entry >> PAGE_SHIFT);
++	page = kexec_pfn_to_page(entry >> PAGE_SHIFT);
+ 	kimage_free_pages(page);
+ }
+ 
+@@ -610,6 +634,10 @@
+ 
+ 	if (!image)
+ 		return;
++
++#ifdef CONFIG_XEN
++	xen_machine_kexec_unload(image);
++#endif
+ 
+ 	kimage_free_extra_pages(image);
+ 	for_each_kimage_entry(image, ptr, entry) {
+@@ -686,7 +714,7 @@
+ 	 * have a match.
+ 	 */
+ 	list_for_each_entry(page, &image->dest_pages, lru) {
+-		addr = page_to_pfn(page) << PAGE_SHIFT;
++		addr = kexec_page_to_pfn(page) << PAGE_SHIFT;
+ 		if (addr == destination) {
+ 			list_del(&page->lru);
+ 			return page;
+@@ -697,16 +725,16 @@
+ 		kimage_entry_t *old;
+ 
+ 		/* Allocate a page, if we run out of memory give up */
+-		page = kimage_alloc_pages(gfp_mask, 0);
++		page = kimage_alloc_pages(gfp_mask, 0, KEXEC_SOURCE_MEMORY_LIMIT);
+ 		if (!page)
+ 			return NULL;
+ 		/* If the page cannot be used file it away */
+-		if (page_to_pfn(page) >
++		if (kexec_page_to_pfn(page) >
+ 				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
+ 			list_add(&page->lru, &image->unuseable_pages);
+ 			continue;
+ 		}
+-		addr = page_to_pfn(page) << PAGE_SHIFT;
++		addr = kexec_page_to_pfn(page) << PAGE_SHIFT;
+ 
+ 		/* If it is the destination page we want use it */
+ 		if (addr == destination)
+@@ -729,7 +757,7 @@
+ 			struct page *old_page;
+ 
+ 			old_addr = *old & PAGE_MASK;
+-			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
++			old_page = kexec_pfn_to_page(old_addr >> PAGE_SHIFT);
+ 			copy_highpage(page, old_page);
+ 			*old = addr | (*old & ~PAGE_MASK);
+ 
+@@ -779,7 +807,7 @@
+ 			result  = -ENOMEM;
+ 			goto out;
+ 		}
+-		result = kimage_add_page(image, page_to_pfn(page)
++		result = kimage_add_page(image, kexec_page_to_pfn(page)
+ 								<< PAGE_SHIFT);
+ 		if (result < 0)
+ 			goto out;
+@@ -811,6 +839,7 @@
+ 	return result;
+ }
+ 
++#ifndef CONFIG_XEN
+ static int kimage_load_crash_segment(struct kimage *image,
+ 					struct kexec_segment *segment)
+ {
+@@ -833,7 +862,7 @@
+ 		char *ptr;
+ 		size_t uchunk, mchunk;
+ 
+-		page = pfn_to_page(maddr >> PAGE_SHIFT);
++		page = kexec_pfn_to_page(maddr >> PAGE_SHIFT);
+ 		if (page == 0) {
+ 			result  = -ENOMEM;
+ 			goto out;
+@@ -851,6 +880,7 @@
+ 			memset(ptr + uchunk, 0, mchunk - uchunk);
+ 		}
+ 		result = copy_from_user(ptr, buf, uchunk);
++		kexec_flush_icache_page(page);
+ 		kunmap(page);
+ 		if (result) {
+ 			result = (result < 0) ? result : -EIO;
+@@ -881,6 +911,13 @@
+ 
+ 	return result;
+ }
++#else /* CONFIG_XEN */
++static int kimage_load_segment(struct kimage *image,
++				struct kexec_segment *segment)
++{
++	return kimage_load_normal_segment(image, segment);
++}
++#endif
+ 
+ /*
+  * Exec Kernel system call: for obvious reasons only root may call it.
+@@ -991,6 +1028,13 @@
+ 		if (result)
+ 			goto out;
+ 	}
++#ifdef CONFIG_XEN
++	if (image) {
++		result = xen_machine_kexec_load(image);
++		if (result)
++			goto out;
++	}
++#endif
+ 	/* Install the new kernel, and  Uninstall the old */
+ 	image = xchg(dest_image, image);
+ 
+@@ -1044,7 +1088,6 @@
+ {
+ 	int locked;
+ 
+-
+ 	/* Take the kexec_lock here to prevent sys_kexec_load
+ 	 * running on one cpu from replacing the crash kernel
+ 	 * we are using after a panic on a different cpu.
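Every page/address translation in the kexec paths above now goes through
kexec_* wrappers; their definitions live in the arch headers, outside this
diff. A plausible sketch of their shape, assuming Xen kexec must operate on
machine frames while native builds keep the identity mappings:

#ifdef CONFIG_XEN
#define kexec_page_to_pfn(page)   pfn_to_mfn(page_to_pfn(page))
#define kexec_pfn_to_page(pfn)    pfn_to_page(mfn_to_pfn(pfn))
#define kexec_virt_to_phys(addr)  virt_to_machine(addr)
#define kexec_phys_to_virt(addr)  phys_to_virt(machine_to_phys(addr))
#else
#define kexec_page_to_pfn(page)   page_to_pfn(page)
#define kexec_pfn_to_page(pfn)    pfn_to_page(pfn)
#define kexec_virt_to_phys(addr)  virt_to_phys(addr)
#define kexec_phys_to_virt(addr)  phys_to_virt(addr)
#endif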
+diff -r d894e36cfc30 -r 0aa021803deb kernel/resource.c
+--- a/kernel/resource.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/kernel/resource.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -35,6 +35,16 @@
+ 	.flags	= IORESOURCE_MEM,
+ };
+ EXPORT_SYMBOL(iomem_resource);
 +
-+/*
-+ * Bind a fresh port to VIRQ @virq.
-+ * Return allocated port.
-+ */
-+#define IOCTL_EVTCHN_BIND_VIRQ				\
-+	_IOC(_IOC_NONE, 'E', 0, sizeof(struct ioctl_evtchn_bind_virq))
-+struct ioctl_evtchn_bind_virq {
-+	unsigned int virq;
++#ifdef CONFIG_PROC_IOMEM_MACHINE
++struct resource iomem_machine_resource = {
++	.name	= "Machine PCI mem",
++	.start	= 0,
++	.end	= -1,
++	.flags	= IORESOURCE_MEM,
 +};
++EXPORT_SYMBOL(iomem_machine_resource);
++#endif
+ 
+ static DEFINE_RWLOCK(resource_lock);
+ 
+@@ -115,6 +125,18 @@
+ 	return res;
+ }
+ 
++#ifdef CONFIG_PROC_IOMEM_MACHINE
++static int iomem_machine_open(struct inode *inode, struct file *file)
++{
++	int res = seq_open(file, &resource_op);
++	if (!res) {
++		struct seq_file *m = file->private_data;
++		m->private = &iomem_machine_resource;
++	}
++	return res;
++}
++#endif
 +
-+/*
-+ * Bind a fresh port to remote <@remote_domain, @remote_port>.
-+ * Return allocated port.
-+ */
-+#define IOCTL_EVTCHN_BIND_INTERDOMAIN			\
-+	_IOC(_IOC_NONE, 'E', 1, sizeof(struct ioctl_evtchn_bind_interdomain))
-+struct ioctl_evtchn_bind_interdomain {
-+	unsigned int remote_domain, remote_port;
+ static struct file_operations proc_ioports_operations = {
+ 	.open		= ioports_open,
+ 	.read		= seq_read,
+@@ -129,6 +151,15 @@
+ 	.release	= seq_release,
+ };
+ 
++#ifdef CONFIG_PROC_IOMEM_MACHINE
++static struct file_operations proc_iomem_machine_operations = {
++	.open		= iomem_machine_open,
++	.read		= seq_read,
++	.llseek		= seq_lseek,
++	.release	= seq_release,
 +};
++#endif
++
+ static int __init ioresources_init(void)
+ {
+ 	struct proc_dir_entry *entry;
+@@ -139,6 +170,13 @@
+ 	entry = create_proc_entry("iomem", 0, NULL);
+ 	if (entry)
+ 		entry->proc_fops = &proc_iomem_operations;
++#ifdef CONFIG_PROC_IOMEM_MACHINE
++	if (is_initial_xendomain()) {
++		entry = create_proc_entry("iomem_machine", 0, NULL);
++		if (entry)
++			entry->proc_fops = &proc_iomem_machine_operations;
++	}
++#endif
+ 	return 0;
+ }
+ __initcall(ioresources_init);
+diff -r d894e36cfc30 -r 0aa021803deb kernel/softlockup.c
+--- a/kernel/softlockup.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/kernel/softlockup.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -39,6 +39,19 @@
+ 	__raw_get_cpu_var(touch_timestamp) = jiffies;
+ }
+ EXPORT_SYMBOL(touch_softlockup_watchdog);
++
++unsigned long softlockup_get_next_event(void)
++{
++	int this_cpu = smp_processor_id();
++	unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
++
++	if (per_cpu(print_timestamp, this_cpu) == touch_timestamp ||
++		did_panic ||
++			!per_cpu(watchdog_task, this_cpu))
++		return MAX_JIFFY_OFFSET;
 +
++	return max_t(long, 0, touch_timestamp + HZ - jiffies);
++}
+ 
+ /*
+  * This callback runs from the timer interrupt, and checks
+diff -r d894e36cfc30 -r 0aa021803deb kernel/sysctl.c
+--- a/kernel/sysctl.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/kernel/sysctl.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -661,7 +661,7 @@
+ 		.proc_handler	= &proc_dointvec,
+ 	},
+ #endif
+-#ifdef CONFIG_ACPI_SLEEP
++#if defined(CONFIG_ACPI_SLEEP) && !defined(CONFIG_ACPI_PV_SLEEP)
+ 	{
+ 		.ctl_name	= KERN_ACPI_VIDEO_FLAGS,
+ 		.procname	= "acpi_video_flags",
+diff -r d894e36cfc30 -r 0aa021803deb kernel/timer.c
+--- a/kernel/timer.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/kernel/timer.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -485,7 +485,9 @@
+ 		if (hr_expires < 3)
+ 			return hr_expires + jiffies;
+ 	}
+-	hr_expires += jiffies;
++	hr_expires = min_t(unsigned long,
++			   softlockup_get_next_event(),
++			   hr_expires) + jiffies;
+ 
+ 	base = __get_cpu_var(tvec_bases);
+ 	spin_lock(&base->lock);
+diff -r d894e36cfc30 -r 0aa021803deb lib/Makefile
+--- a/lib/Makefile	Tue Sep 09 11:37:38 2008 +0200
++++ b/lib/Makefile	Wed Sep 10 10:54:08 2008 +0100
+@@ -52,6 +52,9 @@
+ obj-$(CONFIG_AUDIT_GENERIC) += audit.o
+ 
+ obj-$(CONFIG_SWIOTLB) += swiotlb.o
++ifeq ($(CONFIG_IA64),y)
++swiotlb-$(CONFIG_XEN) := ../arch/ia64/xen/swiotlb.o
++endif
+ 
+ hostprogs-y	:= gen_crc32table
+ clean-files	:= crc32table.h
+diff -r d894e36cfc30 -r 0aa021803deb lib/swiotlb-xen.c
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/lib/swiotlb-xen.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,739 @@
 +/*
-+ * Allocate a fresh port for binding to @remote_domain.
-+ * Return allocated port.
++ * Dynamic DMA mapping support.
++ *
++ * This implementation is a fallback for platforms that do not support
++ * I/O TLBs (aka DMA address translation hardware).
++ * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick at intel.com>
++ * Copyright (C) 2000 Goutham Rao <goutham.rao at intel.com>
++ * Copyright (C) 2000, 2003 Hewlett-Packard Co
++ *	David Mosberger-Tang <davidm at hpl.hp.com>
++ * Copyright (C) 2005 Keir Fraser <keir at xensource.com>
 + */
-+#define IOCTL_EVTCHN_BIND_UNBOUND_PORT			\
-+	_IOC(_IOC_NONE, 'E', 2, sizeof(struct ioctl_evtchn_bind_unbound_port))
-+struct ioctl_evtchn_bind_unbound_port {
-+	unsigned int remote_domain;
-+};
++
++#include <linux/cache.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ctype.h>
++#include <linux/init.h>
++#include <linux/bootmem.h>
++#include <linux/highmem.h>
++#include <asm/io.h>
++#include <asm/pci.h>
++#include <asm/dma.h>
++#include <asm/uaccess.h>
++#include <xen/gnttab.h>
++#include <xen/interface/memory.h>
++#include <asm-i386/mach-xen/asm/gnttab_dma.h>
++
++int swiotlb;
++EXPORT_SYMBOL(swiotlb);
++
++#define OFFSET(val,align) ((unsigned long)((val) & ( (align) - 1)))
 +
 +/*
-+ * Unbind previously allocated @port.
++ * Maximum allowable number of contiguous slabs to map,
++ * must be a power of 2.  What is the appropriate value?
++ * The complexity of {map,unmap}_single is linearly dependent on this value.
 + */
-+#define IOCTL_EVTCHN_UNBIND				\
-+	_IOC(_IOC_NONE, 'E', 3, sizeof(struct ioctl_evtchn_unbind))
-+struct ioctl_evtchn_unbind {
-+	unsigned int port;
-+};
++#define IO_TLB_SEGSIZE	128
 +
 +/*
-+ * Unbind previously allocated @port.
++ * Log of the size of each IO TLB slab.  The number of slabs is
++ * command-line controllable.
 + */
-+#define IOCTL_EVTCHN_NOTIFY				\
-+	_IOC(_IOC_NONE, 'E', 4, sizeof(struct ioctl_evtchn_notify))
-+struct ioctl_evtchn_notify {
-+	unsigned int port;
-+};
++#define IO_TLB_SHIFT 11
 +
-+/* Clear and reinitialise the event buffer. Clear error condition. */
-+#define IOCTL_EVTCHN_RESET				\
-+	_IOC(_IOC_NONE, 'E', 5, 0)
++int swiotlb_force;
 +
-+#endif /* __LINUX_PUBLIC_EVTCHN_H__ */
-diff -Nurp pristine-linux-2.6.18.2/include/xen/public/gntdev.h tmp-linux-2.6-xen.patch/include/xen/public/gntdev.h
---- pristine-linux-2.6.18.2/include/xen/public/gntdev.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/public/gntdev.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,105 @@
-+/******************************************************************************
-+ * gntdev.h
-+ * 
-+ * Interface to /dev/xen/gntdev.
-+ * 
-+ * Copyright (c) 2007, D G Murray
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
++static char *iotlb_virt_start;
++static unsigned long iotlb_nslabs;
 +
-+#ifndef __LINUX_PUBLIC_GNTDEV_H__
-+#define __LINUX_PUBLIC_GNTDEV_H__
++/*
++ * Used to do a quick range check in swiotlb_unmap_single and
++ * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
++ * API.
++ */
++static unsigned long iotlb_pfn_start, iotlb_pfn_end;
 +
-+struct ioctl_gntdev_grant_ref {
-+	/* The domain ID of the grant to be mapped. */
-+	uint32_t domid;
-+	/* The grant reference of the grant to be mapped. */
-+	uint32_t ref;
-+};
++/* Does the given dma address reside within the swiotlb aperture? */
++static inline int in_swiotlb_aperture(dma_addr_t dev_addr)
++{
++	unsigned long pfn = mfn_to_local_pfn(dev_addr >> PAGE_SHIFT);
++	return (pfn_valid(pfn)
++		&& (pfn >= iotlb_pfn_start)
++		&& (pfn < iotlb_pfn_end));
++}
 +
 +/*
-+ * Inserts the grant references into the mapping table of an instance
-+ * of gntdev. N.B. This does not perform the mapping, which is deferred
-+ * until mmap() is called with @index as the offset.
++ * When the IOMMU overflows we return a fallback buffer. This sets the size.
 + */
-+#define IOCTL_GNTDEV_MAP_GRANT_REF \
-+_IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref))
-+struct ioctl_gntdev_map_grant_ref {
-+	/* IN parameters */
-+	/* The number of grants to be mapped. */
-+	uint32_t count;
-+	uint32_t pad;
-+	/* OUT parameters */
-+	/* The offset to be used on a subsequent call to mmap(). */
-+	uint64_t index;
-+	/* Variable IN parameter. */
-+	/* Array of grant references, of size @count. */
-+	struct ioctl_gntdev_grant_ref refs[1];
-+};
++static unsigned long io_tlb_overflow = 32*1024;
++
++void *io_tlb_overflow_buffer;
 +
 +/*
-+ * Removes the grant references from the mapping table of an instance of
-+ * of gntdev. N.B. munmap() must be called on the relevant virtual address(es)
-+ * before this ioctl is called, or an error will result.
++ * This is a free list describing the number of free entries available from
++ * each index.
 + */
-+#define IOCTL_GNTDEV_UNMAP_GRANT_REF \
-+_IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref))       
-+struct ioctl_gntdev_unmap_grant_ref {
-+	/* IN parameters */
-+	/* The offset was returned by the corresponding map operation. */
-+	uint64_t index;
-+	/* The number of pages to be unmapped. */
-+	uint32_t count;
-+	uint32_t pad;
-+};
++static unsigned int *io_tlb_list;
++static unsigned int io_tlb_index;
 +
 +/*
-+ * Returns the offset in the driver's address space that corresponds
-+ * to @vaddr. This can be used to perform a munmap(), followed by an
-+ * UNMAP_GRANT_REF ioctl, where no state about the offset is retained by
-+ * the caller. The number of pages that were allocated at the same time as
-+ * @vaddr is returned in @count.
-+ *
-+ * N.B. Where more than one page has been mapped into a contiguous range, the
-+ *      supplied @vaddr must correspond to the start of the range; otherwise
-+ *      an error will result. It is only possible to munmap() the entire
-+ *      contiguously-allocated range at once, and not any subrange thereof.
++ * We need to save away the original address corresponding to a mapped entry
++ * for the sync operations.
 + */
-+#define IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR \
-+_IOC(_IOC_NONE, 'G', 2, sizeof(struct ioctl_gntdev_get_offset_for_vaddr))
-+struct ioctl_gntdev_get_offset_for_vaddr {
-+	/* IN parameters */
-+	/* The virtual address of the first mapped page in a range. */
-+	uint64_t vaddr;
-+	/* OUT parameters */
-+	/* The offset that was used in the initial mmap() operation. */
-+	uint64_t offset;
-+	/* The number of pages mapped in the VM area that begins at @vaddr. */
-+	uint32_t count;
-+	uint32_t pad;
-+};
++static struct phys_addr {
++	struct page *page;
++	unsigned int offset;
++} *io_tlb_orig_addr;
 +
-+#endif /* __LINUX_PUBLIC_GNTDEV_H__ */
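
For orientation, a minimal userspace sketch of the flow the comments above
describe: insert grant references, then mmap() at the returned @index. The
device node path, include path and page size are assumptions of this sketch,
and error handling is elided.

#include <stdint.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xen/public/gntdev.h>	/* include path assumed */

static void *demo_map_grant(uint32_t domid, uint32_t ref)
{
	int fd = open("/dev/gntdev", O_RDWR);	/* node name assumed */
	struct ioctl_gntdev_map_grant_ref op = { .count = 1 };

	op.refs[0].domid = domid;
	op.refs[0].ref   = ref;
	if (fd < 0 || ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op))
		return NULL;
	/* @index from the ioctl is the offset for the deferred mapping. */
	return mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, op.index);
}
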
-diff -Nurp pristine-linux-2.6.18.2/include/xen/public/privcmd.h tmp-linux-2.6-xen.patch/include/xen/public/privcmd.h
---- pristine-linux-2.6.18.2/include/xen/public/privcmd.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/public/privcmd.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,79 @@
-+/******************************************************************************
-+ * privcmd.h
-+ * 
-+ * Interface to /proc/xen/privcmd.
-+ * 
-+ * Copyright (c) 2003-2005, K A Fraser
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
++/*
++ * Protect the above data structures in the map and unmap calls
 + */
++static DEFINE_SPINLOCK(io_tlb_lock);
 +
-+#ifndef __LINUX_PUBLIC_PRIVCMD_H__
-+#define __LINUX_PUBLIC_PRIVCMD_H__
-+
-+#include <linux/types.h>
-+
-+#ifndef __user
-+#define __user
-+#endif
-+
-+typedef struct privcmd_hypercall
++static unsigned int dma_bits;
++static unsigned int __initdata max_dma_bits = 32;
++static int __init
++setup_dma_bits(char *str)
 +{
-+	__u64 op;
-+	__u64 arg[5];
-+} privcmd_hypercall_t;
-+
-+typedef struct privcmd_mmap_entry {
-+	__u64 va;
-+	__u64 mfn;
-+	__u64 npages;
-+} privcmd_mmap_entry_t; 
-+
-+typedef struct privcmd_mmap {
-+	int num;
-+	domid_t dom; /* target domain */
-+	privcmd_mmap_entry_t __user *entry;
-+} privcmd_mmap_t; 
++	max_dma_bits = simple_strtoul(str, NULL, 0);
++	return 0;
++}
++__setup("dma_bits=", setup_dma_bits);
 +
-+typedef struct privcmd_mmapbatch {
-+	int num;     /* number of pages to populate */
-+	domid_t dom; /* target domain */
-+	__u64 addr;  /* virtual address */
-+	xen_pfn_t __user *arr; /* array of mfns - top nibble set on err */
-+} privcmd_mmapbatch_t; 
++static int __init
++setup_io_tlb_npages(char *str)
++{
++	/* Unlike ia64, the size is the aperture in megabytes, not 'slabs'! */
++	if (isdigit(*str)) {
++		iotlb_nslabs = simple_strtoul(str, &str, 0) <<
++			(20 - IO_TLB_SHIFT);
++		iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
++	}
++	if (*str == ',')
++		++str;
++	/*
++	 * NB. 'force' enables the swiotlb, but doesn't force its use for
++	 * every DMA like it does on native Linux. 'off' forcibly disables
++	 * use of the swiotlb.
++	 */
++	if (!strcmp(str, "force"))
++		swiotlb_force = 1;
++	else if (!strcmp(str, "off"))
++		swiotlb_force = -1;
++	return 1;
++}
++__setup("swiotlb=", setup_io_tlb_npages);
++/* make io_tlb_overflow tunable too? */
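
For reference, the two parsers above accept kernel command-line forms like
these (values illustrative):

	swiotlb=64          /* 64MB aperture */
	swiotlb=64,force    /* enable even where the dom0 heuristic would not */
	swiotlb=off         /* forcibly disable the swiotlb */
	dma_bits=30         /* cap the address width retried for contiguous regions */
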
 +
 +/*
-+ * @cmd: IOCTL_PRIVCMD_HYPERCALL
-+ * @arg: &privcmd_hypercall_t
-+ * Return: Value returned from execution of the specified hypercall.
-+ */
-+#define IOCTL_PRIVCMD_HYPERCALL					\
-+	_IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t))
-+#define IOCTL_PRIVCMD_MMAP					\
-+	_IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmap_t))
-+#define IOCTL_PRIVCMD_MMAPBATCH					\
-+	_IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t))
-+
-+#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
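
As a sketch only, a privileged userspace tool would drive the hypercall ioctl
defined above roughly as follows; the wrapper name is invented, and the open
path follows the header comment:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

static long demo_hypercall(privcmd_hypercall_t *call)
{
	int fd = open("/proc/xen/privcmd", O_RDWR);
	long ret;

	if (fd < 0)
		return -1;
	/* On success the ioctl passes through the hypercall's return value. */
	ret = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, call);
	close(fd);
	return ret;
}
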
-diff -Nurp pristine-linux-2.6.18.2/include/xen/xenbus.h tmp-linux-2.6-xen.patch/include/xen/xenbus.h
---- pristine-linux-2.6.18.2/include/xen/xenbus.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/xenbus.h	2007-09-30 18:06:18.000000000 +0200
-@@ -0,0 +1,304 @@
-+/******************************************************************************
-+ * xenbus.h
-+ *
-+ * Talks to Xen Store to figure out what devices we have.
-+ *
-+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
-+ * Copyright (C) 2005 XenSource Ltd.
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
++ * Statically reserve bounce buffer space and initialize bounce buffer data
++ * structures for the software IO TLB used to implement the PCI DMA API.
 + */
++void
++swiotlb_init_with_default_size(size_t default_size)
++{
++	unsigned long i, bytes;
++	int rc;
 +
-+#ifndef _XEN_XENBUS_H
-+#define _XEN_XENBUS_H
++	if (!iotlb_nslabs) {
++		iotlb_nslabs = (default_size >> IO_TLB_SHIFT);
++		iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
++	}
 +
-+#include <linux/device.h>
-+#include <linux/notifier.h>
-+#include <linux/mutex.h>
-+#include <linux/completion.h>
-+#include <linux/init.h>
-+#include <linux/err.h>
-+#include <xen/interface/xen.h>
-+#include <xen/interface/grant_table.h>
-+#include <xen/interface/io/xenbus.h>
-+#include <xen/interface/io/xs_wire.h>
++	bytes = iotlb_nslabs * (1UL << IO_TLB_SHIFT);
 +
-+/* Register callback to watch this node. */
-+struct xenbus_watch
-+{
-+	struct list_head list;
++	/*
++	 * Get IO TLB memory from the low pages
++	 */
++	iotlb_virt_start = alloc_bootmem_low_pages(bytes);
++	if (!iotlb_virt_start)
++		panic("Cannot allocate SWIOTLB buffer!\n");
 +
-+	/* Path being watched. */
-+	const char *node;
++	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
++	for (i = 0; i < iotlb_nslabs; i += IO_TLB_SEGSIZE) {
++		do {
++			rc = xen_create_contiguous_region(
++				(unsigned long)iotlb_virt_start + (i << IO_TLB_SHIFT),
++				get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT),
++				dma_bits);
++		} while (rc && dma_bits++ < max_dma_bits);
++		if (rc) {
++			if (i == 0)
++				panic("No suitable physical memory available for SWIOTLB buffer!\n"
++				      "Use dom0_mem Xen boot parameter to reserve\n"
++				      "some DMA memory (e.g., dom0_mem=-128M).\n");
++			iotlb_nslabs = i;
++			i <<= IO_TLB_SHIFT;
++			free_bootmem(__pa(iotlb_virt_start + i), bytes - i);
++			bytes = i;
++			for (dma_bits = 0; i > 0; i -= IO_TLB_SEGSIZE << IO_TLB_SHIFT) {
++				unsigned int bits = fls64(virt_to_bus(iotlb_virt_start + i - 1));
 +
-+	/* Callback (executed in a process context with no locks held). */
-+	void (*callback)(struct xenbus_watch *,
-+			 const char **vec, unsigned int len);
++				if (bits > dma_bits)
++					dma_bits = bits;
++			}
++			break;
++		}
++	}
 +
-+	/* See XBWF_ definitions below. */
-+	unsigned long flags;
-+};
++	/*
++	 * Allocate and initialize the free list array.  This array is used
++	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE.
++	 */
++	io_tlb_list = alloc_bootmem(iotlb_nslabs * sizeof(int));
++	for (i = 0; i < iotlb_nslabs; i++)
++		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
++	io_tlb_index = 0;
++	io_tlb_orig_addr = alloc_bootmem(
++		iotlb_nslabs * sizeof(*io_tlb_orig_addr));
 +
-+/*
-+ * Execute callback in its own kthread. Useful if the callback is long
-+ * running or heavily serialised, to avoid taking out the main xenwatch thread
-+ * for a long period of time (or even unwittingly causing a deadlock).
-+ */
-+#define XBWF_new_thread	1
++	/*
++	 * Get the overflow emergency buffer
++	 */
++	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
++	if (!io_tlb_overflow_buffer)
++		panic("Cannot allocate SWIOTLB overflow buffer!\n");
 +
-+/* A xenbus device. */
-+struct xenbus_device {
-+	const char *devicetype;
-+	const char *nodename;
-+	const char *otherend;
-+	int otherend_id;
-+	struct xenbus_watch otherend_watch;
-+	struct device dev;
-+	enum xenbus_state state;
-+	struct completion down;
-+};
++	do {
++		rc = xen_create_contiguous_region(
++			(unsigned long)io_tlb_overflow_buffer,
++			get_order(io_tlb_overflow),
++			dma_bits);
++	} while (rc && dma_bits++ < max_dma_bits);
++	if (rc)
++		panic("No suitable physical memory available for SWIOTLB overflow buffer!\n");
 +
-+static inline struct xenbus_device *to_xenbus_device(struct device *dev)
-+{
-+	return container_of(dev, struct xenbus_device, dev);
++	iotlb_pfn_start = __pa(iotlb_virt_start) >> PAGE_SHIFT;
++	iotlb_pfn_end   = iotlb_pfn_start + (bytes >> PAGE_SHIFT);
++
++	printk(KERN_INFO "Software IO TLB enabled: \n"
++	       " Aperture:     %lu megabytes\n"
++	       " Kernel range: %p - %p\n"
++	       " Address size: %u bits\n",
++	       bytes >> 20,
++	       iotlb_virt_start, iotlb_virt_start + bytes,
++	       dma_bits);
 +}
 +
-+struct xenbus_device_id
++void
++swiotlb_init(void)
 +{
-+	/* .../device/<device_type>/<identifier> */
-+	char devicetype[32]; 	/* General class of device. */
-+};
++	long ram_end;
++	size_t defsz = 64 * (1 << 20); /* 64MB default size */
 +
-+/* A xenbus driver. */
-+struct xenbus_driver {
-+	char *name;
-+	struct module *owner;
-+	const struct xenbus_device_id *ids;
-+	int (*probe)(struct xenbus_device *dev,
-+		     const struct xenbus_device_id *id);
-+	void (*otherend_changed)(struct xenbus_device *dev,
-+				 enum xenbus_state backend_state);
-+	int (*remove)(struct xenbus_device *dev);
-+	int (*suspend)(struct xenbus_device *dev);
-+	int (*suspend_cancel)(struct xenbus_device *dev);
-+	int (*resume)(struct xenbus_device *dev);
-+	int (*uevent)(struct xenbus_device *, char **, int, char *, int);
-+	struct device_driver driver;
-+	int (*read_otherend_details)(struct xenbus_device *dev);
-+	int (*is_ready)(struct xenbus_device *dev);
-+};
++	if (swiotlb_force == 1) {
++		swiotlb = 1;
++	} else if ((swiotlb_force != -1) &&
++		   is_running_on_xen() &&
++		   is_initial_xendomain()) {
++		/* Domain 0 always has a swiotlb. */
++		ram_end = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
++		if (ram_end <= 0x7ffff)
++			defsz = 2 * (1 << 20); /* 2MB on <2GB on systems. */
++		swiotlb = 1;
++	}
 +
-+static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)
-+{
-+	return container_of(drv, struct xenbus_driver, driver);
++	if (swiotlb)
++		swiotlb_init_with_default_size(defsz);
++	else
++		printk(KERN_INFO "Software IO TLB disabled\n");
 +}
 +
-+int xenbus_register_frontend(struct xenbus_driver *drv);
-+int xenbus_register_backend(struct xenbus_driver *drv);
-+void xenbus_unregister_driver(struct xenbus_driver *drv);
-+
-+struct xenbus_transaction
++/*
++ * We use __copy_to_user_inatomic to transfer to the host buffer because the
++ * buffer may be mapped read-only (e.g., in the blkback driver) but lower-level
++ * drivers map the buffer for DMA_BIDIRECTIONAL access. This causes an
++ * unnecessary copy from the aperture to the host buffer, and a page fault.
++ */
++static void
++__sync_single(struct phys_addr buffer, char *dma_addr, size_t size, int dir)
 +{
-+	u32 id;
-+};
-+
-+/* Nil transaction ID. */
-+#define XBT_NIL ((struct xenbus_transaction) { 0 })
-+
-+char **xenbus_directory(struct xenbus_transaction t,
-+			const char *dir, const char *node, unsigned int *num);
-+void *xenbus_read(struct xenbus_transaction t,
-+		  const char *dir, const char *node, unsigned int *len);
-+int xenbus_write(struct xenbus_transaction t,
-+		 const char *dir, const char *node, const char *string);
-+int xenbus_mkdir(struct xenbus_transaction t,
-+		 const char *dir, const char *node);
-+int xenbus_exists(struct xenbus_transaction t,
-+		  const char *dir, const char *node);
-+int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node);
-+int xenbus_transaction_start(struct xenbus_transaction *t);
-+int xenbus_transaction_end(struct xenbus_transaction t, int abort);
++	if (PageHighMem(buffer.page)) {
++		size_t len, bytes;
++		char *dev, *host, *kmp;
++		len = size;
++		while (len != 0) {
++			unsigned long flags;
 +
-+/* Single read and scanf: returns -errno or num scanned if > 0. */
-+int xenbus_scanf(struct xenbus_transaction t,
-+		 const char *dir, const char *node, const char *fmt, ...)
-+	__attribute__((format(scanf, 4, 5)));
++			if (((bytes = len) + buffer.offset) > PAGE_SIZE)
++				bytes = PAGE_SIZE - buffer.offset;
++			local_irq_save(flags); /* protects KM_BOUNCE_READ */
++			kmp  = kmap_atomic(buffer.page, KM_BOUNCE_READ);
++			dev  = dma_addr + size - len;
++			host = kmp + buffer.offset;
++			if (dir == DMA_FROM_DEVICE) {
++				if (__copy_to_user_inatomic(host, dev, bytes))
++					/* inaccessible */;
++			} else
++				memcpy(dev, host, bytes);
++			kunmap_atomic(kmp, KM_BOUNCE_READ);
++			local_irq_restore(flags);
++			len -= bytes;
++			buffer.page++;
++			buffer.offset = 0;
++		}
++	} else {
++		char *host = (char *)phys_to_virt(
++			page_to_pseudophys(buffer.page)) + buffer.offset;
++		if (dir == DMA_FROM_DEVICE) {
++			if (__copy_to_user_inatomic(host, dma_addr, size))
++				/* inaccessible */;
++		} else if (dir == DMA_TO_DEVICE)
++			memcpy(dma_addr, host, size);
++	}
++}
 +
-+/* Single printf and write: returns -errno or 0. */
-+int xenbus_printf(struct xenbus_transaction t,
-+		  const char *dir, const char *node, const char *fmt, ...)
-+	__attribute__((format(printf, 4, 5)));
++/*
++ * Allocates bounce buffer and returns its kernel virtual address.
++ */
++static void *
++map_single(struct device *hwdev, struct phys_addr buffer, size_t size, int dir)
++{
++	unsigned long flags;
++	char *dma_addr;
++	unsigned int nslots, stride, index, wrap;
++	struct phys_addr slot_buf;
++	int i;
 +
-+/* Generic read function: NULL-terminated triples of name,
-+ * sprintf-style type string, and pointer. Returns 0 or errno.*/
-+int xenbus_gather(struct xenbus_transaction t, const char *dir, ...);
++	/*
++	 * For mappings greater than a page, we limit the stride (and
++	 * hence alignment) to a page size.
++	 */
++	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
++	if (size > PAGE_SIZE)
++		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
++	else
++		stride = 1;
 +
-+/* notifer routines for when the xenstore comes up */
-+int register_xenstore_notifier(struct notifier_block *nb);
-+void unregister_xenstore_notifier(struct notifier_block *nb);
++	BUG_ON(!nslots);
 +
-+int register_xenbus_watch(struct xenbus_watch *watch);
-+void unregister_xenbus_watch(struct xenbus_watch *watch);
-+void xs_suspend(void);
-+void xs_resume(void);
-+void xs_suspend_cancel(void);
++	/*
++	 * Find a suitable number of IO TLB entries that will fit this
++	 * request and allocate a buffer from that IO TLB pool.
++	 */
++	spin_lock_irqsave(&io_tlb_lock, flags);
++	{
++		wrap = index = ALIGN(io_tlb_index, stride);
 +
-+/* Used by xenbus_dev to borrow kernel's store connection. */
-+void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg);
++		if (index >= iotlb_nslabs)
++			wrap = index = 0;
 +
-+/* Prepare for domain suspend: then resume or cancel the suspend. */
-+void xenbus_suspend(void);
-+void xenbus_resume(void);
-+void xenbus_suspend_cancel(void);
++		do {
++			/*
++			 * If we find a slot that indicates we have 'nslots'
++			 * number of contiguous buffers, we allocate the
++			 * buffers from that slot and mark the entries as '0'
++			 * indicating unavailable.
++			 */
++			if (io_tlb_list[index] >= nslots) {
++				int count = 0;
 +
-+#define XENBUS_IS_ERR_READ(str) ({			\
-+	if (!IS_ERR(str) && strlen(str) == 0) {		\
-+		kfree(str);				\
-+		str = ERR_PTR(-ERANGE);			\
-+	}						\
-+	IS_ERR(str);					\
-+})
++				for (i = index; i < (int)(index + nslots); i++)
++					io_tlb_list[i] = 0;
++				for (i = index - 1;
++				     (OFFSET(i, IO_TLB_SEGSIZE) !=
++				      IO_TLB_SEGSIZE -1) && io_tlb_list[i];
++				     i--)
++					io_tlb_list[i] = ++count;
++				dma_addr = iotlb_virt_start +
++					(index << IO_TLB_SHIFT);
 +
-+#define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE)
++				/*
++				 * Update the indices to avoid searching in
++				 * the next round.
++				 */
++				io_tlb_index =
++					((index + nslots) < iotlb_nslabs
++					 ? (index + nslots) : 0);
 +
++				goto found;
++			}
++			index += stride;
++			if (index >= iotlb_nslabs)
++				index = 0;
++		} while (index != wrap);
 +
-+/**
-+ * Register a watch on the given path, using the given xenbus_watch structure
-+ * for storage, and the given callback function as the callback.  Return 0 on
-+ * success, or -errno on error.  On success, the given path will be saved as
-+ * watch->node, and remains the caller's to free.  On error, watch->node will
-+ * be NULL, the device will switch to XenbusStateClosing, and the error will
-+ * be saved in the store.
-+ */
-+int xenbus_watch_path(struct xenbus_device *dev, const char *path,
-+		      struct xenbus_watch *watch,
-+		      void (*callback)(struct xenbus_watch *,
-+				       const char **, unsigned int));
++		spin_unlock_irqrestore(&io_tlb_lock, flags);
++		return NULL;
++	}
++  found:
++	spin_unlock_irqrestore(&io_tlb_lock, flags);
 +
++	/*
++	 * Save away the mapping from the original address to the DMA address.
++	 * This is needed when we sync the memory.  Then we sync the buffer if
++	 * needed.
++	 */
++	slot_buf = buffer;
++	for (i = 0; i < nslots; i++) {
++		slot_buf.page += slot_buf.offset >> PAGE_SHIFT;
++		slot_buf.offset &= PAGE_SIZE - 1;
++		io_tlb_orig_addr[index+i] = slot_buf;
++		slot_buf.offset += 1 << IO_TLB_SHIFT;
++	}
++	if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
++		__sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
 +
-+/**
-+ * Register a watch on the given path/path2, using the given xenbus_watch
-+ * structure for storage, and the given callback function as the callback.
-+ * Return 0 on success, or -errno on error.  On success, the watched path
-+ * (path/path2) will be saved as watch->node, and becomes the caller's to
-+ * kfree().  On error, watch->node will be NULL, so the caller has nothing to
-+ * free, the device will switch to XenbusStateClosing, and the error will be
-+ * saved in the store.
-+ */
-+int xenbus_watch_path2(struct xenbus_device *dev, const char *path,
-+		       const char *path2, struct xenbus_watch *watch,
-+		       void (*callback)(struct xenbus_watch *,
-+					const char **, unsigned int));
++	return dma_addr;
++}
 +
++static struct phys_addr dma_addr_to_phys_addr(char *dma_addr)
++{
++	int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
++	struct phys_addr buffer = io_tlb_orig_addr[index];
++	buffer.offset += (long)dma_addr & ((1 << IO_TLB_SHIFT) - 1);
++	buffer.page += buffer.offset >> PAGE_SHIFT;
++	buffer.offset &= PAGE_SIZE - 1;
++	return buffer;
++}
 +
-+/**
-+ * Advertise in the store a change of the given driver to the given new_state.
-+ * Return 0 on success, or -errno on error.  On error, the device will switch
-+ * to XenbusStateClosing, and the error will be saved in the store.
++/*
++ * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 + */
-+int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state);
++static void
++unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
++{
++	unsigned long flags;
++	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
++	int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
++	struct phys_addr buffer = dma_addr_to_phys_addr(dma_addr);
 +
++	/*
++	 * First, sync the memory before unmapping the entry
++	 */
++	if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
++		__sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
 +
-+/**
-+ * Grant access to the given ring_mfn to the peer of the given device.  Return
-+ * 0 on success, or -errno on error.  On error, the device will switch to
-+ * XenbusStateClosing, and the error will be saved in the store.
-+ */
-+int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn);
++	/*
++	 * Return the buffer to the free list by setting the corresponding
++	 * entries to indicate the number of contiguous entries available.
++	 * While returning the entries to the free list, we merge the entries
++	 * with slots below and above the pool being returned.
++	 */
++	spin_lock_irqsave(&io_tlb_lock, flags);
++	{
++		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
++			 io_tlb_list[index + nslots] : 0);
++		/*
++		 * Step 1: return the slots to the free list, merging the
++		 * slots with succeeding slots.
++		 */
++		for (i = index + nslots - 1; i >= index; i--)
++			io_tlb_list[i] = ++count;
++		/*
++		 * Step 2: merge the returned slots with the preceding slots,
++		 * if available (non-zero).
++		 */
++		for (i = index - 1;
++		     (OFFSET(i, IO_TLB_SEGSIZE) !=
++		      IO_TLB_SEGSIZE -1) && io_tlb_list[i];
++		     i--)
++			io_tlb_list[i] = ++count;
++	}
++	spin_unlock_irqrestore(&io_tlb_lock, flags);
++}
 +
++static void
++sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
++{
++	struct phys_addr buffer = dma_addr_to_phys_addr(dma_addr);
++	BUG_ON((dir != DMA_FROM_DEVICE) && (dir != DMA_TO_DEVICE));
++	__sync_single(buffer, dma_addr, size, dir);
++}
 +
-+/**
-+ * Map a page of memory into this domain from another domain's grant table.
-+ * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
-+ * page to that address, and sets *vaddr to that address.
-+ * xenbus_map_ring does not allocate the virtual address space (you must do
-+ * this yourself!). It only maps in the page to the specified address.
-+ * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
-+ * or -ENOMEM on error. If an error is returned, device will switch to
-+ * XenbusStateClosing and the error message will be saved in XenStore.
-+ */
-+struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev,
-+					 int gnt_ref);
-+int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
-+			   grant_handle_t *handle, void *vaddr);
++static void
++swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
++{
++	/*
++	 * Ran out of IOMMU space for this operation. This is very bad.
++	 * Unfortunately drivers cannot handle this operation properly
++	 * unless they check for pci_dma_mapping_error() (most don't).
++	 * When the mapping is small enough, return a static buffer to limit
++	 * the damage, or panic when the transfer is too big.
++	 */
++	printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
++	       "device %s\n", (unsigned long)size, dev ? dev->bus_id : "?");
 +
++	if (size > io_tlb_overflow && do_panic) {
++		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
++			panic("PCI-DMA: Memory would be corrupted\n");
++		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
++			panic("PCI-DMA: Random memory would be DMAed\n");
++	}
++}
 +
-+/**
-+ * Unmap a page of memory in this domain that was imported from another domain.
-+ * Use xenbus_unmap_ring_vfree if you mapped in your memory with
-+ * xenbus_map_ring_valloc (it will free the virtual address space).
-+ * Returns 0 on success and returns GNTST_* on error
-+ * (see xen/include/interface/grant_table.h).
++/*
++ * Map a single buffer of the indicated size for DMA in streaming mode.  The
++ * PCI address to use is returned.
++ *
++ * Once the device is given the dma address, the device owns this memory until
++ * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
 + */
-+int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *);
-+int xenbus_unmap_ring(struct xenbus_device *dev,
-+		      grant_handle_t handle, void *vaddr);
-+
++dma_addr_t
++swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
++{
++	dma_addr_t dev_addr = gnttab_dma_map_page(virt_to_page(ptr)) +
++			      offset_in_page(ptr);
++	void *map;
++	struct phys_addr buffer;
 +
-+/**
-+ * Allocate an event channel for the given xenbus_device, assigning the newly
-+ * created local port to *port.  Return 0 on success, or -errno on error.  On
-+ * error, the device will switch to XenbusStateClosing, and the error will be
-+ * saved in the store.
-+ */
-+int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port);
++	BUG_ON(dir == DMA_NONE);
 +
++	/*
++	 * If the pointer passed in happens to be in the device's DMA window,
++	 * we can safely return the device addr and not worry about bounce
++	 * buffering it.
++	 */
++	if (!range_straddles_page_boundary(__pa(ptr), size) &&
++	    !address_needs_mapping(hwdev, dev_addr))
++		return dev_addr;
 +
-+/**
-+ * Free an existing event channel. Returns 0 on success or -errno on error.
-+ */
-+int xenbus_free_evtchn(struct xenbus_device *dev, int port);
++	/*
++	 * Oh well, have to allocate and map a bounce buffer.
++	 */
++	gnttab_dma_unmap_page(dev_addr);
++	buffer.page   = virt_to_page(ptr);
++	buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
++	map = map_single(hwdev, buffer, size, dir);
++	if (!map) {
++		swiotlb_full(hwdev, size, dir, 1);
++		map = io_tlb_overflow_buffer;
++	}
 +
++	dev_addr = virt_to_bus(map);
++	return dev_addr;
++}
 +
-+/**
-+ * Return the state of the driver rooted at the given store path, or
-+ * XenbusStateUnknown if no state can be read.
++/*
++ * Unmap a single streaming mode DMA translation.  The dma_addr and size must
++ * match what was provided in a previous swiotlb_map_single call.  All
++ * other usages are undefined.
++ *
++ * After this call, reads by the cpu to the buffer are guaranteed to see
++ * whatever the device wrote there.
 + */
-+enum xenbus_state xenbus_read_driver_state(const char *path);
-+
++void
++swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
++		     int dir)
++{
++	BUG_ON(dir == DMA_NONE);
++	if (in_swiotlb_aperture(dev_addr))
++		unmap_single(hwdev, bus_to_virt(dev_addr), size, dir);
++	else
++		gnttab_dma_unmap_page(dev_addr);
++}
 +
-+/***
-+ * Report the given negative errno into the store, along with the given
-+ * formatted message.
++/*
++ * Make physical memory consistent for a single streaming mode DMA translation
++ * after a transfer.
++ *
++ * If you perform a swiotlb_map_single() but wish to interrogate the buffer
++ * using the cpu, yet do not wish to tear down the PCI dma mapping, you must
++ * call this function before doing so.  At the next point you give the PCI dma
++ * address back to the card, you must first perform a
++ * swiotlb_sync_single_for_device, and then the device again owns the buffer.
 + */
-+void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt,
-+		      ...);
++void
++swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
++			    size_t size, int dir)
++{
++	BUG_ON(dir == DMA_NONE);
++	if (in_swiotlb_aperture(dev_addr))
++		sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
++}
 +
++void
++swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
++			       size_t size, int dir)
++{
++	BUG_ON(dir == DMA_NONE);
++	if (in_swiotlb_aperture(dev_addr))
++		sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
++}
 +
-+/***
-+ * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
-+ * xenbus_switch_state(dev, NULL, XenbusStateClosing) to schedule an orderly
-+ * closedown of this driver and its peer.
++/*
++ * Map a set of buffers described by scatterlist in streaming mode for DMA.
++ * This is the scatter-gather version of the above swiotlb_map_single
++ * interface.  Here the scatter-gather list elements are each tagged with the
++ * appropriate dma address and length.  They are obtained via
++ * sg_dma_{address,length}(SG).
++ *
++ * NOTE: An implementation may be able to use a smaller number of
++ *       DMA address/length pairs than there are SG table elements.
++ *       (for example via virtual mapping capabilities)
++ *       The routine returns the number of addr/length pairs actually
++ *       used, at most nents.
++ *
++ * Device ownership issues as mentioned above for swiotlb_map_single are the
++ * same here.
 + */
-+void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt,
-+		      ...);
-+
-+int xenbus_dev_init(void);
-+
-+const char *xenbus_strstate(enum xenbus_state state);
-+int xenbus_dev_is_online(struct xenbus_device *dev);
-+int xenbus_frontend_closed(struct xenbus_device *dev);
++int
++swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
++	       int dir)
++{
++	struct phys_addr buffer;
++	dma_addr_t dev_addr;
++	char *map;
++	int i;
 +
-+#endif /* _XEN_XENBUS_H */
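
To show how the pieces fit together, a hedged skeleton of a frontend built on
the API above; the "demo" device type and all demofront_* names are invented:

static int demofront_probe(struct xenbus_device *dev,
			   const struct xenbus_device_id *id)
{
	/* Errors from state switches are reported to the store for us. */
	return xenbus_switch_state(dev, XenbusStateInitialised);
}

static const struct xenbus_device_id demofront_ids[] = {
	{ "demo" },
	{ "" }
};

static struct xenbus_driver demofront_driver = {
	.name  = "demofront",
	.owner = THIS_MODULE,
	.ids   = demofront_ids,
	.probe = demofront_probe,
};

static int __init demofront_init(void)
{
	return xenbus_register_frontend(&demofront_driver);
}
module_init(demofront_init);
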
-diff -Nurp pristine-linux-2.6.18.2/include/xen/xencons.h tmp-linux-2.6-xen.patch/include/xen/xencons.h
---- pristine-linux-2.6.18.2/include/xen/xencons.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/xencons.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,19 @@
-+#ifndef __ASM_XENCONS_H__
-+#define __ASM_XENCONS_H__
++	BUG_ON(dir == DMA_NONE);
 +
-+struct dom0_vga_console_info;
-+void dom0_init_screen_info(const struct dom0_vga_console_info *info);
++	for (i = 0; i < nelems; i++, sg++) {
++		dev_addr = gnttab_dma_map_page(sg->page) + sg->offset;
 +
-+void xencons_force_flush(void);
-+void xencons_resume(void);
++		if (range_straddles_page_boundary(page_to_pseudophys(sg->page)
++						  + sg->offset, sg->length)
++		    || address_needs_mapping(hwdev, dev_addr)) {
++			gnttab_dma_unmap_page(dev_addr);
++			buffer.page   = sg->page;
++			buffer.offset = sg->offset;
++			map = map_single(hwdev, buffer, sg->length, dir);
++			if (!map) {
++				/* Don't panic here, we expect map_sg users
++				   to do proper error handling. */
++				swiotlb_full(hwdev, sg->length, dir, 0);
++				swiotlb_unmap_sg(hwdev, sg - i, i, dir);
++				sg[0].dma_length = 0;
++				return 0;
++			}
++			sg->dma_address = (dma_addr_t)virt_to_bus(map);
++		} else
++			sg->dma_address = dev_addr;
++		sg->dma_length = sg->length;
++	}
++	return nelems;
++}
 +
-+/* Interrupt work hooks. Receive data, or kick data out. */
-+void xencons_rx(char *buf, unsigned len, struct pt_regs *regs);
-+void xencons_tx(void);
++/*
++ * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
++ * concerning calls here are the same as for swiotlb_unmap_single() above.
++ */
++void
++swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
++		 int dir)
++{
++	int i;
 +
-+int xencons_ring_init(void);
-+int xencons_ring_send(const char *data, unsigned len);
++	BUG_ON(dir == DMA_NONE);
 +
-+void xencons_early_setup(void);
++	for (i = 0; i < nelems; i++, sg++)
++		if (in_swiotlb_aperture(sg->dma_address))
++			unmap_single(hwdev, 
++				     (void *)bus_to_virt(sg->dma_address),
++				     sg->dma_length, dir);
++		else
++			gnttab_dma_unmap_page(sg->dma_address);
++}
 +
-+#endif /* __ASM_XENCONS_H__ */
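
A small usage sketch for the ring interface above; it assumes
xencons_ring_send() returns the number of bytes it managed to queue, and
demo_flush is invented:

static void demo_flush(const char *buf, unsigned len)
{
	while (len) {
		int sent = xencons_ring_send(buf, len);
		if (sent <= 0)
			break;	/* ring full; retry on the next tx kick */
		buf += sent;
		len -= sent;
	}
}
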
-diff -Nurp pristine-linux-2.6.18.2/include/xen/xenoprof.h tmp-linux-2.6-xen.patch/include/xen/xenoprof.h
---- pristine-linux-2.6.18.2/include/xen/xenoprof.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/xenoprof.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,42 @@
-+/******************************************************************************
-+ * xen/xenoprof.h
-+ *
-+ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
-+ *                    VA Linux Systems Japan K.K.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++/*
++ * Make physical memory consistent for a set of streaming mode DMA translations
++ * after a transfer.
 + *
++ * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
++ * and usage.
 + */
++void
++swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
++			int nelems, int dir)
++{
++	int i;
 +
-+#ifndef __XEN_XENOPROF_H__
-+#define __XEN_XENOPROF_H__
-+#ifdef CONFIG_XEN
-+
-+#include <asm/xenoprof.h>
++	BUG_ON(dir == DMA_NONE);
 +
-+struct oprofile_operations;
-+int xenoprofile_init(struct oprofile_operations * ops);
-+void xenoprofile_exit(void);
++	for (i = 0; i < nelems; i++, sg++)
++		if (in_swiotlb_aperture(sg->dma_address))
++			sync_single(hwdev,
++				    (void *)bus_to_virt(sg->dma_address),
++				    sg->dma_length, dir);
++}
 +
-+struct xenoprof_shared_buffer {
-+	char					*buffer;
-+	struct xenoprof_arch_shared_buffer	arch;
-+};
-+#else
-+#define xenoprofile_init(ops)	(-ENOSYS)
-+#define xenoprofile_exit()	do { } while (0)
++void
++swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
++			   int nelems, int dir)
++{
++	int i;
 +
-+#endif /* CONFIG_XEN */
-+#endif /* __XEN_XENOPROF_H__ */
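
For context, the intended caller has roughly this shape (a sketch following
oprofile's arch-init convention, not part of this patch):

int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	/* Delegates to Xen; the !CONFIG_XEN stub above yields -ENOSYS. */
	return xenoprofile_init(ops);
}
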
-diff -Nurp pristine-linux-2.6.18.2/include/xen/xen_proc.h tmp-linux-2.6-xen.patch/include/xen/xen_proc.h
---- pristine-linux-2.6.18.2/include/xen/xen_proc.h	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/include/xen/xen_proc.h	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,12 @@
++	BUG_ON(dir == DMA_NONE);
 +
-+#ifndef __ASM_XEN_PROC_H__
-+#define __ASM_XEN_PROC_H__
++	for (i = 0; i < nelems; i++, sg++)
++		if (in_swiotlb_aperture(sg->dma_address))
++			sync_single(hwdev,
++				    (void *)bus_to_virt(sg->dma_address),
++				    sg->dma_length, dir);
++}
 +
-+#include <linux/proc_fs.h>
++#ifdef CONFIG_HIGHMEM
 +
-+extern struct proc_dir_entry *create_xen_proc_entry(
-+	const char *name, mode_t mode);
-+extern void remove_xen_proc_entry(
-+	const char *name);
++dma_addr_t
++swiotlb_map_page(struct device *hwdev, struct page *page,
++		 unsigned long offset, size_t size,
++		 enum dma_data_direction direction)
++{
++	struct phys_addr buffer;
++	dma_addr_t dev_addr;
++	char *map;
 +
-+#endif /* __ASM_XEN_PROC_H__ */
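
Usage sketch for the helpers above; the node name, mode and read handler are
hypothetical:

static int __init demo_proc_init(void)
{
	struct proc_dir_entry *ent = create_xen_proc_entry("demo", 0400);

	if (!ent)
		return -ENOMEM;
	ent->read_proc = demo_read_proc;	/* hypothetical handler */
	return 0;
}
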
-diff -Nurp pristine-linux-2.6.18.2/kernel/fork.c tmp-linux-2.6-xen.patch/kernel/fork.c
---- pristine-linux-2.6.18.2/kernel/fork.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/kernel/fork.c	2007-07-30 16:35:13.000000000 +0200
-@@ -276,6 +276,9 @@ static inline int dup_mmap(struct mm_str
- 		if (retval)
- 			goto out;
- 	}
-+#ifdef arch_dup_mmap
-+	arch_dup_mmap(mm, oldmm);
-+#endif
- 	retval = 0;
- out:
- 	up_write(&mm->mmap_sem);
-diff -Nurp pristine-linux-2.6.18.2/kernel/irq/spurious.c tmp-linux-2.6-xen.patch/kernel/irq/spurious.c
---- pristine-linux-2.6.18.2/kernel/irq/spurious.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/kernel/irq/spurious.c	2007-07-30 16:35:13.000000000 +0200
-@@ -139,7 +139,8 @@ void note_interrupt(unsigned int irq, st
- 		    irqreturn_t action_ret, struct pt_regs *regs)
- {
- 	if (unlikely(action_ret != IRQ_HANDLED)) {
--		desc->irqs_unhandled++;
-+		if (!irq_ignore_unhandled(irq))
-+			desc->irqs_unhandled++;
- 		if (unlikely(action_ret != IRQ_NONE))
- 			report_bad_irq(irq, desc, action_ret);
- 	}
-diff -Nurp pristine-linux-2.6.18.2/kernel/Kconfig.preempt tmp-linux-2.6-xen.patch/kernel/Kconfig.preempt
---- pristine-linux-2.6.18.2/kernel/Kconfig.preempt	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/kernel/Kconfig.preempt	2007-07-30 16:35:13.000000000 +0200
-@@ -35,6 +35,7 @@ config PREEMPT_VOLUNTARY
- 
- config PREEMPT
- 	bool "Preemptible Kernel (Low-Latency Desktop)"
-+	depends on !XEN
- 	help
- 	  This option reduces the latency of the kernel by making
- 	  all kernel code (that is not executing in a critical section)
-diff -Nurp pristine-linux-2.6.18.2/kernel/kexec.c tmp-linux-2.6-xen.patch/kernel/kexec.c
---- pristine-linux-2.6.18.2/kernel/kexec.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/kernel/kexec.c	2007-09-30 18:06:18.000000000 +0200
-@@ -330,13 +330,26 @@ static int kimage_is_destination_range(s
- 	return 0;
- }
- 
--static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
-+static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order, unsigned long limit)
- {
- 	struct page *pages;
- 
- 	pages = alloc_pages(gfp_mask, order);
- 	if (pages) {
- 		unsigned int count, i;
-+#ifdef CONFIG_XEN
-+		int address_bits;
++	dev_addr = gnttab_dma_map_page(page) + offset;
++	if (address_needs_mapping(hwdev, dev_addr)) {
++		gnttab_dma_unmap_page(dev_addr);
++		buffer.page   = page;
++		buffer.offset = offset;
++		map = map_single(hwdev, buffer, size, direction);
++		if (!map) {
++			swiotlb_full(hwdev, size, direction, 1);
++			map = io_tlb_overflow_buffer;
++		}
++		dev_addr = (dma_addr_t)virt_to_bus(map);
++	}
 +
-+		if (limit == ~0UL)
-+			address_bits = BITS_PER_LONG;
-+		else
-+			address_bits = long_log2(limit);
++	return dev_addr;
++}
 +
-+		if (xen_limit_pages_to_max_mfn(pages, order, address_bits) < 0) {
-+			__free_pages(pages, order);
-+			return NULL;
-+		}
-+#endif
- 		pages->mapping = NULL;
- 		set_page_private(pages, order);
- 		count = 1 << order;
-@@ -355,6 +368,9 @@ static void kimage_free_pages(struct pag
- 	count = 1 << order;
- 	for (i = 0; i < count; i++)
- 		ClearPageReserved(page + i);
-+#ifdef CONFIG_XEN
-+	xen_destroy_contiguous_region((unsigned long)page_address(page), order);
-+#endif
- 	__free_pages(page, order);
- }
- 
-@@ -400,10 +416,10 @@ static struct page *kimage_alloc_normal_
- 	do {
- 		unsigned long pfn, epfn, addr, eaddr;
- 
--		pages = kimage_alloc_pages(GFP_KERNEL, order);
-+		pages = kimage_alloc_pages(GFP_KERNEL, order, KEXEC_CONTROL_MEMORY_LIMIT);
- 		if (!pages)
- 			break;
--		pfn   = page_to_pfn(pages);
-+		pfn   = kexec_page_to_pfn(pages);
- 		epfn  = pfn + count;
- 		addr  = pfn << PAGE_SHIFT;
- 		eaddr = epfn << PAGE_SHIFT;
-@@ -437,6 +453,7 @@ static struct page *kimage_alloc_normal_
- 	return pages;
- }
- 
-+#ifndef CONFIG_XEN
- static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
- 						      unsigned int order)
- {
-@@ -490,7 +507,7 @@ static struct page *kimage_alloc_crash_c
- 		}
- 		/* If I don't overlap any segments I have found my hole! */
- 		if (i == image->nr_segments) {
--			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
-+			pages = kexec_pfn_to_page(hole_start >> PAGE_SHIFT);
- 			break;
- 		}
- 	}
-@@ -517,6 +534,13 @@ struct page *kimage_alloc_control_pages(
- 
- 	return pages;
- }
-+#else /* !CONFIG_XEN */
-+struct page *kimage_alloc_control_pages(struct kimage *image,
-+					 unsigned int order)
++void
++swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
++		   size_t size, enum dma_data_direction direction)
 +{
-+	return kimage_alloc_normal_control_pages(image, order);
++	BUG_ON(direction == DMA_NONE);
++	if (in_swiotlb_aperture(dma_address))
++		unmap_single(hwdev, bus_to_virt(dma_address), size, direction);
++	else
++		gnttab_dma_unmap_page(dma_address);
 +}
-+#endif
- 
- static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
- {
-@@ -532,7 +556,7 @@ static int kimage_add_entry(struct kimag
- 			return -ENOMEM;
- 
- 		ind_page = page_address(page);
--		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
-+		*image->entry = kexec_virt_to_phys(ind_page) | IND_INDIRECTION;
- 		image->entry = ind_page;
- 		image->last_entry = ind_page +
- 				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
-@@ -593,13 +617,13 @@ static int kimage_terminate(struct kimag
- #define for_each_kimage_entry(image, ptr, entry) \
- 	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
- 		ptr = (entry & IND_INDIRECTION)? \
--			phys_to_virt((entry & PAGE_MASK)): ptr +1)
-+			kexec_phys_to_virt((entry & PAGE_MASK)): ptr +1)
- 
- static void kimage_free_entry(kimage_entry_t entry)
- {
- 	struct page *page;
- 
--	page = pfn_to_page(entry >> PAGE_SHIFT);
-+	page = kexec_pfn_to_page(entry >> PAGE_SHIFT);
- 	kimage_free_pages(page);
- }
- 
-@@ -611,6 +635,10 @@ static void kimage_free(struct kimage *i
- 	if (!image)
- 		return;
- 
-+#ifdef CONFIG_XEN
-+	xen_machine_kexec_unload(image);
++
 +#endif
 +
- 	kimage_free_extra_pages(image);
- 	for_each_kimage_entry(image, ptr, entry) {
- 		if (entry & IND_INDIRECTION) {
-@@ -686,7 +714,7 @@ static struct page *kimage_alloc_page(st
- 	 * have a match.
- 	 */
- 	list_for_each_entry(page, &image->dest_pages, lru) {
--		addr = page_to_pfn(page) << PAGE_SHIFT;
-+		addr = kexec_page_to_pfn(page) << PAGE_SHIFT;
- 		if (addr == destination) {
- 			list_del(&page->lru);
- 			return page;
-@@ -697,16 +725,16 @@ static struct page *kimage_alloc_page(st
- 		kimage_entry_t *old;
- 
- 		/* Allocate a page, if we run out of memory give up */
--		page = kimage_alloc_pages(gfp_mask, 0);
-+		page = kimage_alloc_pages(gfp_mask, 0, KEXEC_SOURCE_MEMORY_LIMIT);
- 		if (!page)
- 			return NULL;
- 		/* If the page cannot be used file it away */
--		if (page_to_pfn(page) >
-+		if (kexec_page_to_pfn(page) >
- 				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
- 			list_add(&page->lru, &image->unuseable_pages);
- 			continue;
- 		}
--		addr = page_to_pfn(page) << PAGE_SHIFT;
-+		addr = kexec_page_to_pfn(page) << PAGE_SHIFT;
- 
- 		/* If it is the destination page we want use it */
- 		if (addr == destination)
-@@ -729,7 +757,7 @@ static struct page *kimage_alloc_page(st
- 			struct page *old_page;
- 
- 			old_addr = *old & PAGE_MASK;
--			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
-+			old_page = kexec_pfn_to_page(old_addr >> PAGE_SHIFT);
- 			copy_highpage(page, old_page);
- 			*old = addr | (*old & ~PAGE_MASK);
- 
-@@ -779,7 +807,7 @@ static int kimage_load_normal_segment(st
- 			result  = -ENOMEM;
- 			goto out;
- 		}
--		result = kimage_add_page(image, page_to_pfn(page)
-+		result = kimage_add_page(image, kexec_page_to_pfn(page)
- 								<< PAGE_SHIFT);
- 		if (result < 0)
- 			goto out;
-@@ -811,6 +839,7 @@ out:
- 	return result;
- }
- 
-+#ifndef CONFIG_XEN
- static int kimage_load_crash_segment(struct kimage *image,
- 					struct kexec_segment *segment)
- {
-@@ -833,7 +862,7 @@ static int kimage_load_crash_segment(str
- 		char *ptr;
- 		size_t uchunk, mchunk;
- 
--		page = pfn_to_page(maddr >> PAGE_SHIFT);
-+		page = kexec_pfn_to_page(maddr >> PAGE_SHIFT);
- 		if (page == 0) {
- 			result  = -ENOMEM;
- 			goto out;
-@@ -881,6 +910,13 @@ static int kimage_load_segment(struct ki
- 
- 	return result;
- }
-+#else /* CONFIG_XEN */
-+static int kimage_load_segment(struct kimage *image,
-+				struct kexec_segment *segment)
++int
++swiotlb_dma_mapping_error(dma_addr_t dma_addr)
 +{
-+	return kimage_load_normal_segment(image, segment);
++	return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
 +}
-+#endif
- 
- /*
-  * Exec Kernel system call: for obvious reasons only root may call it.
-@@ -991,6 +1027,13 @@ asmlinkage long sys_kexec_load(unsigned 
- 		if (result)
- 			goto out;
- 	}
-+#ifdef CONFIG_XEN
-+	if (image) {
-+		result = xen_machine_kexec_load(image);
-+		if (result)
-+			goto out;
-+	}
-+#endif
- 	/* Install the new kernel, and  Uninstall the old */
- 	image = xchg(dest_image, image);
- 
-@@ -1044,7 +1087,6 @@ void crash_kexec(struct pt_regs *regs)
- {
- 	int locked;
- 
--
- 	/* Take the kexec_lock here to prevent sys_kexec_load
- 	 * running on one cpu from replacing the crash kernel
- 	 * we are using after a panic on a different cpu.
-diff -Nurp pristine-linux-2.6.18.2/kernel/softlockup.c tmp-linux-2.6-xen.patch/kernel/softlockup.c
---- pristine-linux-2.6.18.2/kernel/softlockup.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/kernel/softlockup.c	2007-10-14 01:51:15.000000000 +0200
-@@ -40,6 +40,19 @@ void touch_softlockup_watchdog(void)
- }
- EXPORT_SYMBOL(touch_softlockup_watchdog);
- 
-+unsigned long softlockup_get_next_event(void)
-+{
-+	int this_cpu = smp_processor_id();
-+	unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
-+
-+	if (per_cpu(print_timestamp, this_cpu) == touch_timestamp ||
-+		did_panic ||
-+			!per_cpu(watchdog_task, this_cpu))
-+		return MAX_JIFFY_OFFSET;
 +
-+	return max_t(long, 0, touch_timestamp + HZ - jiffies);
++/*
++ * Return whether the given PCI device DMA address mask can be supported
++ * properly.  For example, if your device can only drive the low 24-bits
++ * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
++ * this function.
++ */
++int
++swiotlb_dma_supported(struct device *hwdev, u64 mask)
++{
++	return (mask >= ((1UL << dma_bits) - 1));
 +}
 +
- /*
-  * This callback runs from the timer interrupt, and checks
-  * whether the watchdog thread has hung or not:
-diff -Nurp pristine-linux-2.6.18.2/kernel/timer.c tmp-linux-2.6-xen.patch/kernel/timer.c
---- pristine-linux-2.6.18.2/kernel/timer.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/kernel/timer.c	2007-10-14 01:51:15.000000000 +0200
-@@ -485,7 +485,9 @@ unsigned long next_timer_interrupt(void)
- 		if (hr_expires < 3)
- 			return hr_expires + jiffies;
- 	}
--	hr_expires += jiffies;
-+	hr_expires = min_t(unsigned long,
-+			   softlockup_get_next_event(),
-+			   hr_expires) + jiffies;
- 
- 	base = __get_cpu_var(tvec_bases);
- 	spin_lock(&base->lock);
-diff -Nurp pristine-linux-2.6.18.2/lib/Makefile tmp-linux-2.6-xen.patch/lib/Makefile
---- pristine-linux-2.6.18.2/lib/Makefile	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/lib/Makefile	2007-07-30 16:35:13.000000000 +0200
-@@ -52,6 +52,7 @@ obj-$(CONFIG_SMP) += percpu_counter.o
- obj-$(CONFIG_AUDIT_GENERIC) += audit.o
- 
- obj-$(CONFIG_SWIOTLB) += swiotlb.o
-+swiotlb-$(CONFIG_XEN) := ../arch/i386/kernel/swiotlb.o
++EXPORT_SYMBOL(swiotlb_init);
++EXPORT_SYMBOL(swiotlb_map_single);
++EXPORT_SYMBOL(swiotlb_unmap_single);
++EXPORT_SYMBOL(swiotlb_map_sg);
++EXPORT_SYMBOL(swiotlb_unmap_sg);
++EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
++EXPORT_SYMBOL(swiotlb_sync_single_for_device);
++EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
++EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
++EXPORT_SYMBOL(swiotlb_dma_mapping_error);
++EXPORT_SYMBOL(swiotlb_dma_supported);
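
Putting the exported surface together, a hedged sketch of the streaming-DMA
pattern a driver follows against this interface; dev, buf and len are
placeholders, and DMA_FROM_DEVICE is chosen for illustration:

static int demo_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t bus = swiotlb_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (swiotlb_dma_mapping_error(bus))
		return -EIO;	/* we were handed the overflow sentinel */
	/* ... device DMAs into the (possibly bounced) buffer ... */
	swiotlb_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
	/* ... CPU inspects buf; bounce contents were copied back ... */
	swiotlb_unmap_single(dev, bus, len, DMA_FROM_DEVICE);
	return 0;
}
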
+diff -r d894e36cfc30 -r 0aa021803deb mm/highmem.c
+--- a/mm/highmem.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/mm/highmem.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -141,6 +141,17 @@
  
- hostprogs-y	:= gen_crc32table
- clean-files	:= crc32table.h
-diff -Nurp pristine-linux-2.6.18.2/mm/highmem.c tmp-linux-2.6-xen.patch/mm/highmem.c
---- pristine-linux-2.6.18.2/mm/highmem.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/mm/highmem.c	2007-07-30 16:35:13.000000000 +0200
-@@ -142,6 +142,17 @@ start:
  	return vaddr;
  }
- 
++
 +#ifdef CONFIG_XEN
 +void kmap_flush_unused(void)
 +{
@@ -106330,32 +146163,39 @@
 +
 +EXPORT_SYMBOL(kmap_flush_unused);
 +#endif
-+
+ 
  void fastcall *kmap_high(struct page *page)
  {
- 	unsigned long vaddr;
-diff -Nurp pristine-linux-2.6.18.2/mm/Kconfig tmp-linux-2.6-xen.patch/mm/Kconfig
---- pristine-linux-2.6.18.2/mm/Kconfig	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/mm/Kconfig	2007-07-30 16:35:13.000000000 +0200
-@@ -127,11 +127,14 @@ comment "Memory hotplug is currently inc
- # Default to 4 for wider testing, though 8 might be more appropriate.
- # ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
- # PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
-+# XEN on x86 architecture uses the mapping field on pagetable pages to store a
-+# pointer to the destructor. This conflicts with pte_lock_deinit().
- #
- config SPLIT_PTLOCK_CPUS
- 	int
- 	default "4096" if ARM && !CPU_CACHE_VIPT
- 	default "4096" if PARISC && !PA20
-+	default "4096" if X86_XEN || X86_64_XEN
- 	default "4"
+@@ -457,6 +468,12 @@
+ 	mempool_t *pool;
  
- #
-diff -Nurp pristine-linux-2.6.18.2/mm/memory.c tmp-linux-2.6-xen.patch/mm/memory.c
---- pristine-linux-2.6.18.2/mm/memory.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/mm/memory.c	2007-07-30 16:35:13.000000000 +0200
-@@ -403,7 +403,8 @@ struct page *vm_normal_page(struct vm_ar
+ 	/*
++	 * Data-less bio, nothing to bounce
++	 */
++	if (bio_empty_barrier(*bio_orig))
++		return;
++
++	/*
+ 	 * for non-isa bounce case, just check if the bounce pfn is equal
+ 	 * to or bigger than the highest pfn in the system -- in that case,
+ 	 * don't waste time iterating over bio segments
+diff -r d894e36cfc30 -r 0aa021803deb mm/memory.c
+--- a/mm/memory.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/mm/memory.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -396,6 +396,12 @@
+ 			return NULL;
+ 	}
+ 
++#if defined(CONFIG_XEN) && defined(CONFIG_X86)
++	/* XEN: Covers user-space grant mappings (even of local pages). */
++	if (unlikely(vma->vm_flags & VM_FOREIGN))
++		return NULL;
++#endif
++
+ 	/*
+ 	 * Add some anal sanity checks for now. Eventually,
+ 	 * we should just do "return pfn_to_page(pfn)", but
+@@ -403,7 +409,8 @@
  	 * and that the resulting page looks ok.
  	 */
  	if (unlikely(!pfn_valid(pfn))) {
@@ -106365,7 +146205,7 @@
  		return NULL;
  	}
  
-@@ -658,8 +659,12 @@ static unsigned long zap_pte_range(struc
+@@ -658,8 +665,12 @@
  				     page->index > details->last_index))
  					continue;
  			}
@@ -106380,7 +146220,7 @@
  			tlb_remove_tlb_entry(tlb, pte, addr);
  			if (unlikely(!page))
  				continue;
-@@ -754,6 +759,7 @@ static unsigned long unmap_page_range(st
+@@ -754,6 +765,7 @@
  		details = NULL;
  
  	BUG_ON(addr >= end);
@@ -106388,7 +146228,7 @@
  	tlb_start_vma(tlb, vma);
  	pgd = pgd_offset(vma->vm_mm, addr);
  	do {
-@@ -891,6 +897,7 @@ unsigned long zap_page_range(struct vm_a
+@@ -891,6 +903,7 @@
  		tlb_finish_mmu(tlb, address, end);
  	return end;
  }
@@ -106396,7 +146236,7 @@
  
  /*
   * Do a quick page-table lookup for a single page.
-@@ -1030,6 +1037,26 @@ int get_user_pages(struct task_struct *t
+@@ -1030,6 +1043,26 @@
  			continue;
  		}
  
@@ -106423,10 +146263,11 @@
  		if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
  				|| !(vm_flags & vma->vm_flags))
  			return i ? : -EFAULT;
-@@ -1369,6 +1396,102 @@ int remap_pfn_range(struct vm_area_struc
+@@ -1378,6 +1411,102 @@
+ 	return err;
  }
  EXPORT_SYMBOL(remap_pfn_range);
- 
++
 +#ifdef CONFIG_XEN
 +static inline int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 +				     unsigned long addr, unsigned long end,
@@ -106522,30 +146363,13 @@
 +}
 +EXPORT_SYMBOL_GPL(apply_to_page_range);
 +#endif
-+
+ 
  /*
   * handle_pte_fault chooses page fault handler according to an entry
-  * which was read non-atomically.  Before making any commitment, on
-@@ -1551,14 +1674,7 @@ gotten:
- 		entry = mk_pte(new_page, vma->vm_page_prot);
- 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- 		lazy_mmu_prot_update(entry);
--		/*
--		 * Clear the pte entry and flush it first, before updating the
--		 * pte with the new entry. This will avoid a race condition
--		 * seen in the presence of one thread doing SMC and another
--		 * thread doing COW.
--		 */
--		ptep_clear_flush(vma, address, page_table);
--		set_pte_at(mm, address, page_table, entry);
-+		ptep_establish(vma, address, page_table, entry);
- 		update_mmu_cache(vma, address, entry);
- 		lru_cache_add_active(new_page);
- 		page_add_new_anon_rmap(new_page, vma, address);
-diff -Nurp pristine-linux-2.6.18.2/mm/mmap.c tmp-linux-2.6-xen.patch/mm/mmap.c
---- pristine-linux-2.6.18.2/mm/mmap.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/mm/mmap.c	2007-07-30 16:35:13.000000000 +0200
-@@ -1963,6 +1963,10 @@ void exit_mmap(struct mm_struct *mm)
+diff -r d894e36cfc30 -r 0aa021803deb mm/mmap.c
+--- a/mm/mmap.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/mm/mmap.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -1970,6 +1970,10 @@
  	unsigned long nr_accounted = 0;
  	unsigned long end;
  
@@ -106556,10 +146380,22 @@
  	lru_add_drain();
  	flush_cache_mm(mm);
  	tlb = tlb_gather_mmu(mm, 1);
-diff -Nurp pristine-linux-2.6.18.2/mm/page_alloc.c tmp-linux-2.6-xen.patch/mm/page_alloc.c
---- pristine-linux-2.6.18.2/mm/page_alloc.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/mm/page_alloc.c	2007-07-30 21:01:00.000000000 +0200
-@@ -154,7 +154,11 @@ static void bad_page(struct page *page)
+diff -r d894e36cfc30 -r 0aa021803deb mm/mprotect.c
+--- a/mm/mprotect.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/mm/mprotect.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -75,6 +75,8 @@
+ 	do {
+ 		next = pmd_addr_end(addr, end);
+ 		if (pmd_none_or_clear_bad(pmd))
++			continue;
++		if (arch_change_pte_range(mm, pmd, addr, next, newprot))
+ 			continue;
+ 		change_pte_range(mm, pmd, addr, next, newprot);
+ 	} while (pmd++, addr = next, addr != end);
+diff -r d894e36cfc30 -r 0aa021803deb mm/page_alloc.c
+--- a/mm/page_alloc.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/mm/page_alloc.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -154,7 +154,11 @@
  			1 << PG_slab    |
  			1 << PG_swapcache |
  			1 << PG_writeback |
@@ -106572,7 +146408,7 @@
  	set_page_count(page, 0);
  	reset_page_mapcount(page);
  	page->mapping = NULL;
-@@ -389,7 +393,11 @@ static inline int free_pages_check(struc
+@@ -389,7 +393,11 @@
  			1 << PG_swapcache |
  			1 << PG_writeback |
  			1 << PG_reserved |
@@ -106585,17 +146421,20 @@
  		bad_page(page);
  	if (PageDirty(page))
  		__ClearPageDirty(page);
-@@ -443,7 +451,8 @@ static void __free_pages_ok(struct page 
+@@ -443,6 +451,12 @@
  	int i;
  	int reserved = 0;
  
--	arch_free_page(page, order);
-+	if (arch_free_page(page, order))
++#ifdef CONFIG_XEN
++	if (PageForeign(page)) {
++		PageForeignDestructor(page);
 +		return;
++	}
++#endif
+ 	arch_free_page(page, order);
  	if (!PageHighMem(page))
  		debug_check_no_locks_freed(page_address(page),
- 					   PAGE_SIZE<<order);
-@@ -538,7 +547,11 @@ static int prep_new_page(struct page *pa
+@@ -538,7 +552,11 @@
  			1 << PG_swapcache |
  			1 << PG_writeback |
  			1 << PG_reserved |
@@ -106608,63 +146447,39 @@
  		bad_page(page);
  
  	/*
-@@ -717,7 +730,8 @@ static void fastcall free_hot_cold_page(
+@@ -717,6 +735,12 @@
  	struct per_cpu_pages *pcp;
  	unsigned long flags;
  
--	arch_free_page(page, 0);
-+	if (arch_free_page(page, 0))
++#ifdef CONFIG_XEN
++	if (PageForeign(page)) {
++		PageForeignDestructor(page);
 +		return;
++	}
++#endif
+ 	arch_free_page(page, 0);
  
  	if (PageAnon(page))
- 		page->mapping = NULL;
-@@ -1847,10 +1861,8 @@ static inline void free_zone_pagesets(in
- 	for_each_zone(zone) {
- 		struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
- 
--		/* Free per_cpu_pageset if it is slab allocated */
--		if (pset != &boot_pageset[cpu])
--			kfree(pset);
- 		zone_pcp(zone, cpu) = NULL;
-+		kfree(pset);
- 	}
- }
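
On the page_alloc.c hunks: the old patch had arch_free_page() return
nonzero to swallow pages owned by another domain; the new one keeps
arch_free_page()'s stock semantics and instead tests PageForeign()
explicitly, dispatching to a per-page destructor.  Roughly how the Xen
tree defines that machinery elsewhere in the patch (quoted from
memory, illustrative only):

    /* Illustrative, from the Xen page-flags additions (not this
     * hunk): a foreign page records its owner's release routine in
     * the otherwise unused page->index field. */
    #define PageForeign(page)   test_bit(PG_foreign, &(page)->flags)
    #define SetPageForeign(page, dtor) do {                 \
            set_bit(PG_foreign, &(page)->flags);            \
            (page)->index = (long)(dtor);                   \
    } while (0)
    #define PageForeignDestructor(page)                     \
            ( (void (*)(struct page *)) (page)->index )(page)
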
+diff -r d894e36cfc30 -r 0aa021803deb net/bridge/br_netfilter.c
+--- a/net/bridge/br_netfilter.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/net/bridge/br_netfilter.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -127,10 +127,10 @@
  
-@@ -2012,7 +2024,6 @@ static void __meminit free_area_init_cor
- #ifdef CONFIG_NUMA
- 		zone->min_unmapped_ratio = (realsize*sysctl_min_unmapped_ratio)
- 						/ 100;
--		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
- #endif
- 		zone->name = zone_names[j];
- 		spin_lock_init(&zone->lock);
-@@ -2323,22 +2334,6 @@ int sysctl_min_unmapped_ratio_sysctl_han
- 				sysctl_min_unmapped_ratio) / 100;
- 	return 0;
- }
--
--int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
--	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
--{
--	struct zone *zone;
--	int rc;
--
--	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
--	if (rc)
--		return rc;
--
--	for_each_zone(zone)
--		zone->min_slab_pages = (zone->present_pages *
--				sysctl_min_slab_ratio) / 100;
--	return 0;
--}
- #endif
+ static inline void nf_bridge_save_header(struct sk_buff *skb)
+ {
+-        int header_size = 16;
++        int header_size = ETH_HLEN;
  
- /*
-diff -Nurp pristine-linux-2.6.18.2/net/core/dev.c tmp-linux-2.6-xen.patch/net/core/dev.c
---- pristine-linux-2.6.18.2/net/core/dev.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/net/core/dev.c	2007-07-30 16:35:13.000000000 +0200
-@@ -113,11 +113,18 @@
+ 	if (skb->protocol == htons(ETH_P_8021Q))
+-		header_size = 18;
++		header_size += VLAN_HLEN;
+ 
+ 	memcpy(skb->nf_bridge->data, skb->data - header_size, header_size);
+ }
+diff -r d894e36cfc30 -r 0aa021803deb net/core/dev.c
+--- a/net/core/dev.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/net/core/dev.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -113,10 +113,17 @@
  #include <linux/wireless.h>
  #include <net/iw_handler.h>
  #include <asm/current.h>
@@ -106673,17 +146488,16 @@
  #include <linux/dmaengine.h>
  #include <linux/err.h>
  #include <linux/ctype.h>
- 
++
 +#ifdef CONFIG_XEN
 +#include <net/ip.h>
 +#include <linux/tcp.h>
 +#include <linux/udp.h>
 +#endif
-+
+ 
  /*
   *	The list of packet types we will receive (as opposed to discard)
-  *	and the routines to invoke.
-@@ -1328,7 +1335,7 @@ static int dev_gso_segment(struct sk_buf
+@@ -1328,7 +1335,7 @@
  	/* Verifying header integrity only. */
  	if (!segs)
  		return 0;
@@ -106692,7 +146506,7 @@
  	if (unlikely(IS_ERR(segs)))
  		return PTR_ERR(segs);
  
-@@ -1391,6 +1398,43 @@ out_kfree_skb:
+@@ -1391,6 +1398,43 @@
  	}						\
  }
  
@@ -106736,46 +146550,26 @@
  /**
   *	dev_queue_xmit - transmit a buffer
   *	@skb: buffer to transmit
-@@ -1423,6 +1467,12 @@ int dev_queue_xmit(struct sk_buff *skb)
+@@ -1422,6 +1466,12 @@
+ 	struct net_device *dev = skb->dev;
  	struct Qdisc *q;
  	int rc = -ENOMEM;
- 
++
 + 	/* If a checksum-deferred packet is forwarded to a device that needs a
 + 	 * checksum, correct the pointers and force checksumming.
 + 	 */
 + 	if (skb_checksum_setup(skb))
 + 		goto out_kfree_skb;
-+
+ 
  	/* GSO will handle the following emulations directly. */
  	if (netif_needs_gso(dev, skb))
- 		goto gso;
-@@ -1478,16 +1528,14 @@ gso:
- 	if (q->enqueue) {
- 		/* Grab device queue */
- 		spin_lock(&dev->queue_lock);
--		q = dev->qdisc;
--		if (q->enqueue) {
--			rc = q->enqueue(skb, q);
--			qdisc_run(dev);
--			spin_unlock(&dev->queue_lock);
- 
--			rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
--			goto out;
--		}
-+		rc = q->enqueue(skb, q);
-+
-+		qdisc_run(dev);
+@@ -1795,6 +1845,19 @@
+ 	if (skb->tc_verd & TC_NCLS) {
+ 		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
+ 		goto ncls;
++	}
++#endif
 +
- 		spin_unlock(&dev->queue_lock);
-+		rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
-+		goto out;
- 	}
- 
- 	/* The device has no queue. Common case for software devices:
-@@ -1798,6 +1846,19 @@ int netif_receive_skb(struct sk_buff *sk
- 	}
- #endif
- 
 +#ifdef CONFIG_XEN
 +	switch (skb->ip_summed) {
 +	case CHECKSUM_UNNECESSARY:
@@ -106786,13 +146580,10 @@
 +	default:
 +		skb->proto_data_valid = 0;
 +		break;
-+	}
-+#endif
-+
- 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
- 		if (!ptype->dev || ptype->dev == skb->dev) {
- 			if (pt_prev) 
-@@ -3584,6 +3645,7 @@ EXPORT_SYMBOL(unregister_netdevice_notif
+ 	}
+ #endif
+ 
+@@ -3584,6 +3647,7 @@
  EXPORT_SYMBOL(net_enable_timestamp);
  EXPORT_SYMBOL(net_disable_timestamp);
  EXPORT_SYMBOL(dev_get_flags);
@@ -106800,10 +146591,21 @@
  
  #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
  EXPORT_SYMBOL(br_handle_frame_hook);
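
Context for the dev.c hunks: Xen's split network drivers hand packets
between domains with the checksum still pending (the patch adds a
proto_csum_blank flag to struct sk_buff).  skb_checksum_setup(), which
the patch makes available to modules, converts such a packet into an
ordinary CHECKSUM_HW one before it reaches a real NIC.  A condensed
sketch of what that entails, assuming IPv4 TCP/UDP only as in the
contemporaneous Xen tree (field names follow 2.6.18's sk_buff; treat
the details as illustrative, not the patch's literal implementation):

    #include <net/ip.h>         /* the same headers the hunk adds */
    #include <linux/tcp.h>
    #include <linux/udp.h>

    /* Sketch: locate the transport header and record the offset of
     * its checksum field so the NIC (or skb_checksum_help) can
     * finish the job. */
    static int skb_checksum_setup_sketch(struct sk_buff *skb)
    {
            if (!skb->proto_csum_blank)
                    return 0;
            if (skb->protocol != htons(ETH_P_IP))
                    return -EPROTO;
            skb->h.raw = skb->nh.raw + 4 * skb->nh.iph->ihl;
            if (skb->h.raw >= skb->tail)
                    return -EPROTO;
            switch (skb->nh.iph->protocol) {
            case IPPROTO_TCP:
                    skb->csum = offsetof(struct tcphdr, check);
                    break;
            case IPPROTO_UDP:
                    skb->csum = offsetof(struct udphdr, check);
                    break;
            default:
                    return -EPROTO;
            }
            skb->proto_csum_blank = 0;
            skb->ip_summed = CHECKSUM_HW;
            return 0;
    }
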
-diff -Nurp pristine-linux-2.6.18.2/net/core/skbuff.c tmp-linux-2.6-xen.patch/net/core/skbuff.c
---- pristine-linux-2.6.18.2/net/core/skbuff.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/net/core/skbuff.c	2007-07-30 16:35:13.000000000 +0200
-@@ -240,6 +240,7 @@ struct sk_buff *alloc_skb_from_cache(kme
+diff -r d894e36cfc30 -r 0aa021803deb net/core/neighbour.c
+--- a/net/core/neighbour.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/net/core/neighbour.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -2679,7 +2679,6 @@
+ EXPORT_SYMBOL(neigh_table_init);
+ EXPORT_SYMBOL(neigh_table_init_no_netlink);
+ EXPORT_SYMBOL(neigh_update);
+-EXPORT_SYMBOL(neigh_update_hhs);
+ EXPORT_SYMBOL(pneigh_enqueue);
+ EXPORT_SYMBOL(pneigh_lookup);
+ EXPORT_SYMBOL(neightbl_dump_info);
+diff -r d894e36cfc30 -r 0aa021803deb net/core/skbuff.c
+--- a/net/core/skbuff.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/net/core/skbuff.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -240,6 +240,7 @@
  	skb_shinfo(skb)->gso_size = 0;
  	skb_shinfo(skb)->gso_segs = 0;
  	skb_shinfo(skb)->gso_type = 0;
@@ -106811,7 +146613,7 @@
  	skb_shinfo(skb)->frag_list = NULL;
  out:
  	return skb;
-@@ -464,6 +465,10 @@ struct sk_buff *skb_clone(struct sk_buff
+@@ -464,6 +465,10 @@
  	C(local_df);
  	n->cloned = 1;
  	n->nohdr = 0;
@@ -106822,10 +146624,10 @@
  	C(pkt_type);
  	C(ip_summed);
  	C(priority);
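
The skbuff.c changes mirror two one-bit fields the patch adds to
struct sk_buff; skb_clone() must copy them, or a cloned packet would
silently lose its "checksum already verified" / "checksum still
pending" state.  Approximately as declared elsewhere in the patch
(placement and widths quoted from memory, illustrative only):

    struct sk_buff {
            /* ... 2.6.18 fields unchanged ... */
    #ifdef CONFIG_XEN
            __u8    proto_data_valid:1, /* rx: csum already verified */
                    proto_csum_blank:1; /* tx: csum not yet computed */
    #endif
            /* ... */
    };
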
-diff -Nurp pristine-linux-2.6.18.2/net/ipv4/netfilter/ip_nat_proto_tcp.c tmp-linux-2.6-xen.patch/net/ipv4/netfilter/ip_nat_proto_tcp.c
---- pristine-linux-2.6.18.2/net/ipv4/netfilter/ip_nat_proto_tcp.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/net/ipv4/netfilter/ip_nat_proto_tcp.c	2007-10-14 01:51:15.000000000 +0200
-@@ -129,7 +129,12 @@ tcp_manip_pkt(struct sk_buff **pskb,
+diff -r d894e36cfc30 -r 0aa021803deb net/ipv4/netfilter/ip_nat_proto_tcp.c
+--- a/net/ipv4/netfilter/ip_nat_proto_tcp.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/net/ipv4/netfilter/ip_nat_proto_tcp.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -129,7 +129,12 @@
  	if (hdrsize < sizeof(*hdr))
  		return 1;
  
@@ -106839,10 +146641,10 @@
  					ip_nat_cheat_check(oldport ^ 0xFFFF,
  							   newport,
  							   hdr->check));
-diff -Nurp pristine-linux-2.6.18.2/net/ipv4/netfilter/ip_nat_proto_udp.c tmp-linux-2.6-xen.patch/net/ipv4/netfilter/ip_nat_proto_udp.c
---- pristine-linux-2.6.18.2/net/ipv4/netfilter/ip_nat_proto_udp.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/net/ipv4/netfilter/ip_nat_proto_udp.c	2007-10-14 01:51:15.000000000 +0200
-@@ -113,11 +113,17 @@ udp_manip_pkt(struct sk_buff **pskb,
+diff -r d894e36cfc30 -r 0aa021803deb net/ipv4/netfilter/ip_nat_proto_udp.c
+--- a/net/ipv4/netfilter/ip_nat_proto_udp.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/net/ipv4/netfilter/ip_nat_proto_udp.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -113,11 +113,17 @@
  		newport = tuple->dst.u.udp.port;
  		portptr = &hdr->dest;
  	}
@@ -106862,10 +146664,10 @@
  	*portptr = newport;
  	return 1;
  }
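
Both NAT hunks guard the same corner case: on a checksum-deferred Xen
packet, hdr->check does not hold a valid full checksum, so the
incremental fixup visible in the context lines must be skipped or
applied differently.  That fixup is the standard RFC 1624 incremental
update (ip_nat_cheat_check() in this kernel); a standalone sketch of
the arithmetic, with a hypothetical helper name not taken from the
patch:

    #include <stdint.h>

    /* RFC 1624: HC' = ~(~HC + ~m + m').  Replace one 16-bit word
     * (e.g. a port number) in a ones'-complement checksum.
     * Hypothetical helper, for illustration only. */
    static uint16_t csum_replace16(uint16_t check, uint16_t old_val,
                                   uint16_t new_val)
    {
            uint32_t sum = (uint16_t)~check;

            sum += (uint16_t)~old_val;
            sum += new_val;
            sum = (sum & 0xffff) + (sum >> 16); /* fold carry */
            sum = (sum & 0xffff) + (sum >> 16); /* fold again */
            return (uint16_t)~sum;
    }
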
-diff -Nurp pristine-linux-2.6.18.2/net/ipv4/tcp_input.c tmp-linux-2.6-xen.patch/net/ipv4/tcp_input.c
---- pristine-linux-2.6.18.2/net/ipv4/tcp_input.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/net/ipv4/tcp_input.c	2007-10-14 01:51:15.000000000 +0200
-@@ -127,7 +127,7 @@ static void tcp_measure_rcv_mss(struct s
+diff -r d894e36cfc30 -r 0aa021803deb net/ipv4/tcp_input.c
+--- a/net/ipv4/tcp_input.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/net/ipv4/tcp_input.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -127,7 +127,7 @@
  	/* skb->len may jitter because of SACKs, even if peer
  	 * sends good full-sized frames.
  	 */
@@ -106874,19 +146676,19 @@
  	if (len >= icsk->icsk_ack.rcv_mss) {
  		icsk->icsk_ack.rcv_mss = len;
  	} else {
-diff -Nurp pristine-linux-2.6.18.2/net/ipv4/xfrm4_output.c tmp-linux-2.6-xen.patch/net/ipv4/xfrm4_output.c
---- pristine-linux-2.6.18.2/net/ipv4/xfrm4_output.c	2006-11-04 02:33:58.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/net/ipv4/xfrm4_output.c	2007-10-14 01:51:15.000000000 +0200
-@@ -18,6 +18,8 @@
+diff -r d894e36cfc30 -r 0aa021803deb net/ipv4/xfrm4_output.c
+--- a/net/ipv4/xfrm4_output.c	Tue Sep 09 11:37:38 2008 +0200
++++ b/net/ipv4/xfrm4_output.c	Wed Sep 10 10:54:08 2008 +0100
+@@ -17,6 +17,8 @@
+ #include <net/ip.h>
  #include <net/xfrm.h>
  #include <net/icmp.h>
- 
-+extern int skb_checksum_setup(struct sk_buff *skb);
 +
++extern int skb_checksum_setup(struct sk_buff *skb);
+ 
  static int xfrm4_tunnel_check_size(struct sk_buff *skb)
  {
- 	int mtu, ret = 0;
-@@ -48,6 +50,10 @@ static int xfrm4_output_one(struct sk_bu
+@@ -48,6 +50,10 @@
  	struct xfrm_state *x = dst->xfrm;
  	int err;
  	
@@ -106897,21 +146699,79 @@
  	if (skb->ip_summed == CHECKSUM_HW) {
  		err = skb_checksum_help(skb, 0);
  		if (err)
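
The xfrm4_output.c hook is about ordering, not cosmetics: after ESP/AH
encapsulation the transport header becomes opaque (or encrypted), so a
checksum that was deferred could never be filled in afterwards.
Resolving the deferred state here mirrors the dev_queue_xmit() hook
above.  Note the extern prototype is declared locally; the patch does
not add skb_checksum_setup() to a shared header.
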
-diff -Nurp pristine-linux-2.6.18.2/scripts/Makefile.xen tmp-linux-2.6-xen.patch/scripts/Makefile.xen
---- pristine-linux-2.6.18.2/scripts/Makefile.xen	1970-01-01 01:00:00.000000000 +0100
-+++ tmp-linux-2.6-xen.patch/scripts/Makefile.xen	2007-07-30 16:35:13.000000000 +0200
-@@ -0,0 +1,14 @@
+diff -r d894e36cfc30 -r 0aa021803deb scripts/Makefile.build
+--- a/scripts/Makefile.build	Tue Sep 09 11:37:38 2008 +0200
++++ b/scripts/Makefile.build	Wed Sep 10 10:54:08 2008 +0100
+@@ -66,6 +66,18 @@
+ 
+ ifndef obj
+ $(warning kbuild: Makefile.build is included improperly)
++endif
++
++ifeq ($(CONFIG_XEN),y)
++$(objtree)/scripts/Makefile.xen: $(srctree)/scripts/Makefile.xen.awk $(srctree)/scripts/Makefile.build
++	@echo '  Updating $@'
++	@$(AWK) -f $< $(filter-out $<,$^) >$@
++
++xen-src-single-used-m	:= $(patsubst $(srctree)/%,%,$(wildcard $(addprefix $(srctree)/,$(single-used-m:.o=-xen.c))))
++xen-single-used-m	:= $(xen-src-single-used-m:-xen.c=.o)
++single-used-m		:= $(filter-out $(xen-single-used-m),$(single-used-m))
++
++-include $(objtree)/scripts/Makefile.xen
+ endif
+ 
+ # ===========================================================================
+diff -r d894e36cfc30 -r 0aa021803deb scripts/Makefile.lib
+--- a/scripts/Makefile.lib	Tue Sep 09 11:37:38 2008 +0200
++++ b/scripts/Makefile.lib	Wed Sep 10 10:54:08 2008 +0100
+@@ -12,6 +12,12 @@
+ # Filter out objects already built-in
+ 
+ lib-y := $(filter-out $(obj-y), $(sort $(lib-y) $(lib-m)))
++
++# Remove objects forcibly disabled
++
++obj-y := $(filter-out $(disabled-obj-y),$(obj-y))
++obj-m := $(filter-out $(disabled-obj-y),$(obj-m))
++lib-y := $(filter-out $(disabled-obj-y),$(lib-y))
+ 
+ 
+ # Handle objects in subdirs
+diff -r d894e36cfc30 -r 0aa021803deb scripts/Makefile.xen.awk
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/scripts/Makefile.xen.awk	Wed Sep 10 10:54:08 2008 +0100
+@@ -0,0 +1,34 @@
++BEGIN {
++	is_rule = 0
++}
++
++/^[[:space:]]*#/ {
++	next
++}
++
++/^[[:space:]]*$/ {
++	if (is_rule)
++		print("")
++	is_rule = 0
++	next
++}
++
++/:[[:space:]]*%\.[cS][[:space:]]/ {
++	line = gensub(/%.([cS])/, "%-xen.\\1", "g", $0)
++	line = gensub(/(single-used-m)/, "xen-\\1", "g", line)
++	print line
++	is_rule = 1
++	next
++}
++
++/^[^\t]$/ {
++	if (is_rule)
++		print("")
++	is_rule = 0
++	next
++}
 +
-+# cherrypickxen($1 = allobj)
-+cherrypickxen = $(foreach var, $(1), \
-+		$(shell o=$(var); \
-+			c=$${o%.o}-xen.c; \
-+			s=$${o%.o}-xen.S; \
-+			oxen=$${o%.o}-xen.o; \
-+			[ -f $(srctree)/$(src)/$${c} ] || \
-+			   [ -f $(srctree)/$(src)/$${s} ] \
-+				&& echo $$oxen \
-+				|| echo $(var) ) \
-+	  )
-+# filterxen($1 = allobj, $2 = noobjs)
-+filterxen = $(filter-out $(2), $(1))
++is_rule {
++	print $0
++	next
++}

Modified: people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/patches-update/series/1-extra
==============================================================================
--- people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/patches-update/series/1-extra	(original)
+++ people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/patches-update/series/1-extra	Sat Oct 18 14:44:52 2008
@@ -4,7 +4,7 @@
 + ../patches-base/features/all/vserver/vs2.0.2.2-rc9.patch featureset=xen-vserver
 + ../patches-base/features/all/vserver/bindmount-dev.patch featureset=xen-vserver
 + features/all/xen/vserver-clash.patch featureset=xen-vserver
-+ features/all/xen/xen-3.1-15467.patch
++ features/all/xen/xen-748f324a4b2d62d89fe40c4aa52861977e1a2cae.patch
 + features/all/xen/remove-4gb-warning.patch arch=i386
 + features/all/xen/console-hvc-overtake.patch
 + features/all/xen/vserver-update.patch featureset=xen-vserver

Modified: people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/rules.real
==============================================================================
--- people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/rules.real	(original)
+++ people/waldi/xen-extra/linux-2.6.18-xen-3.3/debian/rules.real	Sat Oct 18 14:44:52 2008
@@ -363,7 +363,7 @@
 	dh_clean -d -k
 	dh_installdirs -p$(PACKAGE_NAME) boot 'var/lib/$(PACKAGE_NAME)'
 	dh_installdirs -p$(MODULES_PACKAGE_NAME) boot
-	cp '$(DIR)'/vmlinuz $(INSTALL_DIR)/vmlinuz-$(REAL_VERSION)
+	cp '$(DIR)'/arch/$(KERNEL_ARCH)/boot/vmlinuz $(INSTALL_DIR)/vmlinuz-$(REAL_VERSION)
 	$(MAKE_CLEAN) -C $(DIR) modules_install INSTALL_MOD_PATH='$(CURDIR)'/$(MODULES_PACKAGE_DIR)
 	cp $(DIR)/.config $(MODULES_PACKAGE_DIR)/boot/config-$(REAL_VERSION)
 	cp $(DIR)/System.map $(MODULES_PACKAGE_DIR)/boot/System.map-$(REAL_VERSION)


